1 /**
2 * f2fs_format.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * Dual licensed under the GPL or LGPL version 2 licenses.
8 */
9 #define _LARGEFILE64_SOURCE
10
11 #include <stdio.h>
12 #include <stdlib.h>
13 #include <fcntl.h>
14 #include <string.h>
15 #include <unistd.h>
16 #ifndef ANDROID_WINDOWS_HOST
17 #include <sys/stat.h>
18 #include <sys/mount.h>
19 #endif
20 #include <time.h>
21 #include <uuid/uuid.h>
22
23 #include "f2fs_fs.h"
24 #include "quota.h"
25 #include "f2fs_format_utils.h"
26
27 extern struct f2fs_configuration c;
28 struct f2fs_super_block raw_sb;
29 struct f2fs_super_block *sb = &raw_sb;
30 struct f2fs_checkpoint *cp;
31
32 /* Return first segment number of each area */
33 #define prev_zone(cur) (c.cur_seg[cur] - c.segs_per_zone)
34 #define next_zone(cur) (c.cur_seg[cur] + c.segs_per_zone)
35 #define last_zone(cur) ((cur - 1) * c.segs_per_zone)
36 #define last_section(cur) (cur + (c.secs_per_zone - 1) * c.segs_per_sec)
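/*
 * Illustrative example: with the default geometry of one segment per
 * section and one section per zone (so c.segs_per_zone == 1),
 * next_zone(cur) is simply c.cur_seg[cur] + 1 and prev_zone(cur) is
 * c.cur_seg[cur] - 1.
 */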
37
38 static unsigned int quotatype_bits = 0;
39
40 const char *media_ext_lists[] = {
41 "jpg",
42 "gif",
43 "png",
44 "avi",
45 "divx",
46 "m4a",
47 "m4v",
48 "m4p",
49 "mp4",
50 "mp3",
51 "3gp",
52 "wmv",
53 "wma",
54 "mpeg",
55 "mkv",
56 "mov",
57 "asx",
58 "asf",
59 "wmx",
60 "svi",
61 "wvx",
62 "wv",
63 "wm",
64 "mpg",
65 "mpe",
66 "rm",
67 "ogg",
68 "opus",
69 "flac",
70 "jpeg",
71 "video",
72 "apk", /* for android system */
73 "so", /* for android system */
74 "exe",
75 NULL
76 };
77
78 const char *hot_ext_lists[] = {
79 "db",
80 NULL
81 };
82
83 const char **default_ext_list[] = {
84 media_ext_lists,
85 hot_ext_lists
86 };
87
88 static bool is_extension_exist(const char *name)
89 {
90 int i;
91
92 for (i = 0; i < F2FS_MAX_EXTENSION; i++) {
93 char *ext = (char *)sb->extension_list[i];
94 if (!strcmp(ext, name))
95 return 1;
96 }
97
98 return 0;
99 }
100
101 static void cure_extension_list(void)
102 {
103 const char **extlist;
104 char *ext_str;
105 char *ue;
106 int name_len;
107 int i, pos = 0;
108
109 set_sb(extension_count, 0);
110 memset(sb->extension_list, 0, sizeof(sb->extension_list));
111
112 for (i = 0; i < 2; i++) {
113 ext_str = c.extension_list[i];
114 extlist = default_ext_list[i];
115
116 while (*extlist) {
117 name_len = strlen(*extlist);
118 memcpy(sb->extension_list[pos++], *extlist, name_len);
119 extlist++;
120 }
121 if (i == 0)
122 set_sb(extension_count, pos);
123 else
124 sb->hot_ext_count = pos - get_sb(extension_count);
125
126 if (!ext_str)
127 continue;
128
129 /* add user ext list */
130 ue = strtok(ext_str, ", ");
131 while (ue != NULL) {
132 name_len = strlen(ue);
133 if (name_len >= 8) {
134 MSG(0, "\tWarn: Extension name (%s) is too long\n", ue);
135 goto next;
136 }
137 if (!is_extension_exist(ue))
138 memcpy(sb->extension_list[pos++], ue, name_len);
139 next:
140 ue = strtok(NULL, ", ");
141 if (pos >= F2FS_MAX_EXTENSION)
142 break;
143 }
144
145 if (i == 0)
146 set_sb(extension_count, pos);
147 else
148 sb->hot_ext_count = pos - get_sb(extension_count);
149
150 free(c.extension_list[i]);
151 }
152 }
153
154 static void verify_cur_segs(void)
155 {
156 int i, j;
157 int reorder = 0;
158
159 for (i = 0; i < NR_CURSEG_TYPE; i++) {
160 for (j = i + 1; j < NR_CURSEG_TYPE; j++) {
161 if (c.cur_seg[i] == c.cur_seg[j]) {
162 reorder = 1;
163 break;
164 }
165 }
166 }
167
168 if (!reorder)
169 return;
170
171 c.cur_seg[0] = 0;
172 for (i = 1; i < NR_CURSEG_TYPE; i++)
173 c.cur_seg[i] = next_zone(i - 1);
174 }
175
176 static int f2fs_prepare_super_block(void)
177 {
178 u_int32_t blk_size_bytes;
179 u_int32_t log_sectorsize, log_sectors_per_block;
180 u_int32_t log_blocksize, log_blks_per_seg;
181 u_int32_t segment_size_bytes, zone_size_bytes;
182 u_int32_t sit_segments, nat_segments;
183 u_int32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa;
184 u_int32_t total_valid_blks_available;
185 u_int64_t zone_align_start_offset, diff;
186 u_int64_t total_meta_zones, total_meta_segments;
187 u_int32_t sit_bitmap_size, max_sit_bitmap_size;
188 u_int32_t max_nat_bitmap_size, max_nat_segments;
189 u_int32_t total_zones;
190 enum quota_type qtype;
191 int i;
192
193 set_sb(magic, F2FS_SUPER_MAGIC);
194 set_sb(major_ver, F2FS_MAJOR_VERSION);
195 set_sb(minor_ver, F2FS_MINOR_VERSION);
196
197 log_sectorsize = log_base_2(c.sector_size);
198 log_sectors_per_block = log_base_2(c.sectors_per_blk);
199 log_blocksize = log_sectorsize + log_sectors_per_block;
200 log_blks_per_seg = log_base_2(c.blks_per_seg);
201
202 set_sb(log_sectorsize, log_sectorsize);
203 set_sb(log_sectors_per_block, log_sectors_per_block);
204
205 set_sb(log_blocksize, log_blocksize);
206 set_sb(log_blocks_per_seg, log_blks_per_seg);
207
208 set_sb(segs_per_sec, c.segs_per_sec);
209 set_sb(secs_per_zone, c.secs_per_zone);
210
211 blk_size_bytes = 1 << log_blocksize;
212 segment_size_bytes = blk_size_bytes * c.blks_per_seg;
213 zone_size_bytes =
214 blk_size_bytes * c.secs_per_zone *
215 c.segs_per_sec * c.blks_per_seg;
216
217 set_sb(checksum_offset, 0);
218
219 set_sb(block_count, c.total_sectors >> log_sectors_per_block);
220
221 zone_align_start_offset =
222 ((u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE +
223 2 * F2FS_BLKSIZE + zone_size_bytes - 1) /
224 zone_size_bytes * zone_size_bytes -
225 (u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE;
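/*
 * Note: the expression above rounds (partition start in bytes + the two
 * reserved superblock blocks) up to the next zone boundary and then
 * subtracts the partition start again, i.e. it yields the zone-aligned
 * offset, relative to the start sector, at which metadata may begin.
 */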
226
227 if (c.start_sector % DEFAULT_SECTORS_PER_BLOCK) {
228 MSG(1, "\t%s: Align start sector number to the page unit\n",
229 c.zoned_mode ? "FAIL" : "WARN");
230 MSG(1, "\ti.e., start sector: %d, ofs:%d (sects/page: %d)\n",
231 c.start_sector,
232 c.start_sector % DEFAULT_SECTORS_PER_BLOCK,
233 DEFAULT_SECTORS_PER_BLOCK);
234 if (c.zoned_mode)
235 return -1;
236 }
237
238 set_sb(segment0_blkaddr, zone_align_start_offset / blk_size_bytes);
239 sb->cp_blkaddr = sb->segment0_blkaddr;
240
241 MSG(0, "Info: zone aligned segment0 blkaddr: %u\n",
242 get_sb(segment0_blkaddr));
243
244 if (c.zoned_mode && (get_sb(segment0_blkaddr) + c.start_sector /
245 DEFAULT_SECTORS_PER_BLOCK) % c.zone_blocks) {
246 MSG(1, "\tError: Unaligned segment0 block address %u\n",
247 get_sb(segment0_blkaddr));
248 return -1;
249 }
250
251 for (i = 0; i < c.ndevs; i++) {
252 if (i == 0) {
253 c.devices[i].total_segments =
254 (c.devices[i].total_sectors *
255 c.sector_size - zone_align_start_offset) /
256 segment_size_bytes;
257 c.devices[i].start_blkaddr = 0;
258 c.devices[i].end_blkaddr = c.devices[i].total_segments *
259 c.blks_per_seg - 1 +
260 sb->segment0_blkaddr;
261 } else {
262 c.devices[i].total_segments =
263 c.devices[i].total_sectors /
264 (c.sectors_per_blk * c.blks_per_seg);
265 c.devices[i].start_blkaddr =
266 c.devices[i - 1].end_blkaddr + 1;
267 c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
268 c.devices[i].total_segments *
269 c.blks_per_seg - 1;
270 }
271 if (c.ndevs > 1) {
272 memcpy(sb->devs[i].path, c.devices[i].path, MAX_PATH_LEN);
273 sb->devs[i].total_segments =
274 cpu_to_le32(c.devices[i].total_segments);
275 }
276
277 c.total_segments += c.devices[i].total_segments;
278 }
279 set_sb(segment_count, (c.total_segments / c.segs_per_zone *
280 c.segs_per_zone));
281 set_sb(segment_count_ckpt, F2FS_NUMBER_OF_CHECKPOINT_PACK);
282
283 set_sb(sit_blkaddr, get_sb(segment0_blkaddr) +
284 get_sb(segment_count_ckpt) * c.blks_per_seg);
285
286 blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK);
287
288 sit_segments = SEG_ALIGN(blocks_for_sit);
289
290 set_sb(segment_count_sit, sit_segments * 2);
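/*
 * The SIT area keeps two copies (set 0 and set 1) so checkpoints can
 * alternate between them, hence the "* 2" above; each SIT block covers
 * SIT_ENTRY_PER_BLOCK main-area segments.
 */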
291
292 set_sb(nat_blkaddr, get_sb(sit_blkaddr) + get_sb(segment_count_sit) *
293 c.blks_per_seg);
294
295 total_valid_blks_available = (get_sb(segment_count) -
296 (get_sb(segment_count_ckpt) +
297 get_sb(segment_count_sit))) * c.blks_per_seg;
298
299 blocks_for_nat = SIZE_ALIGN(total_valid_blks_available,
300 NAT_ENTRY_PER_BLOCK);
301
302 if (c.large_nat_bitmap) {
303 nat_segments = SEG_ALIGN(blocks_for_nat) *
304 DEFAULT_NAT_ENTRY_RATIO / 100;
305 set_sb(segment_count_nat, nat_segments ? nat_segments : 1);
306 max_nat_bitmap_size = (get_sb(segment_count_nat) <<
307 log_blks_per_seg) / 8;
308 set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
309 } else {
310 set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat));
311 max_nat_bitmap_size = 0;
312 }
313
314 /*
315 * The number of node segments should not exceed a threshold.
316 * This number sizes the NAT bitmap area in a CP page,
317 * so the threshold is chosen such that the bitmap does not overflow one CP page.
318 */
319 sit_bitmap_size = ((get_sb(segment_count_sit) / 2) <<
320 log_blks_per_seg) / 8;
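/*
 * One bit per block of a single SIT copy: the checkpoint's SIT version
 * bitmap records which of the two SIT copies is valid for each block.
 */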
321
322 if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE)
323 max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE;
324 else
325 max_sit_bitmap_size = sit_bitmap_size;
326
327 if (c.large_nat_bitmap) {
328 /* use cp_payload if free space of f2fs_checkpoint is not enough */
329 if (max_sit_bitmap_size + max_nat_bitmap_size >
330 MAX_BITMAP_SIZE_IN_CKPT) {
331 u_int32_t diff = max_sit_bitmap_size +
332 max_nat_bitmap_size -
333 MAX_BITMAP_SIZE_IN_CKPT;
334 set_sb(cp_payload, F2FS_BLK_ALIGN(diff));
335 } else {
336 set_sb(cp_payload, 0);
337 }
338 } else {
339 /*
340 * At least one segment should be reserved for NAT.
341 * When the SIT bitmap is too large, the CP area must be expanded,
342 * which requires more pages for the CP pack.
343 */
344 if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) {
345 max_nat_bitmap_size = CP_CHKSUM_OFFSET -
346 sizeof(struct f2fs_checkpoint) + 1;
347 set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size));
348 } else {
349 max_nat_bitmap_size =
350 CP_CHKSUM_OFFSET - sizeof(struct f2fs_checkpoint) + 1
351 - max_sit_bitmap_size;
352 set_sb(cp_payload, 0);
353 }
354 max_nat_segments = (max_nat_bitmap_size * 8) >> log_blks_per_seg;
355
356 if (get_sb(segment_count_nat) > max_nat_segments)
357 set_sb(segment_count_nat, max_nat_segments);
358
359 set_sb(segment_count_nat, get_sb(segment_count_nat) * 2);
360 }
361
362 set_sb(ssa_blkaddr, get_sb(nat_blkaddr) + get_sb(segment_count_nat) *
363 c.blks_per_seg);
364
365 total_valid_blks_available = (get_sb(segment_count) -
366 (get_sb(segment_count_ckpt) +
367 get_sb(segment_count_sit) +
368 get_sb(segment_count_nat))) *
369 c.blks_per_seg;
370
371 blocks_for_ssa = total_valid_blks_available /
372 c.blks_per_seg + 1;
373
374 set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa));
375
376 total_meta_segments = get_sb(segment_count_ckpt) +
377 get_sb(segment_count_sit) +
378 get_sb(segment_count_nat) +
379 get_sb(segment_count_ssa);
380 diff = total_meta_segments % (c.segs_per_zone);
381 if (diff)
382 set_sb(segment_count_ssa, get_sb(segment_count_ssa) +
383 (c.segs_per_zone - diff));
384
385 total_meta_zones = ZONE_ALIGN(total_meta_segments *
386 c.blks_per_seg);
387
388 set_sb(main_blkaddr, get_sb(segment0_blkaddr) + total_meta_zones *
389 c.segs_per_zone * c.blks_per_seg);
390
391 if (c.zoned_mode) {
392 /*
393 * Make sure there is enough randomly writeable
394 * space at the beginning of the disk.
395 */
396 unsigned long main_blkzone = get_sb(main_blkaddr) / c.zone_blocks;
397
398 if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
399 c.devices[0].nr_rnd_zones < main_blkzone) {
400 MSG(0, "\tError: Device does not have enough random "
401 "write zones for F2FS volume (%lu needed)\n",
402 main_blkzone);
403 return -1;
404 }
405 }
406
407 total_zones = get_sb(segment_count) / (c.segs_per_zone) -
408 total_meta_zones;
409
410 set_sb(section_count, total_zones * c.secs_per_zone);
411
412 set_sb(segment_count_main, get_sb(section_count) * c.segs_per_sec);
413
414 /* Let's determine the best reserved and overprovisioned space */
415 if (c.overprovision == 0)
416 c.overprovision = get_best_overprovision(sb);
417
418 c.reserved_segments =
419 (2 * (100 / c.overprovision + 1) + NR_CURSEG_TYPE)
420 * c.segs_per_sec;
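/*
 * Illustrative example: with the common 5% overprovision ratio and one
 * segment per section this evaluates to (2 * (100 / 5 + 1) + 6) = 48
 * reserved segments for garbage collection.
 */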
421
422 if (c.overprovision == 0 || c.total_segments < F2FS_MIN_SEGMENTS ||
423 (c.devices[0].total_sectors *
424 c.sector_size < zone_align_start_offset) ||
425 (get_sb(segment_count_main) - NR_CURSEG_TYPE) <
426 c.reserved_segments) {
427 MSG(0, "\tError: Device size is not sufficient for F2FS volume\n");
428 return -1;
429 }
430
431 uuid_generate(sb->uuid);
432
433 /* precompute checksum seed for metadata */
434 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
435 c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
436
437 utf8_to_utf16(sb->volume_name, (const char *)c.vol_label,
438 MAX_VOLUME_NAME, strlen(c.vol_label));
439 set_sb(node_ino, 1);
440 set_sb(meta_ino, 2);
441 set_sb(root_ino, 3);
442 c.next_free_nid = 4;
443
444 if (c.feature & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
445 quotatype_bits = QUOTA_USR_BIT | QUOTA_GRP_BIT;
446 if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
447 quotatype_bits |= QUOTA_PRJ_BIT;
448 }
449
450 for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
451 if (!((1 << qtype) & quotatype_bits))
452 continue;
453 sb->qf_ino[qtype] = cpu_to_le32(c.next_free_nid++);
454 MSG(0, "Info: add quota type = %u => %u\n",
455 qtype, c.next_free_nid - 1);
456 }
457
458 if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND))
459 c.lpf_ino = c.next_free_nid++;
460
461 if (total_zones <= 6) {
462 MSG(1, "\tError: %d zones: Need more zones "
463 "by shrinking zone size\n", total_zones);
464 return -1;
465 }
466
467 if (c.heap) {
468 c.cur_seg[CURSEG_HOT_NODE] =
469 last_section(last_zone(total_zones));
470 c.cur_seg[CURSEG_WARM_NODE] = prev_zone(CURSEG_HOT_NODE);
471 c.cur_seg[CURSEG_COLD_NODE] = prev_zone(CURSEG_WARM_NODE);
472 c.cur_seg[CURSEG_HOT_DATA] = prev_zone(CURSEG_COLD_NODE);
473 c.cur_seg[CURSEG_COLD_DATA] = 0;
474 c.cur_seg[CURSEG_WARM_DATA] = next_zone(CURSEG_COLD_DATA);
475 } else {
476 c.cur_seg[CURSEG_HOT_NODE] = 0;
477 c.cur_seg[CURSEG_WARM_NODE] = next_zone(CURSEG_HOT_NODE);
478 c.cur_seg[CURSEG_COLD_NODE] = next_zone(CURSEG_WARM_NODE);
479 c.cur_seg[CURSEG_HOT_DATA] = next_zone(CURSEG_COLD_NODE);
480 c.cur_seg[CURSEG_COLD_DATA] =
481 max(last_zone((total_zones >> 2)),
482 next_zone(CURSEG_HOT_DATA));
483 c.cur_seg[CURSEG_WARM_DATA] =
484 max(last_zone((total_zones >> 1)),
485 next_zone(CURSEG_COLD_DATA));
486 }
487
488 /* if there is redundancy, reassign it */
489 verify_cur_segs();
490
491 cure_extension_list();
492
493 /* get kernel version */
494 if (c.kd >= 0) {
495 dev_read_version(c.version, 0, VERSION_LEN);
496 get_kernel_version(c.version);
497 MSG(0, "Info: format version with\n \"%s\"\n", c.version);
498 } else {
499 get_kernel_uname_version(c.version);
500 }
501
502 memcpy(sb->version, c.version, VERSION_LEN);
503 memcpy(sb->init_version, c.version, VERSION_LEN);
504
505 sb->feature = c.feature;
506
507 if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
508 set_sb(checksum_offset, SB_CHKSUM_OFFSET);
509 set_sb(crc, f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
510 SB_CHKSUM_OFFSET));
511 MSG(1, "Info: SB CRC is set: offset (%d), crc (0x%x)\n",
512 get_sb(checksum_offset), get_sb(crc));
513 }
514
515 return 0;
516 }
517
518 static int f2fs_init_sit_area(void)
519 {
520 u_int32_t blk_size, seg_size;
521 u_int32_t index = 0;
522 u_int64_t sit_seg_addr = 0;
523 u_int8_t *zero_buf = NULL;
524
525 blk_size = 1 << get_sb(log_blocksize);
526 seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;
527
528 zero_buf = calloc(sizeof(u_int8_t), seg_size);
529 if (zero_buf == NULL) {
530 MSG(1, "\tError: Calloc Failed for sit_zero_buf!!!\n");
531 return -1;
532 }
533
534 sit_seg_addr = get_sb(sit_blkaddr);
535 sit_seg_addr *= blk_size;
536
537 DBG(1, "\tFilling sit area at offset 0x%08"PRIx64"\n", sit_seg_addr);
538 for (index = 0; index < (get_sb(segment_count_sit) / 2); index++) {
539 if (dev_fill(zero_buf, sit_seg_addr, seg_size)) {
540 MSG(1, "\tError: While zeroing out the sit area "
541 "on disk!!!\n");
542 free(zero_buf);
543 return -1;
544 }
545 sit_seg_addr += seg_size;
546 }
547
548 free(zero_buf);
549 return 0;
550 }
551
552 static int f2fs_init_nat_area(void)
553 {
554 u_int32_t blk_size, seg_size;
555 u_int32_t index = 0;
556 u_int64_t nat_seg_addr = 0;
557 u_int8_t *nat_buf = NULL;
558
559 blk_size = 1 << get_sb(log_blocksize);
560 seg_size = (1 << get_sb(log_blocks_per_seg)) * blk_size;
561
562 nat_buf = calloc(sizeof(u_int8_t), seg_size);
563 if (nat_buf == NULL) {
564 MSG(1, "\tError: Calloc Failed for nat_zero_blk!!!\n");
565 return -1;
566 }
567
568 nat_seg_addr = get_sb(nat_blkaddr);
569 nat_seg_addr *= blk_size;
570
571 DBG(1, "\tFilling nat area at offset 0x%08"PRIx64"\n", nat_seg_addr);
572 for (index = 0; index < get_sb(segment_count_nat) / 2; index++) {
573 if (dev_fill(nat_buf, nat_seg_addr, seg_size)) {
574 MSG(1, "\tError: While zeroing out the nat area "
575 "on disk!!!\n");
576 free(nat_buf);
577 return -1;
578 }
579 nat_seg_addr = nat_seg_addr + (2 * seg_size);
580 }
581
582 free(nat_buf);
583 return 0;
584 }
585
586 static int f2fs_write_check_point_pack(void)
587 {
588 struct f2fs_summary_block *sum = NULL;
589 struct f2fs_journal *journal;
590 u_int32_t blk_size_bytes;
591 u_int32_t nat_bits_bytes, nat_bits_blocks;
592 unsigned char *nat_bits = NULL, *empty_nat_bits;
593 u_int64_t cp_seg_blk = 0;
594 u_int32_t crc = 0, flags;
595 unsigned int i;
596 char *cp_payload = NULL;
597 char *sum_compact, *sum_compact_p;
598 struct f2fs_summary *sum_entry;
599 enum quota_type qtype;
600 int off;
601 int ret = -1;
602
603 cp = calloc(F2FS_BLKSIZE, 1);
604 if (cp == NULL) {
605 MSG(1, "\tError: Calloc failed for f2fs_checkpoint!!!\n");
606 return ret;
607 }
608
609 sum = calloc(F2FS_BLKSIZE, 1);
610 if (sum == NULL) {
611 MSG(1, "\tError: Calloc failed for summary_node!!!\n");
612 goto free_cp;
613 }
614
615 sum_compact = calloc(F2FS_BLKSIZE, 1);
616 if (sum_compact == NULL) {
617 MSG(1, "\tError: Calloc failed for summary buffer!!!\n");
618 goto free_sum;
619 }
620 sum_compact_p = sum_compact;
621
622 nat_bits_bytes = get_sb(segment_count_nat) << 5;
623 nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
624 F2FS_BLKSIZE - 1);
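/*
 * nat_bits layout: an 8-byte checkpoint CRC followed by the "full" and
 * "empty" NAT bitmaps, each holding one bit per NAT block of a single
 * copy (hence segment_count_nat << 5 bytes, assuming the default 512
 * blocks per segment), rounded up to whole blocks.
 */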
625 nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
626 if (nat_bits == NULL) {
627 MSG(1, "\tError: Calloc failed for nat bits buffer!!!\n");
628 goto free_sum_compact;
629 }
630
631 cp_payload = calloc(F2FS_BLKSIZE, 1);
632 if (cp_payload == NULL) {
633 MSG(1, "\tError: Calloc failed for cp_payload!!!\n");
634 goto free_nat_bits;
635 }
636
637 /* 1. cp page 1 of checkpoint pack 1 */
638 srand(time(NULL));
639 cp->checkpoint_ver = cpu_to_le64(rand() | 0x1);
640 set_cp(cur_node_segno[0], c.cur_seg[CURSEG_HOT_NODE]);
641 set_cp(cur_node_segno[1], c.cur_seg[CURSEG_WARM_NODE]);
642 set_cp(cur_node_segno[2], c.cur_seg[CURSEG_COLD_NODE]);
643 set_cp(cur_data_segno[0], c.cur_seg[CURSEG_HOT_DATA]);
644 set_cp(cur_data_segno[1], c.cur_seg[CURSEG_WARM_DATA]);
645 set_cp(cur_data_segno[2], c.cur_seg[CURSEG_COLD_DATA]);
646 for (i = 3; i < MAX_ACTIVE_NODE_LOGS; i++) {
647 set_cp(cur_node_segno[i], 0xffffffff);
648 set_cp(cur_data_segno[i], 0xffffffff);
649 }
650
651 set_cp(cur_node_blkoff[0], 1 + c.quota_inum + c.lpf_inum);
652 set_cp(cur_data_blkoff[0], 1 + c.quota_dnum + c.lpf_dnum);
653 set_cp(valid_block_count, 2 + c.quota_inum + c.quota_dnum +
654 c.lpf_inum + c.lpf_dnum);
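/*
 * At format time the hot node segment starts with the root inode followed
 * by the quota and lost+found inodes, and the hot data segment starts with
 * the root dentry block followed by the quota data blocks and the
 * lost+found dentry block; the block offsets and counts above reflect that.
 */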
655 set_cp(rsvd_segment_count, c.reserved_segments);
656 set_cp(overprov_segment_count, (get_sb(segment_count_main) -
657 get_cp(rsvd_segment_count)) *
658 c.overprovision / 100);
659 set_cp(overprov_segment_count, get_cp(overprov_segment_count) +
660 get_cp(rsvd_segment_count));
661
662 MSG(0, "Info: Overprovision ratio = %.3lf%%\n", c.overprovision);
663 MSG(0, "Info: Overprovision segments = %u (GC reserved = %u)\n",
664 get_cp(overprov_segment_count),
665 c.reserved_segments);
666
667 /* free segments = main segments - 6 currently-open (node + data) segments */
668 set_cp(free_segment_count, get_sb(segment_count_main) - 6);
669 set_cp(user_block_count, ((get_cp(free_segment_count) + 6 -
670 get_cp(overprov_segment_count)) * c.blks_per_seg));
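/*
 * user_block_count is therefore the whole main area minus the
 * overprovision segments, i.e. the space that may hold user-visible data.
 */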
671 /* cp page (2), data summaries (1), node summaries (3) */
672 set_cp(cp_pack_total_block_count, 6 + get_sb(cp_payload));
673 flags = CP_UMOUNT_FLAG | CP_COMPACT_SUM_FLAG;
674 if (get_cp(cp_pack_total_block_count) <=
675 (1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
676 flags |= CP_NAT_BITS_FLAG;
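/*
 * The nat_bits blocks are written at the very end of the first checkpoint
 * segment (see below), so they are only enabled when the cp pack leaves
 * enough room at the tail of that segment.
 */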
677
678 if (c.trimmed)
679 flags |= CP_TRIMMED_FLAG;
680
681 if (c.large_nat_bitmap)
682 flags |= CP_LARGE_NAT_BITMAP_FLAG;
683
684 set_cp(ckpt_flags, flags);
685 set_cp(cp_pack_start_sum, 1 + get_sb(cp_payload));
686 set_cp(valid_node_count, 1 + c.quota_inum + c.lpf_inum);
687 set_cp(valid_inode_count, 1 + c.quota_inum + c.lpf_inum);
688 set_cp(next_free_nid, c.next_free_nid);
689 set_cp(sit_ver_bitmap_bytesize, ((get_sb(segment_count_sit) / 2) <<
690 get_sb(log_blocks_per_seg)) / 8);
691
692 set_cp(nat_ver_bitmap_bytesize, ((get_sb(segment_count_nat) / 2) <<
693 get_sb(log_blocks_per_seg)) / 8);
694
695 set_cp(checksum_offset, CP_CHKSUM_OFFSET);
696
697 crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CP_CHKSUM_OFFSET);
698 *((__le32 *)((unsigned char *)cp + CP_CHKSUM_OFFSET)) =
699 cpu_to_le32(crc);
700
701 blk_size_bytes = 1 << get_sb(log_blocksize);
702
703 if (blk_size_bytes != F2FS_BLKSIZE) {
704 MSG(1, "\tError: Wrong block size %d / %d!!!\n",
705 blk_size_bytes, F2FS_BLKSIZE);
706 goto free_cp_payload;
707 }
708
709 cp_seg_blk = get_sb(segment0_blkaddr);
710
711 DBG(1, "\tWriting main segments, cp at offset 0x%08"PRIx64"\n",
712 cp_seg_blk);
713 if (dev_write_block(cp, cp_seg_blk)) {
714 MSG(1, "\tError: While writing the cp to disk!!!\n");
715 goto free_cp_payload;
716 }
717
718 for (i = 0; i < get_sb(cp_payload); i++) {
719 cp_seg_blk++;
720 if (dev_fill_block(cp_payload, cp_seg_blk)) {
721 MSG(1, "\tError: While zeroing out the sit bitmap area "
722 "on disk!!!\n");
723 goto free_cp_payload;
724 }
725 }
726
727 /* Prepare and write Segment summary for HOT/WARM/COLD DATA
728 *
729 * The structure of compact summary
730 * +-------------------+
731 * | nat_journal |
732 * +-------------------+
733 * | sit_journal |
734 * +-------------------+
735 * | hot data summary |
736 * +-------------------+
737 * | warm data summary |
738 * +-------------------+
739 * | cold data summary |
740 * +-------------------+
741 */
742 memset(sum, 0, sizeof(struct f2fs_summary_block));
743 SET_SUM_TYPE((&sum->footer), SUM_TYPE_DATA);
744
745 journal = &sum->journal;
746 journal->n_nats = cpu_to_le16(1 + c.quota_inum + c.lpf_inum);
747 journal->nat_j.entries[0].nid = sb->root_ino;
748 journal->nat_j.entries[0].ne.version = 0;
749 journal->nat_j.entries[0].ne.ino = sb->root_ino;
750 journal->nat_j.entries[0].ne.block_addr = cpu_to_le32(
751 get_sb(main_blkaddr) +
752 get_cp(cur_node_segno[0]) * c.blks_per_seg);
753
754 for (qtype = 0, i = 1; qtype < F2FS_MAX_QUOTAS; qtype++) {
755 if (sb->qf_ino[qtype] == 0)
756 continue;
757 journal->nat_j.entries[i].nid = sb->qf_ino[qtype];
758 journal->nat_j.entries[i].ne.version = 0;
759 journal->nat_j.entries[i].ne.ino = sb->qf_ino[qtype];
760 journal->nat_j.entries[i].ne.block_addr = cpu_to_le32(
761 get_sb(main_blkaddr) +
762 get_cp(cur_node_segno[0]) *
763 c.blks_per_seg + i);
764 i++;
765 }
766
767 if (c.lpf_inum) {
768 journal->nat_j.entries[i].nid = cpu_to_le32(c.lpf_ino);
769 journal->nat_j.entries[i].ne.version = 0;
770 journal->nat_j.entries[i].ne.ino = cpu_to_le32(c.lpf_ino);
771 journal->nat_j.entries[i].ne.block_addr = cpu_to_le32(
772 get_sb(main_blkaddr) +
773 get_cp(cur_node_segno[0]) *
774 c.blks_per_seg + i);
775 }
776
777 memcpy(sum_compact_p, &journal->n_nats, SUM_JOURNAL_SIZE);
778 sum_compact_p += SUM_JOURNAL_SIZE;
779
780 memset(sum, 0, sizeof(struct f2fs_summary_block));
781 /* inode sit for root */
782 journal->n_sits = cpu_to_le16(6);
783 journal->sit_j.entries[0].segno = cp->cur_node_segno[0];
784 journal->sit_j.entries[0].se.vblocks =
785 cpu_to_le16((CURSEG_HOT_NODE << 10) |
786 (1 + c.quota_inum + c.lpf_inum));
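/*
 * se.vblocks packs the segment type into the upper bits (shifted left by
 * 10) and the number of valid blocks into the low 10 bits, which is why
 * the CURSEG_* constants are shifted by 10 above.
 */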
787 f2fs_set_bit(0, (char *)journal->sit_j.entries[0].se.valid_map);
788 for (i = 1; i <= c.quota_inum; i++)
789 f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map);
790 if (c.lpf_inum)
791 f2fs_set_bit(i, (char *)journal->sit_j.entries[0].se.valid_map);
792
793 journal->sit_j.entries[1].segno = cp->cur_node_segno[1];
794 journal->sit_j.entries[1].se.vblocks =
795 cpu_to_le16((CURSEG_WARM_NODE << 10));
796 journal->sit_j.entries[2].segno = cp->cur_node_segno[2];
797 journal->sit_j.entries[2].se.vblocks =
798 cpu_to_le16((CURSEG_COLD_NODE << 10));
799
800 /* data sit for root */
801 journal->sit_j.entries[3].segno = cp->cur_data_segno[0];
802 journal->sit_j.entries[3].se.vblocks =
803 cpu_to_le16((CURSEG_HOT_DATA << 10) |
804 (1 + c.quota_dnum + c.lpf_dnum));
805 f2fs_set_bit(0, (char *)journal->sit_j.entries[3].se.valid_map);
806 for (i = 1; i <= c.quota_dnum; i++)
807 f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
808 if (c.lpf_dnum)
809 f2fs_set_bit(i, (char *)journal->sit_j.entries[3].se.valid_map);
810
811 journal->sit_j.entries[4].segno = cp->cur_data_segno[1];
812 journal->sit_j.entries[4].se.vblocks =
813 cpu_to_le16((CURSEG_WARM_DATA << 10));
814 journal->sit_j.entries[5].segno = cp->cur_data_segno[2];
815 journal->sit_j.entries[5].se.vblocks =
816 cpu_to_le16((CURSEG_COLD_DATA << 10));
817
818 memcpy(sum_compact_p, &journal->n_sits, SUM_JOURNAL_SIZE);
819 sum_compact_p += SUM_JOURNAL_SIZE;
820
821 /* hot data summary */
822 sum_entry = (struct f2fs_summary *)sum_compact_p;
823 sum_entry->nid = sb->root_ino;
824 sum_entry->ofs_in_node = 0;
825
826 off = 1;
827 for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
828 if (sb->qf_ino[qtype] == 0)
829 continue;
830 int j;
831
832 for (j = 0; j < QUOTA_DATA(qtype); j++) {
833 (sum_entry + off + j)->nid = sb->qf_ino[qtype];
834 (sum_entry + off + j)->ofs_in_node = cpu_to_le16(j);
835 }
836 off += QUOTA_DATA(qtype);
837 }
838
839 if (c.lpf_dnum) {
840 (sum_entry + off)->nid = cpu_to_le32(c.lpf_ino);
841 (sum_entry + off)->ofs_in_node = 0;
842 }
843
844 /* warm data summary, nothing to do */
845 /* cold data summary, nothing to do */
846
847 cp_seg_blk++;
848 DBG(1, "\tWriting Segment summary for HOT/WARM/COLD_DATA, at offset 0x%08"PRIx64"\n",
849 cp_seg_blk);
850 if (dev_write_block(sum_compact, cp_seg_blk)) {
851 MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
852 goto free_cp_payload;
853 }
854
855 /* Prepare and write Segment summary for HOT_NODE */
856 memset(sum, 0, sizeof(struct f2fs_summary_block));
857 SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
858
859 sum->entries[0].nid = sb->root_ino;
860 sum->entries[0].ofs_in_node = 0;
861 for (qtype = i = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
862 if (sb->qf_ino[qtype] == 0)
863 continue;
864 sum->entries[1 + i].nid = sb->qf_ino[qtype];
865 sum->entries[1 + i].ofs_in_node = 0;
866 i++;
867 }
868 if (c.lpf_inum) {
869 i++;
870 sum->entries[i].nid = cpu_to_le32(c.lpf_ino);
871 sum->entries[i].ofs_in_node = 0;
872 }
873
874 cp_seg_blk++;
875 DBG(1, "\tWriting Segment summary for HOT_NODE, at offset 0x%08"PRIx64"\n",
876 cp_seg_blk);
877 if (dev_write_block(sum, cp_seg_blk)) {
878 MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
879 goto free_cp_payload;
880 }
881
882 /* Fill segment summary for WARM_NODE to zero. */
883 memset(sum, 0, sizeof(struct f2fs_summary_block));
884 SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
885
886 cp_seg_blk++;
887 DBG(1, "\tWriting Segment summary for WARM_NODE, at offset 0x%08"PRIx64"\n",
888 cp_seg_blk);
889 if (dev_write_block(sum, cp_seg_blk)) {
890 MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
891 goto free_cp_payload;
892 }
893
894 /* Fill segment summary for COLD_NODE to zero. */
895 memset(sum, 0, sizeof(struct f2fs_summary_block));
896 SET_SUM_TYPE((&sum->footer), SUM_TYPE_NODE);
897 cp_seg_blk++;
898 DBG(1, "\tWriting Segment summary for COLD_NODE, at offset 0x%08"PRIx64"\n",
899 cp_seg_blk);
900 if (dev_write_block(sum, cp_seg_blk)) {
901 MSG(1, "\tError: While writing the sum_blk to disk!!!\n");
902 goto free_cp_payload;
903 }
904
905 /* cp page2 */
906 cp_seg_blk++;
907 DBG(1, "\tWriting cp page2, at offset 0x%08"PRIx64"\n", cp_seg_blk);
908 if (dev_write_block(cp, cp_seg_blk)) {
909 MSG(1, "\tError: While writing the cp to disk!!!\n");
910 goto free_cp_payload;
911 }
912
913 /* write NAT bits, if possible */
914 if (flags & CP_NAT_BITS_FLAG) {
915 uint32_t i;
916
917 *(__le64 *)nat_bits = get_cp_crc(cp);
918 empty_nat_bits = nat_bits + 8 + nat_bits_bytes;
919 memset(empty_nat_bits, 0xff, nat_bits_bytes);
920 test_and_clear_bit_le(0, empty_nat_bits);
921
922 /* write the last blocks in cp pack */
923 cp_seg_blk = get_sb(segment0_blkaddr) + (1 <<
924 get_sb(log_blocks_per_seg)) - nat_bits_blocks;
925
926 DBG(1, "\tWriting NAT bits pages, at offset 0x%08"PRIx64"\n",
927 cp_seg_blk);
928
929 for (i = 0; i < nat_bits_blocks; i++) {
930 if (dev_write_block(nat_bits + i *
931 F2FS_BLKSIZE, cp_seg_blk + i)) {
932 MSG(1, "\tError: write NAT bits to disk!!!\n");
933 goto free_cp_payload;
934 }
935 }
936 }
937
938 /* cp page 1 of checkpoint pack 2:
939 * initialize the other checkpoint pack with version zero
940 */
941 cp->checkpoint_ver = 0;
942
943 crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, cp, CP_CHKSUM_OFFSET);
944 *((__le32 *)((unsigned char *)cp + CP_CHKSUM_OFFSET)) =
945 cpu_to_le32(crc);
946 cp_seg_blk = get_sb(segment0_blkaddr) + c.blks_per_seg;
947 DBG(1, "\tWriting cp page 1 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
948 cp_seg_blk);
949 if (dev_write_block(cp, cp_seg_blk)) {
950 MSG(1, "\tError: While writing the cp to disk!!!\n");
951 goto free_cp_payload;
952 }
953
954 for (i = 0; i < get_sb(cp_payload); i++) {
955 cp_seg_blk++;
956 if (dev_fill_block(cp_payload, cp_seg_blk)) {
957 MSG(1, "\tError: While zeroing out the sit bitmap area "
958 "on disk!!!\n");
959 goto free_cp_payload;
960 }
961 }
962
963 /* cp page 2 of check point pack 2 */
964 cp_seg_blk += (le32_to_cpu(cp->cp_pack_total_block_count) -
965 get_sb(cp_payload) - 1);
966 DBG(1, "\tWriting cp page 2 of checkpoint pack 2, at offset 0x%08"PRIx64"\n",
967 cp_seg_blk);
968 if (dev_write_block(cp, cp_seg_blk)) {
969 MSG(1, "\tError: While writing the cp to disk!!!\n");
970 goto free_cp_payload;
971 }
972
973 ret = 0;
974
975 free_cp_payload:
976 free(cp_payload);
977 free_nat_bits:
978 free(nat_bits);
979 free_sum_compact:
980 free(sum_compact);
981 free_sum:
982 free(sum);
983 free_cp:
984 free(cp);
985 return ret;
986 }
987
988 static int f2fs_write_super_block(void)
989 {
990 int index;
991 u_int8_t *zero_buff;
992
993 zero_buff = calloc(F2FS_BLKSIZE, 1);
994 if (zero_buff == NULL) {
995 MSG(1, "\tError: Calloc Failed for super_blk_zero_buf!!!\n");
996 return -1;
997 }
998
999 memcpy(zero_buff + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
1000 DBG(1, "\tWriting super block, at offset 0x%08x\n", 0);
1001 for (index = 0; index < 2; index++) {
1002 if (dev_write_block(zero_buff, index)) {
1003 MSG(1, "\tError: While writing super_blk "
1004 "on disk!!! index : %d\n", index);
1005 free(zero_buff);
1006 return -1;
1007 }
1008 }
1009
1010 free(zero_buff);
1011 return 0;
1012 }
1013
1014 #ifndef WITH_ANDROID
1015 static int f2fs_discard_obsolete_dnode(void)
1016 {
1017 struct f2fs_node *raw_node;
1018 u_int64_t next_blkaddr = 0, offset;
1019 u64 end_blkaddr = (get_sb(segment_count_main) <<
1020 get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
1021 u_int64_t start_inode_pos = get_sb(main_blkaddr);
1022 u_int64_t last_inode_pos;
1023
1024 if (c.zoned_mode)
1025 return 0;
1026
1027 raw_node = calloc(sizeof(struct f2fs_node), 1);
1028 if (raw_node == NULL) {
1029 MSG(1, "\tError: Calloc Failed for discard_raw_node!!!\n");
1030 return -1;
1031 }
1032
1033 /* avoid power-off-recovery based on roll-forward policy */
1034 offset = get_sb(main_blkaddr);
1035 offset += c.cur_seg[CURSEG_WARM_NODE] * c.blks_per_seg;
1036
1037 last_inode_pos = start_inode_pos +
1038 c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg + c.quota_inum + c.lpf_inum;
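/*
 * last_inode_pos is the address of the last node block actually written
 * into the hot node segment (root + quota + lost+found inodes), so the
 * wipe loop below can stop before touching live metadata.
 */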
1039
1040 do {
1041 if (offset < get_sb(main_blkaddr) || offset >= end_blkaddr)
1042 break;
1043
1044 if (dev_read_block(raw_node, offset)) {
1045 MSG(1, "\tError: While traversing direct node!!!\n");
1046 free(raw_node);
1047 return -1;
1048 }
1049
1050 next_blkaddr = le32_to_cpu(raw_node->footer.next_blkaddr);
1051 memset(raw_node, 0, F2FS_BLKSIZE);
1052
1053 DBG(1, "\tDiscard dnode, at offset 0x%08"PRIx64"\n", offset);
1054 if (dev_write_block(raw_node, offset)) {
1055 MSG(1, "\tError: While discarding direct node!!!\n");
1056 free(raw_node);
1057 return -1;
1058 }
1059 offset = next_blkaddr;
1060 /* should avoid recursive chain due to stale data */
1061 if (offset >= start_inode_pos && offset <= last_inode_pos)
1062 break;
1063 } while (1);
1064
1065 free(raw_node);
1066 return 0;
1067 }
1068 #endif
1069
1070 static int f2fs_write_root_inode(void)
1071 {
1072 struct f2fs_node *raw_node = NULL;
1073 u_int64_t blk_size_bytes, data_blk_nor;
1074 u_int64_t main_area_node_seg_blk_offset = 0;
1075
1076 raw_node = calloc(F2FS_BLKSIZE, 1);
1077 if (raw_node == NULL) {
1078 MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1079 return -1;
1080 }
1081
1082 raw_node->footer.nid = sb->root_ino;
1083 raw_node->footer.ino = sb->root_ino;
1084 raw_node->footer.cp_ver = cpu_to_le64(1);
1085 raw_node->footer.next_blkaddr = cpu_to_le32(
1086 get_sb(main_blkaddr) +
1087 c.cur_seg[CURSEG_HOT_NODE] *
1088 c.blks_per_seg + 1);
1089
1090 raw_node->i.i_mode = cpu_to_le16(0x41ed);
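/* 0x41ed is S_IFDIR | 0755 */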
1091 if (c.lpf_ino)
1092 raw_node->i.i_links = cpu_to_le32(3);
1093 else
1094 raw_node->i.i_links = cpu_to_le32(2);
1095 raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1096 raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1097
1098 blk_size_bytes = 1 << get_sb(log_blocksize);
1099 raw_node->i.i_size = cpu_to_le64(1 * blk_size_bytes); /* dentry */
1100 raw_node->i.i_blocks = cpu_to_le64(2);
1101
1102 raw_node->i.i_atime = cpu_to_le32(time(NULL));
1103 raw_node->i.i_atime_nsec = 0;
1104 raw_node->i.i_ctime = cpu_to_le32(time(NULL));
1105 raw_node->i.i_ctime_nsec = 0;
1106 raw_node->i.i_mtime = cpu_to_le32(time(NULL));
1107 raw_node->i.i_mtime_nsec = 0;
1108 raw_node->i.i_generation = 0;
1109 raw_node->i.i_xattr_nid = 0;
1110 raw_node->i.i_flags = 0;
1111 raw_node->i.i_current_depth = cpu_to_le32(1);
1112 raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1113
1114 if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1115 raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1116 raw_node->i.i_extra_isize =
1117 cpu_to_le16(F2FS_TOTAL_EXTRA_ATTR_SIZE);
1118 }
1119
1120 if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1121 raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1122
1123 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
1124 raw_node->i.i_crtime = cpu_to_le32(time(NULL));
1125 raw_node->i.i_crtime_nsec = 0;
1126 }
1127
1128 data_blk_nor = get_sb(main_blkaddr) +
1129 c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg;
1130 raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
1131
1132 raw_node->i.i_ext.fofs = 0;
1133 raw_node->i.i_ext.blk_addr = 0;
1134 raw_node->i.i_ext.len = 0;
1135
1136 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
1137 raw_node->i.i_inode_checksum =
1138 cpu_to_le32(f2fs_inode_chksum(raw_node));
1139
1140 main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1141 main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1142 c.blks_per_seg;
1143
1144 DBG(1, "\tWriting root inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1145 get_sb(main_blkaddr),
1146 c.cur_seg[CURSEG_HOT_NODE],
1147 c.blks_per_seg, main_area_node_seg_blk_offset);
1148 if (dev_write_block(raw_node, main_area_node_seg_blk_offset)) {
1149 MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1150 free(raw_node);
1151 return -1;
1152 }
1153
1154 free(raw_node);
1155 return 0;
1156 }
1157
1158 static int f2fs_write_default_quota(int qtype, unsigned int blkaddr,
1159 __le32 raw_id)
1160 {
1161 char *filebuf = calloc(F2FS_BLKSIZE, 2);
1162 int file_magics[] = INITQMAGICS;
1163 struct v2_disk_dqheader ddqheader;
1164 struct v2_disk_dqinfo ddqinfo;
1165 struct v2r1_disk_dqblk dqblk;
1166
1167 if (filebuf == NULL) {
1168 MSG(1, "\tError: Calloc Failed for filebuf!!!\n");
1169 return -1;
1170 }
1171
1172 /* Write basic quota header */
1173 ddqheader.dqh_magic = cpu_to_le32(file_magics[qtype]);
1174 /* only support QF_VFSV1 */
1175 ddqheader.dqh_version = cpu_to_le32(1);
1176
1177 memcpy(filebuf, &ddqheader, sizeof(ddqheader));
1178
1179 /* Fill Initial quota file content */
1180 ddqinfo.dqi_bgrace = cpu_to_le32(MAX_DQ_TIME);
1181 ddqinfo.dqi_igrace = cpu_to_le32(MAX_IQ_TIME);
1182 ddqinfo.dqi_flags = cpu_to_le32(0);
1183 ddqinfo.dqi_blocks = cpu_to_le32(QT_TREEOFF + 5);
1184 ddqinfo.dqi_free_blk = cpu_to_le32(0);
1185 ddqinfo.dqi_free_entry = cpu_to_le32(5);
1186
1187 memcpy(filebuf + V2_DQINFOOFF, &ddqinfo, sizeof(ddqinfo));
1188
1189 filebuf[1024] = 2;
1190 filebuf[2048] = 3;
1191 filebuf[3072] = 4;
1192 filebuf[4096] = 5;
1193
1194 filebuf[5120 + 8] = 1;
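/*
 * Quota file layout (vfsv1, 1 KB quota blocks): block 0 holds the header
 * and dqinfo, block 1 is the quota-tree root and blocks 2-4 are the
 * intermediate tree blocks, each pointing to the next via the single-byte
 * stores above; block 5 is the data block, whose entry count (at offset
 * 5120 + 8) is set to 1 before the lone dqblk is copied in at 5136 below.
 */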
1195
1196 dqblk.dqb_id = raw_id;
1197 dqblk.dqb_pad = cpu_to_le32(0);
1198 dqblk.dqb_ihardlimit = cpu_to_le64(0);
1199 dqblk.dqb_isoftlimit = cpu_to_le64(0);
1200 if (c.lpf_ino)
1201 dqblk.dqb_curinodes = cpu_to_le64(2);
1202 else
1203 dqblk.dqb_curinodes = cpu_to_le64(1);
1204 dqblk.dqb_bhardlimit = cpu_to_le64(0);
1205 dqblk.dqb_bsoftlimit = cpu_to_le64(0);
1206 if (c.lpf_ino)
1207 dqblk.dqb_curspace = cpu_to_le64(8192);
1208 else
1209 dqblk.dqb_curspace = cpu_to_le64(4096);
1210 dqblk.dqb_btime = cpu_to_le64(0);
1211 dqblk.dqb_itime = cpu_to_le64(0);
1212
1213 memcpy(filebuf + 5136, &dqblk, sizeof(struct v2r1_disk_dqblk));
1214
1215 /* Write two blocks */
1216 if (dev_write_block(filebuf, blkaddr) ||
1217 dev_write_block(filebuf + F2FS_BLKSIZE, blkaddr + 1)) {
1218 MSG(1, "\tError: While writing the quota_blk to disk!!!\n");
1219 free(filebuf);
1220 return -1;
1221 }
1222 DBG(1, "\tWriting quota data, at offset %08x, %08x\n",
1223 blkaddr, blkaddr + 1);
1224 free(filebuf);
1225 c.quota_dnum += QUOTA_DATA(qtype);
1226 return 0;
1227 }
1228
1229 static int f2fs_write_qf_inode(int qtype)
1230 {
1231 struct f2fs_node *raw_node = NULL;
1232 u_int64_t data_blk_nor;
1233 u_int64_t main_area_node_seg_blk_offset = 0;
1234 __le32 raw_id;
1235 int i;
1236
1237 raw_node = calloc(F2FS_BLKSIZE, 1);
1238 if (raw_node == NULL) {
1239 MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1240 return -1;
1241 }
1242
1243 raw_node->footer.nid = sb->qf_ino[qtype];
1244 raw_node->footer.ino = sb->qf_ino[qtype];
1245 raw_node->footer.cp_ver = cpu_to_le64(1);
1246 raw_node->footer.next_blkaddr = cpu_to_le32(
1247 get_sb(main_blkaddr) +
1248 c.cur_seg[CURSEG_HOT_NODE] *
1249 c.blks_per_seg + 1 + qtype + 1);
1250
1251 raw_node->i.i_mode = cpu_to_le16(0x8180);
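/* 0x8180 is S_IFREG | 0600 */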
1252 raw_node->i.i_links = cpu_to_le32(1);
1253 raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1254 raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1255
1256 raw_node->i.i_size = cpu_to_le64(1024 * 6); /* Hard coded */
1257 raw_node->i.i_blocks = cpu_to_le64(1 + QUOTA_DATA(qtype));
1258
1259 raw_node->i.i_atime = cpu_to_le32(time(NULL));
1260 raw_node->i.i_atime_nsec = 0;
1261 raw_node->i.i_ctime = cpu_to_le32(time(NULL));
1262 raw_node->i.i_ctime_nsec = 0;
1263 raw_node->i.i_mtime = cpu_to_le32(time(NULL));
1264 raw_node->i.i_mtime_nsec = 0;
1265 raw_node->i.i_generation = 0;
1266 raw_node->i.i_xattr_nid = 0;
1267 raw_node->i.i_flags = FS_IMMUTABLE_FL;
1268 raw_node->i.i_current_depth = cpu_to_le32(0);
1269 raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1270
1271 if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1272 raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1273 raw_node->i.i_extra_isize =
1274 cpu_to_le16(F2FS_TOTAL_EXTRA_ATTR_SIZE);
1275 }
1276
1277 if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1278 raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1279
1280 data_blk_nor = get_sb(main_blkaddr) +
1281 c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg + 1;
1282
1283 for (i = 0; i < qtype; i++)
1284 if (sb->qf_ino[i])
1285 data_blk_nor += QUOTA_DATA(i);
1286 if (qtype == 0)
1287 raw_id = raw_node->i.i_uid;
1288 else if (qtype == 1)
1289 raw_id = raw_node->i.i_gid;
1290 else if (qtype == 2)
1291 raw_id = raw_node->i.i_projid;
1292 else
1293 ASSERT(0);
1294
1295 /* write two blocks */
1296 if (f2fs_write_default_quota(qtype, data_blk_nor, raw_id)) {
1297 free(raw_node);
1298 return -1;
1299 }
1300
1301 for (i = 0; i < QUOTA_DATA(qtype); i++)
1302 raw_node->i.i_addr[get_extra_isize(raw_node) + i] =
1303 cpu_to_le32(data_blk_nor + i);
1304 raw_node->i.i_ext.fofs = 0;
1305 raw_node->i.i_ext.blk_addr = 0;
1306 raw_node->i.i_ext.len = 0;
1307
1308 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
1309 raw_node->i.i_inode_checksum =
1310 cpu_to_le32(f2fs_inode_chksum(raw_node));
1311
1312 main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1313 main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1314 c.blks_per_seg + qtype + 1;
1315
1316 DBG(1, "\tWriting quota inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1317 get_sb(main_blkaddr),
1318 c.cur_seg[CURSEG_HOT_NODE],
1319 c.blks_per_seg, main_area_node_seg_blk_offset);
1320 if (dev_write_block(raw_node, main_area_node_seg_blk_offset)) {
1321 MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1322 free(raw_node);
1323 return -1;
1324 }
1325
1326 free(raw_node);
1327 c.quota_inum++;
1328 return 0;
1329 }
1330
1331 static int f2fs_update_nat_root(void)
1332 {
1333 struct f2fs_nat_block *nat_blk = NULL;
1334 u_int64_t nat_seg_blk_offset = 0;
1335 enum quota_type qtype;
1336 int i;
1337
1338 nat_blk = calloc(F2FS_BLKSIZE, 1);
1339 if (nat_blk == NULL) {
1340 MSG(1, "\tError: Calloc Failed for nat_blk!!!\n");
1341 return -1;
1342 }
1343
1344 /* update quota */
1345 for (qtype = i = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
1346 if (sb->qf_ino[qtype] == 0)
1347 continue;
1348 nat_blk->entries[sb->qf_ino[qtype]].block_addr =
1349 cpu_to_le32(get_sb(main_blkaddr) +
1350 c.cur_seg[CURSEG_HOT_NODE] *
1351 c.blks_per_seg + i + 1);
1352 nat_blk->entries[sb->qf_ino[qtype]].ino = sb->qf_ino[qtype];
1353 i++;
1354 }
1355
1356 /* update root */
1357 nat_blk->entries[get_sb(root_ino)].block_addr = cpu_to_le32(
1358 get_sb(main_blkaddr) +
1359 c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg);
1360 nat_blk->entries[get_sb(root_ino)].ino = sb->root_ino;
1361
1362 /* update node nat */
1363 nat_blk->entries[get_sb(node_ino)].block_addr = cpu_to_le32(1);
1364 nat_blk->entries[get_sb(node_ino)].ino = sb->node_ino;
1365
1366 /* update meta nat */
1367 nat_blk->entries[get_sb(meta_ino)].block_addr = cpu_to_le32(1);
1368 nat_blk->entries[get_sb(meta_ino)].ino = sb->meta_ino;
1369
1370 nat_seg_blk_offset = get_sb(nat_blkaddr);
1371
1372 DBG(1, "\tWriting nat root, at offset 0x%08"PRIx64"\n",
1373 nat_seg_blk_offset);
1374 if (dev_write_block(nat_blk, nat_seg_blk_offset)) {
1375 MSG(1, "\tError: While writing the nat_blk set0 to disk!\n");
1376 free(nat_blk);
1377 return -1;
1378 }
1379
1380 free(nat_blk);
1381 return 0;
1382 }
1383
1384 static block_t f2fs_add_default_dentry_lpf(void)
1385 {
1386 struct f2fs_dentry_block *dent_blk;
1387 uint64_t data_blk_offset;
1388
1389 dent_blk = calloc(F2FS_BLKSIZE, 1);
1390 if (dent_blk == NULL) {
1391 MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
1392 return 0;
1393 }
1394
1395 dent_blk->dentry[0].hash_code = 0;
1396 dent_blk->dentry[0].ino = cpu_to_le32(c.lpf_ino);
1397 dent_blk->dentry[0].name_len = cpu_to_le16(1);
1398 dent_blk->dentry[0].file_type = F2FS_FT_DIR;
1399 memcpy(dent_blk->filename[0], ".", 1);
1400
1401 dent_blk->dentry[1].hash_code = 0;
1402 dent_blk->dentry[1].ino = sb->root_ino;
1403 dent_blk->dentry[1].name_len = cpu_to_le16(2);
1404 dent_blk->dentry[1].file_type = F2FS_FT_DIR;
1405 memcpy(dent_blk->filename[1], "..", 2);
1406
1407 test_and_set_bit_le(0, dent_blk->dentry_bitmap);
1408 test_and_set_bit_le(1, dent_blk->dentry_bitmap);
1409
1410 data_blk_offset = get_sb(main_blkaddr);
1411 data_blk_offset += c.cur_seg[CURSEG_HOT_DATA] * c.blks_per_seg +
1412 1 + c.quota_dnum;
1413
1414 DBG(1, "\tWriting default dentry lost+found, at offset 0x%08"PRIx64"\n",
1415 data_blk_offset);
1416 if (dev_write_block(dent_blk, data_blk_offset)) {
1417 MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
1418 free(dent_blk);
1419 return 0;
1420 }
1421
1422 free(dent_blk);
1423 c.lpf_dnum++;
1424 return data_blk_offset;
1425 }
1426
1427 static int f2fs_write_lpf_inode(void)
1428 {
1429 struct f2fs_node *raw_node;
1430 u_int64_t blk_size_bytes, main_area_node_seg_blk_offset;
1431 block_t data_blk_nor;
1432 int err = 0;
1433
1434 ASSERT(c.lpf_ino);
1435
1436 raw_node = calloc(F2FS_BLKSIZE, 1);
1437 if (raw_node == NULL) {
1438 MSG(1, "\tError: Calloc Failed for raw_node!!!\n");
1439 return -1;
1440 }
1441
1442 raw_node->footer.nid = cpu_to_le32(c.lpf_ino);
1443 raw_node->footer.ino = raw_node->footer.nid;
1444 raw_node->footer.cp_ver = cpu_to_le64(1);
1445 raw_node->footer.next_blkaddr = cpu_to_le32(
1446 get_sb(main_blkaddr) +
1447 c.cur_seg[CURSEG_HOT_NODE] * c.blks_per_seg +
1448 1 + c.quota_inum + 1);
1449
1450 raw_node->i.i_mode = cpu_to_le16(0x41c0); /* 0700 */
1451 raw_node->i.i_links = cpu_to_le32(2);
1452 raw_node->i.i_uid = cpu_to_le32(c.root_uid);
1453 raw_node->i.i_gid = cpu_to_le32(c.root_gid);
1454
1455 blk_size_bytes = 1 << get_sb(log_blocksize);
1456 raw_node->i.i_size = cpu_to_le64(1 * blk_size_bytes);
1457 raw_node->i.i_blocks = cpu_to_le64(2);
1458
1459 raw_node->i.i_atime = cpu_to_le32(time(NULL));
1460 raw_node->i.i_atime_nsec = 0;
1461 raw_node->i.i_ctime = cpu_to_le32(time(NULL));
1462 raw_node->i.i_ctime_nsec = 0;
1463 raw_node->i.i_mtime = cpu_to_le32(time(NULL));
1464 raw_node->i.i_mtime_nsec = 0;
1465 raw_node->i.i_generation = 0;
1466 raw_node->i.i_xattr_nid = 0;
1467 raw_node->i.i_flags = 0;
1468 raw_node->i.i_pino = le32_to_cpu(sb->root_ino);
1469 raw_node->i.i_namelen = le32_to_cpu(strlen(LPF));
1470 memcpy(raw_node->i.i_name, LPF, strlen(LPF));
1471 raw_node->i.i_current_depth = cpu_to_le32(1);
1472 raw_node->i.i_dir_level = DEF_DIR_LEVEL;
1473
1474 if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
1475 raw_node->i.i_inline = F2FS_EXTRA_ATTR;
1476 raw_node->i.i_extra_isize =
1477 cpu_to_le16(F2FS_TOTAL_EXTRA_ATTR_SIZE);
1478 }
1479
1480 if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
1481 raw_node->i.i_projid = cpu_to_le32(F2FS_DEF_PROJID);
1482
1483 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
1484 raw_node->i.i_crtime = cpu_to_le32(time(NULL));
1485 raw_node->i.i_crtime_nsec = 0;
1486 }
1487
1488 data_blk_nor = f2fs_add_default_dentry_lpf();
1489 if (data_blk_nor == 0) {
1490 MSG(1, "\tError: Failed to add default dentries for lost+found!!!\n");
1491 err = -1;
1492 goto exit;
1493 }
1494 raw_node->i.i_addr[get_extra_isize(raw_node)] = cpu_to_le32(data_blk_nor);
1495
1496 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
1497 raw_node->i.i_inode_checksum =
1498 cpu_to_le32(f2fs_inode_chksum(raw_node));
1499
1500 main_area_node_seg_blk_offset = get_sb(main_blkaddr);
1501 main_area_node_seg_blk_offset += c.cur_seg[CURSEG_HOT_NODE] *
1502 c.blks_per_seg + c.quota_inum + 1;
1503
1504 DBG(1, "\tWriting lost+found inode (hot node), %x %x %x at offset 0x%08"PRIx64"\n",
1505 get_sb(main_blkaddr),
1506 c.cur_seg[CURSEG_HOT_NODE],
1507 c.blks_per_seg, main_area_node_seg_blk_offset);
1508 if (dev_write_block(raw_node, main_area_node_seg_blk_offset)) {
1509 MSG(1, "\tError: While writing the raw_node to disk!!!\n");
1510 err = -1;
1511 goto exit;
1512 }
1513
1514 c.lpf_inum++;
1515 exit:
1516 free(raw_node);
1517 return err;
1518 }
1519
1520 static int f2fs_add_default_dentry_root(void)
1521 {
1522 struct f2fs_dentry_block *dent_blk = NULL;
1523 u_int64_t data_blk_offset = 0;
1524
1525 dent_blk = calloc(F2FS_BLKSIZE, 1);
1526 if (dent_blk == NULL) {
1527 MSG(1, "\tError: Calloc Failed for dent_blk!!!\n");
1528 return -1;
1529 }
1530
1531 dent_blk->dentry[0].hash_code = 0;
1532 dent_blk->dentry[0].ino = sb->root_ino;
1533 dent_blk->dentry[0].name_len = cpu_to_le16(1);
1534 dent_blk->dentry[0].file_type = F2FS_FT_DIR;
1535 memcpy(dent_blk->filename[0], ".", 1);
1536
1537 dent_blk->dentry[1].hash_code = 0;
1538 dent_blk->dentry[1].ino = sb->root_ino;
1539 dent_blk->dentry[1].name_len = cpu_to_le16(2);
1540 dent_blk->dentry[1].file_type = F2FS_FT_DIR;
1541 memcpy(dent_blk->filename[1], "..", 2);
1542
1543 /* bitmap for . and .. */
1544 test_and_set_bit_le(0, dent_blk->dentry_bitmap);
1545 test_and_set_bit_le(1, dent_blk->dentry_bitmap);
1546
1547 if (c.lpf_ino) {
1548 int len = strlen(LPF);
1549 f2fs_hash_t hash = f2fs_dentry_hash((unsigned char *)LPF, len);
1550
1551 dent_blk->dentry[2].hash_code = cpu_to_le32(hash);
1552 dent_blk->dentry[2].ino = cpu_to_le32(c.lpf_ino);
1553 dent_blk->dentry[2].name_len = cpu_to_le16(len);
1554 dent_blk->dentry[2].file_type = F2FS_FT_DIR;
1555 memcpy(dent_blk->filename[2], LPF, F2FS_SLOT_LEN);
1556
1557 memcpy(dent_blk->filename[3], LPF + F2FS_SLOT_LEN,
1558 len - F2FS_SLOT_LEN);
1559
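/*
 * "lost+found" is longer than one 8-byte filename slot (F2FS_SLOT_LEN),
 * so its name spans dentry slots 2 and 3 and both bitmap bits are set.
 */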
1560 test_and_set_bit_le(2, dent_blk->dentry_bitmap);
1561 test_and_set_bit_le(3, dent_blk->dentry_bitmap);
1562 }
1563
1564 data_blk_offset = get_sb(main_blkaddr);
1565 data_blk_offset += c.cur_seg[CURSEG_HOT_DATA] *
1566 c.blks_per_seg;
1567
1568 DBG(1, "\tWriting default dentry root, at offset 0x%08"PRIx64"\n",
1569 data_blk_offset);
1570 if (dev_write_block(dent_blk, data_blk_offset)) {
1571 MSG(1, "\tError: While writing the dentry_blk to disk!!!\n");
1572 free(dent_blk);
1573 return -1;
1574 }
1575
1576 free(dent_blk);
1577 return 0;
1578 }
1579
1580 static int f2fs_create_root_dir(void)
1581 {
1582 enum quota_type qtype;
1583 int err = 0;
1584
1585 err = f2fs_write_root_inode();
1586 if (err < 0) {
1587 MSG(1, "\tError: Failed to write root inode!!!\n");
1588 goto exit;
1589 }
1590
1591 for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
1592 if (sb->qf_ino[qtype] == 0)
1593 continue;
1594 err = f2fs_write_qf_inode(qtype);
1595 if (err < 0) {
1596 MSG(1, "\tError: Failed to write quota inode!!!\n");
1597 goto exit;
1598 }
1599 }
1600
1601 if (c.feature & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
1602 err = f2fs_write_lpf_inode();
1603 if (err < 0) {
1604 MSG(1, "\tError: Failed to write lost+found inode!!!\n");
1605 goto exit;
1606 }
1607 }
1608
1609 #ifndef WITH_ANDROID
1610 err = f2fs_discard_obsolete_dnode();
1611 if (err < 0) {
1612 MSG(1, "\tError: Failed to discard obsolete dnode!!!\n");
1613 goto exit;
1614 }
1615 #endif
1616
1617 err = f2fs_update_nat_root();
1618 if (err < 0) {
1619 MSG(1, "\tError: Failed to update NAT for root!!!\n");
1620 goto exit;
1621 }
1622
1623 err = f2fs_add_default_dentry_root();
1624 if (err < 0) {
1625 MSG(1, "\tError: Failed to add default dentries for root!!!\n");
1626 goto exit;
1627 }
1628 exit:
1629 if (err)
1630 MSG(1, "\tError: Could not create the root directory!!!\n");
1631
1632 return err;
1633 }
1634
1635 int f2fs_format_device(void)
1636 {
1637 int err = 0;
1638
1639 err = f2fs_prepare_super_block();
1640 if (err < 0) {
1641 MSG(0, "\tError: Failed to prepare a super block!!!\n");
1642 goto exit;
1643 }
1644
1645 if (c.trim) {
1646 err = f2fs_trim_devices();
1647 if (err < 0) {
1648 MSG(0, "\tError: Failed to trim whole device!!!\n");
1649 goto exit;
1650 }
1651 }
1652
1653 err = f2fs_init_sit_area();
1654 if (err < 0) {
1655 MSG(0, "\tError: Failed to initialise the SIT AREA!!!\n");
1656 goto exit;
1657 }
1658
1659 err = f2fs_init_nat_area();
1660 if (err < 0) {
1661 MSG(0, "\tError: Failed to initialise the NAT AREA!!!\n");
1662 goto exit;
1663 }
1664
1665 err = f2fs_create_root_dir();
1666 if (err < 0) {
1667 MSG(0, "\tError: Failed to create the root directory!!!\n");
1668 goto exit;
1669 }
1670
1671 err = f2fs_write_check_point_pack();
1672 if (err < 0) {
1673 MSG(0, "\tError: Failed to write the check point pack!!!\n");
1674 goto exit;
1675 }
1676
1677 err = f2fs_write_super_block();
1678 if (err < 0) {
1679 MSG(0, "\tError: Failed to write the super block!!!\n");
1680 goto exit;
1681 }
1682 exit:
1683 if (err)
1684 MSG(0, "\tError: Could not format the device!!!\n");
1685
1686 return err;
1687 }
1688