1 /** 2 * resize.c 3 * 4 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org> 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License version 2 as 8 * published by the Free Software Foundation. 9 */ 10 #include "fsck.h" 11 12 static int get_new_sb(struct f2fs_super_block *sb) 13 { 14 u_int32_t zone_size_bytes; 15 u_int64_t zone_align_start_offset; 16 u_int32_t blocks_for_sit, blocks_for_nat, blocks_for_ssa; 17 u_int32_t sit_segments, nat_segments, diff, total_meta_segments; 18 u_int32_t total_valid_blks_available; 19 u_int32_t sit_bitmap_size, max_sit_bitmap_size; 20 u_int32_t max_nat_bitmap_size, max_nat_segments; 21 u_int32_t segment_size_bytes = 1 << (get_sb(log_blocksize) + 22 get_sb(log_blocks_per_seg)); 23 u_int32_t blks_per_seg = 1 << get_sb(log_blocks_per_seg); 24 u_int32_t segs_per_zone = get_sb(segs_per_sec) * get_sb(secs_per_zone); 25 26 set_sb(block_count, c.target_sectors >> 27 get_sb(log_sectors_per_block)); 28 29 zone_size_bytes = segment_size_bytes * segs_per_zone; 30 zone_align_start_offset = 31 ((u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE + 32 2 * F2FS_BLKSIZE + zone_size_bytes - 1) / 33 zone_size_bytes * zone_size_bytes - 34 (u_int64_t) c.start_sector * DEFAULT_SECTOR_SIZE; 35 36 set_sb(segment_count, (c.target_sectors * c.sector_size - 37 zone_align_start_offset) / segment_size_bytes / 38 c.segs_per_sec * c.segs_per_sec); 39 40 if (c.safe_resize) 41 goto safe_resize; 42 43 blocks_for_sit = SIZE_ALIGN(get_sb(segment_count), SIT_ENTRY_PER_BLOCK); 44 sit_segments = SEG_ALIGN(blocks_for_sit); 45 set_sb(segment_count_sit, sit_segments * 2); 46 set_sb(nat_blkaddr, get_sb(sit_blkaddr) + 47 get_sb(segment_count_sit) * blks_per_seg); 48 49 total_valid_blks_available = (get_sb(segment_count) - 50 (get_sb(segment_count_ckpt) + 51 get_sb(segment_count_sit))) * blks_per_seg; 52 blocks_for_nat = SIZE_ALIGN(total_valid_blks_available, 53 NAT_ENTRY_PER_BLOCK); 54 55 if (c.large_nat_bitmap) { 56 nat_segments = SEG_ALIGN(blocks_for_nat) * 57 DEFAULT_NAT_ENTRY_RATIO / 100; 58 set_sb(segment_count_nat, nat_segments ? nat_segments : 1); 59 60 max_nat_bitmap_size = (get_sb(segment_count_nat) << 61 get_sb(log_blocks_per_seg)) / 8; 62 set_sb(segment_count_nat, get_sb(segment_count_nat) * 2); 63 } else { 64 set_sb(segment_count_nat, SEG_ALIGN(blocks_for_nat)); 65 max_nat_bitmap_size = 0; 66 } 67 68 sit_bitmap_size = ((get_sb(segment_count_sit) / 2) << 69 get_sb(log_blocks_per_seg)) / 8; 70 if (sit_bitmap_size > MAX_SIT_BITMAP_SIZE) 71 max_sit_bitmap_size = MAX_SIT_BITMAP_SIZE; 72 else 73 max_sit_bitmap_size = sit_bitmap_size; 74 75 if (c.large_nat_bitmap) { 76 /* use cp_payload if free space of f2fs_checkpoint is not enough */ 77 if (max_sit_bitmap_size + max_nat_bitmap_size > 78 MAX_BITMAP_SIZE_IN_CKPT) { 79 u_int32_t diff = max_sit_bitmap_size + 80 max_nat_bitmap_size - 81 MAX_BITMAP_SIZE_IN_CKPT; 82 set_sb(cp_payload, F2FS_BLK_ALIGN(diff)); 83 } else { 84 set_sb(cp_payload, 0); 85 } 86 } else { 87 /* 88 * It should be reserved minimum 1 segment for nat. 89 * When sit is too large, we should expand cp area. 90 * It requires more pages for cp. 91 */ 92 if (max_sit_bitmap_size > MAX_SIT_BITMAP_SIZE_IN_CKPT) { 93 max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT; 94 set_sb(cp_payload, F2FS_BLK_ALIGN(max_sit_bitmap_size)); 95 } else { 96 max_nat_bitmap_size = MAX_BITMAP_SIZE_IN_CKPT - 97 max_sit_bitmap_size; 98 set_sb(cp_payload, 0); 99 } 100 101 max_nat_segments = (max_nat_bitmap_size * 8) >> 102 get_sb(log_blocks_per_seg); 103 104 if (get_sb(segment_count_nat) > max_nat_segments) 105 set_sb(segment_count_nat, max_nat_segments); 106 107 set_sb(segment_count_nat, get_sb(segment_count_nat) * 2); 108 } 109 110 set_sb(ssa_blkaddr, get_sb(nat_blkaddr) + 111 get_sb(segment_count_nat) * blks_per_seg); 112 113 total_valid_blks_available = (get_sb(segment_count) - 114 (get_sb(segment_count_ckpt) + 115 get_sb(segment_count_sit) + 116 get_sb(segment_count_nat))) * blks_per_seg; 117 118 blocks_for_ssa = total_valid_blks_available / blks_per_seg + 1; 119 120 set_sb(segment_count_ssa, SEG_ALIGN(blocks_for_ssa)); 121 122 total_meta_segments = get_sb(segment_count_ckpt) + 123 get_sb(segment_count_sit) + 124 get_sb(segment_count_nat) + 125 get_sb(segment_count_ssa); 126 127 diff = total_meta_segments % segs_per_zone; 128 if (diff) 129 set_sb(segment_count_ssa, get_sb(segment_count_ssa) + 130 (segs_per_zone - diff)); 131 132 set_sb(main_blkaddr, get_sb(ssa_blkaddr) + get_sb(segment_count_ssa) * 133 blks_per_seg); 134 135 safe_resize: 136 set_sb(segment_count_main, get_sb(segment_count) - 137 (get_sb(segment_count_ckpt) + 138 get_sb(segment_count_sit) + 139 get_sb(segment_count_nat) + 140 get_sb(segment_count_ssa))); 141 142 set_sb(section_count, get_sb(segment_count_main) / 143 get_sb(segs_per_sec)); 144 145 set_sb(segment_count_main, get_sb(section_count) * 146 get_sb(segs_per_sec)); 147 148 /* Let's determine the best reserved and overprovisioned space */ 149 c.new_overprovision = get_best_overprovision(sb); 150 c.new_reserved_segments = 151 (2 * (100 / c.new_overprovision + 1) + 6) * 152 get_sb(segs_per_sec); 153 154 if ((get_sb(segment_count_main) - 2) < c.new_reserved_segments || 155 get_sb(segment_count_main) * blks_per_seg > 156 get_sb(block_count)) { 157 MSG(0, "\tError: Device size is not sufficient for F2FS volume, " 158 "more segment needed =%u", 159 c.new_reserved_segments - 160 (get_sb(segment_count_main) - 2)); 161 return -1; 162 } 163 return 0; 164 } 165 166 static void migrate_main(struct f2fs_sb_info *sbi, unsigned int offset) 167 { 168 void *raw = calloc(BLOCK_SZ, 1); 169 struct seg_entry *se; 170 block_t from, to; 171 int i, j, ret; 172 struct f2fs_summary sum; 173 174 ASSERT(raw != NULL); 175 176 for (i = TOTAL_SEGS(sbi) - 1; i >= 0; i--) { 177 se = get_seg_entry(sbi, i); 178 if (!se->valid_blocks) 179 continue; 180 181 for (j = sbi->blocks_per_seg - 1; j >= 0; j--) { 182 if (!f2fs_test_bit(j, (const char *)se->cur_valid_map)) 183 continue; 184 185 from = START_BLOCK(sbi, i) + j; 186 ret = dev_read_block(raw, from); 187 ASSERT(ret >= 0); 188 189 to = from + offset; 190 ret = dev_write_block(raw, to); 191 ASSERT(ret >= 0); 192 193 get_sum_entry(sbi, from, &sum); 194 195 if (IS_DATASEG(se->type)) 196 update_data_blkaddr(sbi, le32_to_cpu(sum.nid), 197 le16_to_cpu(sum.ofs_in_node), to); 198 else 199 update_nat_blkaddr(sbi, 0, 200 le32_to_cpu(sum.nid), to); 201 } 202 } 203 free(raw); 204 DBG(0, "Info: Done to migrate Main area: main_blkaddr = 0x%x -> 0x%x\n", 205 START_BLOCK(sbi, 0), 206 START_BLOCK(sbi, 0) + offset); 207 } 208 209 static void move_ssa(struct f2fs_sb_info *sbi, unsigned int segno, 210 block_t new_sum_blk_addr) 211 { 212 struct f2fs_summary_block *sum_blk; 213 int type; 214 215 sum_blk = get_sum_block(sbi, segno, &type); 216 if (type < SEG_TYPE_MAX) { 217 int ret; 218 219 ret = dev_write_block(sum_blk, new_sum_blk_addr); 220 ASSERT(ret >= 0); 221 DBG(1, "Write summary block: (%d) segno=%x/%x --> (%d) %x\n", 222 type, segno, GET_SUM_BLKADDR(sbi, segno), 223 IS_SUM_NODE_SEG(sum_blk->footer), 224 new_sum_blk_addr); 225 } 226 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA || 227 type == SEG_TYPE_MAX) { 228 free(sum_blk); 229 } 230 DBG(1, "Info: Done to migrate SSA blocks\n"); 231 } 232 233 static void migrate_ssa(struct f2fs_sb_info *sbi, 234 struct f2fs_super_block *new_sb, unsigned int offset) 235 { 236 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 237 block_t old_sum_blkaddr = get_sb(ssa_blkaddr); 238 block_t new_sum_blkaddr = get_newsb(ssa_blkaddr); 239 block_t end_sum_blkaddr = get_newsb(main_blkaddr); 240 block_t expand_sum_blkaddr = new_sum_blkaddr + 241 TOTAL_SEGS(sbi) - offset; 242 block_t blkaddr; 243 int ret; 244 void *zero_block = calloc(BLOCK_SZ, 1); 245 ASSERT(zero_block); 246 247 if (offset && new_sum_blkaddr < old_sum_blkaddr + offset) { 248 blkaddr = new_sum_blkaddr; 249 while (blkaddr < end_sum_blkaddr) { 250 if (blkaddr < expand_sum_blkaddr) { 251 move_ssa(sbi, offset++, blkaddr++); 252 } else { 253 ret = dev_write_block(zero_block, blkaddr++); 254 ASSERT(ret >=0); 255 } 256 } 257 } else { 258 blkaddr = end_sum_blkaddr - 1; 259 offset = TOTAL_SEGS(sbi) - 1; 260 while (blkaddr >= new_sum_blkaddr) { 261 if (blkaddr >= expand_sum_blkaddr) { 262 ret = dev_write_block(zero_block, blkaddr--); 263 ASSERT(ret >=0); 264 } else { 265 move_ssa(sbi, offset--, blkaddr--); 266 } 267 } 268 } 269 270 DBG(0, "Info: Done to migrate SSA blocks: sum_blkaddr = 0x%x -> 0x%x\n", 271 old_sum_blkaddr, new_sum_blkaddr); 272 free(zero_block); 273 } 274 275 static int shrink_nats(struct f2fs_sb_info *sbi, 276 struct f2fs_super_block *new_sb) 277 { 278 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 279 struct f2fs_nm_info *nm_i = NM_I(sbi); 280 block_t old_nat_blkaddr = get_sb(nat_blkaddr); 281 unsigned int nat_blocks; 282 void *nat_block, *zero_block; 283 int nid, ret, new_max_nid; 284 pgoff_t block_off; 285 pgoff_t block_addr; 286 int seg_off; 287 288 nat_block = malloc(BLOCK_SZ); 289 ASSERT(nat_block); 290 zero_block = calloc(BLOCK_SZ, 1); 291 ASSERT(zero_block); 292 293 nat_blocks = get_newsb(segment_count_nat) >> 1; 294 nat_blocks = nat_blocks << get_sb(log_blocks_per_seg); 295 new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; 296 297 for (nid = nm_i->max_nid - 1; nid > new_max_nid; nid -= NAT_ENTRY_PER_BLOCK) { 298 block_off = nid / NAT_ENTRY_PER_BLOCK; 299 seg_off = block_off >> sbi->log_blocks_per_seg; 300 block_addr = (pgoff_t)(old_nat_blkaddr + 301 (seg_off << sbi->log_blocks_per_seg << 1) + 302 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); 303 304 if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) 305 block_addr += sbi->blocks_per_seg; 306 307 ret = dev_read_block(nat_block, block_addr); 308 ASSERT(ret >= 0); 309 310 if (memcmp(zero_block, nat_block, BLOCK_SZ)) { 311 ret = -1; 312 goto not_avail; 313 } 314 } 315 ret = 0; 316 nm_i->max_nid = new_max_nid; 317 not_avail: 318 free(nat_block); 319 free(zero_block); 320 return ret; 321 } 322 323 static void migrate_nat(struct f2fs_sb_info *sbi, 324 struct f2fs_super_block *new_sb) 325 { 326 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 327 struct f2fs_nm_info *nm_i = NM_I(sbi); 328 block_t old_nat_blkaddr = get_sb(nat_blkaddr); 329 block_t new_nat_blkaddr = get_newsb(nat_blkaddr); 330 unsigned int nat_blocks; 331 void *nat_block; 332 int nid, ret, new_max_nid; 333 pgoff_t block_off; 334 pgoff_t block_addr; 335 int seg_off; 336 337 nat_block = malloc(BLOCK_SZ); 338 ASSERT(nat_block); 339 340 for (nid = nm_i->max_nid - 1; nid >= 0; nid -= NAT_ENTRY_PER_BLOCK) { 341 block_off = nid / NAT_ENTRY_PER_BLOCK; 342 seg_off = block_off >> sbi->log_blocks_per_seg; 343 block_addr = (pgoff_t)(old_nat_blkaddr + 344 (seg_off << sbi->log_blocks_per_seg << 1) + 345 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); 346 347 /* move to set #0 */ 348 if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) { 349 block_addr += sbi->blocks_per_seg; 350 f2fs_clear_bit(block_off, nm_i->nat_bitmap); 351 } 352 353 ret = dev_read_block(nat_block, block_addr); 354 ASSERT(ret >= 0); 355 356 block_addr = (pgoff_t)(new_nat_blkaddr + 357 (seg_off << sbi->log_blocks_per_seg << 1) + 358 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); 359 360 /* new bitmap should be zeros */ 361 ret = dev_write_block(nat_block, block_addr); 362 ASSERT(ret >= 0); 363 } 364 /* zero out newly assigned nids */ 365 memset(nat_block, 0, BLOCK_SZ); 366 nat_blocks = get_newsb(segment_count_nat) >> 1; 367 nat_blocks = nat_blocks << get_sb(log_blocks_per_seg); 368 new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; 369 370 DBG(1, "Write NAT block: %x->%x, max_nid=%x->%x\n", 371 old_nat_blkaddr, new_nat_blkaddr, 372 get_sb(segment_count_nat), 373 get_newsb(segment_count_nat)); 374 375 for (nid = nm_i->max_nid; nid < new_max_nid; 376 nid += NAT_ENTRY_PER_BLOCK) { 377 block_off = nid / NAT_ENTRY_PER_BLOCK; 378 seg_off = block_off >> sbi->log_blocks_per_seg; 379 block_addr = (pgoff_t)(new_nat_blkaddr + 380 (seg_off << sbi->log_blocks_per_seg << 1) + 381 (block_off & ((1 << sbi->log_blocks_per_seg) - 1))); 382 ret = dev_write_block(nat_block, block_addr); 383 ASSERT(ret >= 0); 384 DBG(3, "Write NAT: %lx\n", block_addr); 385 } 386 DBG(0, "Info: Done to migrate NAT blocks: nat_blkaddr = 0x%x -> 0x%x\n", 387 old_nat_blkaddr, new_nat_blkaddr); 388 } 389 390 static void migrate_sit(struct f2fs_sb_info *sbi, 391 struct f2fs_super_block *new_sb, unsigned int offset) 392 { 393 struct sit_info *sit_i = SIT_I(sbi); 394 unsigned int ofs = 0, pre_ofs = 0; 395 unsigned int segno, index; 396 struct f2fs_sit_block *sit_blk = calloc(BLOCK_SZ, 1); 397 block_t sit_blks = get_newsb(segment_count_sit) << 398 (sbi->log_blocks_per_seg - 1); 399 struct seg_entry *se; 400 block_t blk_addr = 0; 401 int ret; 402 403 ASSERT(sit_blk); 404 405 /* initialize with zeros */ 406 for (index = 0; index < sit_blks; index++) { 407 ret = dev_write_block(sit_blk, get_newsb(sit_blkaddr) + index); 408 ASSERT(ret >= 0); 409 DBG(3, "Write zero sit: %x\n", get_newsb(sit_blkaddr) + index); 410 } 411 412 for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) { 413 struct f2fs_sit_entry *sit; 414 415 se = get_seg_entry(sbi, segno); 416 if (segno < offset) { 417 ASSERT(se->valid_blocks == 0); 418 continue; 419 } 420 421 ofs = SIT_BLOCK_OFFSET(sit_i, segno - offset); 422 423 if (ofs != pre_ofs) { 424 blk_addr = get_newsb(sit_blkaddr) + pre_ofs; 425 ret = dev_write_block(sit_blk, blk_addr); 426 ASSERT(ret >= 0); 427 DBG(1, "Write valid sit: %x\n", blk_addr); 428 429 pre_ofs = ofs; 430 memset(sit_blk, 0, BLOCK_SZ); 431 } 432 433 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno - offset)]; 434 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE); 435 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) | 436 se->valid_blocks); 437 } 438 blk_addr = get_newsb(sit_blkaddr) + ofs; 439 ret = dev_write_block(sit_blk, blk_addr); 440 DBG(1, "Write valid sit: %x\n", blk_addr); 441 ASSERT(ret >= 0); 442 443 free(sit_blk); 444 DBG(0, "Info: Done to restore new SIT blocks: 0x%x\n", 445 get_newsb(sit_blkaddr)); 446 } 447 448 static void rebuild_checkpoint(struct f2fs_sb_info *sbi, 449 struct f2fs_super_block *new_sb, unsigned int offset) 450 { 451 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi); 452 unsigned long long cp_ver = get_cp(checkpoint_ver); 453 struct f2fs_checkpoint *new_cp; 454 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 455 unsigned int free_segment_count, new_segment_count; 456 block_t new_cp_blks = 1 + get_newsb(cp_payload); 457 block_t orphan_blks = 0; 458 block_t new_cp_blk_no, old_cp_blk_no; 459 u_int32_t crc = 0; 460 u32 flags; 461 void *buf; 462 int i, ret; 463 464 new_cp = calloc(new_cp_blks * BLOCK_SZ, 1); 465 ASSERT(new_cp); 466 467 buf = malloc(BLOCK_SZ); 468 ASSERT(buf); 469 470 /* ovp / free segments */ 471 set_cp(rsvd_segment_count, c.new_reserved_segments); 472 set_cp(overprov_segment_count, (get_newsb(segment_count_main) - 473 get_cp(rsvd_segment_count)) * 474 c.new_overprovision / 100); 475 set_cp(overprov_segment_count, get_cp(overprov_segment_count) + 476 get_cp(rsvd_segment_count)); 477 478 free_segment_count = get_free_segments(sbi); 479 new_segment_count = get_newsb(segment_count_main) - 480 get_sb(segment_count_main); 481 482 set_cp(free_segment_count, free_segment_count + new_segment_count); 483 set_cp(user_block_count, ((get_newsb(segment_count_main) - 484 get_cp(overprov_segment_count)) * c.blks_per_seg)); 485 486 if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) 487 orphan_blks = __start_sum_addr(sbi) - 1; 488 489 set_cp(cp_pack_start_sum, 1 + get_newsb(cp_payload)); 490 set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_newsb(cp_payload)); 491 492 /* cur->segno - offset */ 493 for (i = 0; i < NO_CHECK_TYPE; i++) { 494 if (i < CURSEG_HOT_NODE) { 495 set_cp(cur_data_segno[i], 496 CURSEG_I(sbi, i)->segno - offset); 497 } else { 498 int n = i - CURSEG_HOT_NODE; 499 500 set_cp(cur_node_segno[n], 501 CURSEG_I(sbi, i)->segno - offset); 502 } 503 } 504 505 /* sit / nat ver bitmap bytesize */ 506 set_cp(sit_ver_bitmap_bytesize, 507 ((get_newsb(segment_count_sit) / 2) << 508 get_newsb(log_blocks_per_seg)) / 8); 509 set_cp(nat_ver_bitmap_bytesize, 510 ((get_newsb(segment_count_nat) / 2) << 511 get_newsb(log_blocks_per_seg)) / 8); 512 513 /* update nat_bits flag */ 514 flags = update_nat_bits_flags(new_sb, cp, get_cp(ckpt_flags)); 515 if (c.large_nat_bitmap) 516 flags |= CP_LARGE_NAT_BITMAP_FLAG; 517 518 if (flags & CP_COMPACT_SUM_FLAG) 519 flags &= ~CP_COMPACT_SUM_FLAG; 520 if (flags & CP_LARGE_NAT_BITMAP_FLAG) 521 set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET); 522 else 523 set_cp(checksum_offset, CP_CHKSUM_OFFSET); 524 525 set_cp(ckpt_flags, flags); 526 527 memcpy(new_cp, cp, (unsigned char *)cp->sit_nat_version_bitmap - 528 (unsigned char *)cp); 529 new_cp->checkpoint_ver = cpu_to_le64(cp_ver + 1); 530 531 crc = f2fs_checkpoint_chksum(new_cp); 532 *((__le32 *)((unsigned char *)new_cp + get_cp(checksum_offset))) = 533 cpu_to_le32(crc); 534 535 /* Write a new checkpoint in the other set */ 536 new_cp_blk_no = old_cp_blk_no = get_sb(cp_blkaddr); 537 if (sbi->cur_cp == 2) 538 old_cp_blk_no += 1 << get_sb(log_blocks_per_seg); 539 else 540 new_cp_blk_no += 1 << get_sb(log_blocks_per_seg); 541 542 /* write first cp */ 543 ret = dev_write_block(new_cp, new_cp_blk_no++); 544 ASSERT(ret >= 0); 545 546 memset(buf, 0, BLOCK_SZ); 547 for (i = 0; i < get_newsb(cp_payload); i++) { 548 ret = dev_write_block(buf, new_cp_blk_no++); 549 ASSERT(ret >= 0); 550 } 551 552 for (i = 0; i < orphan_blks; i++) { 553 block_t orphan_blk_no = old_cp_blk_no + 1 + get_sb(cp_payload); 554 555 ret = dev_read_block(buf, orphan_blk_no++); 556 ASSERT(ret >= 0); 557 558 ret = dev_write_block(buf, new_cp_blk_no++); 559 ASSERT(ret >= 0); 560 } 561 562 /* update summary blocks having nullified journal entries */ 563 for (i = 0; i < NO_CHECK_TYPE; i++) { 564 struct curseg_info *curseg = CURSEG_I(sbi, i); 565 566 ret = dev_write_block(curseg->sum_blk, new_cp_blk_no++); 567 ASSERT(ret >= 0); 568 } 569 570 /* write the last cp */ 571 ret = dev_write_block(new_cp, new_cp_blk_no++); 572 ASSERT(ret >= 0); 573 574 /* Write nat bits */ 575 if (flags & CP_NAT_BITS_FLAG) 576 write_nat_bits(sbi, new_sb, new_cp, sbi->cur_cp == 1 ? 2 : 1); 577 578 /* disable old checkpoint */ 579 memset(buf, 0, BLOCK_SZ); 580 ret = dev_write_block(buf, old_cp_blk_no); 581 ASSERT(ret >= 0); 582 583 free(buf); 584 free(new_cp); 585 DBG(0, "Info: Done to rebuild checkpoint blocks\n"); 586 } 587 588 static int f2fs_resize_grow(struct f2fs_sb_info *sbi) 589 { 590 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 591 struct f2fs_super_block new_sb_raw; 592 struct f2fs_super_block *new_sb = &new_sb_raw; 593 block_t end_blkaddr, old_main_blkaddr, new_main_blkaddr; 594 unsigned int offset; 595 unsigned int offset_seg = 0; 596 int err = -1; 597 598 /* flush NAT/SIT journal entries */ 599 flush_journal_entries(sbi); 600 601 memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb)); 602 if (get_new_sb(new_sb)) 603 return -1; 604 605 /* check nat availability */ 606 if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) { 607 err = shrink_nats(sbi, new_sb); 608 if (err) { 609 MSG(0, "\tError: Failed to shrink NATs\n"); 610 return err; 611 } 612 } 613 614 old_main_blkaddr = get_sb(main_blkaddr); 615 new_main_blkaddr = get_newsb(main_blkaddr); 616 offset = new_main_blkaddr - old_main_blkaddr; 617 end_blkaddr = (get_sb(segment_count_main) << 618 get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr); 619 620 err = -EAGAIN; 621 if (new_main_blkaddr < end_blkaddr) { 622 err = f2fs_defragment(sbi, old_main_blkaddr, offset, 623 new_main_blkaddr, 0); 624 if (!err) 625 offset_seg = offset >> get_sb(log_blocks_per_seg); 626 MSG(0, "Try to do defragement: %s\n", err ? "Skip": "Done"); 627 } 628 /* move whole data region */ 629 if (err) 630 migrate_main(sbi, offset); 631 632 migrate_ssa(sbi, new_sb, offset_seg); 633 migrate_nat(sbi, new_sb); 634 migrate_sit(sbi, new_sb, offset_seg); 635 rebuild_checkpoint(sbi, new_sb, offset_seg); 636 update_superblock(new_sb, SB_MASK_ALL); 637 print_raw_sb_info(sb); 638 print_raw_sb_info(new_sb); 639 640 return 0; 641 } 642 643 static int f2fs_resize_shrink(struct f2fs_sb_info *sbi) 644 { 645 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 646 struct f2fs_super_block new_sb_raw; 647 struct f2fs_super_block *new_sb = &new_sb_raw; 648 block_t old_end_blkaddr, old_main_blkaddr; 649 block_t new_end_blkaddr, new_main_blkaddr, tmp_end_blkaddr; 650 unsigned int offset; 651 int err = -1; 652 653 /* flush NAT/SIT journal entries */ 654 flush_journal_entries(sbi); 655 656 memcpy(new_sb, F2FS_RAW_SUPER(sbi), sizeof(*new_sb)); 657 if (get_new_sb(new_sb)) 658 return -1; 659 660 /* check nat availability */ 661 if (get_sb(segment_count_nat) > get_newsb(segment_count_nat)) { 662 err = shrink_nats(sbi, new_sb); 663 if (err) { 664 MSG(0, "\tError: Failed to shrink NATs\n"); 665 return err; 666 } 667 } 668 669 old_main_blkaddr = get_sb(main_blkaddr); 670 new_main_blkaddr = get_newsb(main_blkaddr); 671 offset = old_main_blkaddr - new_main_blkaddr; 672 old_end_blkaddr = (get_sb(segment_count_main) << 673 get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr); 674 new_end_blkaddr = (get_newsb(segment_count_main) << 675 get_newsb(log_blocks_per_seg)) + get_newsb(main_blkaddr); 676 677 tmp_end_blkaddr = new_end_blkaddr + offset; 678 err = f2fs_defragment(sbi, tmp_end_blkaddr, 679 old_end_blkaddr - tmp_end_blkaddr, 680 tmp_end_blkaddr, 1); 681 MSG(0, "Try to do defragement: %s\n", err ? "Insufficient Space": "Done"); 682 683 if (err) { 684 return -ENOSPC; 685 } 686 687 update_superblock(new_sb, SB_MASK_ALL); 688 rebuild_checkpoint(sbi, new_sb, 0); 689 /*if (!c.safe_resize) { 690 migrate_sit(sbi, new_sb, offset_seg); 691 migrate_nat(sbi, new_sb); 692 migrate_ssa(sbi, new_sb, offset_seg); 693 }*/ 694 695 /* move whole data region */ 696 //if (err) 697 // migrate_main(sbi, offset); 698 print_raw_sb_info(sb); 699 print_raw_sb_info(new_sb); 700 701 return 0; 702 } 703 704 int f2fs_resize(struct f2fs_sb_info *sbi) 705 { 706 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi); 707 708 /* may different sector size */ 709 if ((c.target_sectors * c.sector_size >> 710 get_sb(log_blocksize)) < get_sb(block_count)) 711 if (!c.safe_resize) { 712 ASSERT_MSG("Nothing to resize, now only supports resizing with safe resize flag\n"); 713 return -1; 714 } else { 715 return f2fs_resize_shrink(sbi); 716 } 717 else if ((c.target_sectors * c.sector_size >> 718 get_sb(log_blocksize)) > get_sb(block_count)) 719 return f2fs_resize_grow(sbi); 720 else { 721 MSG(0, "Nothing to resize.\n"); 722 return 0; 723 } 724 } 725