1 /*
2  * Copyright (C) 2010 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *	  http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "allocate.h"
18 
19 #include <stdio.h>
20 #include <stdlib.h>
21 
22 #include <sparse/sparse.h>
23 
24 #include "ext4_utils/ext4_utils.h"
25 
26 struct xattr_list_element {
27 	struct ext4_inode *inode;
28 	struct ext4_xattr_header *header;
29 	struct xattr_list_element *next;
30 };
31 
create_allocation()32 struct block_allocation *create_allocation()
33 {
34 	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
35 	alloc->list.first = NULL;
36 	alloc->list.last = NULL;
37 	alloc->oob_list.first = NULL;
38 	alloc->oob_list.last = NULL;
39 	alloc->list.iter = NULL;
40 	alloc->list.partial_iter = 0;
41 	alloc->oob_list.iter = NULL;
42 	alloc->oob_list.partial_iter = 0;
43 	alloc->filename = NULL;
44 	alloc->next = NULL;
45 	return alloc;
46 }
47 
xattr_list_find(struct ext4_inode * inode)48 static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
49 {
50 	struct xattr_list_element *element;
51 	for (element = aux_info.xattrs; element != NULL; element = element->next) {
52 		if (element->inode == inode)
53 			return element->header;
54 	}
55 	return NULL;
56 }
57 
xattr_list_insert(struct ext4_inode * inode,struct ext4_xattr_header * header)58 static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
59 {
60 	struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
61 	element->inode = inode;
62 	element->header = header;
63 	element->next = aux_info.xattrs;
64 	aux_info.xattrs = element;
65 }
66 
region_list_remove(struct region_list * list,struct region * reg)67 static void region_list_remove(struct region_list *list, struct region *reg)
68 {
69 	if (reg->prev)
70 		reg->prev->next = reg->next;
71 
72 	if (reg->next)
73 		reg->next->prev = reg->prev;
74 
75 	if (list->first == reg)
76 		list->first = reg->next;
77 
78 	if (list->last == reg)
79 		list->last = reg->prev;
80 
81 	reg->next = NULL;
82 	reg->prev = NULL;
83 }
84 
region_list_append(struct region_list * list,struct region * reg)85 void region_list_append(struct region_list *list, struct region *reg)
86 {
87 	if (list->first == NULL) {
88 		list->first = reg;
89 		list->last = reg;
90 		list->iter = reg;
91 		list->partial_iter = 0;
92 		reg->prev = NULL;
93 	} else {
94 		list->last->next = reg;
95 		reg->prev = list->last;
96 		list->last = reg;
97 	}
98 	reg->next = NULL;
99 }
100 
region_list_merge(struct region_list * list1,struct region_list * list2)101 void region_list_merge(struct region_list *list1, struct region_list *list2)
102 {
103 	if (list1->first == NULL) {
104 		list1->first = list2->first;
105 		list1->last = list2->last;
106 		list1->iter = list2->first;
107 		list1->partial_iter = 0;
108 		list1->first->prev = NULL;
109 	} else {
110 		list1->last->next = list2->first;
111 		list2->first->prev = list1->last;
112 		list1->last = list2->last;
113 	}
114 }
115 #if 0
116 static void dump_starting_from(struct region *reg)
117 {
118 	for (; reg; reg = reg->next) {
119 		printf("%p: Blocks %d-%d (%d)\n", reg,
120 			   reg->block, reg->block + reg->len - 1, reg->len)
121 	}
122 }
123 
/* Debug helper (compiled out by the surrounding #if 0): prints both the
   main and out-of-band region lists of an allocation. */
static void dump_region_lists(struct block_allocation *alloc) {

	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
132 #endif
133 
print_blocks(FILE * f,struct block_allocation * alloc,char separator)134 void print_blocks(FILE* f, struct block_allocation *alloc, char separator)
135 {
136 	struct region *reg;
137 	fputc(' ', f);
138 	for (reg = alloc->list.first; reg; reg = reg->next) {
139 		if (reg->len == 1) {
140 			fprintf(f, "%d", reg->block);
141 		} else {
142 			fprintf(f, "%d-%d", reg->block, reg->block + reg->len - 1);
143 		}
144 		fputc(separator, f);
145 	}
146 	fputc('\n', f);
147 }
148 
/* Allocates a region describing blocks [block, block+len) in block group
 * bg_num and appends it to the allocation's main list.
 * Fix: the malloc result was previously used unchecked. */
void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}
161 
allocate_bg_inode_table(struct block_group_info * bg)162 static void allocate_bg_inode_table(struct block_group_info *bg)
163 {
164 	if (bg->inode_table != NULL)
165 		return;
166 
167 	u32 block = bg->first_block + 2;
168 
169 	if (bg->has_superblock)
170 		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;
171 
172 	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
173 	if (bg->inode_table == NULL)
174 		critical_error_errno("calloc");
175 
176 	sparse_file_add_data(ext4_sparse_file, bg->inode_table,
177 			aux_info.inode_table_blocks	* info.block_size, block);
178 
179 	bg->flags &= ~EXT4_BG_INODE_UNINIT;
180 }
181 
/* Sets bit 'bit' in a little-endian-style byte bitmap.
 * Returns 1 (and leaves the bitmap untouched) if the bit was already set,
 * 0 otherwise. */
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	u32 byte = bit / 8;
	u8 mask = 1 << (bit % 8);

	if (bitmap[byte] & mask)
		return 1;

	bitmap[byte] |= mask;
	return 0;
}
190 
/* Sets the entire byte containing 'bit' (callers only pass byte-aligned
 * bit indices).  Returns the byte's previous value, i.e. non-zero if any
 * of those eight bits was already set. */
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	u8 *byte = &bitmap[bit / 8];
	int previous = *byte;

	*byte = 0xFF;
	return previous;
}
197 
/* Marks 'num' blocks starting at group-relative block 'start' as used in
   the group's block bitmap and debits them from its free-block count.
   Returns 0 on success, -1 if any block was already reserved (an error is
   logged with the offending block and group numbers). */
static int reserve_blocks(struct block_group_info *bg, u32 bg_num, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;
	/* Set bits one at a time until 'block' is aligned to a bitmap byte. */
	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	/* Set whole bytes at a time.  Note the bound uses (num & ~7) rather
	   than accounting for the bits consumed above; this is conservative
	   (may stop a byte early when 'start' was unaligned), and the tail
	   loop below picks up whatever remains bit-by-bit. */
	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	/* Set any remaining bits individually. */
	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %d in block group %d", block, bg_num);
			return -1;
		}
	}

	bg->free_blocks -= num;

	return 0;
}
230 
/* Clears 'num_blocks' bits in the group's block bitmap walking DOWNWARD
   from 'block' (callers pass the group-relative index of the LAST block
   being freed), credits them back to the free count, and shrinks the
   covering chunk entry.
   NOTE(review): after the first loop 'block' has been decremented
   num_blocks times, so the chunk comparisons below see (last - num_blocks);
   confirm the trim conditions against the caller, reduce_allocation(). */
static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
{
	unsigned int i;
	/* Clear the bitmap bits from 'block' downward. */
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
	/* Scan chunks newest-first for one large enough that starts at or
	   before the freed range, and trim it from the matching end. */
	for (i = bg->chunk_count; i > 0 ;) {
		--i;
		if (bg->chunks[i].len >= num_blocks && bg->chunks[i].block <= block) {
			if (bg->chunks[i].block == block) {
				/* Freed range is at the chunk's start: advance the start. */
				bg->chunks[i].block += num_blocks;
				bg->chunks[i].len -= num_blocks;
			} else if (bg->chunks[i].block + bg->chunks[i].len - 1 == block + num_blocks) {
				/* Freed range is at the chunk's end: shorten it. */
				bg->chunks[i].len -= num_blocks;
			}
			break;
		}
	}
}
250 
251 /* Reduces an existing allocation by len blocks by return the last blocks
252    to the free pool in their block group. Assumes that the blocks being
253    returned were the last ones allocated out of the block group */
/* Reduces an allocation by 'len' blocks, returning them to the free pool
 * of their block group.  Works backward from the last region, trimming it
 * or removing it entirely; assumes the returned blocks were the last ones
 * allocated out of their group (see free_blocks()).
 * Fix: when a whole region was removed while a predecessor existed,
 * alloc->list.last was left pointing at the freed region, so the next loop
 * iteration (or a later append) dereferenced freed memory. */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;
		struct block_group_info *bg = &aux_info.bgs[last_reg->bg];

		if (last_reg->len > len) {
			/* Trim 'len' blocks off the tail of the last region.
			   free_blocks() takes the group-relative index of the region's
			   last block and walks downward. */
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
			last_reg->len -= len;
			len = 0;
		} else {
			/* Return the entire last region and unlink it. */
			struct region *reg = alloc->list.last->prev;
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}
280 
init_bg(struct block_group_info * bg,unsigned int i)281 static void init_bg(struct block_group_info *bg, unsigned int i)
282 {
283 	int header_blocks = 2 + aux_info.inode_table_blocks;
284 
285 	bg->has_superblock = ext4_bg_has_super_block(i);
286 
287 	if (bg->has_superblock)
288 		header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
289 
290 	bg->bitmaps = calloc(info.block_size, 2);
291 	bg->block_bitmap = bg->bitmaps;
292 	bg->inode_bitmap = bg->bitmaps + info.block_size;
293 
294 	bg->header_blocks = header_blocks;
295 	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;
296 
297 	u32 block = bg->first_block;
298 	if (bg->has_superblock)
299 		block += 1 + aux_info.bg_desc_blocks +  info.bg_desc_reserve_blocks;
300 	sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
301 			block);
302 
303 	bg->data_blocks_used = 0;
304 	bg->free_blocks = info.blocks_per_group;
305 	bg->free_inodes = info.inodes_per_group;
306 	bg->first_free_inode = 1;
307 	bg->flags = EXT4_BG_INODE_UNINIT;
308 
309 	bg->chunk_count = 0;
310 	bg->max_chunk_count = 1;
311 	bg->chunks = (struct region*) calloc(bg->max_chunk_count, sizeof(struct region));
312 
313 	if (reserve_blocks(bg, i, 0, bg->header_blocks) < 0)
314 		error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
315 	// Add empty starting delimiter chunk
316 	reserve_bg_chunk(i, bg->header_blocks, 0);
317 
318 	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
319 		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
320 		reserve_blocks(bg, i, info.blocks_per_group - overrun, overrun);
321 		// Add empty ending delimiter chunk
322 		reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
323 	} else {
324 		reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
325 	}
326 
327 }
328 
block_allocator_init()329 void block_allocator_init()
330 {
331 	unsigned int i;
332 
333 	aux_info.bgs = calloc(sizeof(struct block_group_info), aux_info.groups);
334 	if (aux_info.bgs == NULL)
335 		critical_error_errno("calloc");
336 
337 	for (i = 0; i < aux_info.groups; i++)
338 		init_bg(&aux_info.bgs[i], i);
339 }
340 
block_allocator_free()341 void block_allocator_free()
342 {
343 	unsigned int i;
344 
345 	for (i = 0; i < aux_info.groups; i++) {
346 		free(aux_info.bgs[i].bitmaps);
347 		free(aux_info.bgs[i].inode_table);
348 	}
349 	free(aux_info.bgs);
350 }
351 
352 /* Allocate a single block and return its block number */
allocate_block()353 u32 allocate_block()
354 {
355 	u32 block;
356 	struct block_allocation *blk_alloc = allocate_blocks(1);
357 	if (!blk_alloc) {
358 		return EXT4_ALLOCATE_FAILED;
359 	}
360 	block = blk_alloc->list.first->block;
361 	free_alloc(blk_alloc);
362 	return block;
363 }
364 
/* Finds and reserves one contiguous run of free blocks for a request of
   'len' blocks.  Holes between consecutive chunks of every group are
   scanned; an exactly-fitting hole wins immediately, otherwise the
   smallest hole >= len is chosen.  While no hole >= len has been seen,
   the LARGEST hole so far is tracked instead, so an oversized request
   degrades to "allocate the biggest run available" (the caller loops).
   Returns a freshly malloc'd region, or NULL on failure. */
static struct region *ext4_allocate_best_fit_partial(u32 len)
{
	unsigned int i;
	int j;
	unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
	u32 found_allocate_len = 0;
	bool minimize = false;	/* true once a hole >= len has been found */
	struct block_group_info *bgs = aux_info.bgs;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		/* Holes lie between chunk j-1 and chunk j; the delimiter chunks
		   added in init_bg() bound the usable range. */
		for (j = 1; j < bgs[i].chunk_count; j++) {
			u32 hole_start, hole_size;
			hole_start = bgs[i].chunks[j-1].block + bgs[i].chunks[j-1].len;
			hole_size =  bgs[i].chunks[j].block - hole_start;
			if (hole_size == len) {
				// Perfect fit i.e. right between 2 chunks no need to keep searching
				found_bg = i;
				found_prev_chunk = j - 1;
				found_block = hole_start;
				found_allocate_len = hole_size;
				goto done;
			} else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
				/* Best-fit: smallest hole that still satisfies the request. */
				found_bg = i;
				found_prev_chunk = j - 1;
				found_block = hole_start;
				found_allocate_len = hole_size;
				minimize = true;
			} else if (!minimize) {
				/* No sufficient hole yet: remember the largest one seen. */
				if (found_allocate_len < hole_size) {
					found_bg = i;
					found_prev_chunk = j - 1;
					found_block = hole_start;
					found_allocate_len = hole_size;
				}
			}
		}
	}

	if (found_allocate_len == 0) {
		error("failed to allocate %u blocks, out of space?", len);
		return NULL;
	}
	/* Never hand back more than was asked for. */
	if (found_allocate_len > len) found_allocate_len = len;
done:
	// reclaim allocated space in chunk
	bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
	if (reserve_blocks(&bgs[found_bg],
				found_bg,
				found_block,
				found_allocate_len) < 0) {
		error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
		return NULL;
	}
	bgs[found_bg].data_blocks_used += found_allocate_len;
	/* NOTE(review): malloc result is not checked here. */
	reg = malloc(sizeof(struct region));
	reg->block = found_block + bgs[found_bg].first_block;
	reg->len = found_allocate_len;
	reg->next = NULL;
	reg->prev = NULL;
	reg->bg = found_bg;
	return reg;
}
428 
/* Allocates 'len' blocks as a doubly-linked chain of one or more regions,
 * repeatedly taking the best-fitting contiguous run until the request is
 * satisfied.  Returns the head of the chain, or NULL on failure. */
static struct region *ext4_allocate_best_fit(u32 len)
{
	struct region *head = NULL;
	struct region *tail = NULL;

	while (len > 0) {
		struct region *reg = ext4_allocate_best_fit_partial(len);

		if (reg == NULL)
			return NULL;

		if (tail == NULL) {
			head = reg;
		} else {
			tail->next = reg;
			reg->prev = tail;
		}
		tail = reg;
		len -= reg->len;
	}

	return head;
}
454 
455 /* Allocate len blocks.  The blocks may be spread across multiple block groups,
456    and are returned in a linked list of the blocks in each block group.  The
457    allocation algorithm is:
458 	  1.  If the remaining allocation is larger than any available contiguous region,
459 		  allocate the largest contiguous region and loop
460 	  2.  Otherwise, allocate the smallest contiguous region that it fits in
461 */
/* Allocates 'len' blocks (possibly spread over several block groups) and
 * wraps the resulting region chain in a block_allocation with its iterator
 * rewound to the first region.  Returns NULL if the space cannot be found.
 * See the algorithm description in the comment above. */
struct block_allocation *allocate_blocks(u32 len)
{
	struct block_allocation *alloc;
	struct region *tail;
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL)
		return NULL;

	alloc = create_allocation();
	alloc->list.first = reg;
	for (tail = reg; tail->next != NULL; tail = tail->next)
		;
	alloc->list.last = tail;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}
478 
479 /* Returns the number of discontiguous regions used by an allocation */
block_allocation_num_regions(struct block_allocation * alloc)480 int block_allocation_num_regions(struct block_allocation *alloc)
481 {
482 	unsigned int i;
483 	struct region *reg = alloc->list.first;
484 
485 	for (i = 0; reg != NULL; reg = reg->next)
486 		i++;
487 
488 	return i;
489 }
490 
block_allocation_len(struct block_allocation * alloc)491 int block_allocation_len(struct block_allocation *alloc)
492 {
493 	unsigned int i;
494 	struct region *reg = alloc->list.first;
495 
496 	for (i = 0; reg != NULL; reg = reg->next)
497 		i += reg->len;
498 
499 	return i;
500 }
501 
502 /* Returns the block number of the block'th block in an allocation */
/* Returns the absolute block number of the block'th block of the main
 * list, counted from the current iterator position (including any partial
 * advance).  Returns EXT4_ALLOCATE_FAILED if the index is out of range. */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg;
	u32 remaining = block + alloc->list.partial_iter;

	for (reg = alloc->list.iter; reg != NULL; reg = reg->next) {
		if (remaining < reg->len)
			return reg->block + remaining;
		remaining -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}
515 
/* Same as get_block(), but walks the out-of-band list (indirect/extent
 * blocks) instead of the main data list. */
u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg;
	u32 remaining = block + alloc->oob_list.partial_iter;

	for (reg = alloc->oob_list.iter; reg != NULL; reg = reg->next) {
		if (remaining < reg->len)
			return reg->block + remaining;
		remaining -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}
528 
529 /* Gets the starting block and length in blocks of the first region
530    of an allocation */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	/* NOTE(review): *block is NOT offset by partial_iter even though *len
	   is reduced by it; callers appear to use this only when partial_iter
	   is 0 — confirm before relying on it mid-region. */
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}
536 
537 /* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	/* Advance the iterator and discard any partial offset into the
	   region just left behind. */
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}
543 
544 /* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	/* Count maintained by reserve_blocks() and free_blocks(). */
	return aux_info.bgs[bg].free_blocks;
}
549 
/* Returns non-zero once the allocation's iterator has walked past the
   final region (see get_next_region()). */
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}
554 
/* Resets the allocation's main-list iterator back to the first region. */
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}
560 
/* Splits the allocation's main list 'len' blocks past the current iterator.
 * If the split point falls inside a region, that region is divided in two
 * and the iterator moves to the newly created second half; the function
 * returns the region at the start of the span that was walked over (the
 * old iterator), or NULL if fewer than 'len' blocks remain.
 * Fixes: the successor of the inserted region kept a stale ->prev pointer
 * (corrupting later region_list_remove() calls); list.last was not updated
 * when the tail region was split; malloc was unchecked. */
static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	/* Walk whole regions that fit entirely inside the first 'len' blocks. */
	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;
		if (new->next)
			new->next->prev = new;
		if (alloc->list.last == reg)
			alloc->list.last = new;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		return reg;
	}
}
594 
595 /* Splits an allocation into two allocations.  The returned allocation will
596    point to the first half, and the original allocation ptr will point to the
597    second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	/* The iterator now sits at the start of the second half; any partial
	   offset belonged to the region that was just split away. */
	alloc->list.partial_iter = 0;
	return middle;
}
608 
609 /* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	/* Split 'blocks' blocks off at the current iterator position; 'oob'
	   is the first region of the span being reserved. */
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	/* Move every region between the split point and the new iterator from
	   the main (data) list to the out-of-band list. */
	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}
627 
/* Advances a region list's iteration state by 'blocks' blocks, consuming
   the remainder of the current region before counting into later ones.
   Returns 0 on success, -1 if the list holds fewer than 'blocks' blocks.
   NOTE(review): list->iter itself is never written back — only the local
   'reg' walks forward and partial_iter is reset when a region boundary is
   crossed.  Whether crossing a boundary should also advance list->iter
   (cf. get_next_region) needs confirming against the callers. */
static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			/* Lands inside the current region: just bump the offset. */
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
	}

	if (blocks > 0)
		return -1;

	return 0;
}
648 
649 /* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	/* Delegates to advance_list_ptr() on the main (data) list. */
	return advance_list_ptr(&alloc->list, blocks);
}
654 
/* Same as advance_blocks(), but for the out-of-band list. */
int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}
659 
/* Allocates 'len' additional blocks and appends every resulting region to
 * the allocation's out-of-band list.  Returns 0 on success, -1 on failure.
 * Fix: 'len' is u32; log it with %u instead of %d. */
int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	for (; reg; reg = reg->next)
		region_list_append(&alloc->oob_list, reg);

	return 0;
}
674 
675 /* Returns an ext4_inode structure for an inode number */
/* Returns a pointer into the owning group's in-memory inode table for the
 * given 1-based inode number, lazily materializing the table first. */
struct ext4_inode *get_inode(u32 inode)
{
	u32 index = inode - 1;	/* inode numbers are 1-based */
	int bg = index / info.inodes_per_group;

	index %= info.inodes_per_group;
	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table +
		index * info.inode_size);
}
686 
get_xattr_block_for_inode(struct ext4_inode * inode)687 struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
688 {
689 	struct ext4_xattr_header *block = xattr_list_find(inode);
690 	if (block != NULL)
691 		return block;
692 
693 	u32 block_num = allocate_block();
694 	block = calloc(info.block_size, 1);
695 	if (block == NULL) {
696 		error("get_xattr: failed to allocate %d", info.block_size);
697 		return NULL;
698 	}
699 
700 	block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
701 	block->h_refcount = cpu_to_le32(1);
702 	block->h_blocks = cpu_to_le32(1);
703 	inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
704 	inode->i_file_acl_lo = cpu_to_le32(block_num);
705 
706 	int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
707 	if (result != 0) {
708 		error("get_xattr: sparse_file_add_data failure %d", result);
709 		free(block);
710 		return NULL;
711 	}
712 	xattr_list_insert(inode, block);
713 	return block;
714 }
715 
716 /* Mark the first len inodes in a block group as used */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		/* first_free_inode is 1-based; the bitmap is 0-based. */
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	/* Return the first reserved inode number (1-based, group-relative). */
	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}
737 
738 /* Returns the first free inode number
739    TODO: Inodes should be allocated in the block group of the data? */
allocate_inode()740 u32 allocate_inode()
741 {
742 	unsigned int bg;
743 	u32 inode;
744 
745 	for (bg = 0; bg < aux_info.groups; bg++) {
746 		inode = reserve_inodes(bg, 1);
747 		if (inode != EXT4_ALLOCATE_FAILED)
748 			return bg * info.inodes_per_group + inode;
749 	}
750 
751 	return EXT4_ALLOCATE_FAILED;
752 }
753 
754 /* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	/* Count maintained by init_bg() and reserve_inodes(). */
	return aux_info.bgs[bg].free_inodes;
}
759 
760 /* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	/* Inode numbers are 1-based; map to the owning block group. */
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}
766 
767 /* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	/* Count maintained by add_directory(). */
	return aux_info.bgs[bg].used_dirs;
}
772 
773 /* Returns the flags for a block group */
u16 get_bg_flags(int bg)
{
	/* e.g. EXT4_BG_INODE_UNINIT, set in init_bg() and cleared when the
	   group's inode table is materialized. */
	return aux_info.bgs[bg].flags;
}
778 
779 /* Frees the memory used by a linked list of allocation regions */
free_alloc(struct block_allocation * alloc)780 void free_alloc(struct block_allocation *alloc)
781 {
782 	struct region *reg;
783 
784 	reg = alloc->list.first;
785 	while (reg) {
786 		struct region *next = reg->next;
787 		free(reg);
788 		reg = next;
789 	}
790 
791 	reg = alloc->oob_list.first;
792 	while (reg) {
793 		struct region *next = reg->next;
794 		free(reg);
795 		reg = next;
796 	}
797 
798 	free(alloc);
799 }
800 
/* Appends a chunk record covering [start_block, start_block+size) to block
 * group 'bg', doubling the chunk array when it is full.  Aborts if the
 * array cannot be grown (realloc-overwrite is safe here since failure is
 * fatal). */
void reserve_bg_chunk(int bg, u32 start_block, u32 size) {
	struct block_group_info *bgs = aux_info.bgs;
	struct region *chunk;

	if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
		bgs[bg].max_chunk_count *= 2;
		bgs[bg].chunks = realloc(bgs[bg].chunks,
				bgs[bg].max_chunk_count * sizeof(struct region));
		if (!bgs[bg].chunks)
			critical_error("realloc failed");
	}

	chunk = &bgs[bg].chunks[bgs[bg].chunk_count];
	chunk->block = start_block;
	chunk->len = size;
	chunk->bg = bg;
	bgs[bg].chunk_count++;
}
816 
reserve_blocks_for_allocation(struct block_allocation * alloc)817 int reserve_blocks_for_allocation(struct block_allocation *alloc) {
818 	struct region *reg;
819 	struct block_group_info *bgs = aux_info.bgs;
820 
821 	if (!alloc) return 0;
822 	reg = alloc->list.first;
823 	while (reg != NULL) {
824 		if (reserve_blocks(&bgs[reg->bg], reg->bg, reg->block - bgs[reg->bg].first_block, reg->len) < 0) {
825 			return -1;
826 		}
827 		reg = reg->next;
828 	}
829 	return 0;
830 }
831 
832