/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ext4_utils.h"
#include "allocate.h"

#include <sparse/sparse.h>

#include <stdio.h>
#include <stdlib.h>

struct xattr_list_element {
	struct ext4_inode *inode;
	struct ext4_xattr_header *header;
	struct xattr_list_element *next;
};

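/* Allocates a new, empty block_allocation with both the main and the
   out-of-band region lists empty. */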
struct block_allocation *create_allocation()
{
	struct block_allocation *alloc = malloc(sizeof(struct block_allocation));
	if (alloc == NULL)
		critical_error_errno("malloc");
	alloc->list.first = NULL;
	alloc->list.last = NULL;
	alloc->oob_list.first = NULL;
	alloc->oob_list.last = NULL;
	alloc->list.iter = NULL;
	alloc->list.partial_iter = 0;
	alloc->oob_list.iter = NULL;
	alloc->oob_list.partial_iter = 0;
	alloc->filename = NULL;
	alloc->next = NULL;
	return alloc;
}

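/* Looks up the xattr block header, if any, already associated with an
   inode. */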
static struct ext4_xattr_header *xattr_list_find(struct ext4_inode *inode)
{
	struct xattr_list_element *element;
	for (element = aux_info.xattrs; element != NULL; element = element->next) {
		if (element->inode == inode)
			return element->header;
	}
	return NULL;
}

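/* Associates an inode with its xattr block header by prepending the pair to
   the global xattr list. */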
static void xattr_list_insert(struct ext4_inode *inode, struct ext4_xattr_header *header)
{
	struct xattr_list_element *element = malloc(sizeof(struct xattr_list_element));
	if (element == NULL)
		critical_error_errno("malloc");
	element->inode = inode;
	element->header = header;
	element->next = aux_info.xattrs;
	aux_info.xattrs = element;
}

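/* Unlinks a region from a doubly-linked region list, fixing up the list's
   first and last pointers as needed. */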
static void region_list_remove(struct region_list *list, struct region *reg)
{
	if (reg->prev)
		reg->prev->next = reg->next;

	if (reg->next)
		reg->next->prev = reg->prev;

	if (list->first == reg)
		list->first = reg->next;

	if (list->last == reg)
		list->last = reg->prev;

	reg->next = NULL;
	reg->prev = NULL;
}

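/* Appends a region to a region list, initializing the iterator state when
   the list was previously empty. */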
void region_list_append(struct region_list *list, struct region *reg)
{
	if (list->first == NULL) {
		list->first = reg;
		list->last = reg;
		list->iter = reg;
		list->partial_iter = 0;
		reg->prev = NULL;
	} else {
		list->last->next = reg;
		reg->prev = list->last;
		list->last = reg;
	}
	reg->next = NULL;
}

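/* Appends the contents of list2 to list1. list2's own pointers are left
   untouched and should not be used afterwards. */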
void region_list_merge(struct region_list *list1, struct region_list *list2)
{
	if (list1->first == NULL) {
		list1->first = list2->first;
		list1->last = list2->last;
		list1->iter = list2->first;
		list1->partial_iter = 0;
		list1->first->prev = NULL;
	} else {
		list1->last->next = list2->first;
		list2->first->prev = list1->last;
		list1->last = list2->last;
	}
}

#if 0
static void dump_starting_from(struct region *reg)
{
	for (; reg; reg = reg->next) {
		printf("%p: Blocks %u-%u (%u)\n", reg,
			   reg->block, reg->block + reg->len - 1, reg->len);
	}
}

static void dump_region_lists(struct block_allocation *alloc)
{
	printf("Main list:\n");
	dump_starting_from(alloc->list.first);

	printf("OOB list:\n");
	dump_starting_from(alloc->oob_list.first);
}
#endif

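/* Prints the block ranges of an allocation's main list to f, one range per
   separator. */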
void print_blocks(FILE *f, struct block_allocation *alloc, char separator)
{
	struct region *reg;
	fputc(' ', f);
	for (reg = alloc->list.first; reg; reg = reg->next) {
		if (reg->len == 1) {
			fprintf(f, "%u", reg->block);
		} else {
			fprintf(f, "%u-%u", reg->block, reg->block + reg->len - 1);
		}
		fputc(separator, f);
	}
	fputc('\n', f);
}

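/* Wraps an extent in a freshly allocated region and appends it to the
   allocation's main list. */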
void append_region(struct block_allocation *alloc,
		u32 block, u32 len, int bg_num)
{
	struct region *reg;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = block;
	reg->len = len;
	reg->bg = bg_num;
	reg->next = NULL;

	region_list_append(&alloc->list, reg);
}

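/* Lazily allocates a block group's inode table and adds it to the output
   sparse file. */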
static void allocate_bg_inode_table(struct block_group_info *bg)
{
	if (bg->inode_table != NULL)
		return;

	u32 block = bg->first_block + 2;

	if (bg->has_superblock)
		block += aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks + 1;

	bg->inode_table = calloc(aux_info.inode_table_blocks, info.block_size);
	if (bg->inode_table == NULL)
		critical_error_errno("calloc");

	sparse_file_add_data(ext4_sparse_file, bg->inode_table,
			aux_info.inode_table_blocks * info.block_size, block);

	bg->flags &= ~EXT4_BG_INODE_UNINIT;
}

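/* Sets one bit in a bitmap. Returns 1 if the bit was already set, 0
   otherwise. */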
static int bitmap_set_bit(u8 *bitmap, u32 bit)
{
	if (bitmap[bit / 8] & 1 << (bit % 8))
		return 1;

	bitmap[bit / 8] |= 1 << (bit % 8);
	return 0;
}

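/* Sets the whole byte containing bit. Returns nonzero if any of those eight
   bits was already set; as used here, bit is always byte-aligned. */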
static int bitmap_set_8_bits(u8 *bitmap, u32 bit)
{
	int ret = bitmap[bit / 8];
	bitmap[bit / 8] = 0xFF;
	return ret;
}

/* Marks num blocks starting at start in a block group as used, and accounts
 for them in the block group free block info. */
static int reserve_blocks(struct block_group_info *bg, u32 bg_num, u32 start, u32 num)
{
	unsigned int i = 0;

	u32 block = start;

	/* Set bits one at a time until block is byte-aligned */
	for (i = 0; i < num && block % 8 != 0; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %u in block group %u", block, bg_num);
			return -1;
		}
	}

	/* Set whole bytes at a time */
	for (; i + 8 <= (num & ~7); i += 8, block += 8) {
		if (bitmap_set_8_bits(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %u in block group %u", block, bg_num);
			return -1;
		}
	}

	/* Set any remaining bits one at a time */
	for (; i < num; i++, block++) {
		if (bitmap_set_bit(bg->block_bitmap, block)) {
			error("attempted to reserve already reserved block %u in block group %u", block, bg_num);
			return -1;
		}
	}

	bg->free_blocks -= num;

	return 0;
}

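/* Clears num_blocks bits in a block group's block bitmap, walking backwards
   from block, and returns them to the group's free block count. */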
static void free_blocks(struct block_group_info *bg, u32 block, u32 num_blocks)
{
	unsigned int i;
	for (i = 0; i < num_blocks; i++, block--)
		bg->block_bitmap[block / 8] &= ~(1 << (block % 8));
	bg->free_blocks += num_blocks;
}

/* Reduces an existing allocation by len blocks by returning the last blocks
   to the free pool in their block group. Assumes that the blocks being
   returned were the last ones allocated out of the block group */
void reduce_allocation(struct block_allocation *alloc, u32 len)
{
	while (len) {
		struct region *last_reg = alloc->list.last;
		struct block_group_info *bg = &aux_info.bgs[last_reg->bg];

		if (last_reg->len > len) {
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, len);
			last_reg->len -= len;
			len = 0;
		} else {
			struct region *reg = alloc->list.last->prev;
			free_blocks(bg, last_reg->block + last_reg->len - bg->first_block - 1, last_reg->len);
			len -= last_reg->len;
			if (reg) {
				reg->next = NULL;
				/* Keep the list's last pointer valid; the next loop
				   iteration reads it again after last_reg is freed. */
				alloc->list.last = reg;
			} else {
				alloc->list.first = NULL;
				alloc->list.last = NULL;
				alloc->list.iter = NULL;
				alloc->list.partial_iter = 0;
			}
			free(last_reg);
		}
	}
}

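/* Initializes a block group: allocates its bitmaps, reserves the header
   blocks (superblock copy, group descriptors, bitmaps, and inode table),
   and seeds the chunk list with its delimiter chunks. */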
static void init_bg(struct block_group_info *bg, unsigned int i)
{
	int header_blocks = 2 + aux_info.inode_table_blocks;

	bg->has_superblock = ext4_bg_has_super_block(i);

	if (bg->has_superblock)
		header_blocks += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;

	bg->bitmaps = calloc(info.block_size, 2);
	if (bg->bitmaps == NULL)
		critical_error_errno("calloc");
	bg->block_bitmap = bg->bitmaps;
	bg->inode_bitmap = bg->bitmaps + info.block_size;

	bg->header_blocks = header_blocks;
	bg->first_block = aux_info.first_data_block + i * info.blocks_per_group;

	u32 block = bg->first_block;
	if (bg->has_superblock)
		block += 1 + aux_info.bg_desc_blocks + info.bg_desc_reserve_blocks;
	sparse_file_add_data(ext4_sparse_file, bg->bitmaps, 2 * info.block_size,
			block);

	bg->data_blocks_used = 0;
	bg->free_blocks = info.blocks_per_group;
	bg->free_inodes = info.inodes_per_group;
	bg->first_free_inode = 1;
	bg->flags = EXT4_BG_INODE_UNINIT;

	bg->chunk_count = 0;
	bg->max_chunk_count = 1;
	bg->chunks = calloc(bg->max_chunk_count, sizeof(struct region));
	if (bg->chunks == NULL)
		critical_error_errno("calloc");

	if (reserve_blocks(bg, i, 0, bg->header_blocks) < 0)
		error("failed to reserve %u blocks in block group %u\n", bg->header_blocks, i);
	// Add empty starting delimiter chunk
	reserve_bg_chunk(i, bg->header_blocks, 0);

	if (bg->first_block + info.blocks_per_group > aux_info.len_blocks) {
		u32 overrun = bg->first_block + info.blocks_per_group - aux_info.len_blocks;
		reserve_blocks(bg, i, info.blocks_per_group - overrun, overrun);
		// Add empty ending delimiter chunk
		reserve_bg_chunk(i, info.blocks_per_group - overrun, 0);
	} else {
		reserve_bg_chunk(i, info.blocks_per_group - 1, 0);
	}
}

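/* Allocates and initializes the bookkeeping for every block group in the
   filesystem. */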
void block_allocator_init()
{
	unsigned int i;

	aux_info.bgs = calloc(aux_info.groups, sizeof(struct block_group_info));
	if (aux_info.bgs == NULL)
		critical_error_errno("calloc");

	for (i = 0; i < aux_info.groups; i++)
		init_bg(&aux_info.bgs[i], i);
}

void block_allocator_free()
{
	unsigned int i;

	for (i = 0; i < aux_info.groups; i++) {
		free(aux_info.bgs[i].bitmaps);
		free(aux_info.bgs[i].inode_table);
	}
	free(aux_info.bgs);
}

/* Allocate a single block and return its block number */
u32 allocate_block()
{
	u32 block;
	struct block_allocation *blk_alloc = allocate_blocks(1);
	if (!blk_alloc) {
		return EXT4_ALLOCATE_FAILED;
	}
	block = blk_alloc->list.first->block;
	free_alloc(blk_alloc);
	return block;
}

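/* Finds one contiguous run of free blocks for up to len blocks. A hole of
   exactly len blocks is taken immediately; otherwise the smallest hole that
   fits the whole request is preferred, falling back to the largest hole seen
   when nothing is big enough. */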
static struct region *ext4_allocate_best_fit_partial(u32 len)
{
	unsigned int i, j;
	unsigned int found_bg = 0, found_prev_chunk = 0, found_block = 0;
	u32 found_allocate_len = 0;
	bool minimize = false;
	struct block_group_info *bgs = aux_info.bgs;
	struct region *reg;

	for (i = 0; i < aux_info.groups; i++) {
		for (j = 1; j < bgs[i].chunk_count; j++) {
			u32 hole_start, hole_size;
			hole_start = bgs[i].chunks[j-1].block + bgs[i].chunks[j-1].len;
			hole_size = bgs[i].chunks[j].block - hole_start;
			if (hole_size == len) {
				// Perfect fit, i.e. right between 2 chunks; no need to keep searching
				found_bg = i;
				found_prev_chunk = j - 1;
				found_block = hole_start;
				found_allocate_len = hole_size;
				goto done;
			} else if (hole_size > len && (found_allocate_len == 0 || (found_allocate_len > hole_size))) {
				// Remember the smallest hole that still fits the whole request
				found_bg = i;
				found_prev_chunk = j - 1;
				found_block = hole_start;
				found_allocate_len = hole_size;
				minimize = true;
			} else if (!minimize) {
				// Nothing fits the whole request yet; track the largest hole seen
				if (found_allocate_len < hole_size) {
					found_bg = i;
					found_prev_chunk = j - 1;
					found_block = hole_start;
					found_allocate_len = hole_size;
				}
			}
		}
	}

	if (found_allocate_len == 0) {
		error("failed to allocate %u blocks, out of space?", len);
		return NULL;
	}
	if (found_allocate_len > len) found_allocate_len = len;
done:
	// Extend the previous chunk to cover the newly allocated space
	bgs[found_bg].chunks[found_prev_chunk].len += found_allocate_len;
	if (reserve_blocks(&bgs[found_bg],
				found_bg,
				found_block,
				found_allocate_len) < 0) {
		error("failed to reserve %u blocks in block group %u\n", found_allocate_len, found_bg);
		return NULL;
	}
	bgs[found_bg].data_blocks_used += found_allocate_len;
	reg = malloc(sizeof(struct region));
	if (reg == NULL)
		critical_error_errno("malloc");
	reg->block = found_block + bgs[found_bg].first_block;
	reg->len = found_allocate_len;
	reg->next = NULL;
	reg->prev = NULL;
	reg->bg = found_bg;
	return reg;
}

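/* Allocates len blocks as a chain of best-fit regions, which may span
   multiple holes and block groups. */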
static struct region *ext4_allocate_best_fit(u32 len)
{
	struct region *first_reg = NULL;
	struct region *prev_reg = NULL;
	struct region *reg;

	while (len > 0) {
		reg = ext4_allocate_best_fit_partial(len);
		if (reg == NULL)
			return NULL;

		if (first_reg == NULL)
			first_reg = reg;

		if (prev_reg) {
			prev_reg->next = reg;
			reg->prev = prev_reg;
		}

		prev_reg = reg;
		len -= reg->len;
	}

	return first_reg;
}

/* Allocate len blocks.  The blocks may be spread across multiple block groups,
   and are returned in a linked list of the blocks in each block group.  The
   allocation algorithm is:
	  1.  If the remaining allocation is larger than any available contiguous region,
		  allocate the largest contiguous region and loop
	  2.  Otherwise, allocate the smallest contiguous region that it fits in
*/
struct block_allocation *allocate_blocks(u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL)
		return NULL;

	struct block_allocation *alloc = create_allocation();
	alloc->list.first = reg;
	while (reg->next != NULL)
		reg = reg->next;
	alloc->list.last = reg;
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
	return alloc;
}

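/* Usage sketch (compiled out, like the dump helpers above): walk the regions
   of a fresh allocation with the iterator API. Illustration only; this
   helper is hypothetical and not called anywhere in this file. */
#if 0
static void example_walk_allocation(u32 len)
{
	struct block_allocation *alloc = allocate_blocks(len);
	u32 block, region_len;

	if (alloc == NULL)
		return;

	rewind_alloc(alloc);
	while (!last_region(alloc)) {
		get_region(alloc, &block, &region_len);
		printf("region: blocks %u-%u\n", block, block + region_len - 1);
		get_next_region(alloc);
	}
	free_alloc(alloc);
}
#endif
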
/* Returns the number of discontiguous regions used by an allocation */
int block_allocation_num_regions(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i++;

	return i;
}

/* Returns the total length in blocks of an allocation */
int block_allocation_len(struct block_allocation *alloc)
{
	unsigned int i;
	struct region *reg = alloc->list.first;

	for (i = 0; reg != NULL; reg = reg->next)
		i += reg->len;

	return i;
}

/* Returns the block number of the block'th block in an allocation, counting
   from the current iterator position */
u32 get_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->list.iter;
	block += alloc->list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Returns the block number of the block'th out-of-band block in an
   allocation, counting from the oob iterator position */
u32 get_oob_block(struct block_allocation *alloc, u32 block)
{
	struct region *reg = alloc->oob_list.iter;
	block += alloc->oob_list.partial_iter;

	for (; reg; reg = reg->next) {
		if (block < reg->len)
			return reg->block + block;
		block -= reg->len;
	}
	return EXT4_ALLOCATE_FAILED;
}

/* Gets the starting block and length in blocks of the current region
   of an allocation */
void get_region(struct block_allocation *alloc, u32 *block, u32 *len)
{
	*block = alloc->list.iter->block;
	*len = alloc->list.iter->len - alloc->list.partial_iter;
}

/* Move to the next region in an allocation */
void get_next_region(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.iter->next;
	alloc->list.partial_iter = 0;
}

/* Returns the number of free blocks in a block group */
u32 get_free_blocks(u32 bg)
{
	return aux_info.bgs[bg].free_blocks;
}

/* Returns true when the allocation's iterator has run past the last region */
int last_region(struct block_allocation *alloc)
{
	return (alloc->list.iter == NULL);
}

/* Resets the allocation's iterator to the first region */
void rewind_alloc(struct block_allocation *alloc)
{
	alloc->list.iter = alloc->list.first;
	alloc->list.partial_iter = 0;
}

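/* Ensures a region boundary exists exactly len blocks past the current
   iterator, splitting a region in two if necessary, and returns the start
   of the blocks being split off. */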
static struct region *do_split_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = alloc->list.iter;
	struct region *new;
	struct region *tmp;

	while (reg && len >= reg->len) {
		len -= reg->len;
		reg = reg->next;
	}

	if (reg == NULL && len > 0)
		return NULL;

	if (len > 0) {
		new = malloc(sizeof(struct region));
		if (new == NULL)
			critical_error_errno("malloc");

		new->bg = reg->bg;
		new->block = reg->block + len;
		new->len = reg->len - len;
		new->next = reg->next;
		new->prev = reg;

		reg->next = new;
		reg->len = len;

		tmp = alloc->list.iter;
		alloc->list.iter = new;
		return tmp;
	} else {
		/* The split falls exactly on a region boundary: advance the
		   iterator to the second half and return the start of the
		   first half, matching the mid-region case above. */
		tmp = alloc->list.iter;
		alloc->list.iter = reg;
		return tmp;
	}
}

/* Splits an allocation into two at len blocks past the current iterator.
   The regions of the first half are returned, and the original allocation's
   iterator is left pointing at the start of the second half. */
static struct region *split_allocation(struct block_allocation *alloc, u32 len)
{
	/* First make sure there is a split at the current ptr */
	do_split_allocation(alloc, alloc->list.partial_iter);

	/* Then split off len blocks */
	struct region *middle = do_split_allocation(alloc, len);
	alloc->list.partial_iter = 0;
	return middle;
}

/* Reserve the next blocks for oob data (indirect or extent blocks) */
int reserve_oob_blocks(struct block_allocation *alloc, int blocks)
{
	struct region *oob = split_allocation(alloc, blocks);
	struct region *next;

	if (oob == NULL)
		return -1;

	while (oob && oob != alloc->list.iter) {
		next = oob->next;
		region_list_remove(&alloc->list, oob);
		region_list_append(&alloc->oob_list, oob);
		oob = next;
	}

	return 0;
}

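/* Advances a region list's iterator by blocks, tracking progress within a
   region in partial_iter. Returns -1 if the list has fewer blocks left. */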
static int advance_list_ptr(struct region_list *list, int blocks)
{
	struct region *reg = list->iter;

	while (reg != NULL && blocks > 0) {
		if (reg->len > list->partial_iter + blocks) {
			list->partial_iter += blocks;
			return 0;
		}

		blocks -= (reg->len - list->partial_iter);
		list->partial_iter = 0;
		reg = reg->next;
		/* Keep the iterator in sync when the advance crosses a region
		   boundary, so partial_iter stays an offset into list->iter. */
		list->iter = reg;
	}

	if (blocks > 0)
		return -1;

	return 0;
}

/* Move the allocation pointer forward */
int advance_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->list, blocks);
}

int advance_oob_blocks(struct block_allocation *alloc, int blocks)
{
	return advance_list_ptr(&alloc->oob_list, blocks);
}

int append_oob_allocation(struct block_allocation *alloc, u32 len)
{
	struct region *reg = ext4_allocate_best_fit(len);

	if (reg == NULL) {
		error("failed to allocate %u blocks", len);
		return -1;
	}

	for (; reg; reg = reg->next)
		region_list_append(&alloc->oob_list, reg);

	return 0;
}

/* Returns an ext4_inode structure for an inode number */
struct ext4_inode *get_inode(u32 inode)
{
	inode -= 1;
	int bg = inode / info.inodes_per_group;
	inode %= info.inodes_per_group;

	allocate_bg_inode_table(&aux_info.bgs[bg]);
	return (struct ext4_inode *)(aux_info.bgs[bg].inode_table + inode *
		info.inode_size);
}

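/* Returns the xattr block associated with an inode, allocating and
   initializing a fresh block (and attaching it to the inode) on first use. */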
struct ext4_xattr_header *get_xattr_block_for_inode(struct ext4_inode *inode)
{
	struct ext4_xattr_header *block = xattr_list_find(inode);
	if (block != NULL)
		return block;

	u32 block_num = allocate_block();
	if (block_num == EXT4_ALLOCATE_FAILED) {
		error("get_xattr: failed to allocate a block");
		return NULL;
	}

	block = calloc(info.block_size, 1);
	if (block == NULL) {
		error("get_xattr: failed to allocate %d", info.block_size);
		return NULL;
	}

	block->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
	block->h_refcount = cpu_to_le32(1);
	block->h_blocks = cpu_to_le32(1);
	inode->i_blocks_lo = cpu_to_le32(le32_to_cpu(inode->i_blocks_lo) + (info.block_size / 512));
	inode->i_file_acl_lo = cpu_to_le32(block_num);

	int result = sparse_file_add_data(ext4_sparse_file, block, info.block_size, block_num);
	if (result != 0) {
		error("get_xattr: sparse_file_add_data failure %d", result);
		free(block);
		return NULL;
	}
	xattr_list_insert(inode, block);
	return block;
}

/* Marks the next num free inodes in a block group as used and returns the
   group-relative (1-based) inode number of the first one */
u32 reserve_inodes(int bg, u32 num)
{
	unsigned int i;
	u32 inode;

	if (get_free_inodes(bg) < num)
		return EXT4_ALLOCATE_FAILED;

	for (i = 0; i < num; i++) {
		inode = aux_info.bgs[bg].first_free_inode + i - 1;
		aux_info.bgs[bg].inode_bitmap[inode / 8] |= 1 << (inode % 8);
	}

	inode = aux_info.bgs[bg].first_free_inode;

	aux_info.bgs[bg].first_free_inode += num;
	aux_info.bgs[bg].free_inodes -= num;

	return inode;
}

/* Returns the first free inode number
   TODO: Inodes should be allocated in the block group of the data? */
u32 allocate_inode()
{
	unsigned int bg;
	u32 inode;

	for (bg = 0; bg < aux_info.groups; bg++) {
		inode = reserve_inodes(bg, 1);
		if (inode != EXT4_ALLOCATE_FAILED)
			return bg * info.inodes_per_group + inode;
	}

	return EXT4_ALLOCATE_FAILED;
}

/* Returns the number of free inodes in a block group */
u32 get_free_inodes(u32 bg)
{
	return aux_info.bgs[bg].free_inodes;
}

/* Increments the directory count of the block group that contains inode */
void add_directory(u32 inode)
{
	int bg = (inode - 1) / info.inodes_per_group;
	aux_info.bgs[bg].used_dirs += 1;
}

/* Returns the number of inodes in a block group that are directories */
u16 get_directories(int bg)
{
	return aux_info.bgs[bg].used_dirs;
}

/* Returns the flags for a block group */
u16 get_bg_flags(int bg)
{
	return aux_info.bgs[bg].flags;
}

/* Frees the memory used by both region lists of an allocation, and the
   allocation itself */
void free_alloc(struct block_allocation *alloc)
{
	struct region *reg;

	reg = alloc->list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	reg = alloc->oob_list.first;
	while (reg) {
		struct region *next = reg->next;
		free(reg);
		reg = next;
	}

	free(alloc);
}

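/* Appends a chunk describing a reserved extent to a block group's chunk
   list, doubling the backing array whenever it fills up. */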
void reserve_bg_chunk(int bg, u32 start_block, u32 size)
{
	struct block_group_info *bgs = aux_info.bgs;
	int chunk_count;
	if (bgs[bg].chunk_count == bgs[bg].max_chunk_count) {
		bgs[bg].max_chunk_count *= 2;
		bgs[bg].chunks = realloc(bgs[bg].chunks, bgs[bg].max_chunk_count * sizeof(struct region));
		if (!bgs[bg].chunks)
			critical_error("realloc failed");
	}
	chunk_count = bgs[bg].chunk_count;
	bgs[bg].chunks[chunk_count].block = start_block;
	bgs[bg].chunks[chunk_count].len = size;
	bgs[bg].chunks[chunk_count].bg = bg;
	bgs[bg].chunk_count++;
}

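/* Marks every region of an allocation as reserved in the bitmap of its
   block group. */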
int reserve_blocks_for_allocation(struct block_allocation *alloc)
{
	struct region *reg;
	struct block_group_info *bgs = aux_info.bgs;

	if (!alloc)
		return 0;
	reg = alloc->list.first;
	while (reg != NULL) {
		if (reserve_blocks(&bgs[reg->bg], reg->bg, reg->block - bgs[reg->bg].first_block, reg->len) < 0) {
			return -1;
		}
		reg = reg->next;
	}
	return 0;
}