/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <assert.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "block_allocator.h"
#include "debug.h"
#include "transaction.h"

/*
 * BLOCK_ALLOCATOR_QUEUE_LEN:
 * Queue length large enough for a worst case tree update, e.g. update of a tree
 * where each entry needs to be copied then split.
 */
#define BLOCK_ALLOCATOR_QUEUE_LEN \
    (BLOCK_SET_MAX_DEPTH * 2 * BLOCK_SET_MAX_DEPTH * 2)

/**
 * struct block_allocator_queue_entry - pending block allocation set update
 * @block: block number to free or allocate.
 * @tmp: is_tmp value passed to block_allocate_etc or block_free_etc.
 * @free: @block is free.
 * @removed: queue entry was removed.
 */
struct block_allocator_queue_entry {
    data_block_t block;
    bool tmp;
    bool free;
    bool removed;
};

/**
 * struct block_allocator_queue - queue of pending block allocation set updates
 * @entry: Ring buffer.
 * @head: Index in @entry of first (oldest) entry in queue.
 * @tail: Index in @entry where next entry should be inserted.
 * @updating: %true while updating sets, %false otherwise.
 */
struct block_allocator_queue {
    struct block_allocator_queue_entry entry[BLOCK_ALLOCATOR_QUEUE_LEN];
    unsigned int head;
    unsigned int tail;
    bool updating;
};

/**
 * block_allocator_queue_empty - Check if block allocator queue is empty
 * @q: Queue object.
 *
 * Return: %true if @q is empty, %false otherwise.
 */
static bool block_allocator_queue_empty(struct block_allocator_queue* q) {
    assert(q->head < countof(q->entry));
    assert(q->tail < countof(q->entry));

    return q->head == q->tail;
}

/**
 * block_allocator_queue_count - Get number of entries in block allocator queue
 * @q: Queue object.
 *
 * Return: number of entries in @q.
 */
static unsigned int block_allocator_queue_count(
        struct block_allocator_queue* q) {
    return (q->tail + countof(q->entry) - q->head) % countof(q->entry);
}

/**
 * block_allocator_queue_find - Search block allocator queue
 * @q: Queue object.
 * @block: Block number to search for.
 *
 * Return: Index in @q->entry of the entry matching @block if @block is in @q,
 * -1 if @block is not in @q.
 */
static int block_allocator_queue_find(struct block_allocator_queue* q,
                                      data_block_t block) {
    unsigned int i;

    assert(q->head < countof(q->entry));
    assert(q->tail < countof(q->entry));

    for (i = q->head; i != q->tail; i = (i + 1) % countof(q->entry)) {
        if (q->entry[i].removed) {
            continue;
        }
        if (block == q->entry[i].block) {
            return i;
        }
    }
    return -1;
}

/**
 * block_allocator_queue_add_removed - Add a removed entry to block allocator
 * queue
 * @q: Queue object.
 *
 * Add a removed entry to @q to make it non-empty.
 */
static void block_allocator_queue_add_removed(struct block_allocator_queue* q) {
    assert(block_allocator_queue_empty(q));
    unsigned int new_tail = (q->tail + 1) % countof(q->entry);
    q->entry[q->tail].removed = true;
    q->tail = new_tail;
    pr_write("index %d\n", q->tail);
}

/**
 * block_allocator_queue_add - Add an entry to block allocator queue
 * @q: Queue object.
 * @block: block number to free or allocate.
 * @is_tmp: is_tmp value passed to block_allocate_etc or block_free_etc.
 * @is_free: @block is free.
 */
static void block_allocator_queue_add(struct block_allocator_queue* q,
                                      data_block_t block,
                                      bool is_tmp,
                                      bool is_free) {
    int ret;
    unsigned int index;
    unsigned int new_tail;

    ret = block_allocator_queue_find(q, block);
    if (ret >= 0) {
        index = ret;
        assert(index < countof(q->entry));
        assert(q->entry[index].tmp == is_tmp);
        assert(q->entry[index].free != is_free);

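        /*
         * A matching entry with the opposite free state is already queued, so
         * the two operations cancel out: mark the queued entry removed and,
         * normally, skip queuing the new one. If the matching entry is the
         * queue head while block_allocator_process_queue is running, it may
         * already be in the middle of being applied, so fall through and
         * queue the new operation as well.
         */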
        q->entry[index].removed = true;
        if (index != q->head || !q->updating) {
            return;
        }
        pr_warn("block %" PRIu64
                ", tmp %d, free %d, removed head during update\n",
                block, is_tmp, is_free);
    }

    new_tail = (q->tail + 1) % countof(q->entry);

    assert(q->head < countof(q->entry));
    assert(q->tail < countof(q->entry));
    assert(new_tail < countof(q->entry));
    assert(new_tail != q->head);

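    /*
     * Store the new entry. The assert after the block assignment is a
     * truncation guard: it only matters if the block field is ever stored in
     * a type narrower than data_block_t.
     */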
    q->entry[q->tail].block = block;
    assert(q->entry[q->tail].block == block);
    q->entry[q->tail].tmp = is_tmp;
    q->entry[q->tail].free = is_free;
    q->entry[q->tail].removed = false;
    q->tail = new_tail;
    pr_write("block %" PRIu64 ", tmp %d, free %d, index %d (rel %d)\n", block,
             is_tmp, is_free, q->tail, block_allocator_queue_count(q));
}

/**
 * block_allocator_queue_peek_head - Get head block allocator queue entry
 * @q: Queue object.
 *
 * Return: entry at head of @q.
 */
static struct block_allocator_queue_entry block_allocator_queue_peek_head(
        struct block_allocator_queue* q) {
    assert(!block_allocator_queue_empty(q));

    return q->entry[q->head];
}

/**
 * block_allocator_queue_remove_head - Remove head block allocator queue entry
 * @q: Queue object.
 * @entry: Entry that should be at head of @q (used for validation only).
 */
static void block_allocator_queue_remove_head(
        struct block_allocator_queue* q,
        struct block_allocator_queue_entry entry) {
    assert(block_allocator_queue_peek_head(q).block == entry.block);

    pr_write("index %d, count %d\n", q->head, block_allocator_queue_count(q));
    q->head = (q->head + 1) % countof(q->entry);
}

/**
 * block_allocator_queue_find_free_block - Search allocator queue for free block
 * @q: Queue object.
 * @block: Block number to search for.
 *
 * Return: First block >= @block that is not in @q as an allocated block.
 */
static data_block_t block_allocator_queue_find_free_block(
        struct block_allocator_queue* q,
        data_block_t block) {
    int index;

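    /*
     * Skip over blocks that have a pending allocation queued; a block that is
     * not queued, or that is queued as free, can be handed out.
     */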
    while (true) {
        index = block_allocator_queue_find(q, block);
        if (index < 0) {
            return block;
        }
        if (q->entry[index].free) {
            return block;
        }
        block++;
    }
}

static struct block_allocator_queue block_allocator_queue;

/**
 * find_free_block - Search for a free block
 * @tr: Transaction object.
 * @min_block_in: Block number to start search at.
 *
 * Return: Block number that is in all committed free sets and not already
 * allocated by any transaction. To be considered free, a block must be in the
 * current file-system free set AND any checkpointed free sets, if available.
 */
static data_block_t find_free_block(struct transaction* tr,
                                    data_block_t min_block_in) {
    data_block_t block;
    data_block_t min_block = min_block_in;
    struct block_set* set;

    if (!fs_is_writable(tr->fs)) {
        printf("Read-only filesystem tried to allocate a free block\n");
        if (!tr->failed) {
            transaction_fail(tr);
        }
        return 0;
    }

    assert(list_in_list(&tr->node)); /* transaction must be active */

    pr_read("min_block %" PRIu64 "\n", min_block);

    block = min_block;
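    /*
     * Fixed-point search: run the candidate block through every set that can
     * veto it (committed free set, checkpoint free set, each transaction's
     * allocated sets, and the pending allocation queue). Each pass may push
     * the candidate forward; when a full pass leaves it unchanged, the block
     * is free everywhere and the loop exits.
     */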
    do {
        block = block_set_find_next_block(tr, &tr->fs->free, block, true);
        if (tr->failed) {
            return 0;
        }

        /*
         * If block_set_find_next_block() returns 0, there was no available
         * free block after min_block.
         */
        if (!block) {
            break;
        }
        assert(block >= min_block);

        /*
         * Set min_block to a candidate for an available free block. If no
         * checkpoint or pending allocation contains this block, block will
         * still equal min_block and we will exit the loop.
         */
        min_block = block;

        pr_read("check free block %" PRIu64 "\n", block);

        /* check if the block is also free in the checkpoint */
        block = block_set_find_next_block(tr, &tr->fs->checkpoint_free, block,
                                          true);
        if (tr->failed) {
            return 0;
        }
        if (!block) {
            break;
        }
        assert(block >= min_block);

        assert(!list_is_empty(&tr->fs->allocated));
        list_for_every_entry(&tr->fs->allocated, set, struct block_set, node) {
            block = block_set_find_next_block(tr, set, block, false);
            if (tr->failed) {
                return 0;
            }
            assert(block >= min_block);
        }
        block = block_allocator_queue_find_free_block(&block_allocator_queue,
                                                      block);
        assert(block >= min_block);
    } while (block != min_block);

    if (!block) {
        if (LOCAL_TRACE >= TRACE_LEVEL_READ) {
            if (min_block_in) {
                block = find_free_block(tr, 0);
            }
            printf("%s: no space, min_block %" PRIu64
                   ", free block ignoring_min_block %" PRIu64 "\n",
                   __func__, min_block_in, block);

            printf("%s: free\n", __func__);
            block_set_print(tr, &tr->fs->free);
            printf("%s: checkpoint free\n", __func__);
            block_set_print(tr, &tr->fs->checkpoint_free);
            list_for_every_entry(&tr->fs->allocated, set, struct block_set,
                                 node) {
#if TLOG_LVL >= TLOG_LVL_DEBUG
                printf("%s: allocated %p\n", __func__, set);
#endif
                block_set_print(tr, set);
            }
            if (tr->new_free_set) {
                printf("%s: new free\n", __func__);
                block_set_print(tr, tr->new_free_set);
            }
        }

        return 0;
    }

    pr_read("found free block %" PRIu64 "\n", block);

    return block;
}

/**
 * block_allocate_etc - Allocate a block
 * @tr: Transaction object.
 * @is_tmp: %true if allocated block should be automatically freed when
 *          transaction completes, %false if allocated block should be added
 *          to free set when transaction completes.
 *
 * Find a free block and queue a set update.
 *
 * Return: Allocated block number.
 */
data_block_t block_allocate_etc(struct transaction* tr, bool is_tmp) {
    data_block_t block;
    data_block_t min_block;
    bool update_sets;

    if (tr->failed) {
        pr_warn("transaction failed, abort\n");

        return 0;
    }
    assert(transaction_is_active(tr));

    /* TODO: group allocations by set */
    update_sets = block_allocator_queue_empty(&block_allocator_queue);
    if (update_sets) {
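        /*
         * Heuristic: regular allocations search from the start of the device,
         * while tmp allocations start at the 3/4 mark so the two kinds tend
         * not to collide.
         */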
        tr->last_tmp_free_block = tr->fs->dev->block_count / 4 * 3;
        tr->last_free_block = 0;
    }
    min_block = is_tmp ? tr->last_tmp_free_block : tr->last_free_block;

    block = find_free_block(tr, min_block);
    if (!block) {
        block = find_free_block(tr, 0);
        if (!block) {
            if (!tr->failed) {
                pr_err("no space\n");
                transaction_fail(tr);
            }
            return 0;
        }
    }

    block_allocator_queue_add(&block_allocator_queue, block, is_tmp, false);
    full_assert(find_free_block(tr, block) != block);
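    /*
     * If the queue was empty on entry, this is the outermost allocation call:
     * apply all queued set updates now. Allocations and frees triggered by
     * those updates are only queued and are drained by this same call.
     */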
    if (update_sets) {
        block_allocator_process_queue(tr);
    }

    full_assert(tr->failed || find_free_block(tr, block) != block);
    if (tr->failed) {
        return 0;
    }

    return block;
}

/**
 * block_allocator_add_allocated - Update block sets with new allocated block
 * @tr: Transaction object.
 * @block: Block that was allocated.
 * @is_tmp: %true if allocated block should be automatically freed when
 *          transaction completes, %false if allocated block should be added
 *          to free set when transaction completes.
 *
 * Add @block to the allocated set selected by @is_tmp. If called while
 * completing the transaction, update the new free set directly if needed.
 */
static void block_allocator_add_allocated(struct transaction* tr,
                                          data_block_t block,
                                          bool is_tmp) {
    if (is_tmp) {
        pr_write("add %" PRIu64 " to tmp_allocated\n", block);

        block_set_add_block(tr, &tr->tmp_allocated, block);
        tr->last_tmp_free_block = block + 1;
    } else {
        pr_write("add %" PRIu64 " to allocated\n", block);

        block_set_add_block(tr, &tr->allocated, block);

        if (block < tr->min_free_block) {
            pr_write("remove %" PRIu64 " from new_free_set\n", block);

            assert(tr->new_free_set);
            block_set_remove_block(tr, tr->new_free_set, block);
            tr->last_free_block = block + 1;
        }
    }
}

/**
 * block_free_etc - Free a block
 * @tr: Transaction object.
 * @block: Block that should be freed.
 * @is_tmp: Must match is_tmp value passed to block_allocate_etc (always
 *          %false if @block was not allocated by this transaction).
 */
void block_free_etc(struct transaction* tr, data_block_t block, bool is_tmp) {
    bool update_sets = block_allocator_queue_empty(&block_allocator_queue);

    assert(block_is_clean(tr->fs->dev, block));

    block_allocator_queue_add(&block_allocator_queue, block, is_tmp, true);
    if (update_sets) {
        block_allocator_process_queue(tr);
    }
}

/**
 * block_allocator_add_free - Update block sets with new free block
 * @tr: Transaction object.
 * @block: Block that should be freed.
 * @is_tmp: Must match is_tmp value passed to block_allocate_etc (always
 *          %false if @block was not allocated by this transaction).
 *
 * If @block was allocated in this transaction, remove it from the allocated
 * set (selected by @is_tmp). Otherwise, add it to the set of blocks to remove
 * when the transaction completes. If called while completing the transaction,
 * update the new free set directly if needed.
 */
static void block_allocator_add_free(struct transaction* tr,
                                     data_block_t block,
                                     bool is_tmp) {
    assert(block_is_clean(tr->fs->dev, block));
    if (is_tmp) {
        assert(!block_set_block_in_set(tr, &tr->allocated, block));
        assert(!block_set_block_in_set(tr, &tr->freed, block));

        pr_write("remove %" PRIu64 " from tmp_allocated\n", block);

        block_set_remove_block(tr, &tr->tmp_allocated, block);

        return;
    }

    assert(!block_set_block_in_set(tr, &tr->tmp_allocated, block));
    if (block_set_block_in_set(tr, &tr->allocated, block)) {
        pr_write("remove %" PRIu64 " from allocated\n", block);

        block_set_remove_block(tr, &tr->allocated, block);

        if (block < tr->min_free_block) {
            pr_write("add %" PRIu64 " to new_free_root\n", block);

            assert(tr->new_free_set);
            block_set_add_block(tr, tr->new_free_set, block);
        }
    } else {
        if (block < tr->min_free_block) {
            pr_write("add %" PRIu64 " to new_free_root\n", block);

            assert(tr->new_free_set);
            block_set_add_block(tr, tr->new_free_set, block);
        } else {
            pr_write("add %" PRIu64 " to freed\n", block);

            block_set_add_block(tr, &tr->freed, block);
        }
    }
}

/**
 * block_allocator_suspend_set_updates - Prevent set updates
 * @tr: Transaction object.
 */
void block_allocator_suspend_set_updates(struct transaction* tr) {
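    /*
     * While the queue is non-empty, block_allocate_etc() and block_free_etc()
     * only queue their set updates instead of applying them. Adding a dummy
     * removed entry keeps the queue non-empty until the caller invokes
     * block_allocator_process_queue().
     */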
    block_allocator_queue_add_removed(&block_allocator_queue);
}

/**
 * block_allocator_allocation_queued - Check for queued block allocation
 * @tr: Transaction object.
 * @block: Block to search for.
 * @is_tmp: is_tmp value passed to block_allocate_etc.
 *
 * Return: %true if a block matching @block, @is_tmp and !free is in the
 * block allocator queue, %false otherwise.
 */
bool block_allocator_allocation_queued(struct transaction* tr,
                                       data_block_t block,
                                       bool is_tmp) {
    int index;
    index = block_allocator_queue_find(&block_allocator_queue, block);
    return index >= 0 && block_allocator_queue.entry[index].tmp == is_tmp &&
           !block_allocator_queue.entry[index].free;
}

/**
 * block_allocator_process_queue - Process all block allocator queue entries
 * @tr: Transaction object.
 */
void block_allocator_process_queue(struct transaction* tr) {
    struct block_allocator_queue_entry entry;
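    /*
     * Generous upper bound on queue iterations; the assert below only exists
     * to catch a runaway loop if processing entries keeps queuing new work.
     */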
    int loop_limit =
            BLOCK_SET_MAX_DEPTH * BLOCK_SET_MAX_DEPTH * BLOCK_SET_MAX_DEPTH +
            tr->fs->dev->block_count;

    assert(!block_allocator_queue.updating);

    block_allocator_queue.updating = true;
    while (!block_allocator_queue_empty(&block_allocator_queue)) {
        assert(loop_limit-- > 0);
        entry = block_allocator_queue_peek_head(&block_allocator_queue);
        pr_write("block %" PRIu64 ", tmp %d, free %d, removed %d\n",
                 (data_block_t)entry.block, entry.tmp, entry.free,
                 entry.removed);
        if (entry.removed) {
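            /* entry was cancelled after it was queued; nothing to apply */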
        } else if (entry.free) {
            block_allocator_add_free(tr, entry.block, entry.tmp);
        } else {
            block_allocator_add_allocated(tr, entry.block, entry.tmp);
        }
        block_allocator_queue_remove_head(&block_allocator_queue, entry);
    }
    block_allocator_queue.updating = false;
}