1 /*
2 * Copyright (C) 2015-2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <assert.h>
18 #include <inttypes.h>
19 #include <lk/compiler.h>
20 #include <stdbool.h>
21 #include <stdint.h>
22 #include <stdio.h>
23 #include <string.h>
24
25 #include "array.h"
26 #include "block_allocator.h"
27 #include "block_set.h"
28 #include "checkpoint.h"
29 #include "debug.h"
30 #include "file.h"
31 #include "transaction.h"
32
/*
 * Debug flag: when set, transaction_merge_free_sets() prints each add/delete
 * range it processes. Off by default (zero-initialized file-scope variable).
 */
bool print_merge_free;
34
35 /**
36 * transaction_check_free
37 * @tr: Transaction object.
38 * @set: Free set to check.
39 * @min_free Number of block that must be in @set
40 *
41 * Return: %true is @set contains @min_free or more blocks, %false otherwise.
42 */
transaction_check_free(struct transaction * tr,struct block_set * set,data_block_t min_free)43 static bool transaction_check_free(struct transaction* tr,
44 struct block_set* set,
45 data_block_t min_free) {
46 data_block_t next_block;
47 struct block_range free_range;
48 data_block_t count;
49
50 next_block = 0;
51 while (true) {
52 free_range = block_set_find_next_range(tr, set, next_block);
53 if (block_range_empty(free_range)) {
54 return false;
55 }
56 count = free_range.end - free_range.start;
57 if (count >= min_free) {
58 return true;
59 }
60 min_free -= count;
61 next_block = free_range.end;
62 }
63 }
64
/**
 * transaction_merge_free_sets - Build the post-commit free set
 * @tr: Transaction object.
 * @new_set: Output set.
 * @set_i: Initial set.
 * @set_d: Set of blocks to delete.
 * @set_a: Set of blocks to add.
 *
 * Helper function to update the free_set when committing a transaction.
 * @new_set = @set_i - @set_d - @new_set-blocks + @set_a + set_[ida]-blocks
 * @new_set must start empty and will be initialized and filled in this
 * function.
 */
static void transaction_merge_free_sets(struct transaction* tr,
                                        struct block_set* new_set,
                                        struct block_set* set_i,
                                        struct block_set* set_d,
                                        struct block_set* set_a) {
    data_block_t next_block;
    struct block_range delete_range = BLOCK_RANGE_INITIAL_VALUE(delete_range);
    struct block_range add_range = BLOCK_RANGE_INITIAL_VALUE(add_range);

    /* new_set should start empty. */
    assert(block_set_find_next_block(tr, new_set, 1, true) == 0);
    block_set_copy(tr, new_set, set_i);

    full_assert(block_set_check(tr, set_i));
    full_assert(block_set_check(tr, set_d));
    full_assert(block_set_check(tr, set_a));

    /*
     * If the initial set has a valid root block it must still need a
     * copy-on-write copy for this transaction; otherwise the merge below
     * would modify committed state in place.
     */
    assert(!block_mac_valid(tr, &set_i->block_tree.root) ||
           transaction_block_need_copy(
                   tr, block_mac_to_block(tr, &set_i->block_tree.root)));
    if (print_merge_free) {
        printf("%s\n", __func__);
    }

    /* TODO: don't walk the whole tree each time */
    next_block = 1;
    while (next_block != 0) {
        tr->min_free_block = next_block;
        delete_range = block_set_find_next_range(tr, set_d, next_block);
        add_range = block_set_find_next_range(tr, set_a, next_block);
        if (print_merge_free) {
            printf("%s: add %" PRIu64 "-%" PRIu64 " or delete %" PRIu64
                   "-%" PRIu64 "\n",
                   __func__, add_range.start, add_range.end - 1,
                   delete_range.start, delete_range.end - 1);
        }
        /* A block cannot be both deleted and added by the same commit. */
        assert(!block_range_overlap(delete_range, add_range));
        if (block_range_before(delete_range, add_range)) {
            /* The lowest pending change is a delete; apply it first. */
            assert(delete_range.start >= next_block);
            tr->min_free_block = delete_range.end;
            block_allocator_suspend_set_updates(tr);
            block_set_remove_range(tr, new_set, delete_range);
            block_allocator_process_queue(tr);
            next_block = delete_range.end;
        } else if (!block_range_empty(add_range)) {
            /* Otherwise apply the lowest pending add. */
            assert(add_range.start >= next_block);
            tr->min_free_block = add_range.end;
            block_allocator_suspend_set_updates(tr);
            block_set_add_range(tr, new_set, add_range);
            block_allocator_process_queue(tr);
            next_block = add_range.end;
        } else {
            /* Both input sets are exhausted; terminate the walk. */
            assert(block_range_empty(delete_range));
            assert(block_range_empty(add_range));
            next_block = 0;
        }
        if (tr->failed) {
            pr_warn("transaction failed, abort\n");
            return;
        }
    }
    full_assert(block_set_check(tr, new_set));
}
141
/** transaction_rebuild_free_set - Rebuild free set from referenced file blocks
 * @tr: Transaction object.
 * @new_free_set: Output free set.
 * @new_files: Root block and mac of the files tree.
 * @new_checkpoint: Checkpoint metadata block and mac.
 *
 * Rebuilds the file system free set by walking the current files tree and
 * ensuring that all referenced blocks are marked as not free. @new_free_set
 * will be initialized to contain all blocks not referenced from the files root.
 * The @new_checkpoint metadata block will also be removed from the free set,
 * but its children (checkpoint files tree and free set) will not be checked.
 * The blocks in the checkpoint (beside the metadata block) are tracked as
 * free/allocated by the checkpoint free set rather than the active file system
 * free set.
 *
 * We ignore tr->freed and tr->fs->free here because we are reconstructing the
 * entire free set. All blocks that were freed in this transaction will not be
 * referenced by @new_files.
 */
static void transaction_rebuild_free_set(struct transaction* tr,
                                         struct block_set* new_free_set,
                                         struct block_mac* new_files,
                                         struct block_mac* new_checkpoint) {
    /* Full device range eligible for the free set. */
    struct block_range init_range = {
            .start = tr->fs->min_block_num,
            .end = tr->fs->dev->block_count,
    };
    struct block_range range;
    struct block_set previously_allocated =
            BLOCK_SET_INITIAL_VALUE(previously_allocated);

    /*
     * Copy and save tr->allocated so that we can keep track of the blocks
     * already allocated for the current transaction when performing allocations
     * for the new free set tree nodes. We then reset tr->allocated so that it
     * will only hold new blocks allocated for new_free_set. All blocks
     * allocated for files will already be referenced in new_files, so we'll
     * already be removing them from new_free_set.
     */
    assert(list_in_list(&tr->allocated.node));
    list_delete(&tr->allocated.node);
    block_set_copy_ro(tr, &previously_allocated, &tr->allocated);
    list_add_tail(&tr->fs->allocated, &previously_allocated.node);

    block_set_init(tr->fs, &tr->allocated);
    list_add_tail(&tr->fs->allocated, &tr->allocated.node);

    block_set_init(tr->fs, new_free_set);
    new_free_set->block_tree.copy_on_write = true;
    new_free_set->block_tree.allow_copy_on_write = true;

    /* Start from "every block is free", then carve out referenced blocks. */
    block_set_add_range(tr, new_free_set, init_range);

    if (block_mac_valid(tr, new_checkpoint)) {
        /* Only the checkpoint metadata block belongs to the active fs. */
        block_set_remove_block(tr, new_free_set,
                               block_mac_to_block(tr, new_checkpoint));
    }
    if (tr->failed) {
        pr_warn("transaction failed, abort\n");
        return;
    }

    if (block_mac_valid(tr, new_files)) {
        /* Remove every block reachable from the files tree root. */
        files_rebuild_free_set(tr, new_free_set, new_files);
        if (tr->failed) {
            pr_warn("transaction failed, abort\n");
            return;
        }
    }

    /*
     * Remove the blocks allocated for the new free set tree nodes themselves.
     * min_free_block advances past each processed range so the allocator does
     * not hand out blocks the walk has already passed.
     */
    for (range = block_set_find_next_range(tr, &tr->allocated, 1);
         !block_range_empty(range);
         range = block_set_find_next_range(tr, &tr->allocated, range.end)) {
        tr->min_free_block = range.end;
        block_allocator_suspend_set_updates(tr);
        block_set_remove_range(tr, new_free_set, range);
        block_allocator_process_queue(tr);
    }

    /*
     * Copy the rest of the allocated blocks back to tr->allocated to maintain a
     * consistent state. We don't actually need to do this with the current code
     * calling this function, but this restores the transaction state to what
     * would be expected if it were to be used in the future.
     */
    for (range = block_set_find_next_range(tr, &previously_allocated, 1);
         !block_range_empty(range);
         range = block_set_find_next_range(tr, &previously_allocated,
                                           range.end)) {
        block_set_add_range(tr, &tr->allocated, range);
    }
    list_delete(&previously_allocated.node);

    full_assert(block_set_check(tr, new_free_set));
}
237
238 /**
239 * transaction_block_need_copy - Check if block needs copy
240 * @tr: Transaction object.
241 * @block: Block number to check.
242 *
243 * Return: %true if block has not been allocated as a non-tmp block for @tr,
244 * %false otherwise.
245 */
transaction_block_need_copy(struct transaction * tr,data_block_t block)246 bool transaction_block_need_copy(struct transaction* tr, data_block_t block) {
247 assert(block);
248 assert(!block_set_block_in_set(tr, &tr->tmp_allocated, block));
249 assert(!block_allocator_allocation_queued(tr, block, true));
250
251 return !block_set_block_in_set(tr, &tr->allocated, block) &&
252 !block_allocator_allocation_queued(tr, block, false);
253 }
254
255 /**
256 * transaction_delete_active - Remove transaction from active lists (internal)
257 * @tr: Transaction object.
258 */
transaction_delete_active(struct transaction * tr)259 static void transaction_delete_active(struct transaction* tr) {
260 assert(list_in_list(&tr->allocated.node));
261 assert(list_in_list(&tr->tmp_allocated.node));
262 list_delete(&tr->allocated.node);
263 list_delete(&tr->tmp_allocated.node);
264 }
265
266 /**
267 * transaction_fail - Fail transaction
268 * @tr: Transaction object.
269 *
270 * Marks transaction as failed, removes it from active list, discards dirty
271 * cache entries and restore open files to last committed state.
272 */
transaction_fail(struct transaction * tr)273 void transaction_fail(struct transaction* tr) {
274 assert(!tr->failed);
275
276 tr->failed = true;
277
278 if (tr->complete) {
279 return;
280 }
281
282 block_cache_discard_transaction(tr, true);
283 transaction_delete_active(tr);
284 file_transaction_failed(tr);
285 }
286
287 /**
288 * transaction_free - Free transaction
289 * @tr: Transaction object.
290 *
291 * Prepare @tr for free. @tr must not be active and all open files must already
292 * be closed.
293 */
transaction_free(struct transaction * tr)294 void transaction_free(struct transaction* tr) {
295 assert(!transaction_is_active(tr));
296 assert(list_is_empty(&tr->open_files));
297 assert(list_in_list(&tr->node));
298 list_delete(&tr->node);
299 }
300
301 /**
302 * check_free_tree - Check tree of free set (internal)
303 * @tr: Transaction object.
304 * @free: Set object.
305 *
306 * Check that the blocks used by the tree for a free set are not in the same
307 * set.
308 */
check_free_tree(struct transaction * tr,struct block_set * free)309 static void check_free_tree(struct transaction* tr, struct block_set* free) {
310 unsigned int i;
311 struct block_tree_path path;
312
313 block_tree_walk(tr, &free->block_tree, 0, true, &path);
314 while (block_tree_path_get_key(&path)) {
315 for (i = 0; i < path.count; i++) {
316 assert(!block_set_block_in_set(
317 tr, free,
318 block_mac_to_block(tr, &path.entry[i].block_mac)));
319 }
320 block_tree_path_next(&path);
321 }
322 }
323
/**
 * transaction_complete_etc - Complete transaction, optionally updating
 *                            checkpoint
 * @tr: Transaction object.
 * @update_checkpoint: If true, update checkpoint with the new file-system
 *                     state.
 *
 * Commits @tr: flushes the file changes, builds the new free set, writes the
 * super block and, on success, publishes the new roots in @tr->fs. On any
 * failure along the way the transaction is aborted via the
 * err_transaction_failed path.
 */
void transaction_complete_etc(struct transaction* tr, bool update_checkpoint) {
    struct block_mac new_files;
    struct transaction* tmp_tr;
    struct transaction* other_tr;
    struct block_set new_free_set = BLOCK_SET_INITIAL_VALUE(new_free_set);
    struct checkpoint* new_checkpoint = NULL;
    struct block_mac new_checkpoint_mac;
    struct obj_ref new_checkpoint_ref =
            OBJ_REF_INITIAL_VALUE(new_checkpoint_ref);
    bool super_block_updated;

    assert(tr->fs);
    assert(!tr->complete);

    /* A pending auto-checkpoint must be committed before this transaction. */
    if (tr->fs->checkpoint_required) {
        tr->fs->checkpoint_required = false;
        if (!checkpoint_commit(tr->fs)) {
            /*
             * checkpoint creation failed, so we need to try again before we
             * commit the next transaction
             */
            tr->fs->checkpoint_required = true;
            transaction_fail(tr);
            pr_warn("auto-checkpoint failed, abort\n");
            goto err_transaction_failed;
        }
    }

    // printf("%s: %" PRIu64 "\n", __func__, tr->version);

    /* Default: carry the existing checkpoint mac forward unchanged. */
    block_mac_copy(tr, &new_checkpoint_mac, &tr->fs->checkpoint);

    if (tr->failed) {
        pr_warn("transaction failed, abort\n");
        goto err_transaction_failed;
    }

    assert(transaction_is_active(tr));

    /* Flush file changes; new_files receives the new files-tree root. */
    file_transaction_complete(tr, &new_files);
    if (tr->failed) {
        pr_warn("transaction failed, abort\n");
        goto err_transaction_failed;
    }

    if (update_checkpoint) {
        new_checkpoint = checkpoint_get_new_block(tr, &new_checkpoint_ref,
                                                  &new_checkpoint_mac);
        if (tr->failed) {
            pr_warn("transaction failed, abort\n");
            goto err_transaction_failed;
        }
        assert(new_checkpoint);
    }

    /* Build the free set that will be current after this commit. */
    tr->new_free_set = &new_free_set;
    if (tr->rebuild_free_set) {
        transaction_rebuild_free_set(tr, &new_free_set, &new_files,
                                     &new_checkpoint_mac);
    } else {
        transaction_merge_free_sets(tr, &new_free_set, &tr->fs->free,
                                    &tr->allocated, &tr->freed);
    }
    if (tr->failed) {
        pr_warn("transaction failed, abort\n");
        goto err_transaction_failed;
    }

    /* Refuse commits that would drop below the reserved block count. */
    if (!transaction_check_free(tr, &new_free_set, tr->fs->reserved_count)) {
        if (!tr->failed) {
            transaction_fail(tr);
        }
        pr_warn("transaction would leave fs too full, abort\n");
        goto err_transaction_failed;
    }

    if (tr->fs->alternate_data && tr->repaired) {
        if (!tr->failed) {
            transaction_fail(tr);
        }
        pr_warn("transaction cannot repair alternate fs, abort\n");
        goto err_transaction_failed;
    }

    /* Debug dump of all block sets; flip the condition to enable. */
    if (0) {
        printf("%s: old free:\n", __func__);
        block_set_print(tr, &tr->fs->free);
        printf("%s: tmp_allocated:\n", __func__);
        block_set_print(tr, &tr->tmp_allocated);
        printf("%s: allocated:\n", __func__);
        block_set_print(tr, &tr->allocated);
        printf("%s: freed:\n", __func__);
        block_set_print(tr, &tr->freed);
        printf("%s: new free:\n", __func__);
        block_set_print(tr, &new_free_set);
    }

    if (tr->failed) {
        pr_warn("transaction failed, abort\n");
        goto err_transaction_failed;
    }

    if (update_checkpoint) {
        checkpoint_update_roots(tr, new_checkpoint, &new_files,
                                &new_free_set.block_tree.root);
        block_put_dirty(tr, new_checkpoint, &new_checkpoint_ref,
                        &new_checkpoint_mac, NULL);
        /*
         * We have now released the block reference new_checkpoint_ref, so make
         * sure we don't release it again in err_transaction_failed
         */
        new_checkpoint = NULL;
    }

    /* Write all dirty blocks for this transaction to the device. */
    block_cache_clean_transaction(tr);

    if (tr->failed) {
        pr_warn("transaction failed, abort\n");
        goto err_transaction_failed;
    }

    assert(block_range_empty(new_free_set.initial_range));
    check_free_tree(tr, &new_free_set);

    if (block_mac_same_block(tr, &tr->fs->free.block_tree.root,
                             &new_free_set.block_tree.root)) {
        /*
         * If the root block of the free tree did not move, there can be no
         * other changes to the filesystem.
         */
        assert(block_mac_eq(tr, &tr->fs->free.block_tree.root,
                            &new_free_set.block_tree.root));
        assert(block_mac_eq(tr, &tr->fs->files.root, &new_files));

        /*
         * Skip super block write if there are no changes to the filesystem.
         * This is needed in case a previous write error has triggered a request
         * to write another copy of the old super block. There can only be one
         * copy of each block in the cache. If we try to write a new super block
         * here before cleaning the pending one, we get a conflict. If there
         * were changes to the filesystem, the pending super block has already
         * been cleaned at this point.
         */
        goto complete_nop_transaction;
    }

    /* Commit point: the super block makes the new roots authoritative. */
    super_block_updated = update_super_block(tr, &new_free_set.block_tree.root,
                                             &new_files, &new_checkpoint_mac);
    if (!super_block_updated) {
        assert(tr->failed);
        pr_warn("failed to update super block, abort\n");
        goto err_transaction_failed;
    }
    block_cache_clean_transaction(tr);

    /*
     * If an error was detected writing the super block, it is not safe to
     * continue as we do not know if the write completed. We need to rewrite a
     * known state over the unknown super block to avoid an inconsistent view of
     * the filesystem.
     *
     * At this point block_cache_complete_write has been called by the block
     * device, so the current superblock slot in the block cache is free and not
     * associated with the pending transaction.
     */
    if (tr->failed) {
        pr_warn("failed to write super block, notify fs and abort the transaction\n");
        /*
         * Superblock could have been written or not. Make sure no other blocks
         * are written to the filesystem before writing another copy of the
         * superblock with the existing file and free trees.
         *
         * TODO: Don't trigger a superblock write on unaffected filesystems.
         * We update all for now to simplify testing.
         */
        fs_unknown_super_block_state_all();
        goto err_transaction_failed;
    }

    /* The commit is durable; publish the new state in the in-memory fs. */
    tr->fs->free.block_tree.root = new_free_set.block_tree.root;
    block_range_clear(
            &tr->fs->free
                     .initial_range); /* clear for initial file-system state */
    tr->fs->files.root = new_files;
    tr->fs->super_block_version = tr->fs->written_super_block_version;
    tr->fs->checkpoint = new_checkpoint_mac;
    if (tr->repaired) {
        assert(!tr->fs->alternate_data);
        tr->fs->main_repaired = true;
    }
    if (update_checkpoint) {
        tr->fs->checkpoint_free.block_tree.root = new_free_set.block_tree.root;
        block_range_clear(&tr->fs->checkpoint_free.initial_range);
    }

complete_nop_transaction:
    transaction_delete_active(tr);
    tr->complete = true;

    file_transaction_success(tr);
    assert(!tr->failed);

    check_free_tree(tr, &tr->fs->free);

    /* Fail other active transactions that conflict with this commit. */
    list_for_every_entry_safe(&tr->fs->transactions, other_tr, tmp_tr,
                              struct transaction, node) {
        if (tr->failed) {
            break;
        }
        if (!transaction_is_active(other_tr)) {
            continue;
        }
        if (tr->rebuild_free_set) {
            /*
             * TODO: only fail actually conflicting transactions when rebuilding
             * the free set. When rebuilding, tr->freed does not contain all
             * freed blocks if tree nodes were dropped. We could rebuild a free
             * set delta by subtracting the new free set from the old one and
             * then compare this delta against other transactions.
             */
            pr_warn("Rebuilding free set requires failing all pending transactions\n");
            transaction_fail(other_tr);
        } else if (block_set_overlap(tr, &tr->freed, &other_tr->freed)) {
            pr_warn("fail conflicting transaction\n");
            transaction_fail(other_tr);
        }
    }
    if (tr->failed) {
        /*
         * The overlap check itself failed; fall back to failing every
         * remaining active transaction, then clear our own failed flag since
         * this transaction already committed.
         */
        pr_warn("transaction failed while failing conflicting transactions\n");
        tr->failed = false;
        list_for_every_entry_safe(&tr->fs->transactions, other_tr, tmp_tr,
                                  struct transaction, node) {
            if (!transaction_is_active(other_tr)) {
                continue;
            }
            pr_warn("fail possibly conflicting transaction\n");
            transaction_fail(other_tr);
        }
    }
    assert(!tr->failed);
    block_cache_discard_transaction(tr, false);

err_transaction_failed:
    if (new_checkpoint) {
        /* Release the checkpoint block reference taken above. */
        block_put_dirty_discard(new_checkpoint, &new_checkpoint_ref);
    }
    if (tr->failed) {
        file_transaction_complete_failed(tr);
    }
    assert(!block_cache_debug_get_ref_block_count());
}
581
582 /**
583 * transaction_initial_super_block_complete - Complete special transaction
584 * @tr: Transaction object. Must match initial_super_block_tr in fs.
585 *
586 * Flush the initial superblock in @tr to disk. If the block could not be
587 * written re-initialize @tr and leave it in place for another attempt.
588 * Otherwise clear @tr->fs->initial_super_block_tr and free @tr.
589 *
590 * The initial superblock can only be flushed from the block cache by the
591 * block_cache_clean_transaction() call here, as we do not allow initial
592 * superblocks to be flushed to make room for other data. This ensures that we
593 * don't run out of room to recreate the superblock write in case it fails.
594 */
transaction_initial_super_block_complete(struct transaction * tr)595 void transaction_initial_super_block_complete(struct transaction* tr) {
596 assert(tr == tr->fs->initial_super_block_tr);
597 block_cache_clean_transaction(tr);
598 if (tr->failed) {
599 /*
600 * If we failed to write the superblock we re-initialize a new attempt
601 * to write that superblock before the next time we write to this
602 * filesystem.
603 */
604 pr_err("%s: failed to write initial superblock, version %d.\n",
605 __func__, tr->fs->written_super_block_version);
606 write_current_super_block(tr->fs, true /* reinitialize */);
607 return;
608 }
609 printf("%s: write initial superblock, version %d -> %d\n", __func__,
610 tr->fs->super_block_version, tr->fs->written_super_block_version);
611
612 assert(tr == tr->fs->initial_super_block_tr);
613 tr->fs->super_block_version = tr->fs->written_super_block_version;
614 tr->fs->initial_super_block_tr = NULL;
615
616 /* not a real transaction, discard the state so it can be freed */
617 transaction_fail(tr);
618 transaction_free(tr);
619 free(tr);
620 }
621
622 /**
623 * transaction_activate - Activate transaction
624 * @tr: Transaction object.
625 */
transaction_activate(struct transaction * tr)626 void transaction_activate(struct transaction* tr) {
627 assert(tr->fs);
628 assert(!transaction_is_active(tr));
629
630 tr->failed = false;
631 tr->invalid_block_found = false;
632 tr->complete = false;
633 tr->rebuild_free_set = false;
634 tr->repaired = false;
635 tr->min_free_block = 0;
636 tr->last_free_block = 0;
637 tr->last_tmp_free_block = 0;
638 tr->new_free_set = NULL;
639
640 block_set_init(tr->fs, &tr->tmp_allocated);
641 block_set_init(tr->fs, &tr->allocated);
642 block_set_init(tr->fs, &tr->freed);
643
644 fs_file_tree_init(tr->fs, &tr->files_added);
645 fs_file_tree_init(tr->fs, &tr->files_updated);
646 fs_file_tree_init(tr->fs, &tr->files_removed);
647
648 list_add_tail(&tr->fs->allocated, &tr->allocated.node);
649 list_add_tail(&tr->fs->allocated, &tr->tmp_allocated.node);
650 }
651
652 /**
653 * transaction_init - Initialize new transaction object
654 * @tr: Transaction object.
655 * @fs: File system state object.
656 * @activate: Activate transaction
657 */
transaction_init(struct transaction * tr,struct fs * fs,bool activate)658 void transaction_init(struct transaction* tr, struct fs* fs, bool activate) {
659 assert(fs);
660 assert(fs->dev);
661
662 memset(tr, 0, sizeof(*tr));
663 tr->fs = fs;
664
665 list_initialize(&tr->open_files);
666 list_add_tail(&fs->transactions, &tr->node);
667
668 if (activate) {
669 transaction_activate(tr);
670 }
671 }
672