/*
 * blkmap64_rb.c --- Simple rb-tree implementation for bitmaps
 *
 * (C)2010 Red Hat, Inc., Lukas Czerner <lczerner@redhat.com>
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Public
 * License.
 * %End-Header%
 */

#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <time.h>
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#if HAVE_LINUX_TYPES_H
#include <linux/types.h>
#endif

#include "ext2_fs.h"
#include "ext2fsP.h"
#include "bmap64.h"
#include "rbtree.h"

#include <limits.h>

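/*
 * Bits that are set are stored as extents: each node of the tree
 * covers the range of bits [start, start + count).
 */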
struct bmap_rb_extent {
	struct rb_node node;
	__u64 start;
	__u64 count;
};

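/*
 * Per-bitmap private data: the root of the extent tree plus cached
 * pointers into it.  rcursor (and the cached following extent,
 * rcursor_next) speeds up repeated bit tests, wcursor speeds up
 * repeated insertions.
 */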
struct ext2fs_rb_private {
	struct rb_root root;
	struct bmap_rb_extent *wcursor;
	struct bmap_rb_extent *rcursor;
	struct bmap_rb_extent *rcursor_next;
#ifdef ENABLE_BMAP_STATS_OPS
	__u64 mark_hit;
	__u64 test_hit;
#endif
};

inline static struct bmap_rb_extent *node_to_extent(struct rb_node *node)
{
	/*
	 * This depends on the fact that the struct rb_node is at the
	 * beginning of the bmap_rb_extent structure. We use this
	 * instead of the ext2fs_rb_entry macro because it causes gcc
	 * -Wall to generate a huge amount of noise.
	 */
	return (struct bmap_rb_extent *) node;
}

static int rb_insert_extent(__u64 start, __u64 count,
			    struct ext2fs_rb_private *);
static void rb_get_new_extent(struct bmap_rb_extent **, __u64, __u64);

/* #define DEBUG_RB */

#ifdef DEBUG_RB
static void print_tree(struct rb_root *root)
{
	struct rb_node *node = NULL;
	struct bmap_rb_extent *ext;

	fprintf(stderr, "\t\t\t=================================\n");
	node = ext2fs_rb_first(root);
	for (node = ext2fs_rb_first(root); node != NULL;
	     node = ext2fs_rb_next(node)) {
		ext = node_to_extent(node);
		fprintf(stderr, "\t\t\t--> (%llu -> %llu)\n",
			ext->start, ext->start + ext->count);
	}
	fprintf(stderr, "\t\t\t=================================\n");
}

static void check_tree(struct rb_root *root, const char *msg)
{
	struct rb_node *node;
	struct bmap_rb_extent *ext, *old = NULL;

	for (node = ext2fs_rb_first(root); node;
	     node = ext2fs_rb_next(node)) {
		ext = node_to_extent(node);
		if (ext->count == 0) {
			fprintf(stderr, "Tree Error: count is zero\n");
			fprintf(stderr, "extent: %llu -> %llu (%llu)\n",
				ext->start, ext->start + ext->count,
				ext->count);
			goto err_out;
		}
		if (ext->start + ext->count < ext->start) {
			fprintf(stderr,
				"Tree Error: start or count is crazy\n");
			fprintf(stderr, "extent: %llu -> %llu (%llu)\n",
				ext->start, ext->start + ext->count,
				ext->count);
			goto err_out;
		}

		if (old) {
			if (old->start > ext->start) {
				fprintf(stderr, "Tree Error: start is crazy\n");
				fprintf(stderr, "extent: %llu -> %llu (%llu)\n",
					old->start, old->start + old->count,
					old->count);
				fprintf(stderr,
					"extent next: %llu -> %llu (%llu)\n",
					ext->start, ext->start + ext->count,
					ext->count);
				goto err_out;
			}
			if ((old->start + old->count) >= ext->start) {
				fprintf(stderr,
					"Tree Error: extent is crazy\n");
				fprintf(stderr, "extent: %llu -> %llu (%llu)\n",
					old->start, old->start + old->count,
					old->count);
				fprintf(stderr,
					"extent next: %llu -> %llu (%llu)\n",
					ext->start, ext->start + ext->count,
					ext->count);
				goto err_out;
			}
		}
		old = ext;
	}
	return;

err_out:
	fprintf(stderr, "%s\n", msg);
	print_tree(root);
	exit(1);
}
#else
#define check_tree(root, msg) do {} while (0)
#define print_tree(root) do {} while (0)
#endif

static void rb_get_new_extent(struct bmap_rb_extent **ext, __u64 start,
			      __u64 count)
{
	struct bmap_rb_extent *new_ext;
	int retval;

	retval = ext2fs_get_mem(sizeof (struct bmap_rb_extent),
			&new_ext);
	if (retval)
		abort();

	new_ext->start = start;
	new_ext->count = count;
	*ext = new_ext;
}

inline
static void rb_free_extent(struct ext2fs_rb_private *bp,
			   struct bmap_rb_extent *ext)
{
	if (bp->wcursor == ext)
		bp->wcursor = NULL;
	if (bp->rcursor == ext)
		bp->rcursor = NULL;
	if (bp->rcursor_next == ext)
		bp->rcursor_next = NULL;
	ext2fs_free_mem(&ext);
}

static errcode_t rb_alloc_private_data (ext2fs_generic_bitmap_64 bitmap)
{
	struct ext2fs_rb_private *bp;
	errcode_t retval;

	retval = ext2fs_get_mem(sizeof (struct ext2fs_rb_private), &bp);
	if (retval)
		return retval;

	bp->root = RB_ROOT;
	bp->rcursor = NULL;
	bp->rcursor_next = NULL;
	bp->wcursor = NULL;

#ifdef ENABLE_BMAP_STATS_OPS
	bp->test_hit = 0;
	bp->mark_hit = 0;
#endif

	bitmap->private = (void *) bp;
	return 0;
}

static errcode_t rb_new_bmap(ext2_filsys fs EXT2FS_ATTR((unused)),
			     ext2fs_generic_bitmap_64 bitmap)
{
	errcode_t retval;

	retval = rb_alloc_private_data (bitmap);
	if (retval)
		return retval;

	return 0;
}

static void rb_free_tree(struct rb_root *root)
{
	struct bmap_rb_extent *ext;
	struct rb_node *node, *next;

	for (node = ext2fs_rb_first(root); node; node = next) {
		next = ext2fs_rb_next(node);
		ext = node_to_extent(node);
		ext2fs_rb_erase(node, root);
		ext2fs_free_mem(&ext);
	}
}

static void rb_free_bmap(ext2fs_generic_bitmap_64 bitmap)
{
	struct ext2fs_rb_private *bp;

	bp = (struct ext2fs_rb_private *) bitmap->private;

	rb_free_tree(&bp->root);
	ext2fs_free_mem(&bp);
	bp = 0;
}

static errcode_t rb_copy_bmap(ext2fs_generic_bitmap_64 src,
			      ext2fs_generic_bitmap_64 dest)
{
	struct ext2fs_rb_private *src_bp, *dest_bp;
	struct bmap_rb_extent *src_ext, *dest_ext;
	struct rb_node *dest_node, *src_node, *dest_last, **n;
	errcode_t retval = 0;

	retval = rb_alloc_private_data (dest);
	if (retval)
		return retval;

	src_bp = (struct ext2fs_rb_private *) src->private;
	dest_bp = (struct ext2fs_rb_private *) dest->private;
	src_bp->rcursor = NULL;
	dest_bp->rcursor = NULL;

	src_node = ext2fs_rb_first(&src_bp->root);
	while (src_node) {
		src_ext = node_to_extent(src_node);
		retval = ext2fs_get_mem(sizeof (struct bmap_rb_extent),
					&dest_ext);
		if (retval)
			break;

		memcpy(dest_ext, src_ext, sizeof(struct bmap_rb_extent));

		dest_node = &dest_ext->node;
		n = &dest_bp->root.rb_node;

		dest_last = NULL;
		if (*n) {
			dest_last = ext2fs_rb_last(&dest_bp->root);
			n = &(dest_last)->rb_right;
		}

		ext2fs_rb_link_node(dest_node, dest_last, n);
		ext2fs_rb_insert_color(dest_node, &dest_bp->root);

		src_node = ext2fs_rb_next(src_node);
	}

	return retval;
}

static void rb_truncate(__u64 new_max, struct rb_root *root)
{
	struct bmap_rb_extent *ext;
	struct rb_node *node;

	node = ext2fs_rb_last(root);
	while (node) {
		ext = node_to_extent(node);

		if ((ext->start + ext->count - 1) <= new_max)
			break;
		else if (ext->start > new_max) {
			ext2fs_rb_erase(node, root);
			ext2fs_free_mem(&ext);
			node = ext2fs_rb_last(root);
			continue;
		} else
			ext->count = new_max - ext->start + 1;
	}
}

static errcode_t rb_resize_bmap(ext2fs_generic_bitmap_64 bmap,
				__u64 new_end, __u64 new_real_end)
{
	struct ext2fs_rb_private *bp;

	bp = (struct ext2fs_rb_private *) bmap->private;
	bp->rcursor = NULL;
	bp->wcursor = NULL;

	rb_truncate(((new_end < bmap->end) ? new_end : bmap->end) - bmap->start,
		   &bp->root);

	bmap->end = new_end;
	bmap->real_end = new_real_end;

	if (bmap->end < bmap->real_end)
		rb_insert_extent(bmap->end + 1 - bmap->start,
				 bmap->real_end - bmap->end, bp);
	return 0;

}

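/*
 * Test a single bit.  The read cursor, the gap up to the cached next
 * extent, and the write cursor are checked first; only then do we fall
 * back to a full tree search, caching the extent that is found.
 */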
inline static int
rb_test_bit(struct ext2fs_rb_private *bp, __u64 bit)
{
	struct bmap_rb_extent *rcursor, *next_ext = NULL;
	struct rb_node *parent = NULL, *next;
	struct rb_node **n = &bp->root.rb_node;
	struct bmap_rb_extent *ext;

	rcursor = bp->rcursor;
	if (!rcursor)
		goto search_tree;

	if (bit >= rcursor->start && bit < rcursor->start + rcursor->count) {
#ifdef ENABLE_BMAP_STATS_OPS
		bp->test_hit++;
#endif
		return 1;
	}

	next_ext = bp->rcursor_next;
	if (!next_ext) {
		next = ext2fs_rb_next(&rcursor->node);
		if (next)
			next_ext = node_to_extent(next);
		bp->rcursor_next = next_ext;
	}
	if (next_ext) {
		if ((bit >= rcursor->start + rcursor->count) &&
		    (bit < next_ext->start)) {
#ifdef ENABLE_BMAP_STATS_OPS
			bp->test_hit++;
#endif
			return 0;
		}
	}
	bp->rcursor = NULL;
	bp->rcursor_next = NULL;

	rcursor = bp->wcursor;
	if (!rcursor)
		goto search_tree;

	if (bit >= rcursor->start && bit < rcursor->start + rcursor->count)
		return 1;

search_tree:

	while (*n) {
		parent = *n;
		ext = node_to_extent(parent);
		if (bit < ext->start)
			n = &(*n)->rb_left;
		else if (bit >= (ext->start + ext->count))
			n = &(*n)->rb_right;
		else {
			bp->rcursor = ext;
			bp->rcursor_next = NULL;
			return 1;
		}
	}
	return 0;
}

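/*
 * Mark the range [start, start + count), merging it with adjacent or
 * overlapping extents.  Returns non-zero if the first bit of the range
 * was already set (this is what rb_mark_bmap reports back).
 */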
static int rb_insert_extent(__u64 start, __u64 count,
			    struct ext2fs_rb_private *bp)
{
	struct rb_root *root = &bp->root;
	struct rb_node *parent = NULL, **n = &root->rb_node;
	struct rb_node *new_node, *node, *next;
	struct bmap_rb_extent *new_ext;
	struct bmap_rb_extent *ext;
	int retval = 0;

	if (count == 0)
		return 0;

	bp->rcursor_next = NULL;
	ext = bp->wcursor;
	if (ext) {
		if (start >= ext->start &&
		    start <= (ext->start + ext->count)) {
#ifdef ENABLE_BMAP_STATS_OPS
			bp->mark_hit++;
#endif
			goto got_extent;
		}
	}

	while (*n) {
		parent = *n;
		ext = node_to_extent(parent);

		if (start < ext->start) {
			n = &(*n)->rb_left;
		} else if (start > (ext->start + ext->count)) {
			n = &(*n)->rb_right;
		} else {
got_extent:
			if ((start + count) <= (ext->start + ext->count))
				return 1;

			if ((ext->start + ext->count) == start)
				retval = 0;
			else
				retval = 1;

			count += (start - ext->start);
			start = ext->start;
			new_ext = ext;
			new_node = &ext->node;

			goto skip_insert;
		}
	}

	rb_get_new_extent(&new_ext, start, count);

	new_node = &new_ext->node;
	ext2fs_rb_link_node(new_node, parent, n);
	ext2fs_rb_insert_color(new_node, root);
	bp->wcursor = new_ext;

	node = ext2fs_rb_prev(new_node);
	if (node) {
		ext = node_to_extent(node);
		if ((ext->start + ext->count) == start) {
			start = ext->start;
			count += ext->count;
			ext2fs_rb_erase(node, root);
			rb_free_extent(bp, ext);
		}
	}

skip_insert:
	/* See if we can merge extent to the right */
	for (node = ext2fs_rb_next(new_node); node != NULL; node = next) {
		next = ext2fs_rb_next(node);
		ext = node_to_extent(node);

		if ((ext->start + ext->count) <= start)
			continue;

		/* No more merging */
		if ((start + count) < ext->start)
			break;

		/* ext is embedded in new_ext interval */
		if ((start + count) >= (ext->start + ext->count)) {
			ext2fs_rb_erase(node, root);
			rb_free_extent(bp, ext);
			continue;
		} else {
			/* merge ext with new_ext */
			count += ((ext->start + ext->count) -
				  (start + count));
			ext2fs_rb_erase(node, root);
			rb_free_extent(bp, ext);
			break;
		}
	}

	new_ext->start = start;
	new_ext->count = count;

	return retval;
}

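/*
 * Clear the range [start, start + count), splitting, truncating or
 * removing extents as needed.  Returns non-zero if any bits in the
 * range were previously set.
 */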
static int rb_remove_extent(__u64 start, __u64 count,
			    struct ext2fs_rb_private *bp)
{
	struct rb_root *root = &bp->root;
	struct rb_node *parent = NULL, **n = &root->rb_node;
	struct rb_node *node;
	struct bmap_rb_extent *ext;
	__u64 new_start, new_count;
	int retval = 0;

	if (ext2fs_rb_empty_root(root))
		return 0;

	while (*n) {
		parent = *n;
		ext = node_to_extent(parent);
		if (start < ext->start) {
			n = &(*n)->rb_left;
			continue;
		} else if (start >= (ext->start + ext->count)) {
			n = &(*n)->rb_right;
			continue;
		}

		if ((start > ext->start) &&
		    (start + count) < (ext->start + ext->count)) {
			/* We have to split extent into two */
			new_start = start + count;
			new_count = (ext->start + ext->count) - new_start;

			ext->count = start - ext->start;

			rb_insert_extent(new_start, new_count, bp);
			return 1;
		}

		if ((start + count) >= (ext->start + ext->count)) {
			ext->count = start - ext->start;
			retval = 1;
		}

		if (0 == ext->count) {
			parent = ext2fs_rb_next(&ext->node);
			ext2fs_rb_erase(&ext->node, root);
			rb_free_extent(bp, ext);
			break;
		}

		if (start == ext->start) {
			ext->start += count;
			ext->count -= count;
			return 1;
		}
	}

	/* See if we should delete or truncate extent on the right */
	for (; parent != NULL; parent = node) {
		node = ext2fs_rb_next(parent);
		ext = node_to_extent(parent);
		if ((ext->start + ext->count) <= start)
			continue;

		/* No more extents to be removed/truncated */
		if ((start + count) < ext->start)
			break;

		/* The entire extent is within the region to be removed */
		if ((start + count) >= (ext->start + ext->count)) {
			ext2fs_rb_erase(parent, root);
			rb_free_extent(bp, ext);
			retval = 1;
			continue;
		} else {
			/* modify the last extent in region to be removed */
			ext->count -= ((start + count) - ext->start);
			ext->start = start + count;
			retval = 1;
			break;
		}
	}

	return retval;
}

static int rb_mark_bmap(ext2fs_generic_bitmap_64 bitmap, __u64 arg)
{
	struct ext2fs_rb_private *bp;
	int retval;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	arg -= bitmap->start;

	retval = rb_insert_extent(arg, 1, bp);
	check_tree(&bp->root, __func__);
	return retval;
}

static int rb_unmark_bmap(ext2fs_generic_bitmap_64 bitmap, __u64 arg)
{
	struct ext2fs_rb_private *bp;
	int retval;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	arg -= bitmap->start;

	retval = rb_remove_extent(arg, 1, bp);
	check_tree(&bp->root, __func__);

	return retval;
}

inline
static int rb_test_bmap(ext2fs_generic_bitmap_64 bitmap, __u64 arg)
{
	struct ext2fs_rb_private *bp;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	arg -= bitmap->start;

	return rb_test_bit(bp, arg);
}

static void rb_mark_bmap_extent(ext2fs_generic_bitmap_64 bitmap, __u64 arg,
				unsigned int num)
{
	struct ext2fs_rb_private *bp;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	arg -= bitmap->start;

	rb_insert_extent(arg, num, bp);
	check_tree(&bp->root, __func__);
}

static void rb_unmark_bmap_extent(ext2fs_generic_bitmap_64 bitmap, __u64 arg,
				  unsigned int num)
{
	struct ext2fs_rb_private *bp;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	arg -= bitmap->start;

	rb_remove_extent(arg, num, bp);
	check_tree(&bp->root, __func__);
}

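/*
 * Return 1 if every bit in [start, start + len) is clear, 0 if any of
 * them is set.
 */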
static int rb_test_clear_bmap_extent(ext2fs_generic_bitmap_64 bitmap,
				     __u64 start, unsigned int len)
{
	struct rb_node *parent = NULL, **n;
	struct rb_node *node, *next;
	struct ext2fs_rb_private *bp;
	struct bmap_rb_extent *ext;
	int retval = 1;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	n = &bp->root.rb_node;
	start -= bitmap->start;

	if (len == 0 || ext2fs_rb_empty_root(&bp->root))
		return 1;

	/*
	 * If the search finds nothing, we still have to examine the rest
	 * of the requested range below; when it finds a match, the range
	 * is not clean and we return false.
	 */
	while (*n) {
		parent = *n;
		ext = node_to_extent(parent);
		if (start < ext->start) {
			n = &(*n)->rb_left;
		} else if (start >= (ext->start + ext->count)) {
			n = &(*n)->rb_right;
		} else {
			/*
			 * We found an extent in the tree -> the range is
			 * not clean
			 */
			return 0;
		}
	}

	node = parent;
	while (node) {
		next = ext2fs_rb_next(node);
		ext = node_to_extent(node);
		node = next;

		if ((ext->start + ext->count) <= start)
			continue;

		/* No more extents can overlap the range */
		if ((start + len) <= ext->start)
			break;

		retval = 0;
		break;
	}
	return retval;
}

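/*
 * Import a bit array: scan the input buffer (a whole byte at a time
 * where possible) and insert one extent per run of set bits.
 */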
static errcode_t rb_set_bmap_range(ext2fs_generic_bitmap_64 bitmap,
				   __u64 start, size_t num, void *in)
{
	struct ext2fs_rb_private *bp;
	unsigned char *cp = in;
	size_t i;
	int first_set = -1;

	bp = (struct ext2fs_rb_private *) bitmap->private;

	for (i = 0; i < num; i++) {
		if ((i & 7) == 0) {
			unsigned char c = cp[i/8];
			if (c == 0xFF) {
				if (first_set == -1)
					first_set = i;
				i += 7;
				continue;
			}
			if ((c == 0x00) && (first_set == -1)) {
				i += 7;
				continue;
			}
		}
		if (ext2fs_test_bit(i, in)) {
			if (first_set == -1)
				first_set = i;
			continue;
		}
		if (first_set == -1)
			continue;

		rb_insert_extent(start + first_set - bitmap->start,
				 i - first_set, bp);
		check_tree(&bp->root, __func__);
		first_set = -1;
	}
	if (first_set != -1) {
		rb_insert_extent(start + first_set - bitmap->start,
				 num - first_set, bp);
		check_tree(&bp->root, __func__);
	}

	return 0;
}

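/*
 * Export to a bit array: zero the output buffer, then set the bits
 * covered by each extent that intersects [start, start + num).
 */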
static errcode_t rb_get_bmap_range(ext2fs_generic_bitmap_64 bitmap,
				   __u64 start, size_t num, void *out)
{

	struct rb_node *parent = NULL, *next, **n;
	struct ext2fs_rb_private *bp;
	struct bmap_rb_extent *ext;
	__u64 count, pos;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	n = &bp->root.rb_node;
	start -= bitmap->start;

	if (ext2fs_rb_empty_root(&bp->root))
		return 0;

	while (*n) {
		parent = *n;
		ext = node_to_extent(parent);
		if (start < ext->start) {
			n = &(*n)->rb_left;
		} else if (start >= (ext->start + ext->count)) {
			n = &(*n)->rb_right;
		} else
			break;
	}

	memset(out, 0, (num + 7) >> 3);

	for (; parent != NULL; parent = next) {
		next = ext2fs_rb_next(parent);
		ext = node_to_extent(parent);

		pos = ext->start;
		count = ext->count;
		if (pos >= start + num)
			break;
		if (pos < start) {
			if (pos + count < start)
				continue;
			count -= start - pos;
			pos = start;
		}
		if (pos + count > start + num)
			count = start + num - pos;

		while (count > 0) {
			if ((count >= 8) &&
			    ((pos - start) % 8) == 0) {
				int nbytes = count >> 3;
				int offset = (pos - start) >> 3;

				memset(((char *) out) + offset, 0xFF, nbytes);
				pos += nbytes << 3;
				count -= nbytes << 3;
				continue;
			}
			ext2fs_fast_set_bit64((pos - start), out);
			pos++;
			count--;
		}
	}
	return 0;
}

static void rb_clear_bmap(ext2fs_generic_bitmap_64 bitmap)
{
	struct ext2fs_rb_private *bp;

	bp = (struct ext2fs_rb_private *) bitmap->private;

	rb_free_tree(&bp->root);
	bp->rcursor = NULL;
	bp->rcursor_next = NULL;
	bp->wcursor = NULL;
	check_tree(&bp->root, __func__);
}

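/*
 * Find the first clear bit in [start, end].  If start lies inside an
 * extent, the first clear bit is the one just past that extent
 * (extents are kept maximal, so that bit is known to be clear).
 */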
static errcode_t rb_find_first_zero(ext2fs_generic_bitmap_64 bitmap,
				    __u64 start, __u64 end, __u64 *out)
{
	struct rb_node *parent = NULL, **n;
	struct ext2fs_rb_private *bp;
	struct bmap_rb_extent *ext;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	n = &bp->root.rb_node;
	start -= bitmap->start;
	end -= bitmap->start;

	if (start > end)
		return EINVAL;

	if (ext2fs_rb_empty_root(&bp->root))
		return ENOENT;

	while (*n) {
		parent = *n;
		ext = node_to_extent(parent);
		if (start < ext->start) {
			n = &(*n)->rb_left;
		} else if (start >= (ext->start + ext->count)) {
			n = &(*n)->rb_right;
		} else if (ext->start + ext->count <= end) {
			*out = ext->start + ext->count + bitmap->start;
			return 0;
		} else
			return ENOENT;
	}

	*out = start + bitmap->start;
	return 0;
}

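/*
 * Find the first set bit in [start, end]: either start itself, or the
 * first bit of the next extent beginning after start.
 */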
static errcode_t rb_find_first_set(ext2fs_generic_bitmap_64 bitmap,
				   __u64 start, __u64 end, __u64 *out)
{
	struct rb_node *parent = NULL, **n;
	struct rb_node *node;
	struct ext2fs_rb_private *bp;
	struct bmap_rb_extent *ext;

	bp = (struct ext2fs_rb_private *) bitmap->private;
	n = &bp->root.rb_node;
	start -= bitmap->start;
	end -= bitmap->start;

	if (start > end)
		return EINVAL;

	if (ext2fs_rb_empty_root(&bp->root))
		return ENOENT;

	while (*n) {
		parent = *n;
		ext = node_to_extent(parent);
		if (start < ext->start) {
			n = &(*n)->rb_left;
		} else if (start >= (ext->start + ext->count)) {
			n = &(*n)->rb_right;
		} else {
			/* The start bit is set */
			*out = start + bitmap->start;
			return 0;
		}
	}

	node = parent;
	ext = node_to_extent(node);
	if (ext->start < start) {
		node = ext2fs_rb_next(node);
		if (node == NULL)
			return ENOENT;
		ext = node_to_extent(node);
	}
	if (ext->start <= end) {
		*out = ext->start + bitmap->start;
		return 0;
	}
	return ENOENT;
}

#ifdef ENABLE_BMAP_STATS
static void rb_print_stats(ext2fs_generic_bitmap_64 bitmap)
{
	struct ext2fs_rb_private *bp;
	struct rb_node *node = NULL;
	struct bmap_rb_extent *ext;
	__u64 count = 0;
	__u64 max_size = 0;
	__u64 min_size = ULONG_MAX;
	__u64 size = 0, avg_size = 0;
	double eff;
#ifdef ENABLE_BMAP_STATS_OPS
	__u64 mark_all, test_all;
	double m_hit = 0.0, t_hit = 0.0;
#endif

	bp = (struct ext2fs_rb_private *) bitmap->private;

	for (node = ext2fs_rb_first(&bp->root); node != NULL;
	     node = ext2fs_rb_next(node)) {
		ext = node_to_extent(node);
		count++;
		if (ext->count > max_size)
			max_size = ext->count;
		if (ext->count < min_size)
			min_size = ext->count;
		size += ext->count;
	}

	if (count)
		avg_size = size / count;
	if (min_size == ULONG_MAX)
		min_size = 0;
	eff = (double)((count * sizeof(struct bmap_rb_extent)) << 3) /
	      (bitmap->real_end - bitmap->start);
#ifdef ENABLE_BMAP_STATS_OPS
	mark_all = bitmap->stats.mark_count + bitmap->stats.mark_ext_count;
	test_all = bitmap->stats.test_count + bitmap->stats.test_ext_count;
	if (mark_all)
		m_hit = ((double)bp->mark_hit / mark_all) * 100;
	if (test_all)
		t_hit = ((double)bp->test_hit / test_all) * 100;

	fprintf(stderr, "%16llu cache hits on test (%.2f%%)\n"
		"%16llu cache hits on mark (%.2f%%)\n",
		bp->test_hit, t_hit, bp->mark_hit, m_hit);
#endif
	fprintf(stderr, "%16llu extents (%llu bytes)\n",
		count, ((count * sizeof(struct bmap_rb_extent)) +
			sizeof(struct ext2fs_rb_private)));
	fprintf(stderr, "%16llu bits minimum size\n",
		min_size);
	fprintf(stderr, "%16llu bits maximum size\n"
		"%16llu bits average size\n",
		max_size, avg_size);
	fprintf(stderr, "%16llu bits set in bitmap (out of %llu)\n", size,
		bitmap->real_end - bitmap->start);
	fprintf(stderr,
		"%16.4lf memory / bitmap bit memory ratio (bitarray = 1)\n",
		eff);
}
#else
static void rb_print_stats(ext2fs_generic_bitmap_64 bitmap EXT2FS_ATTR((unused)))
{
}
#endif

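/*
 * Operations vector for the rb-tree based 64-bit bitmap backend
 * (EXT2FS_BMAP64_RBTREE).
 */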
struct ext2_bitmap_ops ext2fs_blkmap64_rbtree = {
	.type = EXT2FS_BMAP64_RBTREE,
	.new_bmap = rb_new_bmap,
	.free_bmap = rb_free_bmap,
	.copy_bmap = rb_copy_bmap,
	.resize_bmap = rb_resize_bmap,
	.mark_bmap = rb_mark_bmap,
	.unmark_bmap = rb_unmark_bmap,
	.test_bmap = rb_test_bmap,
	.test_clear_bmap_extent = rb_test_clear_bmap_extent,
	.mark_bmap_extent = rb_mark_bmap_extent,
	.unmark_bmap_extent = rb_unmark_bmap_extent,
	.set_bmap_range = rb_set_bmap_range,
	.get_bmap_range = rb_get_bmap_range,
	.clear_bmap = rb_clear_bmap,
	.print_stats = rb_print_stats,
	.find_first_zero = rb_find_first_zero,
	.find_first_set = rb_find_first_set,
};