/*
 * Copyright 2018 Google Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

extern crate smallvec;

use std::cmp::max;
use std::marker::PhantomData;
use std::ptr::write_bytes;
use std::slice::from_raw_parts;

use endian_scalar::{emplace_scalar, read_scalar_at};
use primitives::*;
use push::{Push, PushAlignment};
use table::Table;
use vector::{SafeSliceAccess, Vector};
use vtable::{field_index_to_field_offset, VTable};
use vtable_writer::VTableWriter;

pub const N_SMALLVEC_STRING_VECTOR_CAPACITY: usize = 16;

#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct FieldLoc {
    off: UOffsetT,
    id: VOffsetT,
}
/// FlatBufferBuilder builds a FlatBuffer by manipulating its internal
/// state. It has an owned `Vec<u8>` that grows as needed (up to the hardcoded
/// limit of 2GiB, which is set by the FlatBuffers format).
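///
/// # Example
///
/// A minimal sketch of driving the builder by hand; generated code normally
/// issues these calls, and the vtable offsets `4` and `6` below are the
/// offsets for field indices 0 and 1 (see `field_index_to_field_offset`).
///
/// ```ignore
/// let mut b = FlatBufferBuilder::new();
/// let name = b.create_string("example"); // out-of-line data is created first
/// let wip = b.start_table();
/// b.push_slot(4, 42u8, 0);               // skipped entirely if equal to the default
/// b.push_slot_always(6, name);           // offsets are always written
/// let root = b.end_table(wip);
/// b.finish(root, None);
/// let bytes: &[u8] = b.finished_data();
/// ```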
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FlatBufferBuilder<'fbb> {
    owned_buf: Vec<u8>,
    head: usize,

    field_locs: Vec<FieldLoc>,
    written_vtable_revpos: Vec<UOffsetT>,

    nested: bool,
    finished: bool,

    min_align: usize,

    _phantom: PhantomData<&'fbb ()>,
}

impl<'fbb> FlatBufferBuilder<'fbb> {
    /// Create a FlatBufferBuilder that is ready for writing.
    pub fn new() -> Self {
        Self::new_with_capacity(0)
    }

    /// Create a FlatBufferBuilder that is ready for writing, with a
    /// ready-to-use capacity of the provided size.
    ///
    /// The maximum valid value is `FLATBUFFERS_MAX_BUFFER_SIZE`.
    pub fn new_with_capacity(size: usize) -> Self {
        // we need to check the size here because we create the backing buffer
        // directly, bypassing the typical way of using grow_owned_buf:
        assert!(
            size <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot initialize buffer bigger than 2 gigabytes"
        );

        FlatBufferBuilder {
            owned_buf: vec![0u8; size],
            head: size,

            field_locs: Vec::new(),
            written_vtable_revpos: Vec::new(),

            nested: false,
            finished: false,

            min_align: 0,

            _phantom: PhantomData,
        }
    }

    /// Reset the FlatBufferBuilder internal state. Use this method after a
    /// call to a `finish` function in order to re-use a FlatBufferBuilder.
    ///
    /// This function is the only way to reset the `finished` state and start
    /// again.
    ///
    /// If you are using a FlatBufferBuilder repeatedly, make sure to use this
    /// function, because it re-uses the FlatBufferBuilder's existing
    /// heap-allocated `Vec<u8>` internal buffer. This offers significant speed
    /// improvements as compared to creating a new FlatBufferBuilder for every
    /// new object.
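    ///
    /// # Example
    ///
    /// A sketch of the re-use pattern; the ellipses stand for ordinary
    /// table/vector construction as elsewhere in this API:
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// // ... build and finish a first buffer, copy `b.finished_data()` out ...
    /// b.reset(); // zeroes the dirty region, keeps the allocation
    /// // ... build a second buffer with no new heap allocation ...
    /// ```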
    pub fn reset(&mut self) {
        // memset only the part of the buffer that could be dirty:
        {
            let to_clear = self.owned_buf.len() - self.head;
            let ptr = (&mut self.owned_buf[self.head..]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, to_clear);
            }
        }

        self.head = self.owned_buf.len();
        self.written_vtable_revpos.clear();

        self.nested = false;
        self.finished = false;

        self.min_align = 0;
    }

    /// Destroy the FlatBufferBuilder, returning its internal byte vector
    /// and the index into it that represents the start of valid data.
    pub fn collapse(self) -> (Vec<u8>, usize) {
        (self.owned_buf, self.head)
    }

    /// Push a Push'able value onto the front of the in-progress data.
    ///
    /// This function uses traits to provide a unified API for writing
    /// scalars, tables, vectors, and WIPOffsets.
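    ///
    /// # Example
    ///
    /// A sketch: the buffer is built back-to-front, so later pushes land in
    /// front of earlier ones.
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// b.push(1u32);
    /// b.push(2u32); // the in-progress data now starts with 2, then 1
    /// ```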
    #[inline]
    pub fn push<P: Push>(&mut self, x: P) -> WIPOffset<P::Output> {
        let sz = P::size();
        self.align(sz, P::alignment());
        self.make_space(sz);
        {
            let (dst, rest) = (&mut self.owned_buf[self.head..]).split_at_mut(sz);
            x.push(dst, rest);
        }
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable. If the value matches
    /// the default, then this is a no-op.
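    ///
    /// # Example
    ///
    /// A sketch, using `4` and `6` as the vtable offsets of field indices 0
    /// and 1:
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// let wip = b.start_table();
    /// b.push_slot(4, 0u8, 0); // equal to the default: nothing is written
    /// b.push_slot(6, 7u8, 0); // differs: value and vtable entry are written
    /// let root = b.end_table(wip);
    /// ```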
    #[inline]
    pub fn push_slot<X: Push + PartialEq>(&mut self, slotoff: VOffsetT, x: X, default: X) {
        self.assert_nested("push_slot");
        if x == default {
            return;
        }
        self.push_slot_always(slotoff, x);
    }

    /// Push a Push'able value onto the front of the in-progress data, and
    /// store a reference to it in the in-progress vtable.
    #[inline]
    pub fn push_slot_always<X: Push>(&mut self, slotoff: VOffsetT, x: X) {
        self.assert_nested("push_slot_always");
        let off = self.push(x);
        self.track_field(slotoff, off.value());
    }

    /// Retrieve the number of vtables that have been serialized into the
    /// FlatBuffer. This is primarily used to check vtable deduplication.
    #[inline]
    pub fn num_written_vtables(&self) -> usize {
        self.written_vtable_revpos.len()
    }

    /// Start a Table write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Users probably want to use `push_slot` to add values after calling this.
    #[inline]
    pub fn start_table(&mut self) -> WIPOffset<TableUnfinishedWIPOffset> {
        self.assert_not_nested(
            "start_table can not be called when a table or vector is under construction",
        );
        self.nested = true;

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// End a Table write.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_table(
        &mut self,
        off: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<TableFinishedWIPOffset> {
        self.assert_nested("end_table");

        let o = self.write_vtable(off);

        self.nested = false;
        self.field_locs.clear();

        WIPOffset::new(o.value())
    }

    /// Start a Vector write.
    ///
    /// Asserts that the builder is not in a nested state.
    ///
    /// Most users will prefer to call `create_vector`.
    /// Speed optimizing users who choose to create vectors manually using this
    /// function will want to use `push` to add values.
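    ///
    /// # Example
    ///
    /// A sketch of manual vector construction; push the elements in reverse,
    /// since the buffer is built back-to-front:
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// let items = [1u32, 2, 3];
    /// b.start_vector::<u32>(items.len());
    /// for &i in items.iter().rev() {
    ///     b.push(i);
    /// }
    /// let vec_offset = b.end_vector::<u32>(items.len());
    /// ```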
    #[inline]
    pub fn start_vector<T: Push>(&mut self, num_items: usize) {
        self.assert_not_nested(
            "start_vector can not be called when a table or vector is under construction",
        );
        self.nested = true;
        self.align(num_items * T::size(), T::alignment().max_of(SIZE_UOFFSET));
    }

    /// End a Vector write.
    ///
    /// Note that the `num_elems` parameter is the number of written items, not
    /// the byte count.
    ///
    /// Asserts that the builder is in a nested state.
    #[inline]
    pub fn end_vector<T: Push>(&mut self, num_elems: usize) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_nested("end_vector");
        self.nested = false;
        let o = self.push::<UOffsetT>(num_elems as UOffsetT);
        WIPOffset::new(o.value())
    }

    /// Create a utf8 string.
    ///
    /// The wire format represents this as a zero-terminated byte vector.
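    ///
    /// # Example
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// let hello = b.create_string("hello");
    /// // `hello` is a WIPOffset<&str>, usable with e.g. `push_slot_always`.
    /// ```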
    #[inline]
    pub fn create_string<'a: 'b, 'b>(&'a mut self, s: &'b str) -> WIPOffset<&'fbb str> {
        self.assert_not_nested(
            "create_string can not be called when a table or vector is under construction",
        );
        WIPOffset::new(self.create_byte_string(s.as_bytes()).value())
    }

    /// Create a zero-terminated byte vector.
    #[inline]
    pub fn create_byte_string(&mut self, data: &[u8]) -> WIPOffset<&'fbb [u8]> {
        self.assert_not_nested(
            "create_byte_string can not be called when a table or vector is under construction",
        );
        self.align(data.len() + 1, PushAlignment::new(SIZE_UOFFSET));
        self.push(0u8);
        self.push_bytes_unprefixed(data);
        self.push(data.len() as UOffsetT);
        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector by memcpy'ing. This is much faster than calling
    /// `create_vector`, but the underlying type must be represented as
    /// little-endian on the host machine. This property is encoded in the
    /// type system through the SafeSliceAccess trait. The following types are
    /// always safe, on any platform: bool, u8, i8, and any
    /// FlatBuffers-generated struct.
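    ///
    /// # Example
    ///
    /// A sketch with `u8`, which is always endian-safe:
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// let payload: &[u8] = &[0xDE, 0xAD, 0xBE, 0xEF];
    /// let vec_offset = b.create_vector_direct(payload);
    /// ```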
    #[inline]
    pub fn create_vector_direct<'a: 'b, 'b, T: SafeSliceAccess + Push + Sized + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T>> {
        self.assert_not_nested(
            "create_vector_direct can not be called when a table or vector is under construction",
        );
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));

        let bytes = {
            let ptr = items.as_ptr() as *const T as *const u8;
            unsafe { from_raw_parts(ptr, items.len() * elem_size) }
        };
        self.push_bytes_unprefixed(bytes);
        self.push(items.len() as UOffsetT);

        WIPOffset::new(self.used_space() as UOffsetT)
    }

    /// Create a vector of strings.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
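    ///
    /// # Example
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// let names = ["foo", "bar"];
    /// let vec_offset = b.create_vector_of_strings(&names);
    /// ```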
    #[inline]
    pub fn create_vector_of_strings<'a, 'b>(
        &'a mut self,
        xs: &'b [&'b str],
    ) -> WIPOffset<Vector<'fbb, ForwardsUOffset<&'fbb str>>> {
        self.assert_not_nested(
            "create_vector_of_strings can not be called when a table or vector is under construction",
        );
        // internally, smallvec can be a stack-allocated or heap-allocated vector:
        // if xs.len() > N_SMALLVEC_STRING_VECTOR_CAPACITY then it will overflow to the heap.
        let mut offsets: smallvec::SmallVec<[WIPOffset<&str>; N_SMALLVEC_STRING_VECTOR_CAPACITY]> =
            smallvec::SmallVec::with_capacity(xs.len());
        unsafe {
            offsets.set_len(xs.len());
        }

        // note that this happens in reverse, because the buffer is built back-to-front:
        for (i, &s) in xs.iter().enumerate().rev() {
            let o = self.create_string(s);
            offsets[i] = o;
        }
        self.create_vector(&offsets[..])
    }

    /// Create a vector of Push-able objects.
    ///
    /// Speed-sensitive users may wish to reduce memory usage by creating the
    /// vector manually: use `start_vector`, `push`, and `end_vector`.
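    ///
    /// # Example
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// let numbers = [1u64, 2, 3];
    /// let vec_offset = b.create_vector(&numbers);
    /// ```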
    #[inline]
    pub fn create_vector<'a: 'b, 'b, T: Push + Copy + 'b>(
        &'a mut self,
        items: &'b [T],
    ) -> WIPOffset<Vector<'fbb, T::Output>> {
        let elem_size = T::size();
        self.align(items.len() * elem_size, T::alignment().max_of(SIZE_UOFFSET));
        for i in (0..items.len()).rev() {
            self.push(items[i]);
        }
        WIPOffset::new(self.push::<UOffsetT>(items.len() as UOffsetT).value())
    }

    /// Get the byte slice for the data that has been written, regardless of
    /// whether it has been finished.
    #[inline]
    pub fn unfinished_data(&self) -> &[u8] {
        &self.owned_buf[self.head..]
    }

    /// Get the byte slice for the data that has been written after a call to
    /// one of the `finish` functions.
    #[inline]
    pub fn finished_data(&self) -> &[u8] {
        self.assert_finished("finished_data cannot be called when the buffer is not yet finished");
        &self.owned_buf[self.head..]
    }

    /// Assert that a field is present in the just-finished Table.
    ///
    /// This is somewhat low-level and is mostly used by the generated code.
    #[inline]
    pub fn required(
        &self,
        tab_revloc: WIPOffset<TableFinishedWIPOffset>,
        slot_byte_loc: VOffsetT,
        assert_msg_name: &'static str,
    ) {
        let idx = self.used_space() - tab_revloc.value() as usize;
        let tab = Table::new(&self.owned_buf[self.head..], idx);
        let o = tab.vtable().get(slot_byte_loc) as usize;
        assert!(o != 0, "missing required field {}", assert_msg_name);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, pushing a size prefix on to it, and marking the
    /// internal state of the FlatBufferBuilder as `finished`. Afterwards,
    /// users can call `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_size_prefixed<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, true);
    }

    /// Finalize the FlatBuffer by: aligning it, pushing an optional file
    /// identifier on to it, and marking the internal state of the
    /// FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
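    ///
    /// # Example
    ///
    /// A sketch; file identifiers must be exactly `FILE_IDENTIFIER_LENGTH`
    /// (4) bytes, and `"MONS"` here is just a placeholder:
    ///
    /// ```ignore
    /// let mut b = FlatBufferBuilder::new();
    /// let wip = b.start_table();
    /// let root = b.end_table(wip);
    /// b.finish(root, Some("MONS"));
    /// let bytes = b.finished_data();
    /// ```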
    #[inline]
    pub fn finish<T>(&mut self, root: WIPOffset<T>, file_identifier: Option<&str>) {
        self.finish_with_opts(root, file_identifier, false);
    }

    /// Finalize the FlatBuffer by: aligning it and marking the internal state
    /// of the FlatBufferBuilder as `finished`. Afterwards, users can call
    /// `finished_data` to get the resulting data.
    #[inline]
    pub fn finish_minimal<T>(&mut self, root: WIPOffset<T>) {
        self.finish_with_opts(root, None, false);
    }

    #[inline]
    fn used_space(&self) -> usize {
        self.owned_buf.len() - self.head
    }

    #[inline]
    fn track_field(&mut self, slot_off: VOffsetT, off: UOffsetT) {
        let fl = FieldLoc { id: slot_off, off };
        self.field_locs.push(fl);
    }

    /// Write the VTable, if it is new.
    fn write_vtable(
        &mut self,
        table_tail_revloc: WIPOffset<TableUnfinishedWIPOffset>,
    ) -> WIPOffset<VTableWIPOffset> {
        self.assert_nested("write_vtable");

        // Write the vtable offset, which is the start of any Table.
        // We fill its value later.
        let object_revloc_to_vtable: WIPOffset<VTableWIPOffset> =
            WIPOffset::new(self.push::<UOffsetT>(0xF0F0_F0F0 as UOffsetT).value());

        // Layout of the data this function will create when a new vtable is
        // needed.
        // --------------------------------------------------------------------
        // vtable starts here
        // | x, x -- vtable len (bytes) [u16]
        // | x, x -- object inline len (bytes) [u16]
        // | x, x -- zero, or num bytes from start of object to field #0   [u16]
        // | ...
        // | x, x -- zero, or num bytes from start of object to field #n-1 [u16]
        // vtable ends here
        // table starts here
        // | x, x, x, x -- offset (negative direction) to the vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------
        //
        // Layout of the data this function will create when we re-use an
        // existing vtable.
        //
        // We always serialize this particular vtable, then compare it to the
        // other vtables we know about to see if there is a duplicate. If there
        // is, then we erase the serialized vtable we just made.
        // We serialize it first so that we are able to do byte-by-byte
        // comparisons with already-serialized vtables. This 1) saves
        // bookkeeping space (we only keep revlocs to existing vtables), 2)
        // allows us to convert to little-endian once, then do
        // fast memcmp comparisons, and 3) by ensuring we are comparing real
        // serialized vtables, we can be more assured that we are doing the
        // comparisons correctly.
        //
        // --------------------------------------------------------------------
        // table starts here
        // | x, x, x, x -- offset (negative direction) to an existing vtable [i32]
        // |               aka "vtableoffset"
        // | -- table inline data begins here, we don't touch it --
        // table ends here -- aka "table_start"
        // --------------------------------------------------------------------

        // fill the WIP vtable with zeros:
        let vtable_byte_len = get_vtable_byte_len(&self.field_locs);
        self.make_space(vtable_byte_len);

        // compute the length of the table (not vtable!) in bytes:
        let table_object_size = object_revloc_to_vtable.value() - table_tail_revloc.value();
        debug_assert!(table_object_size < 0x10000); // vtables use 16-bit offsets

        // Write the VTable (we may delete it afterwards, if it is a duplicate):
        let vt_start_pos = self.head;
        let vt_end_pos = self.head + vtable_byte_len;
        {
            // write the vtable header:
            let vtfw = &mut VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]);
            vtfw.write_vtable_byte_length(vtable_byte_len as VOffsetT);
            vtfw.write_object_inline_size(table_object_size as VOffsetT);

            // serialize every FieldLoc to the vtable:
            for &fl in self.field_locs.iter() {
                let pos: VOffsetT = (object_revloc_to_vtable.value() - fl.off) as VOffsetT;
                debug_assert_eq!(
                    vtfw.get_field_offset(fl.id),
                    0,
                    "tried to write a vtable field multiple times"
                );
                vtfw.write_field_offset(fl.id, pos);
            }
        }
        let dup_vt_use = {
            let this_vt = VTable::init(&self.owned_buf[..], self.head);
            self.find_duplicate_stored_vtable_revloc(this_vt)
        };

        let vt_use = match dup_vt_use {
            Some(n) => {
                VTableWriter::init(&mut self.owned_buf[vt_start_pos..vt_end_pos]).clear();
                self.head += vtable_byte_len;
                n
            }
            None => {
                let new_vt_use = self.used_space() as UOffsetT;
                self.written_vtable_revpos.push(new_vt_use);
                new_vt_use
            }
        };

        {
            let n = self.head + self.used_space() - object_revloc_to_vtable.value() as usize;
            let saw = read_scalar_at::<UOffsetT>(&self.owned_buf, n);
            debug_assert_eq!(saw, 0xF0F0_F0F0);
            emplace_scalar::<SOffsetT>(
                &mut self.owned_buf[n..n + SIZE_SOFFSET],
                vt_use as SOffsetT - object_revloc_to_vtable.value() as SOffsetT,
            );
        }

        self.field_locs.clear();

        object_revloc_to_vtable
    }

    #[inline]
    fn find_duplicate_stored_vtable_revloc(&self, needle: VTable) -> Option<UOffsetT> {
        for &revloc in self.written_vtable_revpos.iter().rev() {
            let o = VTable::init(
                &self.owned_buf[..],
                self.head + self.used_space() - revloc as usize,
            );
            if needle == o {
                return Some(revloc);
            }
        }
        None
    }

    // Only call this when you know it is safe to double the size of the buffer.
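    //
    // A worked example (an illustrative sketch, not from the original source):
    // growing a 16-byte buffer with head == 6 (i.e. 10 used bytes) doubles it
    // to 32 bytes; the used bytes move from [6..16] to [22..32], and [0..16]
    // is zeroed so the unused region stays all-zero.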
    #[inline]
    fn grow_owned_buf(&mut self) {
        let old_len = self.owned_buf.len();
        let new_len = max(1, old_len * 2);

        let starting_active_size = self.used_space();

        let diff = new_len - old_len;
        self.owned_buf.resize(new_len, 0);
        self.head += diff;

        let ending_active_size = self.used_space();
        debug_assert_eq!(starting_active_size, ending_active_size);

        if new_len == 1 {
            return;
        }

        // calculate the midpoint, and safely copy the old end data to the new
        // end position:
        let middle = new_len / 2;
        {
            let (left, right) = &mut self.owned_buf[..].split_at_mut(middle);
            right.copy_from_slice(left);
        }
        // finally, zero out the old end data.
        {
            let ptr = (&mut self.owned_buf[..middle]).as_mut_ptr();
            unsafe {
                write_bytes(ptr, 0, middle);
            }
        }
    }

    // Whether a size prefix is written changes how the data is loaded, so the
    // finish* functions are split along those lines.
    fn finish_with_opts<T>(
        &mut self,
        root: WIPOffset<T>,
        file_identifier: Option<&str>,
        size_prefixed: bool,
    ) {
        self.assert_not_finished("buffer cannot be finished when it is already finished");
        self.assert_not_nested(
            "buffer cannot be finished when a table or vector is under construction",
        );
        self.written_vtable_revpos.clear();

        let to_align = {
            // for the root offset:
            let a = SIZE_UOFFSET;
            // for the size prefix:
            let b = if size_prefixed { SIZE_UOFFSET } else { 0 };
            // for the file identifier (a string that is not zero-terminated):
            let c = if file_identifier.is_some() {
                FILE_IDENTIFIER_LENGTH
            } else {
                0
            };
            a + b + c
        };

        {
            let ma = PushAlignment::new(self.min_align);
            self.align(to_align, ma);
        }

        if let Some(ident) = file_identifier {
            debug_assert_eq!(ident.len(), FILE_IDENTIFIER_LENGTH);
            self.push_bytes_unprefixed(ident.as_bytes());
        }

        self.push(root);

        if size_prefixed {
            let sz = self.used_space() as UOffsetT;
            self.push::<UOffsetT>(sz);
        }
        self.finished = true;
    }

    #[inline]
    fn align(&mut self, len: usize, alignment: PushAlignment) {
        self.track_min_align(alignment.value());
        let s = self.used_space();
        self.make_space(padding_bytes(s + len, alignment.value()));
    }

    #[inline]
    fn track_min_align(&mut self, alignment: usize) {
        self.min_align = max(self.min_align, alignment);
    }

    #[inline]
    fn push_bytes_unprefixed(&mut self, x: &[u8]) -> UOffsetT {
        let n = self.make_space(x.len());
        self.owned_buf[n..n + x.len()].copy_from_slice(x);

        n as UOffsetT
    }

    #[inline]
    fn make_space(&mut self, want: usize) -> usize {
        self.ensure_capacity(want);
        self.head -= want;
        self.head
    }

    #[inline]
    fn ensure_capacity(&mut self, want: usize) -> usize {
        if self.unused_ready_space() >= want {
            return want;
        }
        assert!(
            want <= FLATBUFFERS_MAX_BUFFER_SIZE,
            "cannot grow buffer beyond 2 gigabytes"
        );

        while self.unused_ready_space() < want {
            self.grow_owned_buf();
        }
        want
    }

    #[inline]
    fn unused_ready_space(&self) -> usize {
        self.head
    }

    #[inline]
    fn assert_nested(&self, fn_name: &'static str) {
        // we don't assert that self.field_locs.len() > 0 because the vtable
        // could be empty (e.g. for empty tables, or for all-default values).
        debug_assert!(
            self.nested,
            "incorrect FlatBufferBuilder usage: {} must be called while in a nested state",
            fn_name
        );
    }

    #[inline]
    fn assert_not_nested(&self, msg: &'static str) {
        debug_assert!(!self.nested, msg);
    }

    #[inline]
    fn assert_finished(&self, msg: &'static str) {
        debug_assert!(self.finished, msg);
    }

    #[inline]
    fn assert_not_finished(&self, msg: &'static str) {
        debug_assert!(!self.finished, msg);
    }
}

/// Compute the length of the vtable needed to represent the provided FieldLocs.
/// If there are no FieldLocs, then provide the minimum number of bytes
/// required: enough to write the VTable header.
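///
/// For example (an illustrative sketch): two fields at indices 0 and 1 sit at
/// vtable offsets 4 and 6, so the vtable needs 6 + 2 = 8 bytes: two 2-byte
/// header entries (vtable length and object inline length) plus two 2-byte
/// field slots.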
#[inline]
fn get_vtable_byte_len(field_locs: &[FieldLoc]) -> usize {
    let max_voffset = field_locs.iter().map(|fl| fl.id).max();
    match max_voffset {
        None => field_index_to_field_offset(0) as usize,
        Some(mv) => mv as usize + SIZE_VOFFSET,
    }
}

#[inline]
fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {
    // ((!buf_size) + 1) & (scalar_size - 1)
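    //
    // For a power-of-two scalar_size this computes (-buf_size) mod scalar_size:
    // the number of bytes needed to round buf_size up to the alignment.
    // Worked examples (illustrative): padding_bytes(6, 4) == 2, and
    // padding_bytes(8, 4) == 0.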
    (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))
}

impl<'fbb> Default for FlatBufferBuilder<'fbb> {
    fn default() -> Self {
        Self::new_with_capacity(0)
    }
}