1#!/usr/bin/env python3
2
3#
4# Copyright (C) 2018 The Android Open Source Project
5#
6# Licensed under the Apache License, Version 2.0 (the "License");
7# you may not use this file except in compliance with the License.
8# You may obtain a copy of the License at
9#
10#      http://www.apache.org/licenses/LICENSE-2.0
11#
12# Unless required by applicable law or agreed to in writing, software
13# distributed under the License is distributed on an "AS IS" BASIS,
14# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15# See the License for the specific language governing permissions and
16# limitations under the License.
17#
18
19"""This module implements a Android.bp parser."""
20
21import collections
22import glob
23import itertools
24import os
25import re
26import sys
27
28
29#------------------------------------------------------------------------------
30# Python 2 compatibility
31#------------------------------------------------------------------------------
32
33if sys.version_info >= (3,):
34    py3_chr = chr  # pylint: disable=invalid-name
35else:
36    def py3_chr(codepoint):
37        """Convert an integer character codepoint into a utf-8 string."""
38        return unichr(codepoint).encode('utf-8')
39
40try:
41    from enum import Enum
42except ImportError:
43    class _Enum(object):  # pylint: disable=too-few-public-methods
44        """A name-value pair for each enumeration."""
45
46        __slot__ = ('name', 'value')
47
48
49        def __init__(self, name, value):
50            """Create a name-value pair."""
51            self.name = name
52            self.value = value
53
54
55        def __repr__(self):
56            """Return the name of the enumeration."""
57            return self.name
58
59
60    class _EnumMeta(type):  # pylint: disable=too-few-public-methods
61        """Metaclass for Enum base class."""
62
63        def __new__(mcs, name, bases, attrs):
64            """Collects enumerations from attributes of the derived classes."""
65            enums = []
66            new_attrs = {'_enums': enums}
67
68            for key, value in attrs.iteritems():
69                if key.startswith('_'):
70                    new_attrs[key] = value
71                else:
72                    item = _Enum(key, value)
73                    enums.append(item)
74                    new_attrs[key] = item
75
76            return type.__new__(mcs, name, bases, new_attrs)
77
78
79        def __iter__(cls):
80            """Iterate the list of enumerations."""
81            return iter(cls._enums)
82
83
84    class Enum(object):  # pylint: disable=too-few-public-methods
85        """Enum base class."""
86        __metaclass__ = _EnumMeta
87
88
89#------------------------------------------------------------------------------
90# Lexer
91#------------------------------------------------------------------------------
92
93class Token(Enum):  # pylint: disable=too-few-public-methods
94    """Token enumerations."""
95
96    EOF = 0
97
98    IDENT = 1
99    LPAREN = 2
100    RPAREN = 3
101    LBRACKET = 4
102    RBRACKET = 5
103    LBRACE = 6
104    RBRACE = 7
105    COLON = 8
106    ASSIGN = 9
107    ASSIGNPLUS = 10
108    PLUS = 11
109    COMMA = 12
110    STRING = 13
111    INTEGER = 14
112
113    COMMENT = 15
114    SPACE = 16
115
116
117class LexerError(ValueError):
118    """Lexer error exception class."""
119
120    def __init__(self, buf, pos, message):
121        """Create a lexer error exception object."""
122        super(LexerError, self).__init__(message)
123        self.message = message
124        self.line, self.column = Lexer.compute_line_column(buf, pos)
125
126
127    def __str__(self):
128        """Convert lexer error to string representation."""
129        return 'LexerError: {}:{}: {}'.format(
130            self.line, self.column, self.message)
131
132
133class Lexer(object):
134    """Lexer to tokenize the input string."""
135
136    def __init__(self, buf, offset=0):
137        """Tokenize the source code in buf starting from offset.
138
139        Args:
140            buf (string) The source code to be tokenized.
141            offset (int) The position to start.
142        """
143
144        self.buf = buf
145
146        self.start = None
147        self.end = offset
148        self.token = None
149        self.literal = None
150
151        self._next()
152
153
154    def consume(self, *tokens):
155        """Consume one or more token."""
156
157        for token in tokens:
158            if token == self.token:
159                self._next()
160            else:
161                raise LexerError(self.buf, self.start,
162                                 'unexpected token ' + self.token.name)
163
164
165    def _next(self):
166        """Read next non-comment non-space token."""
167
168        buf_len = len(self.buf)
169        while self.end < buf_len:
170            self.start = self.end
171            self.token, self.end, self.literal = self.lex(self.buf, self.start)
172            if self.token != Token.SPACE and self.token != Token.COMMENT:
173                return
174
175        self.start = self.end
176        self.token = Token.EOF
177        self.literal = None
178
179
180    @staticmethod
181    def compute_line_column(buf, pos):
182        """Compute the line number and the column number of a given position in
183        the buffer."""
184
185        prior = buf[0:pos]
186        newline_pos = prior.rfind('\n')
187        if newline_pos == -1:
188            return (1, pos + 1)
189        return (prior.count('\n') + 1, pos - newline_pos)
190
191
192    UNICODE_CHARS_PATTERN = re.compile('[^\\\\\\n"]+')
193
194
195    ESCAPE_CHAR_TABLE = {
196        'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
197        'v': '\v', '\\': '\\', '\'': '\'', '\"': '\"',
198    }
199
200
201    OCT_TABLE = {str(i) for i in range(8)}
202
203
204    @staticmethod
205    def decode_oct(buf, offset, start, end):
206        """Read characters from buf[start:end] and interpret them as an octal
207        integer."""
208
209        if end > len(buf):
210            raise LexerError(buf, offset, 'bad octal escape sequence')
211        try:
212            codepoint = int(buf[start:end], 8)
213        except ValueError:
214            raise LexerError(buf, offset, 'bad octal escape sequence')
215        if codepoint > 0xff:
216            raise LexerError(buf, offset, 'bad octal escape sequence')
217        return codepoint
218
219
220    @staticmethod
221    def decode_hex(buf, offset, start, end):
222        """Read characters from buf[start:end] and interpret them as a
223        hexadecimal integer."""
224
225        if end > len(buf):
226            raise LexerError(buf, offset, 'bad hex escape sequence')
227        try:
228            return int(buf[start:end], 16)
229        except ValueError:
230            raise LexerError(buf, offset, 'bad hex escape sequence')
231
232
233    @classmethod
234    def lex_interpreted_string(cls, buf, offset):
235        """Tokenize a golang interpreted string.
236
237        Args:
238            buf (str)    The source code buffer.
239            offset (int) The position to find a golang interpreted string
240                         literal.
241
242        Returns:
243            A tuple with the end of matched buffer and the interpreted string
244            literal.
245        """
246
247        buf_len = len(buf)
248        pos = offset + 1
249        literal = ''
250        while pos < buf_len:
251            # Match unicode characters
252            match = cls.UNICODE_CHARS_PATTERN.match(buf, pos)
253            if match:
254                literal += match.group(0)
255                pos = match.end()
256            # Read the next character
257            try:
258                char = buf[pos]
259            except IndexError:
260                raise LexerError(buf, pos,
261                                 'unclosed interpreted string literal')
262            if char == '\\':
263                # Escape sequences
264                try:
265                    char = buf[pos + 1]
266                except IndexError:
267                    raise LexerError(buf, pos, 'bad escape sequence')
268                if char in cls.OCT_TABLE:
269                    literal += chr(cls.decode_oct(buf, pos, pos + 1, pos + 4))
270                    pos += 4
271                elif char == 'x':
272                    literal += chr(cls.decode_hex(buf, pos, pos + 2, pos + 4))
273                    pos += 4
274                elif char == 'u':
275                    literal += py3_chr(
276                        cls.decode_hex(buf, pos, pos + 2, pos + 6))
277                    pos += 6
278                elif char == 'U':
279                    literal += py3_chr(
280                        cls.decode_hex(buf, pos, pos + 2, pos + 10))
281                    pos += 10
282                else:
283                    try:
284                        literal += cls.ESCAPE_CHAR_TABLE[char]
285                        pos += 2
286                    except KeyError:
287                        raise LexerError(buf, pos, 'bad escape sequence')
288                continue
289            if char == '"':
290                # End of string literal
291                return (pos + 1, literal)
292            raise LexerError(buf, pos, 'unclosed interpreted string literal')
293
294
295    @classmethod
296    def lex_string(cls, buf, offset):
297        """Tokenize a golang string literal.
298
299        Args:
300            buf (str)    The source code buffer.
301            offset (int) The position to find a golang string literal.
302
303        Returns:
304            A tuple with the end of matched buffer and the interpreted string
305            literal.
306        """
307
308        char = buf[offset]
309        if char == '`':
310            try:
311                end = buf.index('`', offset + 1)
312                return (end + 1, buf[offset + 1 : end])
313            except ValueError:
314                raise LexerError(buf, len(buf), 'unclosed raw string literal')
315        if char == '"':
316            return cls.lex_interpreted_string(buf, offset)
317        raise LexerError(buf, offset, 'no string literal start character')
318
319
320    LEXER_PATTERNS = (
321        (Token.IDENT, '[A-Za-z_][0-9A-Za-z_]*'),
322        (Token.LPAREN, '\\('),
323        (Token.RPAREN, '\\)'),
324        (Token.LBRACKET, '\\['),
325        (Token.RBRACKET, '\\]'),
326        (Token.LBRACE, '\\{'),
327        (Token.RBRACE, '\\}'),
328        (Token.COLON, ':'),
329        (Token.ASSIGN, '='),
330        (Token.ASSIGNPLUS, '\\+='),
331        (Token.PLUS, '\\+'),
332        (Token.COMMA, ','),
333        (Token.STRING, '["`]'),
334        (Token.INTEGER, '-{0,1}[0-9]+'),
335
336        (Token.COMMENT,
337         '/(?:(?:/[^\\n]*)|(?:\\*(?:(?:[^*]*)|(?:\\*+[^/*]))*\\*+/))'),
338        (Token.SPACE, '\\s+'),
339    )
340
341
342    LEXER_MATCHER = re.compile('|'.join(
343        '(' + pattern + ')' for _, pattern in LEXER_PATTERNS))
344
345
346    @classmethod
347    def lex(cls, buf, offset):
348        """Tokenize a token from buf[offset].
349
350        Args:
351            buf (string) The source code buffer.
352            offset (int) The position to find and tokenize a token.
353
354        Return:
355            A tuple with three elements.  The first element is the token id.
356            The second element is the end of the token.  The third element is
357            the value for strings or identifiers.
358        """
359
360        match = cls.LEXER_MATCHER.match(buf, offset)
361        if not match:
362            raise LexerError(buf, offset, 'unknown token')
363        token = cls.LEXER_PATTERNS[match.lastindex - 1][0]
364
365        if token == Token.STRING:
366            end, literal = cls.lex_string(buf, offset)
367        else:
368            end = match.end()
369            if token in {Token.IDENT, Token.INTEGER}:
370                literal = buf[offset:end]
371            else:
372                literal = None
373
374        return (token, end, literal)
375
376
377#------------------------------------------------------------------------------
378# AST
379#------------------------------------------------------------------------------
380
381class Expr(object):  # pylint: disable=too-few-public-methods
382    """Base class for all expressions."""
383
384    def eval(self, env):
385        """Evaluate the expression under an environment."""
386        raise NotImplementedError()
387
388
389class String(Expr, str):
390    """String constant literal."""
391
392    def eval(self, env):
393        """Evaluate the string expression under an environment."""
394        return self
395
396
397class Bool(Expr):  # pylint: disable=too-few-public-methods
398    """Boolean constant literal."""
399
400    __slots__ = ('value',)
401
402
403    def __init__(self, value):
404        """Create a boolean constant literal."""
405        self.value = value
406
407
408    def __repr__(self):
409        """Convert a boolean constant literal to string representation."""
410        return repr(self.value)
411
412
413    def __bool__(self):
414        """Convert boolean constant literal to Python bool type."""
415        return self.value
416
417    __nonzero__ = __bool__
418
419
420    def __eq__(self, rhs):
421        """Compare whether two instances are equal."""
422        return self.value == rhs.value
423
424
425    def __hash__(self):
426        """Compute the hashed value."""
427        return hash(self.value)
428
429
430    def eval(self, env):
431        """Evaluate the boolean expression under an environment."""
432        return self
433
434
435class Integer(Expr):  # pylint: disable=too-few-public-methods
436    """Integer constant literal."""
437
438    __slots__ = ('value',)
439
440
441    def __init__(self, value):
442        """Create an integer constant literal."""
443        self.value = value
444
445
446    def __repr__(self):
447        """Convert an integer constant literal to string representation."""
448        return repr(self.value)
449
450
451    def __bool__(self):
452        """Convert an integer constant literal to Python bool type."""
453        return bool(self.value)
454
455    __nonzero__ = __bool__
456
457
458    def __int__(self):
459        """Convert an integer constant literal to Python int type."""
460        return self.value
461
462
463    def __eq__(self, rhs):
464        """Compare whether two instances are equal."""
465        return self.value == rhs.value
466
467
468    def __hash__(self):
469        """Compute the hashed value."""
470        return hash(self.value)
471
472
473    def eval(self, env):
474        """Evaluate the integer expression under an environment."""
475        return self
476
477
478class VarRef(Expr):  # pylint: disable=too-few-public-methods
479    """A reference to a variable."""
480
481    def __init__(self, name, value):
482        """Create a variable reference with a name and the value under static
483        scoping."""
484        self.name = name
485        self.value = value
486
487
488    def __repr__(self):
489        """Convert a variable reference to string representation."""
490        return self.name
491
492
493    def eval(self, env):
494        """Evaluate the identifier under an environment."""
495        if self.value is None:
496            return env[self.name].eval(env)
497        return self.value.eval(env)
498
499
500class List(Expr, list):
501    """List expression."""
502
503    def eval(self, env):
504        """Evaluate list elements under an environment."""
505        return List(item.eval(env) for item in self)
506
507
508class Dict(Expr, collections.OrderedDict):
509    """Dictionary expression."""
510
511    def __repr__(self):
512        attrs = ', '.join(key + ': ' + repr(value)
513                          for key, value in self.items())
514        return '{' + attrs + '}'
515
516
517    def eval(self, env):
518        """Evaluate dictionary values under an environment."""
519        return Dict((key, value.eval(env)) for key, value in self.items())
520
521
522class Concat(Expr):  # pylint: disable=too-few-public-methods
523    """List/string/integer plus operator."""
524
525    __slots__ = ('lhs', 'rhs')
526
527
528    def __init__(self, lhs, rhs):
529        """Create a list/string/integer plus expression."""
530        self.lhs = lhs
531        self.rhs = rhs
532
533
534    def __repr__(self):
535        return '(' + repr(self.lhs) + ' + ' + repr(self.rhs) + ')'
536
537
538    def eval(self, env):
539        """Evaluate list/string/integer plus operator under an environment."""
540        lhs = self.lhs.eval(env)
541        rhs = self.rhs.eval(env)
542        if isinstance(lhs, List) and isinstance(rhs, List):
543            return List(itertools.chain(lhs, rhs))
544        if isinstance(lhs, String) and isinstance(rhs, String):
545            return String(lhs + rhs)
546        if isinstance(lhs, Integer) and isinstance(rhs, Integer):
547            return Integer(int(lhs) + int(rhs))
548        raise TypeError('bad plus operands')
549
550
551#------------------------------------------------------------------------------
552# Parser
553#------------------------------------------------------------------------------
554
555class ParseError(ValueError):
556    """Parser error exception class."""
557
558    def __init__(self, lexer, message):
559        """Create a parser error exception object."""
560        super(ParseError, self).__init__(message)
561        self.message = message
562        self.line, self.column = \
563            Lexer.compute_line_column(lexer.buf, lexer.start)
564
565
566    def __str__(self):
567        """Convert parser error to string representation."""
568        return 'ParseError: {}:{}: {}'.format(
569            self.line, self.column, self.message)
570
571
572class Parser(object):
573    """Parser to parse Android.bp files."""
574
575    def __init__(self, lexer, inherited_env=None):
576        """Initialize the parser with the lexer."""
577        self.lexer = lexer
578
579        self.var_defs = []
580        self.vars = {} if inherited_env is None else dict(inherited_env)
581        self.modules = []
582
583
584    def parse(self):
585        """Parse AST from tokens."""
586        lexer = self.lexer
587        while lexer.token != Token.EOF:
588            if lexer.token == Token.IDENT:
589                ident = self.parse_ident_lvalue()
590                if lexer.token in {Token.ASSIGN, Token.ASSIGNPLUS}:
591                    self.parse_assign(ident, lexer.token)
592                elif lexer.token in {Token.LBRACE, Token.LPAREN}:
593                    self.parse_module_definition(ident)
594                else:
595                    raise ParseError(lexer,
596                                     'unexpected token ' + lexer.token.name)
597            else:
598                raise ParseError(lexer, 'unexpected token ' + lexer.token.name)
599        lexer.consume(Token.EOF)
600
601
602    def create_var_ref(self, name):
603        """Create a variable reference."""
604        return VarRef(name, self.vars.get(name))
605
606
607    def define_var(self, name, value):
608        """Define a variable."""
609        self.var_defs.append((name, value))
610        self.vars[name] = value
611
612
613    def parse_assign(self, ident, assign_token):
614        """Parse an assignment statement."""
615        lexer = self.lexer
616        lexer.consume(assign_token)
617        value = self.parse_expression()
618        if assign_token == Token.ASSIGNPLUS:
619            value = Concat(self.create_var_ref(ident), value)
620        self.define_var(ident, value)
621
622
623    def parse_module_definition(self, module_ident):
624        """Parse a module definition."""
625        properties = self.parse_dict()
626        self.modules.append((module_ident, properties))
627
628
629    def parse_ident_lvalue(self):
630        """Parse an identifier as an l-value."""
631        ident = self.lexer.literal
632        self.lexer.consume(Token.IDENT)
633        return ident
634
635
636    def parse_ident_rvalue(self):
637        """Parse an identifier as a r-value.
638
639        Returns:
640            Returns VarRef if the literal is not 'true' nor 'false'.
641
642            Returns Bool(true/false) if the literal is either 'true' or 'false'.
643        """
644        lexer = self.lexer
645        if lexer.literal in {'true', 'false'}:
646            result = Bool(lexer.literal == 'true')
647        else:
648            result = self.create_var_ref(lexer.literal)
649        lexer.consume(Token.IDENT)
650        return result
651
652
653    def parse_string(self):
654        """Parse a string."""
655        lexer = self.lexer
656        string = String(lexer.literal)
657        lexer.consume(Token.STRING)
658        return string
659
660
661    def parse_integer(self):
662        """Parse an integer."""
663        lexer = self.lexer
664        integer = Integer(int(lexer.literal))
665        lexer.consume(Token.INTEGER)
666        return integer
667
668
669    def parse_operand(self):
670        """Parse an operand."""
671        lexer = self.lexer
672        token = lexer.token
673        if token == Token.STRING:
674            return self.parse_string()
675        if token == Token.IDENT:
676            return self.parse_ident_rvalue()
677        if token == Token.INTEGER:
678            return self.parse_integer()
679        if token == Token.LBRACKET:
680            return self.parse_list()
681        if token == Token.LBRACE:
682            return self.parse_dict()
683        if token == Token.LPAREN:
684            lexer.consume(Token.LPAREN)
685            operand = self.parse_expression()
686            lexer.consume(Token.RPAREN)
687            return operand
688        raise ParseError(lexer, 'unexpected token ' + token.name)
689
690
691    def parse_expression(self):
692        """Parse an expression."""
693        lexer = self.lexer
694        expr = self.parse_operand()
695        while lexer.token == Token.PLUS:
696            lexer.consume(Token.PLUS)
697            expr = Concat(expr, self.parse_operand())
698        return expr
699
700
701    def parse_list(self):
702        """Parse a list."""
703        result = List()
704        lexer = self.lexer
705        lexer.consume(Token.LBRACKET)
706        while lexer.token != Token.RBRACKET:
707            result.append(self.parse_expression())
708            if lexer.token == Token.COMMA:
709                lexer.consume(Token.COMMA)
710        lexer.consume(Token.RBRACKET)
711        return result
712
713
714    def parse_dict(self):
715        """Parse a dict."""
716        result = Dict()
717        lexer = self.lexer
718
719        is_func_syntax = lexer.token == Token.LPAREN
720        if is_func_syntax:
721            lexer.consume(Token.LPAREN)
722        else:
723            lexer.consume(Token.LBRACE)
724
725        while lexer.token != Token.RBRACE and lexer.token != Token.RPAREN:
726            if lexer.token != Token.IDENT:
727                raise ParseError(lexer, 'unexpected token ' + lexer.token.name)
728            key = self.parse_ident_lvalue()
729
730            if lexer.token == Token.ASSIGN:
731                lexer.consume(Token.ASSIGN)
732            else:
733                lexer.consume(Token.COLON)
734
735            value = self.parse_expression()
736            result[key] = value
737
738            if lexer.token == Token.COMMA:
739                lexer.consume(Token.COMMA)
740
741        if is_func_syntax:
742            lexer.consume(Token.RPAREN)
743        else:
744            lexer.consume(Token.RBRACE)
745
746        return result
747
748
749class RecursiveParser(object):
750    """This is a recursive parser which will parse blueprint files
751    recursively."""
752
753    def __init__(self):
754        self.visited = set()
755        self.modules = []
756
757
758    @staticmethod
759    def glob_sub_files(pattern, sub_file_name):
760        """List the sub file paths that match with the pattern with
761        wildcards."""
762
763        for path in glob.glob(pattern):
764            if os.path.isfile(path):
765                if os.path.basename(path) == sub_file_name:
766                    yield path
767            else:
768                sub_file_path = os.path.join(path, sub_file_name)
769                if os.path.isfile(sub_file_path):
770                    yield sub_file_path
771
772
773    @classmethod
774    def find_sub_files_from_env(cls, rootdir, env, use_subdirs,
775                                default_sub_name='Android.bp'):
776        """Find the sub files from the names specified in build, subdirs, and
777        optional_subdirs."""
778
779        subs = []
780
781        if 'build' in env:
782            subs.extend(os.path.join(rootdir, filename)
783                        for filename in env['build'].eval(env))
784        if use_subdirs:
785            sub_name = env['subname'] if 'subname' in env else default_sub_name
786
787            if 'subdirs' in env:
788                for path in env['subdirs'].eval(env):
789                    subs.extend(cls.glob_sub_files(os.path.join(rootdir, path),
790                                                   sub_name))
791            if 'optional_subdirs' in env:
792                for path in env['optional_subdirs'].eval(env):
793                    subs.extend(cls.glob_sub_files(os.path.join(rootdir, path),
794                                                   sub_name))
795        return subs
796
797
798    @staticmethod
799    def _read_file(path, env):
800        """Read a blueprint file and return modules and the environment."""
801        with open(path, 'r') as bp_file:
802            content = bp_file.read()
803        parser = Parser(Lexer(content), env)
804        parser.parse()
805        return (parser.modules, parser.vars)
806
807
808    def _parse_file(self, path, env, evaluate):
809        """Parse a blueprint file and append to self.modules."""
810        modules, sub_env = self._read_file(path, env)
811        if evaluate:
812            modules = [(ident, attrs.eval(env)) for ident, attrs in modules]
813        self.modules += modules
814        return sub_env
815
816
817    def _parse_file_recursive(self, path, env, evaluate, use_subdirs):
818        """Parse a blueprint file and recursively."""
819
820        self.visited.add(os.path.abspath(path))
821
822        sub_env = self._parse_file(path, env, evaluate)
823
824        rootdir = os.path.dirname(path)
825
826        sub_file_paths = self.find_sub_files_from_env(rootdir, sub_env,
827                                                      use_subdirs)
828
829        sub_env.pop('build', None)
830        sub_env.pop('subdirs', None)
831        sub_env.pop('optional_subdirs', None)
832
833        for sub_file_path in sub_file_paths:
834            if os.path.abspath(sub_file_path) not in self.visited:
835                self._parse_file_recursive(sub_file_path, sub_env, evaluate,
836                                           use_subdirs)
837        return sub_env
838
839
840    def _scan_and_parse_all_file_recursive(self, filename, path, env, evaluate):
841        """Scan all files with the specified name and parse them."""
842
843        rootdir = os.path.dirname(path)
844        envs = [(rootdir, env)]
845        assert env is not None
846
847        # Scan directories for all blueprint files
848        for basedir, dirnames, filenames in os.walk(rootdir):
849            # Drop irrelevant environments
850            while not basedir.startswith(envs[-1][0]):
851                envs.pop()
852
853            # Filter sub directories
854            new_dirnames = []
855            for name in dirnames:
856                if name in {'.git', '.repo'}:
857                    continue
858                if basedir == rootdir and name == 'out':
859                    continue
860                new_dirnames.append(name)
861            dirnames[:] = new_dirnames
862
863            # Parse blueprint files
864            if filename in filenames:
865                try:
866                    path = os.path.join(basedir, filename)
867                    sys.stdout.flush()
868                    sub_env = self._parse_file_recursive(path, envs[-1][1],
869                                                         evaluate, False)
870                    assert sub_env is not None
871                    envs.append((basedir, sub_env))
872                except IOError:
873                    pass
874
875
876    def parse_file(self, path, env=None, evaluate=True):
877        """Parse blueprint files recursively."""
878
879        if env is None:
880            env = {}
881
882        sub_env = self._read_file(path, env)[1]
883
884        if 'subdirs' in sub_env or 'optional_subdirs' in sub_env:
885            self._parse_file_recursive(path, env, evaluate, True)
886        else:
887            self._scan_and_parse_all_file_recursive('Android.bp', path, env,
888                                                    evaluate)
889
890
891#------------------------------------------------------------------------------
892# Transformation
893#------------------------------------------------------------------------------
894
895def _build_named_modules_dict(modules):
896    """Build a name-to-module dict."""
897    named_modules = {}
898    for i, (ident, attrs) in enumerate(modules):
899        name = attrs.get('name')
900        if name is not None:
901            named_modules[name] = [ident, attrs, i]
902    return named_modules
903
904
905def _po_sorted_modules(modules, named_modules):
906    """Sort modules in post order."""
907    modules = [(ident, attrs, i) for i, (ident, attrs) in enumerate(modules)]
908
909    # Build module dependency graph.
910    edges = {}
911    for ident, attrs, module_id in modules:
912        defaults = attrs.get('defaults')
913        if defaults:
914            edges[module_id] = set(
915                named_modules[default][2] for default in defaults)
916
917    # Traverse module graph in post order.
918    post_order = []
919    visited = set()
920
921    def _traverse(module_id):
922        visited.add(module_id)
923        for next_module_id in edges.get(module_id, []):
924            if next_module_id not in visited:
925                _traverse(next_module_id)
926        post_order.append(modules[module_id])
927
928    for module_id in range(len(modules)):
929        if module_id not in visited:
930            _traverse(module_id)
931
932    return post_order
933
934
935def evaluate_default(attrs, default_attrs):
936    """Add default attributes if the keys do not exist."""
937    for key, value in default_attrs.items():
938        if key not in attrs:
939            attrs[key] = value
940        else:
941            attrs_value = attrs[key]
942            if isinstance(value, Dict) and isinstance(attrs_value, Dict):
943                attrs[key] = evaluate_default(attrs_value, value)
944    return attrs
945
946
947def evaluate_defaults(modules):
948    """Add default attributes to all modules if the keys do not exist."""
949    named_modules = _build_named_modules_dict(modules)
950    for ident, attrs, i in _po_sorted_modules(modules, named_modules):
951        for default in attrs.get('defaults', []):
952            attrs = evaluate_default(attrs, named_modules[default][1])
953        modules[i] = (ident, attrs)
954    return modules
955