#!/usr/bin/env python

import collections
import copy
import glob
from os import path
import sys
from xml.etree import ElementTree

from fontTools import ttLib

EMOJI_VS = 0xFE0F

LANG_TO_SCRIPT = {
    'as': 'Beng',
    'be': 'Cyrl',
    'bg': 'Cyrl',
    'bn': 'Beng',
    'cu': 'Cyrl',
    'cy': 'Latn',
    'da': 'Latn',
    'de': 'Latn',
    'en': 'Latn',
    'es': 'Latn',
    'et': 'Latn',
    'eu': 'Latn',
    'fr': 'Latn',
    'ga': 'Latn',
    'gu': 'Gujr',
    'hi': 'Deva',
    'hr': 'Latn',
    'hu': 'Latn',
    'hy': 'Armn',
    'ja': 'Jpan',
    'kn': 'Knda',
    'ko': 'Kore',
    'la': 'Latn',
    'ml': 'Mlym',
    'mn': 'Cyrl',
    'mr': 'Deva',
    'nb': 'Latn',
    'nn': 'Latn',
    'or': 'Orya',
    'pa': 'Guru',
    'pt': 'Latn',
    'sl': 'Latn',
    'ta': 'Taml',
    'te': 'Telu',
    'tk': 'Latn',
}

def lang_to_script(lang_code):
    lang = lang_code.lower()
    while lang not in LANG_TO_SCRIPT:
        hyphen_idx = lang.rfind('-')
        assert hyphen_idx != -1, (
            'We do not know what script the "%s" language is written in.'
            % lang_code)
        assumed_script = lang[hyphen_idx+1:]
        if len(assumed_script) == 4 and assumed_script.isalpha():
            # This is actually the script
            return assumed_script.title()
        lang = lang[:hyphen_idx]
    return LANG_TO_SCRIPT[lang]
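
# Illustrative walk-through (comments only, not executed; results follow from
# LANG_TO_SCRIPT above):
#   lang_to_script('pt-BR')   -> 'br' is not a four-letter script subtag,
#                                retry with 'pt' -> 'Latn'
#   lang_to_script('sr-Latn') -> 'latn' is a four-letter alphabetic subtag
#                                -> 'Latn'
#   lang_to_script('xx')      -> unknown and no subtag left -> assertion error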


def printable(inp):
    if type(inp) is set:  # set of character sequences
        return '{' + ', '.join([printable(seq) for seq in inp]) + '}'
    if type(inp) is tuple:  # character sequence
        return '<' + (', '.join([printable(ch) for ch in inp])) + '>'
    else:  # single character
        return 'U+%04X' % inp
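
# For reference (illustrative, not executed), the three shapes rendered above:
#   printable(0x20E3)              -> 'U+20E3'
#   printable((0x1F1EF, 0x1F1F5))  -> '<U+1F1EF, U+1F1F5>'
#   printable({(0x23, 0x20E3)})    -> '{<U+0023, U+20E3>}'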


def open_font(font):
    font_file, index = font
    font_path = path.join(_fonts_dir, font_file)
    if index is not None:
        return ttLib.TTFont(font_path, fontNumber=index)
    else:
        return ttLib.TTFont(font_path)


def get_best_cmap(font):
    ttfont = open_font(font)
    all_unicode_cmap = None
    bmp_cmap = None
    for cmap in ttfont['cmap'].tables:
        specifier = (cmap.format, cmap.platformID, cmap.platEncID)
        if specifier == (4, 3, 1):
            # Windows platform, Unicode BMP encoding, format 4 subtable
            assert bmp_cmap is None, 'More than one BMP cmap in %s' % (font, )
            bmp_cmap = cmap
        elif specifier == (12, 3, 10):
            # Windows platform, full Unicode repertoire, format 12 subtable
            assert all_unicode_cmap is None, (
                'More than one UCS-4 cmap in %s' % (font, ))
            all_unicode_cmap = cmap

    return all_unicode_cmap.cmap if all_unicode_cmap else bmp_cmap.cmap


def get_variation_sequences_cmap(font):
    ttfont = open_font(font)
    vs_cmap = None
    for cmap in ttfont['cmap'].tables:
        specifier = (cmap.format, cmap.platformID, cmap.platEncID)
        if specifier == (14, 0, 5):
            # Unicode platform, variation-sequences encoding, format 14 subtable
            assert vs_cmap is None, 'More than one VS cmap in %s' % (font, )
            vs_cmap = cmap
    return vs_cmap
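
# A rough sketch of the structure fontTools exposes for a format 14 subtable
# (glyph names below are made up): uvsDict maps each variation selector to a
# list of (base character, glyph name) pairs, where a glyph name of None marks
# a default variation sequence that reuses the regular cmap glyph, e.g.
#   {0xFE0E: [(0x2764, 'heartText')], 0xFE0F: [(0x2764, None)]}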


def get_emoji_map(font):
    # Add normal characters
    emoji_map = copy.copy(get_best_cmap(font))
    reverse_cmap = {glyph: code for code, glyph in emoji_map.items()}

    # Add variation sequences
    vs_dict = get_variation_sequences_cmap(font).uvsDict
    for vs in vs_dict:
        for base, glyph in vs_dict[vs]:
            if glyph is None:
                emoji_map[(base, vs)] = emoji_map[base]
            else:
                emoji_map[(base, vs)] = glyph

    # Add GSUB rules
    ttfont = open_font(font)
    for lookup in ttfont['GSUB'].table.LookupList.Lookup:
        if lookup.LookupType != 4:
            # Lookup type 4 is ligature substitution. Other lookup types are
            # used in the emoji font for fallback; we ignore them for now.
            continue
        for subtable in lookup.SubTable:
            ligatures = subtable.ligatures
            for first_glyph in ligatures:
                for ligature in ligatures[first_glyph]:
                    sequence = [first_glyph] + ligature.Component
                    sequence = [reverse_cmap[glyph] for glyph in sequence]
                    sequence = tuple(sequence)
                    # Make sure no starting subsequence of 'sequence' has been
                    # seen before.
                    for sub_len in range(2, len(sequence)+1):
                        subsequence = sequence[:sub_len]
                        assert subsequence not in emoji_map
                    emoji_map[sequence] = ligature.LigGlyph

    return emoji_map
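
# The resulting map mixes three kinds of keys (glyph names below are made up):
#   0x1F600            -> 'grinningFace'  (plain cmap entry)
#   (0x2764, 0xFE0F)   -> 'heartEmoji'    (variation sequence)
#   (0x1F1EF, 0x1F1F5) -> 'flagJP'        (GSUB ligature sequence)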


def assert_font_supports_any_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        if char in best_cmap:
            return
    sys.exit('None of the characters in %s were found in %s' % (chars, font))


def assert_font_supports_all_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        assert char in best_cmap, (
            'U+%04X was not found in %s' % (char, font))


def assert_font_supports_none_of_chars(font, chars, fallbackName):
    best_cmap = get_best_cmap(font)
    for char in chars:
        if fallbackName:
            assert char not in best_cmap, (
                'U+%04X was found in %s in fallback "%s"' % (
                    char, font, fallbackName))
        else:
            assert char not in best_cmap, (
                'U+%04X was found in %s' % (char, font))


def assert_font_supports_all_sequences(font, sequences):
    vs_dict = get_variation_sequences_cmap(font).uvsDict
    for base, vs in sorted(sequences):
        assert vs in vs_dict and (base, None) in vs_dict[vs], (
            '<U+%04X, U+%04X> was not found in %s' % (base, vs, font))


def check_hyphens(hyphens_dir):
    # Find all the scripts that need automatic hyphenation
    scripts = set()
    for hyb_file in glob.iglob(path.join(hyphens_dir, '*.hyb')):
        hyb_file = path.basename(hyb_file)
        assert hyb_file.startswith('hyph-'), (
            'Unknown hyphenation file %s' % hyb_file)
        lang_code = hyb_file[hyb_file.index('-')+1:hyb_file.index('.')]
        scripts.add(lang_to_script(lang_code))

    HYPHENS = {0x002D, 0x2010}
    for script in scripts:
        fonts = _script_to_font_map[script]
        assert fonts, 'No fonts found for the "%s" script' % script
        for font in fonts:
            assert_font_supports_any_of_chars(font, HYPHENS)


class FontRecord(object):
    def __init__(self, name, scripts, variant, weight, style, fallback_for, font):
        self.name = name
        self.scripts = scripts
        self.variant = variant
        self.weight = weight
        self.style = style
        self.fallback_for = fallback_for
        self.font = font


def parse_fonts_xml(fonts_xml_path):
    global _script_to_font_map, _fallback_chains, _all_fonts
    _script_to_font_map = collections.defaultdict(set)
    _fallback_chains = {}
    _all_fonts = []
    tree = ElementTree.parse(fonts_xml_path)
    families = tree.findall('family')
    # Minikin supports up to 254 font families, but users can prepend a font
    # family of their own, so 253 is the maximum number of families allowed in
    # the default collection.
    assert len(families) < 254, (
        'The system font collection can contain at most 253 font families.')
    for family in families:
        name = family.get('name')
        variant = family.get('variant')
        langs = family.get('lang')
        if name:
            assert variant is None, (
                'No variant expected for LGC font %s.' % name)
            assert langs is None, (
                'No language expected for LGC font %s.' % name)
            assert name not in _fallback_chains, 'Duplicate name entry %s' % name
            _fallback_chains[name] = []
        else:
            assert variant in {None, 'elegant', 'compact'}, (
                'Unexpected value for variant: %s' % variant)

    for family in families:
        name = family.get('name')
        variant = family.get('variant')
        langs = family.get('lang')

        if langs:
            langs = langs.split()
            scripts = {lang_to_script(lang) for lang in langs}
        else:
            scripts = set()

        for child in family:
            assert child.tag == 'font', (
                'Unknown tag <%s>' % child.tag)
            font_file = child.text.rstrip()
            weight = int(child.get('weight'))
            assert weight % 100 == 0, (
                'Font weight "%d" is not a multiple of 100.' % weight)

            style = child.get('style')
            assert style in {'normal', 'italic'}, (
                'Unknown style "%s"' % style)

            fallback_for = child.get('fallbackFor')

            assert not name or not fallback_for, (
                'name and fallbackFor cannot be present at the same time')
            assert not fallback_for or fallback_for in _fallback_chains, (
                'Unknown fallback name: %s' % fallback_for)

            index = child.get('index')
            if index:
                index = int(index)

            if not path.exists(path.join(_fonts_dir, font_file)):
                continue  # A missing font file is a valid case; just skip it.

            record = FontRecord(
                name,
                frozenset(scripts),
                variant,
                weight,
                style,
                fallback_for,
                (font_file, index))

            _all_fonts.append(record)

            if not fallback_for:
                if not name or name == 'sans-serif':
                    for _, fallback in _fallback_chains.iteritems():
                        fallback.append(record)
                else:
                    _fallback_chains[name].append(record)
            else:
                _fallback_chains[fallback_for].append(record)

            if name:  # non-empty names are used for default LGC fonts
                map_scripts = {'Latn', 'Grek', 'Cyrl'}
            else:
                map_scripts = scripts
            for script in map_scripts:
                _script_to_font_map[script].add((font_file, index))
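
# For reference, the rough shape of fonts.xml this parser expects (family
# names and font files below are only illustrative):
#   <familyset>
#     <family name="sans-serif">
#       <font weight="400" style="normal">Roboto-Regular.ttf</font>
#     </family>
#     <family lang="und-Ethi" variant="elegant">
#       <font weight="400" style="normal">NotoSansEthiopic-Regular.ttf</font>
#     </family>
#   </familyset>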


def check_emoji_coverage(all_emoji, equivalent_emoji):
    emoji_font = get_emoji_font()
    check_emoji_font_coverage(emoji_font, all_emoji, equivalent_emoji)


def get_emoji_font():
    emoji_fonts = [
        record.font for record in _all_fonts
        if 'Zsye' in record.scripts]
    assert len(emoji_fonts) == 1, 'There are %d emoji fonts.' % len(emoji_fonts)
    return emoji_fonts[0]


def check_emoji_font_coverage(emoji_font, all_emoji, equivalent_emoji):
    coverage = get_emoji_map(emoji_font)
    for sequence in all_emoji:
        assert sequence in coverage, (
            '%s is not supported in the emoji font.' % printable(sequence))

    for sequence in coverage:
        if sequence in {0x0000, 0x000D, 0x0020}:
            # The font needs to support a few extra characters, which is OK
            continue
        assert sequence in all_emoji, (
            'Emoji font should not support %s.' % printable(sequence))

    for first, second in sorted(equivalent_emoji.items()):
        assert coverage[first] == coverage[second], (
            '%s and %s should map to the same glyph.' % (
                printable(first),
                printable(second)))

    for glyph in set(coverage.values()):
        maps_to_glyph = [seq for seq in coverage if coverage[seq] == glyph]
        if len(maps_to_glyph) > 1:
            # More than one sequence maps to the same glyph, so make sure they
            # were expected to be equivalent.
            equivalent_seqs = set()
            for seq in maps_to_glyph:
                equivalent_seq = seq
                while equivalent_seq in equivalent_emoji:
                    equivalent_seq = equivalent_emoji[equivalent_seq]
                equivalent_seqs.add(equivalent_seq)
            assert len(equivalent_seqs) == 1, (
                'The sequences %s should not result in the same glyph %s' % (
                    printable(equivalent_seqs),
                    glyph))


def check_emoji_defaults(default_emoji):
    missing_text_chars = _emoji_properties['Emoji'] - default_emoji
    for name, fallback_chain in _fallback_chains.iteritems():
        emoji_font_seen = False
        for record in fallback_chain:
            if 'Zsye' in record.scripts:
                emoji_font_seen = True
                # No need to check the emoji font
                continue
            # For later fonts, we only check them if they have a script
            # defined, since the defined script may give them a higher
            # score even though they appear after the emoji font. However,
            # we should skip checking the text symbols font, since
            # symbol fonts should be able to override the emoji display
            # style when 'Zsym' is explicitly specified by the user.
            if emoji_font_seen and (not record.scripts or 'Zsym' in record.scripts):
                continue

            # Check default emoji-style characters
            assert_font_supports_none_of_chars(record.font, sorted(default_emoji), name)

            # Mark default text-style characters appearing in fonts above the
            # emoji font as seen
            if not emoji_font_seen:
                missing_text_chars -= set(get_best_cmap(record.font))

        # Noto does not have monochrome glyphs for Unicode 7.0 Wingdings and
        # Webdings yet.
        missing_text_chars -= _chars_by_age['7.0']
        assert missing_text_chars == set(), (
            'Text-style versions of some emoji characters are missing: ' +
            repr(missing_text_chars))


# Setting reverse to True returns a dictionary that maps property values to
# sets of characters, which is useful for binary properties. Otherwise, the
# result maps each character to its property value, assuming there is only one
# property in the file.
def parse_unicode_datafile(file_path, reverse=False):
    if reverse:
        output_dict = collections.defaultdict(set)
    else:
        output_dict = {}
    with open(file_path) as datafile:
        for line in datafile:
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
            if not line:
                continue

            chars, prop = line.split(';')[:2]
            chars = chars.strip()
            prop = prop.strip()

            if ' ' in chars:  # character sequence
                sequence = [int(ch, 16) for ch in chars.split(' ')]
                additions = [tuple(sequence)]
            elif '..' in chars:  # character range
                char_start, char_end = chars.split('..')
                char_start = int(char_start, 16)
                char_end = int(char_end, 16)
                additions = xrange(char_start, char_end+1)
            else:  # single character
                additions = [int(chars, 16)]
            if reverse:
                output_dict[prop].update(additions)
            else:
                for addition in additions:
                    assert addition not in output_dict
                    output_dict[addition] = prop
    return output_dict
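
# The three line shapes handled above, roughly as they appear in the UCD data
# files (the property names here are only examples):
#   1F4A9         ; Emoji                  # a single character
#   2600..2604    ; Emoji                  # a character range
#   1F1E6 1F1E8   ; Emoji_Flag_Sequence    # a character sequence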


def parse_emoji_variants(file_path):
    emoji_set = set()
    text_set = set()
    with open(file_path) as datafile:
        for line in datafile:
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
            if not line:
                continue
            sequence, description, _ = line.split(';')
            sequence = sequence.strip().split(' ')
            base = int(sequence[0], 16)
            vs = int(sequence[1], 16)
            description = description.strip()
            if description == 'text style':
                text_set.add((base, vs))
            elif description == 'emoji style':
                emoji_set.add((base, vs))
    return text_set, emoji_set
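
# Example input lines, roughly as they appear in emoji-variation-sequences.txt:
#   2764 FE0E  ; text style;   # (1.1) HEAVY BLACK HEART
#   2764 FE0F  ; emoji style;  # (1.1) HEAVY BLACK HEART
# The first would land in text_set as (0x2764, 0xFE0E) and the second in
# emoji_set as (0x2764, 0xFE0F).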


def parse_ucd(ucd_path):
    global _emoji_properties, _chars_by_age
    global _text_variation_sequences, _emoji_variation_sequences
    global _emoji_sequences, _emoji_zwj_sequences
    _emoji_properties = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-data.txt'), reverse=True)
    emoji_properties_additions = parse_unicode_datafile(
        path.join(ucd_path, 'additions', 'emoji-data.txt'), reverse=True)
    for prop in emoji_properties_additions.keys():
        _emoji_properties[prop].update(emoji_properties_additions[prop])

    _chars_by_age = parse_unicode_datafile(
        path.join(ucd_path, 'DerivedAge.txt'), reverse=True)
    sequences = parse_emoji_variants(
        path.join(ucd_path, 'emoji-variation-sequences.txt'))
    _text_variation_sequences, _emoji_variation_sequences = sequences
    _emoji_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-sequences.txt'))
    _emoji_sequences.update(parse_unicode_datafile(
        path.join(ucd_path, 'additions', 'emoji-sequences.txt')))
    _emoji_zwj_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-zwj-sequences.txt'))
    _emoji_zwj_sequences.update(parse_unicode_datafile(
        path.join(ucd_path, 'additions', 'emoji-zwj-sequences.txt')))

    exclusions = parse_unicode_datafile(
        path.join(ucd_path, 'additions', 'emoji-exclusions.txt'))
    _emoji_sequences = remove_emoji_exclude(_emoji_sequences, exclusions)
    _emoji_zwj_sequences = remove_emoji_exclude(_emoji_zwj_sequences, exclusions)
    _emoji_variation_sequences = remove_emoji_variation_exclude(
        _emoji_variation_sequences, exclusions)
    # Unicode 12.0 adds Basic_Emoji to emoji-sequences.txt. We ignore those
    # entries here since the emoji presentations are already checked against
    # emoji-variation-sequences.txt.
    # Please refer to http://unicode.org/reports/tr51/#def_basic_emoji_set .
    _emoji_sequences = {k: v for k, v in _emoji_sequences.iteritems()
                        if v != 'Basic_Emoji'}


def remove_emoji_variation_exclude(source, items):
    return source.difference(items.keys())

def remove_emoji_exclude(source, items):
    return {k: v for k, v in source.items() if k not in items}

def flag_sequence(territory_code):
    return tuple(0x1F1E6 + ord(ch) - ord('A') for ch in territory_code)
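
# Illustrative example (not executed): a two-letter territory code becomes a
# pair of Regional Indicator Symbols, e.g.
#   flag_sequence('NO') == (0x1F1F3, 0x1F1F4)
#   flag_sequence('JP') == (0x1F1EF, 0x1F1F5)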

EQUIVALENT_FLAGS = {
    flag_sequence('BV'): flag_sequence('NO'),
    flag_sequence('CP'): flag_sequence('FR'),
    flag_sequence('HM'): flag_sequence('AU'),
    flag_sequence('SJ'): flag_sequence('NO'),
    flag_sequence('UM'): flag_sequence('US'),
}

COMBINING_KEYCAP = 0x20E3

LEGACY_ANDROID_EMOJI = {
    0xFE4E5: flag_sequence('JP'),
    0xFE4E6: flag_sequence('US'),
    0xFE4E7: flag_sequence('FR'),
    0xFE4E8: flag_sequence('DE'),
    0xFE4E9: flag_sequence('IT'),
    0xFE4EA: flag_sequence('GB'),
    0xFE4EB: flag_sequence('ES'),
    0xFE4EC: flag_sequence('RU'),
    0xFE4ED: flag_sequence('CN'),
    0xFE4EE: flag_sequence('KR'),
    0xFE82C: (ord('#'), COMBINING_KEYCAP),
    0xFE82E: (ord('1'), COMBINING_KEYCAP),
    0xFE82F: (ord('2'), COMBINING_KEYCAP),
    0xFE830: (ord('3'), COMBINING_KEYCAP),
    0xFE831: (ord('4'), COMBINING_KEYCAP),
    0xFE832: (ord('5'), COMBINING_KEYCAP),
    0xFE833: (ord('6'), COMBINING_KEYCAP),
    0xFE834: (ord('7'), COMBINING_KEYCAP),
    0xFE835: (ord('8'), COMBINING_KEYCAP),
    0xFE836: (ord('9'), COMBINING_KEYCAP),
    0xFE837: (ord('0'), COMBINING_KEYCAP),
}

# This defines emoji that should share the same glyph. For example, if the
# gender-neutral Kiss (0x1F48F) used the same glyph as Kiss: Woman, Man
# (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F48B, 0x200D, 0x1F468), a valid row
# would be:
# (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F48B, 0x200D, 0x1F468): 0x1F48F,
ZWJ_IDENTICALS = {
}

SAME_FLAG_MAPPINGS = [
    # Diego Garcia and British Indian Ocean Territory
    ((0x1F1EE, 0x1F1F4), (0x1F1E9, 0x1F1EC)),
    # St. Martin and France
    ((0x1F1F2, 0x1F1EB), (0x1F1EB, 0x1F1F7)),
    # Spain and Ceuta & Melilla
    ((0x1F1EA, 0x1F1F8), (0x1F1EA, 0x1F1E6)),
]

ZWJ = 0x200D

def is_fitzpatrick_modifier(cp):
    return 0x1F3FB <= cp <= 0x1F3FF


def reverse_emoji(seq):
    rev = list(reversed(seq))
    # if there are fitzpatrick modifiers in the sequence, keep them after
    # the emoji they modify
    for i in xrange(1, len(rev)):
        if is_fitzpatrick_modifier(rev[i-1]):
            rev[i], rev[i-1] = rev[i-1], rev[i]
    return tuple(rev)
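
# Illustrative trace (not executed): for the sequence
#   (0x1F468, 0x1F3FB, 0x200D, 0x2695)   # man, skin tone modifier, ZWJ, staff symbol
# reverse_emoji returns
#   (0x2695, 0x200D, 0x1F468, 0x1F3FB)
# i.e. the skin tone modifier stays right after the base emoji it modifies.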


def compute_expected_emoji():
    equivalent_emoji = {}
    sequence_pieces = set()
    all_sequences = set()
    all_sequences.update(_emoji_variation_sequences)

    # Start from the parsed ZWJ sequences; ZWJ sequences that are not yet in
    # emoji-zwj-sequences.txt would be merged into this dict.
    adjusted_emoji_zwj_sequences = dict(_emoji_zwj_sequences)
    adjusted_emoji_zwj_sequences.update(_emoji_zwj_sequences)

    # Add the empty flag tag sequence that is supported as a fallback
    _emoji_sequences[(0x1F3F4, 0xE007F)] = 'Emoji_Tag_Sequence'

    for sequence in _emoji_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)
        if _emoji_sequences.get(sequence, None) == 'Emoji_Tag_Sequence':
            # Add the reverse of all emoji tag sequences, which are added to
            # the fonts as a workaround to make the sequences work in RTL text.
            # TODO: test if these are actually needed by Minikin/HarfBuzz.
            reversed_seq = reverse_emoji(sequence)
            all_sequences.add(reversed_seq)
            equivalent_emoji[reversed_seq] = sequence

    for sequence in adjusted_emoji_zwj_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)
        # Add the reverse of all emoji ZWJ sequences, which are added to the
        # fonts as a workaround to make the sequences work in RTL text.
        reversed_seq = reverse_emoji(sequence)
        all_sequences.add(reversed_seq)
        equivalent_emoji[reversed_seq] = sequence

    for first, second in SAME_FLAG_MAPPINGS:
        equivalent_emoji[first] = second

    # Add all tag characters used in flags
    sequence_pieces.update(range(0xE0030, 0xE0039 + 1))
    sequence_pieces.update(range(0xE0061, 0xE007A + 1))

    all_emoji = (
        _emoji_properties['Emoji'] |
        all_sequences |
        sequence_pieces |
        set(LEGACY_ANDROID_EMOJI.keys()))
    default_emoji = (
        _emoji_properties['Emoji_Presentation'] |
        all_sequences |
        set(LEGACY_ANDROID_EMOJI.keys()))

    equivalent_emoji.update(EQUIVALENT_FLAGS)
    equivalent_emoji.update(LEGACY_ANDROID_EMOJI)
    equivalent_emoji.update(ZWJ_IDENTICALS)

    for seq in _emoji_variation_sequences:
        equivalent_emoji[seq] = seq[0]

    return all_emoji, default_emoji, equivalent_emoji


def check_compact_only_fallback():
    for name, fallback_chain in _fallback_chains.iteritems():
        for record in fallback_chain:
            if record.variant == 'compact':
                same_script_elegants = [x for x in fallback_chain
                    if x.scripts == record.scripts and x.variant == 'elegant']
                assert same_script_elegants, (
                    '%s (scripts %s) must also have an elegant variant in the '
                    'fallback chain for "%s"' % (
                        record.font, record.scripts, record.fallback_for))


def check_vertical_metrics():
    for record in _all_fonts:
        if record.name in ['sans-serif', 'sans-serif-condensed']:
            font = open_font(record.font)
            assert font['head'].yMax == 2163 and font['head'].yMin == -555, (
                'yMax and yMin of %s do not match expected values.' % (
                record.font,))

        if record.name in ['sans-serif', 'sans-serif-condensed',
                           'serif', 'monospace']:
            font = open_font(record.font)
            assert (font['hhea'].ascent == 1900 and
                    font['hhea'].descent == -500), (
                        'ascent and descent of %s do not match expected '
                        'values.' % (record.font,))


def check_cjk_punctuation():
    cjk_scripts = {'Hans', 'Hant', 'Jpan', 'Kore'}
    cjk_punctuation = range(0x3000, 0x301F + 1)
    for name, fallback_chain in _fallback_chains.iteritems():
        for record in fallback_chain:
            if record.scripts.intersection(cjk_scripts):
                # CJK font seen. Stop checking the rest of the fonts.
                break
            assert_font_supports_none_of_chars(record.font, cjk_punctuation, name)


def main():
    global _fonts_dir
    target_out = sys.argv[1]
    _fonts_dir = path.join(target_out, 'fonts')

    fonts_xml_path = path.join(target_out, 'etc', 'fonts.xml')
    parse_fonts_xml(fonts_xml_path)

    check_compact_only_fallback()

    check_vertical_metrics()

    hyphens_dir = path.join(target_out, 'usr', 'hyphen-data')
    check_hyphens(hyphens_dir)

    check_cjk_punctuation()

    check_emoji = sys.argv[2]
    if check_emoji == 'true':
        ucd_path = sys.argv[3]
        parse_ucd(ucd_path)
        all_emoji, default_emoji, equivalent_emoji = compute_expected_emoji()
        check_emoji_coverage(all_emoji, equivalent_emoji)
        check_emoji_defaults(default_emoji)


if __name__ == '__main__':
    main()