Home | History | Annotate | Download | only in fonts
      1 #!/usr/bin/env python
      2 
      3 import collections
      4 import copy
      5 import glob
      6 import itertools
      7 from os import path
      8 import sys
      9 from xml.etree import ElementTree
     10 
     11 from fontTools import ttLib
     12 
# U+FE0F VARIATION SELECTOR-16: requests emoji (color) presentation when it
# follows a base character.
EMOJI_VS = 0xFE0F

# Maps a lowercase language subtag (as found in hyphenation file names and
# fonts.xml "lang" attributes) to the ISO 15924 code of the script it is
# written in. Tags not listed here must carry an explicit script subtag.
LANG_TO_SCRIPT = {
    'as': 'Beng',
    'bn': 'Beng',
    'cy': 'Latn',
    'da': 'Latn',
    'de': 'Latn',
    'en': 'Latn',
    'es': 'Latn',
    'et': 'Latn',
    'eu': 'Latn',
    'fr': 'Latn',
    'ga': 'Latn',
    'gu': 'Gujr',
    'hi': 'Deva',
    'hr': 'Latn',
    'hu': 'Latn',
    'hy': 'Armn',
    'ja': 'Jpan',
    'kn': 'Knda',
    'ko': 'Kore',
    'ml': 'Mlym',
    'mn': 'Cyrl',
    'mr': 'Deva',
    'nb': 'Latn',
    'nn': 'Latn',
    'or': 'Orya',
    'pa': 'Guru',
    'pt': 'Latn',
    'sl': 'Latn',
    'ta': 'Taml',
    'te': 'Telu',
    'tk': 'Latn',
}
     48 
     49 def lang_to_script(lang_code):
     50     lang = lang_code.lower()
     51     while lang not in LANG_TO_SCRIPT:
     52         hyphen_idx = lang.rfind('-')
     53         assert hyphen_idx != -1, (
     54             'We do not know what script the "%s" language is written in.'
     55             % lang_code)
     56         assumed_script = lang[hyphen_idx+1:]
     57         if len(assumed_script) == 4 and assumed_script.isalpha():
     58             # This is actually the script
     59             return assumed_script.title()
     60         lang = lang[:hyphen_idx]
     61     return LANG_TO_SCRIPT[lang]
     62 
     63 
     64 def printable(inp):
     65     if type(inp) is set:  # set of character sequences
     66         return '{' + ', '.join([printable(seq) for seq in inp]) + '}'
     67     if type(inp) is tuple:  # character sequence
     68         return '<' + (', '.join([printable(ch) for ch in inp])) + '>'
     69     else:  # single character
     70         return 'U+%04X' % inp
     71 
     72 
     73 def open_font(font):
     74     font_file, index = font
     75     font_path = path.join(_fonts_dir, font_file)
     76     if index is not None:
     77         return ttLib.TTFont(font_path, fontNumber=index)
     78     else:
     79         return ttLib.TTFont(font_path)
     80 
     81 
     82 def get_best_cmap(font):
     83     ttfont = open_font(font)
     84     all_unicode_cmap = None
     85     bmp_cmap = None
     86     for cmap in ttfont['cmap'].tables:
     87         specifier = (cmap.format, cmap.platformID, cmap.platEncID)
     88         if specifier == (4, 3, 1):
     89             assert bmp_cmap is None, 'More than one BMP cmap in %s' % (font, )
     90             bmp_cmap = cmap
     91         elif specifier == (12, 3, 10):
     92             assert all_unicode_cmap is None, (
     93                 'More than one UCS-4 cmap in %s' % (font, ))
     94             all_unicode_cmap = cmap
     95 
     96     return all_unicode_cmap.cmap if all_unicode_cmap else bmp_cmap.cmap
     97 
     98 
     99 def get_variation_sequences_cmap(font):
    100     ttfont = open_font(font)
    101     vs_cmap = None
    102     for cmap in ttfont['cmap'].tables:
    103         specifier = (cmap.format, cmap.platformID, cmap.platEncID)
    104         if specifier == (14, 0, 5):
    105             assert vs_cmap is None, 'More than one VS cmap in %s' % (font, )
    106             vs_cmap = cmap
    107     return vs_cmap
    108 
    109 
    110 def get_emoji_map(font):
    111     # Add normal characters
    112     emoji_map = copy.copy(get_best_cmap(font))
    113     reverse_cmap = {glyph: code for code, glyph in emoji_map.items()}
    114 
    115     # Add variation sequences
    116     vs_dict = get_variation_sequences_cmap(font).uvsDict
    117     for vs in vs_dict:
    118         for base, glyph in vs_dict[vs]:
    119             if glyph is None:
    120                 emoji_map[(base, vs)] = emoji_map[base]
    121             else:
    122                 emoji_map[(base, vs)] = glyph
    123 
    124     # Add GSUB rules
    125     ttfont = open_font(font)
    126     for lookup in ttfont['GSUB'].table.LookupList.Lookup:
    127         assert lookup.LookupType == 4, 'We only understand type 4 lookups'
    128         for subtable in lookup.SubTable:
    129             ligatures = subtable.ligatures
    130             for first_glyph in ligatures:
    131                 for ligature in ligatures[first_glyph]:
    132                     sequence = [first_glyph] + ligature.Component
    133                     sequence = [reverse_cmap[glyph] for glyph in sequence]
    134                     sequence = tuple(sequence)
    135                     # Make sure no starting subsequence of 'sequence' has been
    136                     # seen before.
    137                     for sub_len in range(2, len(sequence)+1):
    138                         subsequence = sequence[:sub_len]
    139                         assert subsequence not in emoji_map
    140                     emoji_map[sequence] = ligature.LigGlyph
    141 
    142     return emoji_map
    143 
    144 
    145 def assert_font_supports_any_of_chars(font, chars):
    146     best_cmap = get_best_cmap(font)
    147     for char in chars:
    148         if char in best_cmap:
    149             return
    150     sys.exit('None of characters in %s were found in %s' % (chars, font))
    151 
    152 
    153 def assert_font_supports_all_of_chars(font, chars):
    154     best_cmap = get_best_cmap(font)
    155     for char in chars:
    156         assert char in best_cmap, (
    157             'U+%04X was not found in %s' % (char, font))
    158 
    159 
    160 def assert_font_supports_none_of_chars(font, chars):
    161     best_cmap = get_best_cmap(font)
    162     for char in chars:
    163         assert char not in best_cmap, (
    164             'U+%04X was found in %s' % (char, font))
    165 
    166 
    167 def assert_font_supports_all_sequences(font, sequences):
    168     vs_dict = get_variation_sequences_cmap(font).uvsDict
    169     for base, vs in sorted(sequences):
    170         assert vs in vs_dict and (base, None) in vs_dict[vs], (
    171             '<U+%04X, U+%04X> was not found in %s' % (base, vs, font))
    172 
    173 
    174 def check_hyphens(hyphens_dir):
    175     # Find all the scripts that need automatic hyphenation
    176     scripts = set()
    177     for hyb_file in glob.iglob(path.join(hyphens_dir, '*.hyb')):
    178         hyb_file = path.basename(hyb_file)
    179         assert hyb_file.startswith('hyph-'), (
    180             'Unknown hyphenation file %s' % hyb_file)
    181         lang_code = hyb_file[hyb_file.index('-')+1:hyb_file.index('.')]
    182         scripts.add(lang_to_script(lang_code))
    183 
    184     HYPHENS = {0x002D, 0x2010}
    185     for script in scripts:
    186         fonts = _script_to_font_map[script]
    187         assert fonts, 'No fonts found for the "%s" script' % script
    188         for font in fonts:
    189             assert_font_supports_any_of_chars(font, HYPHENS)
    190 
    191 
    192 class FontRecord(object):
    193     def __init__(self, name, scripts, variant, weight, style, font):
    194         self.name = name
    195         self.scripts = scripts
    196         self.variant = variant
    197         self.weight = weight
    198         self.style = style
    199         self.font = font
    200 
    201 
def parse_fonts_xml(fonts_xml_path):
    """Parse fonts.xml, populating the module-level font structures.

    Side effects:
        _fallback_chain: list of FontRecord in document (fallback) order.
        _script_to_font_map: dict mapping script code to the set of
            (font file name, collection index) pairs serving that script.

    Aborts on malformed input (unexpected tags, attributes, weights, or
    styles).
    """
    global _script_to_font_map, _fallback_chain
    _script_to_font_map = collections.defaultdict(set)
    _fallback_chain = []
    tree = ElementTree.parse(fonts_xml_path)
    for family in tree.findall('family'):
        name = family.get('name')
        variant = family.get('variant')
        langs = family.get('lang')
        if name:
            # Named families are the default LGC (Latin/Greek/Cyrillic)
            # fonts and must not declare a variant or language.
            assert variant is None, (
                'No variant expected for LGC font %s.' % name)
            assert langs is None, (
                'No language expected for LGC fonts %s.' % name)
        else:
            assert variant in {None, 'elegant', 'compact'}, (
                'Unexpected value for variant: %s' % variant)

        if langs:
            # The lang attribute is a space-separated list of language tags.
            langs = langs.split()
            scripts = {lang_to_script(lang) for lang in langs}
        else:
            scripts = set()

        for child in family:
            assert child.tag == 'font', (
                'Unknown tag <%s>' % child.tag)
            font_file = child.text
            weight = int(child.get('weight'))
            assert weight % 100 == 0, (
                'Font weight "%d" is not a multiple of 100.' % weight)

            style = child.get('style')
            assert style in {'normal', 'italic'}, (
                'Unknown style "%s"' % style)

            # Optional face index into a TTC (TrueType Collection) file.
            index = child.get('index')
            if index:
                index = int(index)

            _fallback_chain.append(FontRecord(
                name,
                frozenset(scripts),
                variant,
                weight,
                style,
                (font_file, index)))

            if name: # non-empty names are used for default LGC fonts
                map_scripts = {'Latn', 'Grek', 'Cyrl'}
            else:
                map_scripts = scripts
            for script in map_scripts:
                _script_to_font_map[script].add((font_file, index))
    256 
    257 
def check_emoji_coverage(all_emoji, equivalent_emoji):
    """Check that the emoji font covers exactly the expected emoji set.

    Args:
        all_emoji: set of code points and code point tuples that must all
            be supported; anything else in the font (beyond a few control
            characters) is an error.
        equivalent_emoji: dict mapping each sequence to the sequence it is
            required to share a glyph with.
    """
    # There must be exactly one emoji font ('Zsye' script) in the chain.
    emoji_fonts = [
        record.font for record in _fallback_chain
        if 'Zsye' in record.scripts]
    assert len(emoji_fonts) == 1, 'There are %d emoji fonts.' % len(emoji_fonts)
    emoji_font = emoji_fonts[0]
    coverage = get_emoji_map(emoji_font)

    for sequence in all_emoji:
        assert sequence in coverage, (
            '%s is not supported in the emoji font.' % printable(sequence))

    for sequence in coverage:
        if sequence in {0x0000, 0x000D, 0x0020}:
            # The font needs to support a few extra characters, which is OK
            continue
        assert sequence in all_emoji, (
            'Emoji font should not support %s.' % printable(sequence))

    for first, second in sorted(equivalent_emoji.items()):
        assert coverage[first] == coverage[second], (
            '%s and %s should map to the same glyph.' % (
                printable(first),
                printable(second)))

    # Conversely, sequences may only share a glyph if they are declared
    # equivalent (directly or transitively) in equivalent_emoji.
    for glyph in set(coverage.values()):
        maps_to_glyph = [seq for seq in coverage if coverage[seq] == glyph]
        if len(maps_to_glyph) > 1:
            # There are more than one sequences mapping to the same glyph. We
            # need to make sure they were expected to be equivalent.
            equivalent_seqs = set()
            for seq in maps_to_glyph:
                # Follow the equivalence chain to its canonical sequence.
                equivalent_seq = seq
                while equivalent_seq in equivalent_emoji:
                    equivalent_seq = equivalent_emoji[equivalent_seq]
                equivalent_seqs.add(equivalent_seq)
            assert len(equivalent_seqs) == 1, (
                'The sequences %s should not result in the same glyph %s' % (
                    printable(equivalent_seqs),
                    glyph))
    298 
    299 
def check_emoji_defaults(default_emoji):
    """Check the presentation defaults of emoji in the fallback chain.

    Verifies that no non-emoji font covers a default-emoji-presentation
    character, and that a text-style (monochrome) glyph for every other
    emoji character exists in some font above the emoji font — modulo
    known exceptions listed below.
    """
    missing_text_chars = _emoji_properties['Emoji'] - default_emoji
    emoji_font_seen = False
    for record in _fallback_chain:
        if 'Zsye' in record.scripts:
            emoji_font_seen = True
            # No need to check the emoji font
            continue
        # For later fonts, we only check them if they have a script
        # defined, since the defined script may get them to a higher
        # score even if they appear after the emoji font.
        if emoji_font_seen and not record.scripts:
            continue

        # Check default emoji-style characters
        assert_font_supports_none_of_chars(record.font, sorted(default_emoji))

        # Mark default text-style characters appearing in fonts above the emoji
        # font as seen
        if not emoji_font_seen:
            missing_text_chars -= set(get_best_cmap(record.font))

    # Noto does not have monochrome glyphs for Unicode 7.0 wingdings and
    # webdings yet.
    missing_text_chars -= _chars_by_age['7.0']
    # TODO: Remove these after b/26113320 is fixed
    missing_text_chars -= {
        0x263A, # WHITE SMILING FACE
        0x270C, # VICTORY HAND
        0x2744, # SNOWFLAKE
        0x2764, # HEAVY BLACK HEART
    }
    assert missing_text_chars == set(), (
        'Text style version of some emoji characters are missing: ' + repr(missing_text_chars))
    334 
    335 
    336 # Setting reverse to true returns a dictionary that maps the values to sets of
    337 # characters, useful for some binary properties. Otherwise, we get a
    338 # dictionary that maps characters to the property values, assuming there's only
    339 # one property in the file.
    340 def parse_unicode_datafile(file_path, reverse=False):
    341     if reverse:
    342         output_dict = collections.defaultdict(set)
    343     else:
    344         output_dict = {}
    345     with open(file_path) as datafile:
    346         for line in datafile:
    347             if '#' in line:
    348                 line = line[:line.index('#')]
    349             line = line.strip()
    350             if not line:
    351                 continue
    352 
    353             chars, prop = line.split(';')
    354             chars = chars.strip()
    355             prop = prop.strip()
    356 
    357             if ' ' in chars:  # character sequence
    358                 sequence = [int(ch, 16) for ch in chars.split(' ')]
    359                 additions = [tuple(sequence)]
    360             elif '..' in chars:  # character range
    361                 char_start, char_end = chars.split('..')
    362                 char_start = int(char_start, 16)
    363                 char_end = int(char_end, 16)
    364                 additions = xrange(char_start, char_end+1)
    365             else:  # singe character
    366                 additions = [int(chars, 16)]
    367             if reverse:
    368                 output_dict[prop].update(additions)
    369             else:
    370                 for addition in additions:
    371                     assert addition not in output_dict
    372                     output_dict[addition] = prop
    373     return output_dict
    374 
    375 
    376 def parse_standardized_variants(file_path):
    377     emoji_set = set()
    378     text_set = set()
    379     with open(file_path) as datafile:
    380         for line in datafile:
    381             if '#' in line:
    382                 line = line[:line.index('#')]
    383             line = line.strip()
    384             if not line:
    385                 continue
    386             sequence, description, _ = line.split(';')
    387             sequence = sequence.strip().split(' ')
    388             base = int(sequence[0], 16)
    389             vs = int(sequence[1], 16)
    390             description = description.strip()
    391             if description == 'text style':
    392                 text_set.add((base, vs))
    393             elif description == 'emoji style':
    394                 emoji_set.add((base, vs))
    395     return text_set, emoji_set
    396 
    397 
def parse_ucd(ucd_path):
    """Load the Unicode/emoji data files under ucd_path into module globals.

    Side effects: sets _emoji_properties, _chars_by_age,
    _text_variation_sequences, _emoji_variation_sequences,
    _emoji_sequences, and _emoji_zwj_sequences.
    """
    global _emoji_properties, _chars_by_age
    global _text_variation_sequences, _emoji_variation_sequences
    global _emoji_sequences, _emoji_zwj_sequences
    _emoji_properties = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-data.txt'), reverse=True)
    _chars_by_age = parse_unicode_datafile(
        path.join(ucd_path, 'DerivedAge.txt'), reverse=True)
    sequences = parse_standardized_variants(
        path.join(ucd_path, 'StandardizedVariants.txt'))
    _text_variation_sequences, _emoji_variation_sequences = sequences
    _emoji_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-sequences.txt'))
    _emoji_zwj_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-zwj-sequences.txt'))

    # filter modern pentathlon, as it seems likely to be removed from final spec
    # also filter rifle
    def is_excluded(n):
        # U+1F93B (modern pentathlon) and U+1F946 (rifle).
        return n in [0x1f93b, 0x1f946]

    def contains_excluded(t):
        # t is either a single code point or a sequence tuple.
        if type(t) == int:
            return is_excluded(t)
        return any(is_excluded(cp) for cp in t)

    # filter modern pentathlon, as it seems likely to be removed from final spec
    _emoji_properties['Emoji'] = set(
        t for t in _emoji_properties['Emoji'] if not contains_excluded(t))
    _emoji_sequences = dict(
        (t, v) for (t, v) in _emoji_sequences.items() if not contains_excluded(t))
    429 
    430 def flag_sequence(territory_code):
    431     return tuple(0x1F1E6 + ord(ch) - ord('A') for ch in territory_code)
    432 
    433 
# Flags the emoji font is not expected to have dedicated art for; these
# must all render as a single shared "flag tofu" glyph.
UNSUPPORTED_FLAGS = frozenset({
    flag_sequence('BL'), flag_sequence('BQ'), flag_sequence('DG'),
    flag_sequence('EA'), flag_sequence('EH'), flag_sequence('FK'),
    flag_sequence('GF'), flag_sequence('GP'), flag_sequence('GS'),
    flag_sequence('MF'), flag_sequence('MQ'), flag_sequence('NC'),
    flag_sequence('PM'), flag_sequence('RE'), flag_sequence('TF'),
    flag_sequence('WF'), flag_sequence('XK'), flag_sequence('YT'),
})

# Flags that are expected to share a glyph with another territory's flag
# (e.g. dependent territories using the parent country's flag).
EQUIVALENT_FLAGS = {
    flag_sequence('BV'): flag_sequence('NO'),
    flag_sequence('CP'): flag_sequence('FR'),
    flag_sequence('HM'): flag_sequence('AU'),
    flag_sequence('SJ'): flag_sequence('NO'),
    flag_sequence('UM'): flag_sequence('US'),
}

# U+20E3 COMBINING ENCLOSING KEYCAP, used in keycap sequences below.
COMBINING_KEYCAP = 0x20E3

# Legacy Android PUA code points once used for emoji, mapped to the
# standard sequences they must render identically to.
LEGACY_ANDROID_EMOJI = {
    0xFE4E5: flag_sequence('JP'),
    0xFE4E6: flag_sequence('US'),
    0xFE4E7: flag_sequence('FR'),
    0xFE4E8: flag_sequence('DE'),
    0xFE4E9: flag_sequence('IT'),
    0xFE4EA: flag_sequence('GB'),
    0xFE4EB: flag_sequence('ES'),
    0xFE4EC: flag_sequence('RU'),
    0xFE4ED: flag_sequence('CN'),
    0xFE4EE: flag_sequence('KR'),
    0xFE82C: (ord('#'), COMBINING_KEYCAP),
    0xFE82E: (ord('1'), COMBINING_KEYCAP),
    0xFE82F: (ord('2'), COMBINING_KEYCAP),
    0xFE830: (ord('3'), COMBINING_KEYCAP),
    0xFE831: (ord('4'), COMBINING_KEYCAP),
    0xFE832: (ord('5'), COMBINING_KEYCAP),
    0xFE833: (ord('6'), COMBINING_KEYCAP),
    0xFE834: (ord('7'), COMBINING_KEYCAP),
    0xFE835: (ord('8'), COMBINING_KEYCAP),
    0xFE836: (ord('9'), COMBINING_KEYCAP),
    0xFE837: (ord('0'), COMBINING_KEYCAP),
}

# ZWJ sequences that must render identically to a single code point.
ZWJ_IDENTICALS = {
    # KISS
    (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F48B, 0x200D, 0x1F468): 0x1F48F,
    # COUPLE WITH HEART
    (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F468): 0x1F491,
    # FAMILY
    (0x1F468, 0x200D, 0x1F469, 0x200D, 0x1F466): 0x1F46A,
}
    485 
def compute_expected_emoji():
    """Compute the emoji sets the emoji font is required to cover.

    Returns:
        (all_emoji, default_emoji, equivalent_emoji): the full required
        set, the subset that defaults to emoji presentation, and a dict
        of sequences that must share a glyph with another sequence.
    """
    equivalent_emoji = {}
    sequence_pieces = set()
    all_sequences = set()
    all_sequences.update(_emoji_variation_sequences)

    # Fonts encode sequences without the emoji variation selector, so it
    # is stripped before adding them.
    for sequence in _emoji_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)

    for sequence in _emoji_zwj_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)
        # Add reverse of all emoji ZWJ sequences, which are added to the fonts
        # as a workaround to get the sequences work in RTL text.
        reversed_seq = tuple(reversed(sequence))
        all_sequences.add(reversed_seq)
        equivalent_emoji[reversed_seq] = sequence

    # Add all two-letter flag sequences, as even the unsupported ones should
    # resolve to a flag tofu.
    all_letters = [chr(code) for code in range(ord('A'), ord('Z')+1)]
    all_two_letter_codes = itertools.product(all_letters, repeat=2)
    all_flags = {flag_sequence(code) for code in all_two_letter_codes}
    all_sequences.update(all_flags)
    tofu_flags = UNSUPPORTED_FLAGS | (all_flags - set(_emoji_sequences.keys()))

    all_emoji = (
        _emoji_properties['Emoji'] |
        all_sequences |
        sequence_pieces |
        set(LEGACY_ANDROID_EMOJI.keys()))
    default_emoji = (
        _emoji_properties['Emoji_Presentation'] |
        all_sequences |
        set(LEGACY_ANDROID_EMOJI.keys()))

    # All unsupported flags must share one tofu glyph: make the smallest
    # one the canonical representative of the equivalence class.
    first_tofu_flag = sorted(tofu_flags)[0]
    for flag in tofu_flags:
        if flag != first_tofu_flag:
            equivalent_emoji[flag] = first_tofu_flag
    equivalent_emoji.update(EQUIVALENT_FLAGS)
    equivalent_emoji.update(LEGACY_ANDROID_EMOJI)
    equivalent_emoji.update(ZWJ_IDENTICALS)
    # An emoji variation sequence must render like its base character.
    for seq in _emoji_variation_sequences:
        equivalent_emoji[seq] = seq[0]

    return all_emoji, default_emoji, equivalent_emoji
    536 
    537 
def main():
    """Entry point.

    Usage: <script> <target_out> <check_emoji: 'true' or other> [<ucd_path>]
    where target_out is the Android product output directory containing
    fonts/ and etc/fonts.xml, and ucd_path (required when check_emoji is
    'true') points to the Unicode data files.
    """
    target_out = sys.argv[1]
    global _fonts_dir
    _fonts_dir = path.join(target_out, 'fonts')

    fonts_xml_path = path.join(target_out, 'etc', 'fonts.xml')
    parse_fonts_xml(fonts_xml_path)

    hyphens_dir = path.join(target_out, 'usr', 'hyphen-data')
    check_hyphens(hyphens_dir)

    check_emoji = sys.argv[2]
    if check_emoji == 'true':
        ucd_path = sys.argv[3]
        parse_ucd(ucd_path)
        all_emoji, default_emoji, equivalent_emoji = compute_expected_emoji()
        check_emoji_coverage(all_emoji, equivalent_emoji)
        check_emoji_defaults(default_emoji)


if __name__ == '__main__':
    main()
    560