李振华/nototools

forked from src-openEuler/nototools 
This repository does not declare an open-source license file (LICENSE). Before using it, check the project description and the licensing of its upstream dependencies.
nototools-python3.patch 181.85 KB
compile_success committed on 2019-11-30 16:42: add package init
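This patch ports nototools (upstream commit 9c4375f07c9adc00c700c5d252df6a25d7425870) from Python 2 to Python 3. The bulk of the hunks convert print statements into calls to the print() function; a few also replace other Python 2 only idioms such as "print >> sys.stderr" and tuple-unpacking lambda parameters. Below is a minimal standalone sketch of the recurring conversion pattern; the function name and sample data are illustrative and do not appear in nototools.

import sys

def report(dst_file, filename, values):
    # Python 2: print 'wrote %s' % dst_file
    print('wrote %s' % dst_file)
    # Python 2: print >> sys.stderr, 'no script for %s' % filename
    sys.stderr.write('no script for %s\n' % filename)
    # Python 2: sorted(values, key=lambda (r, p): (-p, r))
    return sorted(values, key=lambda x: (-x[1], x[0]))

if __name__ == '__main__':
    print(report('out.ttf', 'root', [('GB', 60), ('US', 300)]))

The same mechanical substitutions repeat across every file touched in the diff that follows.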
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/add_vs_cmap.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/add_vs_cmap.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/add_vs_cmap.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/add_vs_cmap.py 2019-07-09 17:20:41.507204218 +0200
@@ -44,7 +44,7 @@
cmap_table = font_data.get_cmap(font)
emoji = set(cmap_table.keys()) & emoji_variants
if not emoji:
- print 'no emoji match those in %s' % font_name
+ print('no emoji match those in %s' % font_name)
return
uvs = VS_EMOJI if presentation == 'emoji' else VS_TEXT
cmap14 = _c_m_a_p.CmapSubtable.newSubtable(14)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: add_vs_cmap.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/android_patches.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/android_patches.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/android_patches.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/android_patches.py 2019-07-09 17:18:45.223412508 +0200
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
+# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -76,7 +76,7 @@
& coverage.character_set(lgc_font_file))
if chars_to_add:
- print 'patch hyphens', font_name
+ print('patch hyphens', font_name)
merger.merge_chars_from_bank(
path.join(srcdir, font_name),
path.join(srcdir, lgc_font_name),
@@ -86,9 +86,9 @@
if copy_unchanged:
shutil.copy2(
path.join(srcdir,font_name), path.join(dstdir, font_name))
- print '%s already has hyphens, copying' % font_name
+ print('%s already has hyphens, copying' % font_name)
else:
- print '%s already has hyphens' % font_name
+ print('%s already has hyphens' % font_name)
def _remove_cjk_emoji(cjk_font_names, srcdir, dstdir):
@@ -122,7 +122,7 @@
)
for font_name in cjk_font_names:
- print 'remove cjk emoji', font_name
+ print('remove cjk emoji', font_name)
_remove_from_cmap(
path.join(srcdir, font_name),
path.join(dstdir, font_name),
@@ -149,12 +149,12 @@
result to dstdir using the same name."""
if not path.isdir(srcdir):
- print '%s is not a directory' % srcdir
+ print('%s is not a directory' % srcdir)
return
ttc_files = [f for f in os.listdir(srcdir) if f.endswith('.ttc')]
if not ttc_files:
- print 'no .ttc file to patch in %s' % srcdir
+ print('no .ttc file to patch in %s' % srcdir)
return
tool_utils.ensure_dir_exists(dstdir)
@@ -284,7 +284,7 @@
lines.append('}\n')
with codecs.open(filename, 'w', 'UTF-8') as f:
f.write('\n'.join(lines))
- print 'wrote', filename
+ print('wrote', filename)
def subset_symbols(srcdir, dstdir):
@@ -339,7 +339,7 @@
target_coverage -= set(range(0x23BE, 0x23CC+1))
for font_file in glob.glob(path.join(srcdir, 'NotoSansSymbols-*.ttf')):
- print 'main subset', font_file
+ print('main subset', font_file)
out_file = path.join(
dstdir, path.basename(font_file)[:-4] + '-Subsetted.ttf')
subset.subset_font(font_file, out_file, include=target_coverage)
@@ -349,7 +349,7 @@
target_coverage = EMOJI | unicode_data.get_unicode_emoji_variants()
for font_file in glob.glob(path.join(srcdir, 'NotoSansSymbols-*.ttf')):
- print 'secondary subset', font_file
+ print('secondary subset', font_file)
out_file = path.join(
dstdir, path.basename(font_file)[:-4] + '-Subsetted2.ttf')
subset.subset_font(font_file, out_file, include=target_coverage)
@@ -360,10 +360,10 @@
# Leave alone OTF
for font_file in glob.glob(path.join(srcdir, '*.ttf')):
- print 'change post table to 3.0', font_file
+ print('change post table to 3.0', font_file)
out_file = path.join(dstdir, path.basename(font_file))
if path.isfile(out_file):
- print ' repatching', out_file
+ print(' repatching', out_file)
font_file = out_file
font = ttLib.TTFont(font_file)
font['post'].formatType = 3.0
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: android_patches.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/autofix_for_phase3.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/autofix_for_phase3.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/autofix_for_phase3.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/autofix_for_phase3.py 2019-07-09 17:20:20.707420340 +0200
@@ -87,16 +87,16 @@
# add '/' to distinguish between noto-fonts/ and noto-fonts-alpha/
for repo_tag in ['[fonts]', '[fonts_alpha]', '[source]']:
prefix = tool_utils.resolve_path(repo_tag) + '/'
- print 'trying prefix "%s"' % prefix
+ print('trying prefix "%s"' % prefix)
if all(tool_utils.resolve_path(f).startswith(prefix) for f in fonts):
return _get_fonts_repo_version_info(repo_tag)
# else report the first failure
for f in fonts:
if not tool_utils.resolve_path(f).startswith(prefix):
- print '# failed at "%s"' % tool_utils.resolve_path(f)
+ print('# failed at "%s"' % tool_utils.resolve_path(f))
break
- print 'no prefix succeeded'
+ print('no prefix succeeded')
return None
@@ -158,12 +158,12 @@
dst_dir = tool_utils.ensure_dir_exists(dst_dir)
font_names = sorted(_expand_font_names(font_names))
- print 'Processing %d fonts\n %s' % (
- len(font_names), '\n '.join(font_names[:5]) + '...')
+ print('Processing %d fonts\n %s' % (
+ len(font_names), '\n '.join(font_names[:5]) + '...'))
src_root = tool_utils.resolve_path(src_root)
- print 'Src root: %s' % src_root
- print 'Dest dir: %s' % dst_dir
+ print('Src root: %s' % src_root)
+ print('Dest dir: %s' % dst_dir)
if release_dir is None:
rel_dir = None
@@ -181,7 +181,7 @@
if not version_info:
raise Exception('could not compute version info from fonts')
- print 'Computed version_info: %s' % version_info
+ print('Computed version_info: %s' % version_info)
else:
_check_version_info(version_info)
@@ -189,7 +189,7 @@
_check_autohint(autohint)
if dry_run:
- print '*** dry run %s***' % ('(autohint) ' if autohint else '')
+ print('*** dry run %s***' % ('(autohint) ' if autohint else ''))
for f in font_names:
f = path.join(src_root, f)
fix_font(f, dst_dir, rel_dir, version, version_info, autohint, dry_run)
@@ -237,7 +237,7 @@
rversion = _extract_version(relfont) if relfont else None
if rversion:
- print 'Existing release version: %s' % rversion
+ print('Existing release version: %s' % rversion)
r_mm, r_is_phase2 = _version_str_to_mm(rversion)
mm, is_phase2 = _version_str_to_mm(version)
@@ -245,7 +245,7 @@
if nversion == 'keep':
if rversion is not None:
if r_is_phase2:
- print 'Warning, keeping phase 2 release version %s' % rversion
+ print('Warning, keeping phase 2 release version %s' % rversion)
return rversion
else:
n_mm, n_is_phase_2 = _version_str_to_mm(nversion)
@@ -309,11 +309,11 @@
def autohint_font(src, dst, script, dry_run):
code = _autohint_code(src, script)
if code == 'not-hinted':
- print 'Warning: no hinting information for %s, script %s' % (src, script)
+ print('Warning: no hinting information for %s, script %s' % (src, script))
return
if code == None:
- print 'Warning: unable to autohint %s' % src
+ print('Warning: unable to autohint %s' % src)
return
if code == 'no-script':
@@ -321,14 +321,14 @@
else:
args = ['ttfautohint', '-t', '-W', '-f', code, src, dst]
if dry_run:
- print 'dry run would autohint:\n "%s"' % ' '.join(args)
+ print('dry run would autohint:\n "%s"' % ' '.join(args))
return
hinted_dir = tool_utils.ensure_dir_exists(path.dirname(dst))
try:
subprocess.check_call(args)
except Exception as e:
- print '### failed to autohint %s' % src
+ print('### failed to autohint %s' % src)
# we failed to autohint, let's continue anyway
# however autohint will have left an empty file there, remove it.
try:
@@ -337,7 +337,7 @@
pass
- print 'wrote autohinted %s using %s' % (dst, code)
+ print('wrote autohinted %s using %s' % (dst, code))
def _alert(val_name, cur_val, new_val):
@@ -345,7 +345,7 @@
tmpl = 'update %s\n from: "%s"\n to: "%s"'
else:
tmpl = 'update %s\n from: %4d\n to: %4d'
- print tmpl % (val_name, cur_val, new_val)
+ print(tmpl % (val_name, cur_val, new_val))
def _alert_and_check(val_name, cur_val, expected_val, max_diff):
@@ -396,7 +396,7 @@
def fix_font(f, dst_dir, rel_dir, version, version_info, autohint, dry_run):
- print '\n-----\nfont:', f
+ print('\n-----\nfont:', f)
font = ttLib.TTFont(f)
relfont = _get_release_font(f, rel_dir)
@@ -419,7 +419,7 @@
expected_upem = 1000
upem = font['head'].unitsPerEm
if upem != expected_upem:
- print 'expected %d upem but got %d upem' % (expected_upem, upem)
+ print('expected %d upem but got %d upem' % (expected_upem, upem))
if _is_ui_metrics(f):
if upem == 2048:
@@ -450,10 +450,10 @@
fname = path.basename(f)
udst = path.join(dst_dir, 'unhinted', fname)
if dry_run:
- print 'dry run would write:\n "%s"' % udst
+ print('dry run would write:\n "%s"' % udst)
else:
font.save(udst)
- print 'wrote %s' % udst
+ print('wrote %s' % udst)
if autohint:
hdst = path.join(dst_dir, 'hinted', fname)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: autofix_for_phase3.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/autofix_for_release.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/autofix_for_release.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/autofix_for_release.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/autofix_for_release.py 2019-07-09 17:18:03.180849356 +0200
@@ -58,7 +58,7 @@
expected_font_revision = major_version+'.'+minor_version
if font_revision != expected_font_revision:
font['head'].fontRevision = float(expected_font_revision)
- print 'Fixed fontRevision to %s' % expected_font_revision
+ print('Fixed fontRevision to %s' % expected_font_revision)
return True
return False
@@ -68,7 +68,7 @@
"""Fix the fsType of the font."""
if font['OS/2'].fsType != 0:
font['OS/2'].fsType = 0
- print 'Updated fsType to 0'
+ print('Updated fsType to 0')
return True
return False
@@ -77,7 +77,7 @@
"""Fix the vendor ID of the font."""
if font['OS/2'].achVendID != 'GOOG':
font['OS/2'].achVendID = 'GOOG'
- print 'Changed font vendor ID to GOOG'
+ print('Changed font vendor ID to GOOG')
return True
return False
@@ -107,7 +107,7 @@
copyright_data = u'Copyright %s Google Inc. All Rights Reserved.' % year
if copyright_data != name_records[0]:
- print 'Updated copyright message to "%s"' % copyright_data
+ print('Updated copyright message to "%s"' % copyright_data)
font_data.set_name_record(font, 0, copyright_data)
modified = True
@@ -120,8 +120,8 @@
break
if record != name_records[name_id]:
font_data.set_name_record(font, name_id, record)
- print 'Updated name table record #%d from "%s" to "%s"' % (
- name_id, oldrecord, record)
+ print('Updated name table record #%d from "%s" to "%s"' % (
+ name_id, oldrecord, record))
modified = True
trademark_names = ['Noto', 'Arimo', 'Tinos', 'Cousine']
@@ -132,29 +132,29 @@
trademark_name = name
break
if not trademark_name:
- print 'no trademarked name in \'%s\'' % font_family
+ print('no trademarked name in \'%s\'' % font_family)
else:
trademark_line = TRADEMARK_TEMPLATE % trademark_name
if name_records[7] != trademark_line:
old_line = name_records[7]
font_data.set_name_record(font, 7, trademark_line)
modified = True
- print 'Updated name table record 7 from "%s" to "%s"' % (old_line, trademark_line)
+ print('Updated name table record 7 from "%s" to "%s"' % (old_line, trademark_line))
if name_records[11] != NOTO_URL:
font_data.set_name_record(font, 11, NOTO_URL)
modified = True
- print 'Updated name table record 11 to "%s"' % NOTO_URL
+ print('Updated name table record 11 to "%s"' % NOTO_URL)
if name_records[_LICENSE_ID] != _SIL_LICENSE:
font_data.set_name_record(font, _LICENSE_ID, _SIL_LICENSE)
modified = True
- print 'Updated license id'
+ print('Updated license id')
if name_records[_LICENSE_URL_ID] != _SIL_LICENSE_URL:
font_data.set_name_record(font, _LICENSE_URL_ID, _SIL_LICENSE_URL)
modified = True
- print 'Updated license url'
+ print('Updated license url')
# TODO: check preferred family/subfamily(16&17)
@@ -177,7 +177,7 @@
modified = True
if modified:
- print 'Fixed GDEF.AttachList'
+ print('Fixed GDEF.AttachList')
return modified
@@ -193,7 +193,7 @@
if glyph.program.bytecode:
glyph.program.bytecode = array.array('B')
modified = True
- print 'Dropped hints from glyph "%s"' % glyph_name
+ print('Dropped hints from glyph "%s"' % glyph_name)
return modified
@@ -203,7 +203,7 @@
for table in tables:
if table in font:
modified = True
- print 'Dropped table "%s"' % table
+ print('Dropped table "%s"' % table)
modified = True
del font[table]
return modified
@@ -249,8 +249,8 @@
old_bitmap_string = font_data.unicoderange_bitmap_to_string(os2_bitmap)
font_data.set_os2_unicoderange_bitmap(font, expected_bitmap)
bitmap_string = font_data.unicoderange_bitmap_to_string(expected_bitmap)
- print 'Change unicoderanges from:\n %s\nto:\n %s' % (
- old_bitmap_string, bitmap_string)
+ print('Change unicoderanges from:\n %s\nto:\n %s' % (
+ old_bitmap_string, bitmap_string))
return True
return False
@@ -259,17 +259,17 @@
modified = False
hhea_table = font["hhea"]
if hhea_table.lineGap != 0:
- print 'hhea lineGap was %s, setting to 0' % hhea_table.lineGap
+ print('hhea lineGap was %s, setting to 0' % hhea_table.lineGap)
hhea_table.lineGap = 0
modified = True
vhea_table = font.get("vhea")
if vhea_table and vhea_table.lineGap != 0:
- print 'vhea lineGap was %s, setting to 0' % vhea_table.lineGap
+ print('vhea lineGap was %s, setting to 0' % vhea_table.lineGap)
vhea_table.lineGap = 0
modified = True
os2_table = font["OS/2"]
if os2_table.sTypoLineGap != 0:
- print 'os/2 sTypoLineGap was %d, setting to 0' % os2_table.sTypoLineGap
+ print('os/2 sTypoLineGap was %d, setting to 0' % os2_table.sTypoLineGap)
os2_table.sTypoLineGap = 0
modified = True
return modified
@@ -282,7 +282,7 @@
src_file = os.path.join(src_root, file_path)
- print 'Font file: %s' % src_file
+ print('Font file: %s' % src_file)
font = ttLib.TTFont(src_file)
modified = False
@@ -305,11 +305,11 @@
fixed_path = fix_path(file_path, is_hinted)
if fixed_path != file_path:
- print 'changed file_path from "%s" to "%s"' % (file_path, fixed_path)
+ print('changed file_path from "%s" to "%s"' % (file_path, fixed_path))
modified = True
if not modified:
- print 'No modification necessary'
+ print('No modification necessary')
if modified or save_unmodified:
# wait until we need it before we create the dest directory
dst_file = os.path.join(dst_root, fixed_path)
@@ -317,7 +317,7 @@
if not path.isdir(dst_dir):
os.makedirs(dst_dir)
font.save(dst_file)
- print 'Wrote %s' % dst_file
+ print('Wrote %s' % dst_file)
def fix_fonts(src_root, dst_root, name_pat, save_unmodified):
@@ -353,17 +353,17 @@
if not args.src_root:
# not on command line and not in user's .notoconfig
- print 'no src root specified.'
+ print('no src root specified.')
return
src_root = path.expanduser(args.src_root)
if not path.isdir(src_root):
- print '%s does not exist or is not a directory' % src_root
+ print('%s does not exist or is not a directory' % src_root)
return
dst_root = path.expanduser(args.dst_root)
if not path.isdir(dst_root):
- print '%s does not exist or is not a directory' % dst_root
+ print('%s does not exist or is not a directory' % dst_root)
return
fix_fonts(src_root, dst_root, args.name_pat, args.save_unmodified)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: autofix_for_release.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/check_familyname_and_styles.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/check_familyname_and_styles.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/check_familyname_and_styles.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/check_familyname_and_styles.py 2019-07-09 17:10:55.065307030 +0200
@@ -90,9 +90,9 @@
def check_familyname(name, styles):
notofont = noto_fonts.get_noto_font('unhinted/' + name + '-Regular.ttf')
if not notofont:
- print 'Error: could not parse', name
+ print('Error: could not parse', name)
return False
- print name, noto_fonts.noto_font_to_wws_family_id(notofont), styles
+ print(name, noto_fonts.noto_font_to_wws_family_id(notofont), styles)
return True
@@ -156,7 +156,7 @@
f.write(allnames)
f.write('\n')
else:
- print allnames
+ print(allnames)
def main():
@@ -180,17 +180,17 @@
if args.check:
passed = check_familynames(args.familynamedata)
if not passed:
- print 'Check failed, some files had errors.'
+ print('Check failed, some files had errors.')
return
- print 'Check succeeded.'
+ print('Check succeeded.')
if args.write:
outfile = None if args.write == 'stdout' else args.write
if not outfile and args.check:
- print
+ print()
generate_filenames(args.familynamedata, outfile, args.sort)
if outfile:
- print 'Wrote', outfile
+ print('Wrote', outfile)
if __name__ == '__main__':
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: check_familyname_and_styles.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/cldr_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/cldr_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/cldr_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/cldr_data.py 2019-07-09 17:16:03.238095658 +0200
@@ -137,11 +137,11 @@
_LANG_TO_SCRIPTS[lang].add(script)
if langs_missing_likely_subtag_data:
- print 'cldr_data: %d keys not in likely subtags:' % len(
- langs_missing_likely_subtag_data)
+ print('cldr_data: %d keys not in likely subtags:' % len(
+ langs_missing_likely_subtag_data))
for k in sorted(langs_missing_likely_subtag_data):
- print ' ', k
- print 'cldr_data: defaulting script to Latn'
+ print(' ', k)
+ print('cldr_data: defaulting script to Latn')
# raise Exception('oops')
# Use likely subtag data mapping script to lang to extend lang_to_scripts.
@@ -155,8 +155,8 @@
lang = _LIKELY_SUBTAGS[und_scr][0]
if lang != 'und' and script not in _LANG_TO_SCRIPTS[lang]:
if _DEBUG:
- print 'lang to scripts missing script %s for %s (from %s)' % (
- script, lang, ', '.join(_LANG_TO_SCRIPTS[lang]))
+ print('lang to scripts missing script %s for %s (from %s)' % (
+ script, lang, ', '.join(_LANG_TO_SCRIPTS[lang])))
_LANG_TO_SCRIPTS[lang].add(script)
if _USE_EXTRA_LOCALE_DATA:
@@ -175,13 +175,13 @@
lang_scripts = _LANG_TO_SCRIPTS[lang]
if script not in lang_scripts:
if _DEBUG:
- print ('extra likely subtags lang %s has script %s but supplemental '
+ print(('extra likely subtags lang %s has script %s but supplemental '
'only has [%s]') % (
- lang, script, ', '.join(sorted(lang_scripts)))
+ lang, script, ', '.join(sorted(lang_scripts))))
if len(lang_scripts) == 1:
replacement = set([script])
if _DEBUG:
- print 'replacing %s with %s' % (lang_scripts, replacement)
+ print('replacing %s with %s' % (lang_scripts, replacement))
_LANG_TO_SCRIPTS[lang] = replacement
else:
_LANG_TO_SCRIPTS[lang].add(script)
@@ -189,8 +189,8 @@
# skip ZZ region
if region != 'ZZ' and lang_script not in _REGION_TO_LANG_SCRIPTS[region]:
if _DEBUG:
- print 'extra lang_script %s not in cldr for %s, adding' % (
- lang_script, region)
+ print('extra lang_script %s not in cldr for %s, adding' % (
+ lang_script, region))
_REGION_TO_LANG_SCRIPTS[region].add(lang_script)
_LANG_TO_REGIONS[lang].add(region)
@@ -265,7 +265,7 @@
m = LSRV_RE.match(lang_tag)
if not m:
if _DEBUG:
- print 'regex did not match locale \'%s\'' % loc_tag
+ print('regex did not match locale \'%s\'' % loc_tag)
return result
lang = m.group(1)
script = m.group(2)
@@ -291,7 +291,7 @@
break
if _DEBUG:
- print 'no likely subtag for %s' % lang_tag
+ print('no likely subtag for %s' % lang_tag)
tags = lang_tag.split('-')
return (tags[0], tags[1] if len(tags) > 1 else 'Zzzz',
tags[2] if len(tags) > 2 else 'ZZ')
@@ -321,7 +321,7 @@
return False
# we really should throw an exception
if _DEBUG:
- print 'No script metadata for %s' % script
+ print('No script metadata for %s' % script)
return False
@@ -467,7 +467,7 @@
except KeyError:
pass
if _DEBUG:
- print 'No English name for \'%s\'' % lang_scr
+ print('No English name for \'%s\'' % lang_scr)
return None
@@ -477,7 +477,7 @@
return _ENGLISH_TERRITORY_NAMES[region]
except KeyError:
if _DEBUG:
- print 'No English name for region %s' % region
+ print('No English name for region %s' % region)
return ''
@@ -582,7 +582,7 @@
if accept(s)]
exemplars.extend(unicode_set_string_to_list(tag.text))
except Exception as e:
- print 'failed parse of %s' % cldr_file_path
+ print('failed parse of %s' % cldr_file_path)
raise e
break
@@ -643,7 +643,7 @@
m = LSRV_RE.match(loc_tag)
if not m:
if _DEBUG:
- print 'regex did not match locale \'%s\'' % loc_tag
+ print('regex did not match locale \'%s\'' % loc_tag)
return None
lang = m.group(1)
script = m.group(2)
@@ -697,8 +697,7 @@
# population and converting the list to a tuple
for lang_scr, values in tmp_map.iteritems():
_lang_scr_to_lit_pops[lang_scr] = tuple(
- sorted(values, key=lambda (r, p): (-p, r)))
-
+ sorted(values, key=lambda x: (-x[1], x[0])))
def get_lang_scr_to_lit_pops():
"""Return a mapping from lang_scr to a list of tuples of region and
@@ -759,28 +758,28 @@
_USE_EXTRA_LOCALE_DATA = False
if args.region_to_lang != None:
- print 'region to lang+script'
+ print('region to lang+script')
regions = args.region_to_lang or sorted(known_regions())
for r in regions:
- print '%s (%s):' % (r, get_english_region_name(r))
+ print('%s (%s):' % (r, get_english_region_name(r)))
for ls in sorted(region_to_lang_scripts(r)):
- print ' %s' % ls
+ print(' %s' % ls)
if args.lang_to_region != None:
- print 'lang to region'
+ print('lang to region')
langs = args.lang_to_region or sorted(known_langs())
for l in langs:
- print '%s (%s):' % (l, get_english_language_name(l))
+ print('%s (%s):' % (l, get_english_language_name(l)))
for r in sorted(lang_to_regions(l)):
- print ' %s' % r
+ print(' %s' % r)
if args.lang_to_script != None:
- print 'lang to script'
+ print('lang to script')
langs = args.lang_to_script or sorted(known_langs())
for l in langs:
- print '%s (%s):' % (l, get_english_language_name(l))
+ print('%s (%s):' % (l, get_english_language_name(l)))
for s in sorted(lang_to_scripts(l)):
- print ' %s' % s
+ print(' %s' % s)
if __name__ == "__main__":
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: cldr_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/cmap_block_coverage.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/cmap_block_coverage.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/cmap_block_coverage.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/cmap_block_coverage.py 2019-07-09 17:09:04.418461664 +0200
@@ -85,8 +85,8 @@
initial_cp = start_cp
while num < details - 1 and num < defined_count:
if initial_cp in defined_cps:
- print '%13d %04x %s' % (
- num + 1, initial_cp, unicode_data.name(initial_cp, '(unnamed)'))
+ print('%13d %04x %s' % (
+ num + 1, initial_cp, unicode_data.name(initial_cp, '(unnamed)')))
num += 1
initial_cp += 1
if num < defined_count:
@@ -102,11 +102,11 @@
middle_cp = final_cp - 1
while middle_cp >= initial_cp:
if middle_cp in defined_cps:
- print '%13s' % '...'
+ print('%13s' % '...')
break
middle_cp -= 1
if final_name:
- print '%13d %04x %s' % (defined_count, final_cp, final_name)
+ print('%13d %04x %s' % (defined_count, final_cp, final_name))
def _is_empty_scripts(scripts):
return (not scripts
@@ -135,10 +135,10 @@
script_names = '(all)'
else:
script_names = _script_names(scripts)
- print '%13s %6d %3s in %3d %7s: %s' % (
+ print('%13s %6d %3s in %3d %7s: %s' % (
range_text, defined_count, 'cps' if defined_count != 1 else 'cp',
num_scripts, 'scripts' if num_scripts != 1 else 'script',
- script_names)
+ script_names))
if details > 0:
_list_details(start_cp, limit_cp, defined_cps, defined_count, details)
@@ -162,7 +162,7 @@
if block and block != 'No_Block':
if not (skip_empty and _is_empty_scripts(scripts)):
if not showed_block:
- print '...' if block == 'No_Block' else block
+ print('...' if block == 'No_Block' else block)
showed_block = True
_list_range(
start_cp, cp, defined_cps, defined_count, scripts, all_scripts,
@@ -178,7 +178,7 @@
defined_count += 1
if not (skip_empty and _is_empty_scripts(scripts)):
if not showed_block:
- print '...' if block == 'No_Block' else block
+ print('...' if block == 'No_Block' else block)
_list_range(
start_cp, limit, defined_cps, defined_count, scripts, all_scripts,
only_scripts, details)
@@ -186,13 +186,13 @@
def _summarize_block(block, block_count, defined_count, script_counts):
if block == 'No_Block':
- print '...'
+ print('...')
return
if block_count == defined_count:
- print '%s (%d cps)' % (block, defined_count)
+ print('%s (%d cps)' % (block, defined_count))
else:
- print '%s (%d of %d cps)' % (block, defined_count, block_count)
+ print('%s (%d of %d cps)' % (block, defined_count, block_count))
lower_limit = int(defined_count / 10)
groups = collections.defaultdict(list)
@@ -223,7 +223,7 @@
else:
count = '%d-%d' % (low, hi)
script_names = _script_names(scripts)
- print '%6s: %s' % (count, script_names)
+ print('%6s: %s' % (count, script_names))
def _summarize_blocks(start, limit, defined_cps, cp_to_scripts, all_scripts):
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: cmap_block_coverage.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/cmap_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/cmap_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/cmap_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/cmap_data.py 2019-07-09 17:07:47.021269334 +0200
@@ -222,13 +222,13 @@
'2,6.4',
])
cmapdata = CmapData(meta, table)
- print cmapdata
+ print(cmapdata)
xml_text = write_cmap_data(cmapdata)
newdata = read_cmap_data(xml_text)
- print newdata
+ print(newdata)
write_cmap_data_file(cmapdata, 'test_cmap_data.xml', pretty=True)
newdata = read_cmap_data_file('test_cmap_data.xml')
- print newdata
+ print(newdata)
if __name__ == "__main__":
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: cmap_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/collect_cldr_punct.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/collect_cldr_punct.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/collect_cldr_punct.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/collect_cldr_punct.py 2019-07-09 17:07:22.869521366 +0200
@@ -99,7 +99,7 @@
script = cldr_data.get_likely_script(filename)
if script == 'Zzzz':
if filename != 'root':
- print >> sys.stderr, 'no script for %s' % filename
+ sys.stderr.write('no script for %s\n' % filename)
else:
script_to_punct[script] |= punct
@@ -135,13 +135,13 @@
def _write_script_to_punct(script_to_punct):
- print 'SCRIPT_TO_PUNCT = {'
+ print('SCRIPT_TO_PUNCT = {')
for script in sorted(script_to_punct):
chars = script_to_punct[script]
int_chars = [ord(cp) for cp in chars]
- print ' # %s' % ('|'.join(sorted(chars)))
- print " '%s': '%s'," % (script, tool_utils.write_int_ranges(int_chars))
- print '}'
+ print(' # %s' % ('|'.join(sorted(chars))))
+ print(" '%s': '%s'," % (script, tool_utils.write_int_ranges(int_chars)))
+ print('}')
def main():
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: collect_cldr_punct.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/compare_cmap_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/compare_cmap_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/compare_cmap_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/compare_cmap_data.py 2019-07-09 17:06:19.111186700 +0200
@@ -106,8 +106,8 @@
if target_script not in base_map:
missing_scripts.append(target_script)
if missing_scripts:
- print 'Cannot compare %s and %s, %s not in cmap data.' % (
- base_script, target_script, ', '.join(missing_scripts))
+ print('Cannot compare %s and %s, %s not in cmap data.' % (
+ base_script, target_script, ', '.join(missing_scripts)))
return
base_data = base_map[base_script]
target_data = base_map[target_script]
@@ -139,10 +139,10 @@
def show_undefined(start, end):
if start >= 0:
if end > start:
- print ' %04x-%04x Zzzz <%d undefined>' % (
- start, end, end - start - 1)
+ print(' %04x-%04x Zzzz <%d undefined>' % (
+ start, end, end - start - 1))
else:
- print ' %04x Zzzz <1 undefined>' % start
+ print(' %04x Zzzz <1 undefined>' % start)
for cp in sorted(cps):
block = unicode_data.block(cp)
@@ -150,7 +150,7 @@
show_undefined(undefined_start, undefined_end)
undefined_start, undefined_end = -1, -1
if block != last_block:
- print ' %s' % block
+ print(' %s' % block)
last_block = block
script = unicode_data.script(cp)
if script == 'Zzzz':
@@ -178,14 +178,14 @@
else:
script_text = ', '.join(scripts)
extra = ' (font %s)' % script_text
- print ' %6s %4s %2s %3s %s%s%s' % (
+ print(' %6s %4s %2s %3s %s%s%s' % (
'%04x' % cp,
script,
unicode_data.category(cp),
unicode_data.age(cp),
unicode_data.name(cp, ''),
extensions,
- extra)
+ extra))
show_undefined(undefined_start, undefined_end)
@@ -195,16 +195,16 @@
def report_cps(label, cps, inverted=None):
if not cps:
return
- print ' %s (%d): %s' % (
- label, len(cps), lint_config.write_int_ranges(cps))
+ print(' %s (%d): %s' % (
+ label, len(cps), lint_config.write_int_ranges(cps)))
if detailed:
_print_detailed(cps, inverted)
if report_same:
- print label
+ print(label)
if added or removed or xadded or xremoved:
if not report_same:
- print label
+ print(label)
removed_to_fallback = removed & xadded if removed and xadded else None
if removed_to_fallback:
removed -= removed_to_fallback
@@ -235,8 +235,8 @@
base_title = title_from_metadata(base_cmap_data.meta)
target_title = title_from_metadata(target_cmap_data.meta)
- print 'base: %s' % base_title
- print 'target: %s' % target_title
+ print('base: %s' % base_title)
+ print('target: %s' % target_title)
for script in sorted(compare):
added, removed, xadded, xremoved = compare[script]
label = '%s # %s' % (script, base_map[script].name)
@@ -248,7 +248,7 @@
compare, base_cmap_data = compare_result
base_map = cmap_data.create_map_from_table(base_cmap_data.table)
title = title_from_metadata(base_cmap_data.meta)
- print 'data: %s' % title
+ print('data: %s' % title)
for t in sorted(compare):
added, removed, xadded, xremoved = compare[t]
base_script, target_script = t
@@ -313,7 +313,7 @@
if not args.target:
if not scripts or len(scripts) < 2:
- print 'Interscript comparison requires two or more scripts.'
+ print('Interscript comparison requires two or more scripts.')
return
result = compare_interscript_data_file(args.base, args.scripts, opts)
report_interscript_compare(result, detailed=args.detailed)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: compare_cmap_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/compare_fonts.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/compare_fonts.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/compare_fonts.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/compare_fonts.py 2019-07-09 17:03:38.868858885 +0200
@@ -82,10 +82,10 @@
failed = False
for test in test_list:
if test not in FontCompare.test_names:
- print 'unknown test: \'%s\'' % test
+ print('unknown test: \'%s\'' % test)
failed = True
if failed:
- print 'tests are: %s' % (','.join(sorted(FontCompare.test_names)))
+ print('tests are: %s' % (','.join(sorted(FontCompare.test_names))))
return None
return frozenset(test_list)
@@ -131,25 +131,25 @@
def _log(self, msg):
"""Write a message that should not go to config output."""
if not self.emit_config:
- print msg
+ print(msg)
def _logerr(self, msg):
"""Write an error that should not go to config output."""
# this is an error, but lint doesn't check for it, so no point in emitting a comment.
if not self.emit_config:
- print msg
+ print(msg)
def _err(self, msg):
"""Write a message that should go to config as a comment, or just be logged."""
if self.emit_config:
- print '# ' + msg
+ print('# ' + msg)
else:
- print msg
+ print(msg)
def _config(self, msg):
"""Write a message that should go to config."""
if self.emit_config:
- print msg
+ print(msg)
def _check_attribute(self, target_obj, test_obj, attr):
target_value = getattr(target_obj, attr)
@@ -238,9 +238,9 @@
for attr, test_val, target_val in sorted(failed_attrs):
if self.emit_config:
- print 'enable head/hhea/%s' % attr.lower()
+ print('enable head/hhea/%s' % attr.lower())
else:
- print 'font hhea %s was %d but target was %d' % (attr, test_val, target_val)
+ print('font hhea %s was %d but target was %d' % (attr, test_val, target_val))
def check_os2(self):
if self._skip('OS/2'):
@@ -260,9 +260,9 @@
for attr, test_val, target_val in sorted(failed_attrs):
if self.emit_config:
- print 'enable head/os2/%s' % attr_name_map[attr]
+ print('enable head/os2/%s' % attr_name_map[attr])
else:
- print 'font OS/2 %s was %d but target was %d' % (attr, test_val, target_val)
+ print('font OS/2 %s was %d but target was %d' % (attr, test_val, target_val))
def check_glyph_bounds(self):
# Don't compare the actual bounds, but whether they exceed the limits when the target
@@ -451,14 +451,14 @@
target = ttLib.TTFont(target_file)
test = ttLib.TTFont(test_file)
if reverse:
- print 'reversing comparison'
+ print('reversing comparison')
temp = target
target = test
test = temp
- print
+ print()
if not emit_config:
- print 'target is previous version' if incremental_version else 'target is reference font'
+ print('target is previous version' if incremental_version else 'target is reference font')
FontCompare(target, test, incremental_version, emit_config, ignored_cp, only_cp,
enabled_tests).check_all()
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: compare_fonts.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/compare_summary.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/compare_summary.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/compare_summary.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/compare_summary.py 2019-07-09 17:05:16.470840372 +0200
@@ -55,7 +55,7 @@
def print_keys(key_list):
for k in key_list:
- print ' ' + k
+ print(' ' + k)
def compare_table_info(base_info, target_info):
biggest_deltas = []
@@ -110,7 +110,7 @@
def print_difference(k, base_tuple, target_tuple, other_difference):
b_path, b_version, b_name, b_size, b_numglyphs, b_numchars, b_cmap, b_tableinfo = base_tuple
t_path, t_version, t_name, t_size, t_numglyphs, t_numchars, t_cmap, t_tableinfo = target_tuple
- print ' ' + k
+ print(' ' + k)
versions_differ = b_version != t_version
diff_list = []
if versions_differ:
@@ -118,10 +118,10 @@
msg = '(base is newer!)'
else:
msg = ''
- print ' version: %s vs %s %s' % (b_version, t_version, msg)
+ print(' version: %s vs %s %s' % (b_version, t_version, msg))
if b_name != t_name:
diff_list.append('name')
- print " name: '%s' vs '%s'" % (b_name, t_name)
+ print(" name: '%s' vs '%s'" % (b_name, t_name))
if b_size != t_size:
diff_list.append('size')
delta = int(t_size) - int(b_size)
@@ -129,11 +129,11 @@
msg = '%d byte%s smaller' % (-delta, '' if delta == -1 else 's')
else:
msg = '%d byte%s bigger' % (delta, '' if delta == 1 else 's')
- print ' size: %s vs %s (%s)' % (b_size, t_size, msg)
+ print(' size: %s vs %s (%s)' % (b_size, t_size, msg))
table_diffs = compare_table_info(b_tableinfo, t_tableinfo)
if table_diffs:
diff_list.append('table')
- print ' tables: %s' % table_diffs
+ print(' tables: %s' % table_diffs)
if b_numglyphs != t_numglyphs:
diff_list.append('glyph count')
delta = int(t_numglyphs) - int(b_numglyphs)
@@ -141,7 +141,7 @@
msg = '%d fewer glyph%s' % (-delta, '' if delta == -1 else 's')
else:
msg = '%d more glyph%s' % (delta, '' if delta == 1 else 's')
- print ' glyphs: %s vs %s (%s)' % (b_numglyphs, t_numglyphs, msg)
+ print(' glyphs: %s vs %s (%s)' % (b_numglyphs, t_numglyphs, msg))
if b_numchars != t_numchars:
diff_list.append('char count')
delta = int(t_numchars) - int(b_numchars)
@@ -149,20 +149,20 @@
msg = '%d fewer char%s' % (-delta, '' if delta == -1 else 's')
else:
msg = '%d more char%s' % (delta, '' if delta == 1 else 's')
- print ' chars: %s vs %s (%s)' % (b_numchars, t_numchars, msg)
+ print(' chars: %s vs %s (%s)' % (b_numchars, t_numchars, msg))
if b_cmap != t_cmap:
removed_from_base = b_cmap - t_cmap
if removed_from_base:
- print ' cmap removed: ' + noto_lint.printable_unicode_range(
- removed_from_base)
+ print(' cmap removed: ' + noto_lint.printable_unicode_range(
+ removed_from_base))
added_in_target = t_cmap - b_cmap
if added_in_target:
- print ' cmap added: ' + noto_lint.printable_unicode_range(
- added_in_target)
+ print(' cmap added: ' + noto_lint.printable_unicode_range(
+ added_in_target))
if diff_list and not versions_differ:
- print ' %s differs but revision number is the same' % ', '.join(diff_list)
+ print(' %s differs but revision number is the same' % ', '.join(diff_list))
if not diff_list and other_difference:
- print ' other difference'
+ print(' other difference')
def print_changed(key_list, base_map, target_map, comparefn):
for k in key_list:
@@ -195,16 +195,16 @@
def header_line(msg):
if have_output_hack[0]:
- print
+ print()
else:
have_output_hack[0] = True
if msg:
- print msg
+ print(msg)
if show_paths:
header_line(None)
- print 'base root: ' + base_root
- print 'target root: ' + target_root
+ print('base root: ' + base_root)
+ print('target root: ' + target_root)
if show_added and added:
header_line('added')
print_keys(added)
@@ -239,11 +239,11 @@
args.target_root = tool_utils.resolve_path(args.target_root)
if not os.path.isdir(args.base_root):
- print 'base_root %s does not exist or is not a directory' % args.base_root
+ print('base_root %s does not exist or is not a directory' % args.base_root)
return
if not os.path.isdir(args.target_root):
- print 'target_root %s does not exist or is not a directory' % args.target_root
+ print('target_root %s does not exist or is not a directory' % args.target_root)
return
comparefn = tuple_compare if args.compare_size else tuple_compare_no_size
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: compare_summary.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/coverage.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/coverage.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/coverage.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/coverage.py 2019-07-09 17:01:54.449948532 +0200
@@ -77,7 +77,7 @@
name = unicode_data.name(char)
except ValueError:
name = '<Unassigned>'
- print 'U+%04X %s' % (char, name)
+ print('U+%04X %s' % (char, name))
def _write_char_text(chars, filepath, chars_per_line, sep):
@@ -91,8 +91,8 @@
if m:
filename = m.group(1)
filename += '_chars.txt'
- print 'writing file: %s' % filename
- print '%d characters (of %d)' % (len(text), len(chars))
+ print('writing file: %s' % filename)
+ print('%d characters (of %d)' % (len(text), len(chars)))
if chars_per_line > 0:
lines = []
for n in range(0, len(text), chars_per_line):
@@ -108,7 +108,7 @@
if args.limit_set:
char_set = char_set & args.limit_set
if not char_set:
- print 'limit excludes all chars in %s' % filepath
+ print('limit excludes all chars in %s' % filepath)
return
sorted_chars = sorted(char_set)
if args.info:
@@ -116,7 +116,7 @@
if args.text:
_write_char_text(sorted_chars, filepath, args.chars_per_line, args.sep)
if args.ranges:
- print 'ranges:\n ' + lint_config.write_int_ranges(sorted_chars, True)
+ print('ranges:\n ' + lint_config.write_int_ranges(sorted_chars, True))
def main():
@@ -151,13 +151,13 @@
if args.limit:
args.limit_set = lint_config.parse_int_ranges(args.limit)
- print 'limit to: ' + lint_config.write_int_ranges(args.limit_set)
+ print('limit to: ' + lint_config.write_int_ranges(args.limit_set))
else:
# make sure it exists so checks don't have to care
args.limit_set = None
for fontpath in args.files:
- print 'Font: ' + path.normpath(fontpath)
+ print('Font: ' + path.normpath(fontpath))
_process_font(fontpath, args)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: coverage.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/create_image.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/create_image.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/create_image.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/create_image.py 2019-07-09 17:01:27.806226568 +0200
@@ -220,11 +220,11 @@
ovr = extents[0][2] > width + params.horiz_margin
if ovl or ovr:
if ovl:
- print 'Error: image overflows left bounds'
+ print('Error: image overflows left bounds')
if ovr:
- print 'Error: image overflows right bounds'
- print 'extents: %s, width: %s, margin: %s' % (
- extents, params.width, params.horiz_margin)
+ print('Error: image overflows right bounds')
+ print('extents: %s, width: %s, margin: %s' % (
+ extents, params.width, params.horiz_margin))
top_usage = min(extents[0][1], extents[1][1], 0)
bottom_usage = max(extents[0][3], extents[1][3])
@@ -250,7 +250,7 @@
real_surface = cairo.SVGSurface(
output_path, params.width, calculated_height)
- print 'writing', output_path
+ print('writing', output_path)
draw_on_surface(real_surface, text, params)
real_surface.flush()
real_surface.finish()
@@ -268,7 +268,7 @@
real_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
params.width, calculated_height)
draw_on_surface(real_surface, text, params)
- print 'writing', output_path
+ print('writing', output_path)
real_surface.write_to_png(output_path)
@@ -281,7 +281,7 @@
elif ext == '.svg':
create_svg(text, output_path, **kwargs)
else:
- print 'extension % not supported' % ext
+ print('extension %s not supported' % ext)
def test():
@@ -421,7 +421,7 @@
text, file_name, family=font, weight=weight_name, style=style_name,
stretch=stretch_name, language=lang, font_size=font_size,
maxheight=maxheight, horiz_margin=horiz_margin)
- print 'generated ' + file_name
+ print('generated ' + file_name)
def main():
@@ -467,7 +467,7 @@
test()
return
if args.codes and args.text:
- print 'choose either codes or text'
+ print('choose either codes or text')
return
if args.codes:
render_codes(
@@ -481,12 +481,12 @@
args.text = f.read()
else:
args.text = args.text.decode('unicode-escape')
- print 'text length %d' % len(args.text)
+ print('text length %d' % len(args.text))
render_text(
args.out, args.text, args.font, args.bold, args.italic, args.size,
args.lang, args.type, args.maxheight, args.horiz_margin)
else:
- print 'nothing to do'
+ print('nothing to do')
if __name__ == '__main__':
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: create_image.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/dump_otl.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/dump_otl.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/dump_otl.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/dump_otl.py 2019-07-09 17:00:06.734072577 +0200
@@ -35,7 +35,7 @@
def print_indented(output_string, indents=1):
"""Prints a string indented with a specified number of spaces."""
- print ' ' * indents + output_string
+ print(' ' * indents + output_string)
def printable_glyph_class(glyph_list, quote=False):
"""Returns a printable form for a class of glyphs."""
@@ -66,11 +66,11 @@
def dump_lang_sys(script, lang, lang_sys):
"""Dumps a language system."""
- print '%s %s:' % (script, lang),
+ print('%s %s:' % (script, lang), end=' ')
assert lang_sys.LookupOrder is None
if lang_sys.ReqFeatureIndex != 65535:
- print '<%s>' % lang_sys.ReqFeatureIndex,
- print lang_sys.FeatureIndex
+ print('<%s>' % lang_sys.ReqFeatureIndex, end=' ')
+ print(lang_sys.FeatureIndex)
def extract_glyphs_from_coverage(coverage):
@@ -249,7 +249,7 @@
printable_device(anchor.XDeviceTable),
printable_device(anchor.YDeviceTable))
else:
- print vars(anchor)
+ print(vars(anchor))
assert False, "don't know about anchor format"
@@ -435,7 +435,7 @@
record = feature_record[index]
tag = record.FeatureTag
feature = record.Feature
- print index, tag, feature.LookupListIndex
+ print(index, tag, feature.LookupListIndex)
if feature.FeatureParams is not None:
print_indented('# name <%s>;' % feature.FeatureParams.UINameID)
@@ -444,8 +444,8 @@
"""Prints out a lookup list."""
for index in range(len(lookup_list)):
lookup = lookup_list[index]
- print 'lookup %d { # type=%d flag=0x%X' % (
- index, lookup.LookupType, lookup.LookupFlag)
+ print('lookup %d { # type=%d flag=0x%X' % (
+ index, lookup.LookupType, lookup.LookupFlag))
for subtable in lookup.SubTable:
if table_name == 'GSUB':
@@ -453,26 +453,26 @@
elif table_name == 'GPOS':
dump_gpos_subtable(lookup.LookupType, subtable)
- print '}'
+ print('}')
def dump_otl_table(font, table_name):
"""Prints out an OpenType Layout table."""
if table_name not in font:
- print 'no %s table' % table_name
- print
+ print('no %s table' % table_name)
+ print()
return
else:
- print '%s' % table_name
- print '----'
+ print('%s' % table_name)
+ print('----')
table = font[table_name].table
dump_script_record(table.ScriptList.ScriptRecord)
- print
+ print()
dump_feature_record(table.FeatureList.FeatureRecord)
- print
+ print()
dump_lookup_list(table.LookupList.Lookup, table_name)
- print
+ print()
@@ -480,7 +480,7 @@
"""Dump the OpenType Layout tables for all input arguments."""
for font_file_name in sys.argv[1:]:
font = ttLib.TTFont(font_file_name)
- print '%s: %s' % (font_file_name, internal_font_name(font))
+ print('%s: %s' % (font_file_name, internal_font_name(font)))
dump_otl_table(font, 'GPOS')
dump_otl_table(font, 'GSUB')
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: dump_otl.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/extract_ohchr_attributions.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/extract_ohchr_attributions.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/extract_ohchr_attributions.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/extract_ohchr_attributions.py 2019-07-09 16:56:32.504304964 +0200
@@ -100,7 +100,7 @@
def outdent(self):
if not self.margin:
- print '*** cannot outdent ***'
+ print('*** cannot outdent ***')
else:
self.margin = self.margin[:-2]
@@ -113,11 +113,11 @@
def handle_starttag(self, tag, attrs):
if tag not in ['link', 'meta', 'area', 'img', 'br']:
if self.trace:
- print self.margin + tag + '>'
+ print(self.margin + tag + '>')
self.tag_stack.append((tag, self.getpos()))
self.indent()
elif self.trace:
- print self.margin + tag
+ print(self.margin + tag)
if self.state == 'before_table' and tag == 'table':
table_id = self.get_attr(attrs, 'id')
@@ -149,11 +149,11 @@
self.outdent()
if tag != prev_tag:
if self.trace:
- print 'no close tag for %s at %s' % (prev_tag, prev_pos)
+ print('no close tag for %s at %s' % (prev_tag, prev_pos))
else:
break
if self.trace:
- print self.margin + '<'
+ print(self.margin + '<')
if self.state == 'in_table':
if tag == 'table':
self.state = 'after_table'
@@ -200,9 +200,9 @@
lines.append('\t'.join([ohchr_code, s, lang, attrib]))
data = '\n'.join(lines) + '\n'
- print 'outfile: "%s"' % outfile
+ print('outfile: "%s"' % outfile)
if not outfile or outfile == '-':
- print data
+ print(data)
else:
with open(outfile, 'w') as f:
f.write(data)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: extract_ohchr_attributions.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/font_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/font_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/font_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/font_data.py 2019-07-09 16:54:55.264312043 +0200
@@ -130,7 +130,7 @@
# for this without explicit per-script data, we're really just
# trying to catch obvious errors.
- expected_bitmap = 0L
+ expected_bitmap = 0
for count, info in ur_info:
bit = info[2]
# any non-bmp character causes bit 57 to be set
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: font_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_coverage_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_coverage_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_coverage_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_coverage_data.py 2019-07-09 16:53:45.091038802 +0200
@@ -170,7 +170,7 @@
if out_file:
tree.write(out_file, encoding='utf-8', xml_declaration=True)
else:
- print ET.tostring(tree.getroot(), encoding='utf-8')
+ print(ET.tostring(tree.getroot(), encoding='utf-8'))
def _read_meta(meta_elem):
@@ -237,7 +237,7 @@
cps = get_cps_from_cmap_data_file(cmap_path)
paths = None
else:
- print 'Please specify font files, directories, or a cmap data file.'
+ print('Please specify font files, directories, or a cmap data file.')
return
coverage = create(args.name, cps, paths=paths, cmap_data=cmap_path)
write(coverage, args.output_file)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_coverage_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_dingbats_html.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_dingbats_html.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_dingbats_html.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_dingbats_html.py 2019-07-09 16:52:32.368791963 +0200
@@ -382,7 +382,7 @@
dump_metrics = False
if dump_metrics:
- print '$ %s' % self.name
+ print('$ %s' % self.name)
def context_string(codelist, cp):
cps = unichr(codelist.mapped_code(cp))
@@ -414,7 +414,7 @@
metrics_font = _get_font(fontname)
else:
metrics_font = None
- print >> sys.stderr, 'no metrics font'
+ sys.stderr.write('no metrics font\n')
lines = ['<h3 id="target_%d">%s</h3>' % (tindex, self.name)]
char_line = _character_string_html(self.codelist, self.used_fonts[-1])
@@ -457,7 +457,7 @@
if cp_metrics:
lsb, rsb, wid, adv, cy = cp_metrics
if dump_metrics:
- print '%04x # %4d, %4d, %4d, %s' % (cp, lsb, adv, cy, name)
+ print('%04x # %4d, %4d, %4d, %s' % (cp, lsb, adv, cy, name))
if cp in metrics:
nlsb, nadv, ncy = metrics[cp]
@@ -878,20 +878,20 @@
def generate_text(outfile, title, fonts, targets, flag_sets, metrics, data_dir):
- print >> outfile, title
- print >> outfile
- print >> outfile, 'Fonts:'
+ outfile.write(title + '\n')
+ outfile.write('\n')
+ outfile.write('Fonts:\n')
max_keylen = max(len(key) for key, _ in fonts)
fmt = ' %%%ds: %%s (%%s)' % max_keylen
for key, keyinfos in fonts:
for font, name, _ in keyinfos:
rel_font = path.relpath(font, data_dir) if font else '(no font)'
- print >> outfile, fmt % (key, name, rel_font)
- print >> outfile
+ outfile.write(fmt % (key, name, rel_font) + '\n')
+ outfile.write('\n')
for target in targets:
- print >> outfile
- print >> outfile, target.generate_text(flag_sets, metrics)
+ outfile.write('\n')
+ outfile.write(target.generate_text(flag_sets, metrics) + '\n')
def _generate_fontkey(fonts, targets, data_dir):
@@ -1125,7 +1125,7 @@
lines.append(string + '<br/>')
lines.append('</div>')
- print >> outfile, '\n'.join(lines)
+ outfile.write('\n'.join(lines) + '\n')
"""
def generate_html(
@@ -1138,19 +1138,19 @@
styles = _generate_styles(fonts, relpath)
mstyles = _METRICS_STYLES if metrics != None else ''
contextfont = _CONTEXT_FONT if context else 'sansserif'
- print >> outfile, template.substitute(
- title=title, styles=styles, mstyles=mstyles, contextfont=contextfont)
+ outfile.write(template.substitute(
+ title=title, styles=styles, mstyles=mstyles, contextfont=contextfont) + '\n')
- print >> outfile, _generate_fontkey(fonts, targets, data_dir)
+ outfile.write(_generate_fontkey(fonts, targets, data_dir) + '\n')
# hardcode font key for now
# _generate_html_lines(outfile, 'sym4')
for index, target in enumerate(targets):
- print >> outfile, target.generate_html(
- index, context, metrics, flag_sets, cp_to_targets)
+ outfile.write(target.generate_html(
+ index, context, metrics, flag_sets, cp_to_targets) + '\n')
- print >> outfile, _HTML_FOOTER
+ outfile.write(_HTML_FOOTER + '\n')
def _build_cp_to_targets(targets):
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_dingbats_html.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_lang_font_table.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_lang_font_table.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_lang_font_table.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_lang_font_table.py 2019-07-09 16:49:24.267740057 +0200
@@ -55,14 +55,14 @@
families = noto_fonts.get_families(fonts).values()
def write_csv_header(outfile):
- print >> outfile, 'Code,Script,Style,UI,Font Name'
+ outfile.write('Code,Script,Style,UI,Font Name\n')
def write_csv(outfile, lang, script, style, ui, members):
if members:
- print >> outfile, ','.join(
+ outfile.write(','.join(
[lang, script, style, ui,
- noto_fonts.get_font_family_name(members[0].filepath)])
+ noto_fonts.get_font_family_name(members[0].filepath)]) + '\n')
with open('lang_to_font_table.csv', 'w') as outfile:
@@ -87,4 +87,4 @@
ui_members)
if not found_font:
- print '## no font found for lang %s' % lang
+ print('## no font found for lang %s' % lang)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_lang_font_table.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_sample_from_exemplar.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_sample_from_exemplar.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_sample_from_exemplar.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_sample_from_exemplar.py 2019-07-09 16:46:07.585777023 +0200
@@ -36,10 +36,10 @@
try:
from icu import Locale, Collator
- print 'will use icu locale-specific order'
+ print('will use icu locale-specific order')
_HAVE_ICU = True
except ImportError as e:
- print 'will use default locale sort order'
+ print('will use default locale sort order')
_HAVE_ICU = False
NOTO_TOOLS = path.abspath(path.join(path.dirname(__file__), os.pardir))
@@ -64,33 +64,33 @@
exemplar_list = cldr_data.get_exemplar_from_file(path.join(data_dir, filename))
if not exemplar_list:
if _VERBOSE:
- print ' no exemplar list for %s' % path.join(data_dir, filename)
+ print(' no exemplar list for %s' % path.join(data_dir, filename))
continue
lsrv = cldr_data.loc_tag_to_lsrv(filename[:-4])
if not lsrv:
if _VERBOSE:
- print ' no lsrv for %s' % path.join(data_dir, filename)
+ print(' no lsrv for %s' % path.join(data_dir, filename))
continue
src = path.join(directory, filename)
script = lsrv[1]
if not script:
if _VERBOSE:
- print ' no script for %s' % path.join(data_dir, filename)
+ print(' no script for %s' % path.join(data_dir, filename))
continue
loc_tag = cldr_data.lsrv_to_loc_tag(lsrv)
loc_to_exemplar_info = script_map[script]
if loc_tag in loc_to_exemplar_info:
if _VERBOSE:
- print 'skipping %s, already have exemplars for %s from %s' % (
- src, loc_tag, loc_to_exemplar_info[loc_tag][1])
+ print('skipping %s, already have exemplars for %s from %s' % (
+ src, loc_tag, loc_to_exemplar_info[loc_tag][1]))
continue
# fix exemplars that look incorrect
if script == 'Arab' and 'd' in exemplar_list:
if _VERBOSE:
- print 'found \'d\' in %s for %s' % (src, lsrv)
+ print('found \'d\' in %s for %s' % (src, lsrv))
no_latin = True
else:
no_latin = False
@@ -118,8 +118,8 @@
exemplar_chars.add(cp)
fixed_exemplar_list.append(cp)
if len(dup_chars) > 0 and _VERBOSE:
- print 'duplicate exemplars in %s: %s' % (
- src, ', '.join([u'\u200e%s\u200e (%x)' % (cp, ord(cp)) for cp in dup_chars]))
+ print('duplicate exemplars in %s: %s' % (
+ src, ', '.join([u'\u200e%s\u200e (%x)' % (cp, ord(cp)) for cp in dup_chars])))
loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(fixed_exemplar_list))
# supplement with extra locale data
@@ -131,8 +131,8 @@
src = '[extra locale data]/%s' % loc_tag
if loc_tag in loc_to_exemplar_info:
if _VERBOSE:
- print 'skipping %s, already have exemplars for %s from %s' % (
- src, loc_tag, loc_to_exemplar_info[loc_tag][1])
+ print('skipping %s, already have exemplars for %s from %s' % (
+ src, loc_tag, loc_to_exemplar_info[loc_tag][1]))
continue
# restrict to letters, except for zsym
@@ -143,7 +143,7 @@
if 'Zsym' not in loc_tag:
filtered_exemplar_list = filter(accept_cp, exemplar_list)
if len(filtered_exemplar_list) != len(exemplar_list) and _VERBOSE:
- print 'filtered some characters from %s' % src
+ print('filtered some characters from %s' % src)
else:
filtered_exemplar_list = exemplar_list
loc_to_exemplar_info[loc_tag] = (lsrv, src, tuple(filtered_exemplar_list))
@@ -178,21 +178,21 @@
script_tag = '-' + script
if unique_chars:
- print '%s has %d unique chars: %s%s' % (
+ print('%s has %d unique chars: %s%s' % (
loc_tag, len(unique_chars), ' '.join(unique_chars[:100]),
- '...' if len(unique_chars) > 100 else '')
+ '...' if len(unique_chars) > 100 else ''))
if dual_chars:
- print '%s shares %d chars (%s%s) with 1 other lang: %s' % (
+ print('%s shares %d chars (%s%s) with 1 other lang: %s' % (
loc_tag, len(dual_chars), ' '.join(dual_chars[:20]),
'...' if len(dual_chars) > 20 else '',
- ', '.join(sorted([loc.replace(script_tag, '') for loc in dual_shared_with])))
+ ', '.join(sorted([loc.replace(script_tag, '') for loc in dual_shared_with]))))
if triple_chars:
- print '%s shares %d chars (%s%s) with 2 other langs: %s' % (
+ print('%s shares %d chars (%s%s) with 2 other langs: %s' % (
loc_tag, len(triple_chars), ' '.join(triple_chars[:20]),
'...' if len(triple_chars) > 20 else '',
- ', '.join(sorted([loc.replace(script_tag, '') for loc in triple_shared_with])))
+ ', '.join(sorted([loc.replace(script_tag, '') for loc in triple_shared_with]))))
if not (unique_chars or dual_chars or triple_chars):
- print '%s shares all chars with 3+ other langs' % loc_tag
+ print('%s shares all chars with 3+ other langs' % loc_tag)
def get_char_to_lang_map(loc_map):
@@ -202,8 +202,8 @@
exemplars = info[2]
for cp in exemplars:
if loc_tag in char_to_lang_map[cp]:
- print 'loc %s (from %s) already in char_to_lang_map for %s (%x)' % (
- loc_tag, info[1], cp, ord(cp))
+ print('loc %s (from %s) already in char_to_lang_map for %s (%x)' % (
+ loc_tag, info[1], cp, ord(cp)))
else:
char_to_lang_map[cp].append(loc_tag)
return char_to_lang_map
@@ -221,11 +221,11 @@
if num_shared_langs >= len(hist):
for shared_lang in char_to_lang_map[cp]:
if shared_lang not in loc_map:
- print 'loc map does not have \'%s\'!' % shared_lang
+ print('loc map does not have \'%s\'!' % shared_lang)
freq_list.append((num_shared_langs, cp))
if num_shared_langs >= len(hist):
- print 'num shared langs is %d but size of hist is %d' % (num_shared_langs, len(hist))
+ print('num shared langs is %d but size of hist is %d' % (num_shared_langs, len(hist)))
hist[num_shared_langs] += 1
freq_list.sort()
return [cp for nl, cp in freq_list], hist
@@ -241,14 +241,14 @@
without_script_str = ', '.join(sorted(without_script))
if count > limit:
without_script_str += '...'
- print u'char %s\u200e (%x): %d %s' % (cp, ord(cp), count, without_script_str)
- print 'total chars listed: %d' % len(char_to_lang_map)
+ print(u'char %s\u200e (%x): %d %s' % (cp, ord(cp), count, without_script_str))
+ print('total chars listed: %d' % len(char_to_lang_map))
def show_shared_langs_hist(hist):
# histogram - number of chars per number of shared languages
for i in range(1, len(hist)):
- print '[%3d] %3d %s' % (i, hist[i], 'x' * hist[i])
+ print('[%3d] %3d %s' % (i, hist[i], 'x' * hist[i]))
def get_upper_case_list(char_list):
@@ -272,11 +272,11 @@
if not subset:
break
tier_chars = sorted(subset)
- print 'tier %d: %s' % (tier, ' '.join(tier_chars))
+ print('tier %d: %s' % (tier, ' '.join(tier_chars)))
upper_case_chars = get_upper_case_list(tier_chars)
if upper_case_chars:
- print ' upper: ' + ' '.join(upper_case_chars)
+ print(' upper: ' + ' '.join(upper_case_chars))
def get_rare_char_info(char_to_lang_map, shared_lang_threshold):
@@ -304,7 +304,7 @@
script = lsrv[1]
if script not in _lang_for_script_map:
lang = lsrv[0]
- # print '%s lang => %s' % (script, lang)
+ # print('%s lang => %s' % (script, lang))
_lang_for_script_map[script] = lang
@@ -359,15 +359,15 @@
def show_selected_rare_chars(selected):
- print 'langs with rare chars by lang pop:'
+ print('langs with rare chars by lang pop:')
for lang_tag, chars in selected:
- print '%10s: %s' % (lang_tag, ', '.join(sorted(chars)))
+ print('%10s: %s' % (lang_tag, ', '.join(sorted(chars))))
def sort_for_script(cp_list, script):
lang = lang_for_script(script)
if not lang:
- print 'cannot sort for script, no lang for %s' % script
+ print('cannot sort for script, no lang for %s' % script)
return cp_list
if _HAVE_ICU:
from icu import Locale, Collator
@@ -388,7 +388,7 @@
if cased_sample:
cased_sample = ' '.join(cased_sample)
if _VERBOSE:
- print 'add case for %s' % script
+ print('add case for %s' % script)
return sample + '\n' + cased_sample
return sample
@@ -486,31 +486,31 @@
def generate_samples(dstdir, imgdir, summary):
if imgdir:
imgdir = tool_utils.ensure_dir_exists(imgdir)
- print 'writing images to %s' % imgdir
+ print('writing images to %s' % imgdir)
if dstdir:
dstdir = tool_utils.ensure_dir_exists(dstdir)
- print 'writing files to %s' % dstdir
+ print('writing files to %s' % dstdir)
verbose = summary
script_map = get_script_to_exemplar_data_map()
for script in sorted(script_map):
sample, info = generate_sample_for_script(script, script_map[script])
if summary:
- print
- print info
- print sample
+ print()
+ print(info)
+ print(sample)
if imgdir:
path = os.path.join(imgdir, 'und-%s_chars.png' % script)
- print 'writing image %s.png' % script
+ print('writing image %s.png' % script)
rtl = script in ['Adlm', 'Arab', 'Hebr', 'Nkoo', 'Syrc', 'Tfng', 'Thaa']
create_image.create_png(
sample, path, font_size=34, line_spacing=40, width=800, rtl=rtl)
if dstdir:
filename = 'und-%s_chars.txt' % script
- print 'writing data %s' % filename
+ print('writing data %s' % filename)
filepath = os.path.join(dstdir, filename)
with codecs.open(filepath, 'w', 'utf-8') as f:
f.write(sample + '\n')
@@ -531,7 +531,7 @@
args = parser.parse_args()
if not args.save and not args.imgdir and not args.summary:
- print 'nothing to do.'
+ print('nothing to do.')
return
if args.verbose:
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_sample_from_exemplar.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_sample_text.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_sample_text.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_sample_text.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_sample_text.py 2019-07-09 16:43:31.316395449 +0200
@@ -36,7 +36,7 @@
else:
chars.append(char_rep_to_code(arg))
chars = u' '.join([unichr(code) for code in chars])
- print chars.encode('UTF-8')
+ print(chars.encode('UTF-8'))
if __name__ == '__main__':
main(sys.argv)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_sample_text.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_sample_text_html.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_sample_text_html.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_sample_text_html.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_sample_text_html.py 2019-07-09 16:41:12.733830703 +0200
@@ -44,13 +44,13 @@
def generate_table(filename):
with codecs.open(filename, 'w', 'utf-8') as f:
script_to_samples = _get_script_to_samples()
- print >> f, _HTML_HEADER
- print >> f, '<table>'
- print >> f, '<tr><th>Script<br/>BCP<th>name<th>type<th>text'
+ f.write(_HTML_HEADER + '\n')
+ f.write('<table>\n')
+ f.write('<tr><th>Script<br/>BCP<th>name<th>type<th>text\n')
for script, samples in sorted(script_to_samples.iteritems()):
script_en = cldr_data.get_english_script_name(script)
- print >> f, '<tr><th colspan=4>%s' % script_en
+ f.write('<tr><th colspan=4>%s\n' % script_en)
for bcp, sample_type, sample_text in samples:
try:
lsrv = cldr_data.loc_tag_to_lsrv(bcp)
@@ -62,7 +62,7 @@
if bcp_en == 'Unknown Language' and sample_type == 'chars':
bcp_en = '(characters)'
except:
- print 'could not get english name for %s' % bcp
+ print('could not get english name for %s' % bcp)
bcp_en = bcp
cols = ['<tr>']
@@ -70,10 +70,10 @@
cols.append(bcp_en)
cols.append(sample_type)
cols.append(sample_text)
- print >> f, '<td>'.join(cols)
- print >> f, '<tr><td colspan=4>&nbsp;'
- print >> f, '</table>'
- print >> f, _HTML_FOOTER
+ f.write('<td>'.join(cols) + '\n')
+ f.write('<tr><td colspan=4>&nbsp;\n')
+ f.write('</table>\n')
+ f.write(_HTML_FOOTER + '\n')
def _get_script_to_samples():
@@ -83,13 +83,13 @@
for f in sorted(os.listdir(sample_dir)):
base, ext = path.splitext(f)
if ext != '.txt' or '_' not in base:
- print 'skipping', f
+ print('skipping', f)
continue
bcp, sample_type = base.split('_')
try:
lang, script, region, variant = cldr_data.loc_tag_to_lsrv(bcp)
except:
- print 'bcp %s did not parse as lsrv' % bcp
+ print('bcp %s did not parse as lsrv' % bcp)
continue
if script == 'Latn':
continue
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_sample_text_html.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_samples.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_samples.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_samples.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_samples.py 2019-07-09 16:28:39.952626989 +0200
@@ -158,8 +158,8 @@
ok_patterns = []
for pattern in select_patterns:
if pattern not in self.patterns:
- print 'No pattern named \'%s\' in %s' % (
- pattern, ', '.join(self.pattern_order))
+ print('No pattern named \'%s\' in %s' % (
+ pattern, ', '.join(self.pattern_order)))
continue
ok_patterns.append(pattern)
select_patterns = ok_patterns
@@ -176,7 +176,7 @@
with codecs.open(out_file, 'w', 'utf-8') as f:
f.write(output_text)
else:
- print output_text
+ print(output_text)
def _generate_output(self, output_lines, pattern, group, sep, label, sort):
@@ -365,12 +365,12 @@
for arg in value.split(','):
for expanded_arg in _expand_ranges(arg.strip()):
if expanded_arg in args:
- print 'The sequence "%s" is already in this group, ignoring it' % (
- 'U+%04X' % cp for cp in expanded_arg)
+ print('The sequence "%s" is already in this group, ignoring it' % (
+ 'U+%04X' % cp for cp in expanded_arg))
continue
args.append(expanded_arg)
except ValueError as e:
- print str(e)
+ print(str(e))
return None
if not args[-1]:
@@ -387,10 +387,10 @@
elif text[i] == ')':
count -= 1
if count < 0:
- print 'Unmatched close paren.'
+ print('Unmatched close paren.')
return None
if count > 0:
- print 'Unmatched open paren.'
+ print('Unmatched open paren.')
return None
return text
@@ -420,8 +420,8 @@
if name:
# angle brackets
if name not in groups:
- print 'Could not find "%s" in groups (%s)' % (
- name, ', '.join(sorted(groups)))
+ print('Could not find "%s" in groups (%s)' % (
+ name, ', '.join(sorted(groups))))
return None
pat_list.append(groups[name])
value = value[m.end():].strip()
@@ -454,7 +454,7 @@
continue
m = _LINE_RE.match(line)
if not m:
- print 'Could not parse "%s"' % original_lines[n]
+ print('Could not parse "%s"' % original_lines[n])
return None
name = m.group(1)
is_group = m.group(2) == '='
@@ -466,16 +466,16 @@
return None
value = _parse_pattern(value, groups)
if not value:
- print 'Could not parse values in "%s"' % original_lines[n]
+ print('Could not parse values in "%s"' % original_lines[n])
return None
if is_group:
if name in groups:
- print 'The group "%s" has already been defined' % name
+ print('The group "%s" has already been defined' % name)
return None
groups[name] = value
else:
if name in patterns:
- print 'The pattern "%s" has already been defined' % name
+ print('The pattern "%s" has already been defined' % name)
return None
pattern_order.append(name)
patterns[name] = value
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_samples.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_waterfall_html.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_waterfall_html.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_waterfall_html.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_waterfall_html.py 2019-07-09 16:26:52.031744684 +0200
@@ -128,9 +128,9 @@
if out_file:
with codecs.open(out_file, 'w', 'utf-8') as f:
f.write(html_text)
- print 'wrote %s' % out_file
+ print('wrote %s' % out_file)
else:
- print html_text
+ print(html_text)
def _get_font_list(root, name_str):
@@ -169,7 +169,7 @@
samples.append(f)
break
- print sorted(samples)
+ print(sorted(samples))
# limit to scripts supported by all fonts
selected = []
for sample in samples:
@@ -201,7 +201,7 @@
raise Exception (
'found %d sample files (%s) but need exactly 1' % (
len(samples), ', '.join(sorted(samples))))
- print 'selected sample %s' % samples[0]
+ print('selected sample %s' % samples[0])
with codecs.open(path.join(sample_dir, samples[0]), 'r', 'utf-8') as f:
text = f.read()
@@ -217,8 +217,8 @@
if not font_names:
raise Exception('no fonts matching "%s" in %s' % (font_str, root))
- print 'found %d fonts under %s:\n %s' % (
- len(font_names), root, '\n '.join(sorted(font_names)))
+ print('found %d fonts under %s:\n %s' % (
+ len(font_names), root, '\n '.join(sorted(font_names))))
if not font_sizes:
font_sizes = [10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 28, 32]
@@ -230,7 +230,7 @@
out_file = path.abspath(out_file)
file_dir = tool_utils.ensure_dir_exists(path.dirname(out_file))
if path.exists(out_file):
- print 'file %s already exists, overwriting' % out_file
+ print('file %s already exists, overwriting' % out_file)
font_dir = tool_utils.ensure_dir_exists(path.join(file_dir, 'fonts'))
for font_name in font_names:
src = path.join(root, font_name)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_waterfall_html.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_website_2_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_website_2_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_website_2_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_website_2_data.py 2019-07-09 16:01:51.861488185 +0200
@@ -88,8 +88,8 @@
# via the website.
#
# We'll keep the representative font and not try to change it.
- print 'Family %s has %d hinted members but %d unhinted memberts' % (
- family_id, len(hinted_members), len(unhinted_members))
+ print('Family %s has %d hinted members but %d unhinted members' % (
+ family_id, len(hinted_members), len(unhinted_members)))
# The namedtuples are immutable, so we need to break them apart and reform
# them
@@ -130,9 +130,9 @@
for lang_scr in ['bal-Arab', 'hnd-Arab', 'hno-Arab', 'ks-Arab', 'lah-Arab',
'pa-Arab', 'skr-Arab', 'ur-Arab']:
if not lang_scr in lang_scrs:
- print 'Map nastaliq: %s not found' % lang_scr
+ print('Map nastaliq: %s not found' % lang_scr)
else:
- print 'added %s to nastaliq' % lang_scr
+ print('added %s to nastaliq' % lang_scr)
nastaliq_lang_scrs.add(lang_scr)
# Kufi patches:
@@ -143,10 +143,10 @@
kufi_lang_scrs = family_id_to_lang_scrs['kufi-arab']
for lang_scr in ['ur-Arab', 'khw-Arab', 'ks-Arab']:
if not lang_scr in lang_scrs:
- print 'Patch kufi: %s not found' % lang_scr
+ print('Patch kufi: %s not found' % lang_scr)
else:
kufi_lang_scrs.remove(lang_scr)
- print 'removed %s from kufi' % lang_scr
+ print('removed %s from kufi' % lang_scr)
if not kufi_lang_scrs:
break
@@ -157,10 +157,10 @@
hebr_lang_scrs = family_id_to_lang_scrs['sans-hebr']
for lang_scr in ['lad-Hebr']:
if not lang_scr in lang_scrs:
- print 'Patch lad: %s not found' % lang_scr
+ print('Patch lad: %s not found' % lang_scr)
else:
hebr_lang_scrs.remove(lang_scr)
- print 'removed %s from sans-hebr' % lang_scr
+ print('removed %s from sans-hebr' % lang_scr)
if not hebr_lang_scrs:
break;
@@ -171,16 +171,16 @@
jpan_lang_scrs = family_id_to_lang_scrs['sans-jpan']
for lang_scr in ['ja-Kana', 'ja-Hira']:
if not lang_scr in lang_scrs:
- print 'Patch jpan: %s not found' % lang_scr
+ print('Patch jpan: %s not found' % lang_scr)
else:
jpan_lang_scrs.remove(lang_scr)
- print 'removed %s from sans-jpan' % lang_scr
+ print('removed %s from sans-jpan' % lang_scr)
if not jpan_lang_scrs:
break;
for f, ls in sorted(family_id_to_lang_scrs.iteritems()):
if not ls:
- print '!family %s has no lang' % f
+ print('!family %s has no lang' % f)
return family_id_to_lang_scrs
@@ -221,8 +221,8 @@
full_key = sample_key + '-' + family_id
if full_key in tested_keys:
if full_key in failed_keys:
- print 'family %s already rejected sample %s (lang %s)' % (
- family_id, sample_key, lang_scr)
+ print('family %s already rejected sample %s (lang %s)' % (
+ family_id, sample_key, lang_scr))
continue
else:
failed_cps = set()
@@ -237,15 +237,15 @@
failed_cps.add(ord(cp))
if failed_cps:
- print 'family %s rejects sample %s for lang %s:\n %s' % (
+ print('family %s rejects sample %s for lang %s:\n %s' % (
family_id, sample_key, lang_scr,
'\n '.join('%04x (%s)' % (
- cp, unichr(cp)) for cp in sorted(failed_cps)))
+ cp, unichr(cp)) for cp in sorted(failed_cps))))
failed_keys.add(full_key)
continue
- # print 'family %s accepts sample %s for lang %s' % (
- # family_id, sample_key, lang_scr)
+ # print('family %s accepts sample %s for lang %s' % (
+ # family_id, sample_key, lang_scr))
sample_key_for_lang = sample_key
if sample_key not in sample_key_to_info:
@@ -253,16 +253,16 @@
break
if not sample_key_for_lang:
- print '%s has no sample to display in %s' % (lang_scr, family_id)
+ print('%s has no sample to display in %s' % (lang_scr, family_id))
else:
lang_scr_to_sample_key[lang_scr] = sample_key_for_lang
if not lang_scr_to_sample_key:
- print '!%s can display no samples for any lang of %s' % (
- family_id, ', '.join(sorted(family_id_to_lang_scrs[family_id])))
+ print('!%s can display no samples for any lang of %s' % (
+ family_id, ', '.join(sorted(family_id_to_lang_scrs[family_id]))))
else:
- print '%s has samples for %s langs' % (
- family_id, len(lang_scr_to_sample_key))
+ print('%s has samples for %s langs' % (
+ family_id, len(lang_scr_to_sample_key)))
family_id_to_lang_scr_to_sample_key[family_id] = lang_scr_to_sample_key
return (family_id_to_lang_scr_to_sample_key, sample_key_to_info)
@@ -274,7 +274,7 @@
if region == 'ZZ':
continue
if len(region) > 2: # e.g. world
- print 'skipping region %s' % region
+ print('skipping region %s' % region)
continue
lang_scrs = cldr_data.region_to_lang_scripts(region)
for lang_scr in lang_scrs:
@@ -294,7 +294,7 @@
warnings.add(lang_scr)
for lang_scr in sorted(warnings):
- print 'no mapping from %s to any region' % lang_scr
+ print('no mapping from %s to any region' % lang_scr)
return family_id_to_regions
@@ -427,12 +427,12 @@
continue
tag, attrib = line.split(':')
ATTRIBUTION_DATA[tag.strip()] = attrib.strip()
- print 'read %d lines of attribution data' % len(ATTRIBUTION_DATA)
+ print('read %d lines of attribution data' % len(ATTRIBUTION_DATA))
try:
return ATTRIBUTION_DATA[lang_scr_typ + '.txt']
except KeyError:
if not lang_scr_typ.endswith('_chars'):
- print 'no attribution for %s' % lang_scr_typ
+ print('no attribution for %s' % lang_scr_typ)
return 'none'
@@ -488,7 +488,7 @@
add_exemplars(und_scr)
if not sample_infos:
- print '!No sample info for %s' % lang_scr
+ print('!No sample info for %s' % lang_scr)
return sample_infos
@@ -515,8 +515,8 @@
lang_scr = lang + '-' + primary_script
if lang_scr not in lang_scrs:
- print 'default lang_scr \'%s\' not listed for family %s %s' % (
- lang_scr, family_id, lang_scrs)
+ print('default lang_scr \'%s\' not listed for family %s %s' % (
+ lang_scr, family_id, lang_scrs))
family_id_to_default_lang_scr[family_id] = lang_scr
return family_id_to_default_lang_scr
@@ -628,8 +628,8 @@
for key in debug:
if not key in _DEBUG_KEYS:
- print 'Bad debug key(s) found. Keys are:\n %s' % (
- '\n '.join(sorted(_DEBUG_KEYS)))
+ print('Bad debug key(s) found. Keys are:\n %s' % (
+ '\n '.join(sorted(_DEBUG_KEYS))))
raise ValueError()
return frozenset(debug)
@@ -659,7 +659,7 @@
def clean_target_dir(self):
if path.exists(self.target):
- print 'Removing the old website directory from %s...' % self.target
+ print('Removing the old website directory from %s...' % self.target)
shutil.rmtree(self.target)
def write_json(self, obj, name):
@@ -701,7 +701,7 @@
for font in fonts:
pairs.append((font.filepath, path.basename(font.filepath)))
tool_utils.generate_zip_with_7za_from_filepairs(pairs, zippath)
- print 'Created zip %s' % zippath
+ print('Created zip %s' % zippath)
return os.stat(zippath).st_size
def get_readme_keys(self):
@@ -866,8 +866,8 @@
1 for f in (family.hinted_members or family.unhinted_members)
if not f.is_UI)
if num_fonts not in [1, 2, 3, 4, 9, 12, 36, 72]:
- print 'family %s (%s) has %d fonts' % (k, family.name, num_fonts)
- print '\n'.join(f.filepath for f in sorted(family.hinted_members or family.unhinted_members))
+ print('family %s (%s) has %d fonts' % (k, family.name, num_fonts))
+ print('\n'.join(f.filepath for f in sorted(family.hinted_members or family.unhinted_members)))
fail = True
family_obj['fonts'] = num_fonts
@@ -1082,9 +1082,9 @@
image_location = path.join(self.samples, image_file_name)
if path.isfile(image_location):
# Don't rebuild images when continuing.
- print "Continue: assuming image file '%s' is valid." % image_location
+ print("Continue: assuming image file '%s' is valid." % image_location)
continue
- print 'create %s' % image_file_name
+ print('create %s' % image_file_name)
create_image.create_img(
sample_text,
image_location,
@@ -1106,7 +1106,7 @@
sample_key_to_info):
for family_id in sorted(family_id_to_lang_scr_to_sample_key):
family = families[family_id]
- print 'Generating images for %s...' % family.name
+ print('Generating images for %s...' % family.name)
default_lang = family_id_to_default_lang_scr[family_id]
lang_scr_to_sample_key = family_id_to_lang_scr_to_sample_key[family_id]
@@ -1146,8 +1146,8 @@
(path.join(CJK_DIR, filename), filename)]
tool_utils.generate_zip_with_7za_from_filepairs(pairs, zip_path)
newsize = os.stat(zip_path).st_size
- print "Wrote " + zip_path
- print 'Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize)
+ print("Wrote " + zip_path)
+ print('Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize))
# NotoSans/SerifCJK.ttc.zip already has been zipped for size reasons
# because git doesn't like very large files. So it wasn't in the above
@@ -1163,7 +1163,7 @@
for filename in ['NotoSansCJK.ttc.zip', 'NotoSerifCJK.ttc.zip']:
src_zip = path.join(CJK_DIR, filename)
if not path.isfile(src_zip):
- print 'Warning: %s does not exist' % filename
+ print('Warning: %s does not exist' % filename)
continue
pairs = [(SIL_LICENSE_LOC, 'LICENSE_OFL.txt')]
if os.stat(src_zip).st_size < 100000000: # lower than 100MB
@@ -1205,8 +1205,8 @@
tool_utils.generate_zip_with_7za_from_filepairs(pairs, zip_path)
newsize = os.stat(zip_path).st_size
- print "Wrote " + zip_path
- print 'Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize)
+ print("Wrote " + zip_path)
+ print('Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize))
def generate(self):
if self.clean:
@@ -1226,25 +1226,25 @@
check_families(families)
if 'families' in self.debug:
- print '\n#debug families'
- print '%d found' % len(families)
+ print('\n#debug families')
+ print('%d found' % len(families))
for i, (family_id, family) in enumerate(sorted(families.iteritems())):
- print '%2d] %s (%s, %s)' % (
- i, family_id, family.name, noto_fonts.get_family_filename(family))
+ print('%2d] %s (%s, %s)' % (
+ i, family_id, family.name, noto_fonts.get_family_filename(family)))
if family.hinted_members:
- print ' hinted: %s' % ', '.join(sorted(
- [path.basename(m.filepath) for m in family.hinted_members]))
+ print(' hinted: %s' % ', '.join(sorted(
+ [path.basename(m.filepath) for m in family.hinted_members])))
if family.unhinted_members:
- print ' unhinted: %s' % ', '.join(sorted(
- [path.basename(m.filepath) for m in family.unhinted_members]))
+ print(' unhinted: %s' % ', '.join(sorted(
+ [path.basename(m.filepath) for m in family.unhinted_members])))
script_to_family_ids = get_script_to_family_ids(families)
if 'script_to_family_ids' in self.debug:
- print '\n#debug script to family ids'
- print '%d found' % len(script_to_family_ids)
+ print('\n#debug script to family ids')
+ print('%d found' % len(script_to_family_ids))
for i, (script, family_ids) in enumerate(
sorted(script_to_family_ids.iteritems())):
- print '%2d] %s: %s' % (i, script, ', '.join(sorted(family_ids)))
+ print('%2d] %s: %s' % (i, script, ', '.join(sorted(family_ids))))
all_lang_scrs = set(['und-' + script for script in script_to_family_ids])
all_lang_scrs.update(lang_data.lang_scripts())
@@ -1252,7 +1252,7 @@
for lang_scr in sorted(all_lang_scrs):
lang, script = lang_scr.split('-')
if not script in script_to_family_ids:
- print 'no family supports script in %s' % lang_scr
+ print('no family supports script in %s' % lang_scr)
continue
sample_infos = get_sample_infos(lang_scr)
@@ -1262,70 +1262,70 @@
lang_scr_to_sample_infos[lang_scr] = sample_infos
if 'lang_scr_to_sample_infos' in self.debug:
- print '\n#debug lang+script to sample infos'
- print '%d found' % len(lang_scr_to_sample_infos)
+ print('\n#debug lang+script to sample infos')
+ print('%d found' % len(lang_scr_to_sample_infos))
for lang_scr, info_list in sorted(lang_scr_to_sample_infos.iteritems()):
for info in info_list:
- print '%s: %s, %s, len %d' % (
- lang_scr, info[2], info[1], len(info[0]))
+ print('%s: %s, %s, len %d' % (
+ lang_scr, info[2], info[1], len(info[0])))
family_id_to_lang_scrs = get_family_id_to_lang_scrs(
lang_scr_to_sample_infos.keys(), script_to_family_ids)
if 'family_id_to_lang_scrs' in self.debug:
- print '\n#debug family id to list of lang+script'
- print '%d found' % len(family_id_to_lang_scrs)
+ print('\n#debug family id to list of lang+script')
+ print('%d found' % len(family_id_to_lang_scrs))
for i, (family_id, lang_scrs) in enumerate(
sorted(family_id_to_lang_scrs.iteritems())):
- print '%3d] %s: (%d) %s' % (
- i, family_id, len(lang_scrs), ' '.join(sorted(lang_scrs)))
+ print('%3d] %s: (%d) %s' % (
+ i, family_id, len(lang_scrs), ' '.join(sorted(lang_scrs))))
family_id_to_lang_scr_to_sample_key, sample_key_to_info = (
get_family_id_to_lang_scr_to_sample_key(
family_id_to_lang_scrs, families, lang_scr_to_sample_infos))
if 'family_id_to_lang_scr_to_sample_key' in self.debug:
- print '\n#debug family id to map from lang+script to sample key'
- print '%d found' % len(family_id_to_lang_scr_to_sample_key)
+ print('\n#debug family id to map from lang+script to sample key')
+ print('%d found' % len(family_id_to_lang_scr_to_sample_key))
for i, (family_id, lang_scr_to_sample_key) in enumerate(
sorted(family_id_to_lang_scr_to_sample_key.iteritems())):
- print '%2d] %s (%d):' % (i, family_id, len(lang_scr_to_sample_key))
+ print('%2d] %s (%d):' % (i, family_id, len(lang_scr_to_sample_key)))
for j, (lang_scr, sample_key) in enumerate(
sorted(lang_scr_to_sample_key.iteritems())):
- print ' [%2d] %s: %s' % (j, lang_scr, sample_key)
+ print(' [%2d] %s: %s' % (j, lang_scr, sample_key))
if 'sample_key_to_info' in self.debug:
- print '\n#debug sample key to sample info'
- print '%d found' % len(sample_key_to_info)
+ print('\n#debug sample key to sample info')
+ print('%d found' % len(sample_key_to_info))
for i, (sample_key, info) in enumerate(
sorted(sample_key_to_info.iteritems())):
- print '%2d] %s: %s, len %d' % (
- i, sample_key, info[1], len(info[0]))
+ print('%2d] %s: %s, len %d' % (
+ i, sample_key, info[1], len(info[0])))
family_id_to_regions = get_family_id_to_regions(
family_id_to_lang_scr_to_sample_key)
if 'family_id_to_regions' in self.debug:
- print '\n#debug family id to regions'
- print '%d found' % len(family_id_to_regions)
+ print('\n#debug family id to regions')
+ print('%d found' % len(family_id_to_regions))
for i, (family_id, regions) in enumerate(
sorted(family_id_to_regions.iteritems())):
- print '%2d] %s: (%d) %s' % (
- i, family_id, len(regions), ', '.join(sorted(regions)))
+ print('%2d] %s: (%d) %s' % (
+ i, family_id, len(regions), ', '.join(sorted(regions))))
region_to_family_ids = get_region_to_family_ids(family_id_to_regions)
if 'region_to_family_ids' in self.debug:
- print '\n#debug region to family ids'
- print '%d found' % len(region_to_family_ids)
+ print('\n#debug region to family ids')
+ print('%d found' % len(region_to_family_ids))
for i, (region, family_ids) in enumerate(
sorted(region_to_family_ids.iteritems())):
- print '%2d] %s: (%d) %s' % (
- i, region, len(family_ids), ', '.join(sorted(family_ids)))
+ print('%2d] %s: (%d) %s' % (
+ i, region, len(family_ids), ', '.join(sorted(family_ids))))
family_id_to_default_lang_scr = get_family_id_to_default_lang_scr(
family_id_to_lang_scrs, families)
if 'family_id_to_default_lang_scr' in self.debug:
- print '\n#debug family id to default lang scr'
- print '%d found' % len(family_id_to_default_lang_scr)
+ print('\n#debug family id to default lang scr')
+ print('%d found' % len(family_id_to_default_lang_scr))
for i, (family_id, lang_scr) in enumerate(
sorted(family_id_to_default_lang_scr.iteritems())):
- print '%2d] %s: %s' % (i, family_id, lang_scr)
+ print('%2d] %s: %s' % (i, family_id, lang_scr))
region_data = get_region_lat_lng_data(region_to_family_ids.keys())
@@ -1365,16 +1365,16 @@
(default_lang_scr, family_id))
if error_list:
- print 'Errors:\n' + '\n '.join(error_list)
+ print('Errors:\n' + '\n '.join(error_list))
if error_list or self.no_build:
- print 'skipping build output'
+ print('skipping build output')
return
# build outputs
# zips are required for data
if self.no_zips and self.no_data:
- print 'skipping zip output'
+ print('skipping zip output')
else:
self.build_readmes()
@@ -1387,12 +1387,12 @@
self.build_subset_zips()
if self.no_css:
- print 'skipping css output'
+ print('skipping css output')
else:
family_css_info = self.build_css(families)
if self.no_data:
- print 'skipping data output'
+ print('skipping data output')
else:
self.build_data_json(family_id_to_lang_scr_to_sample_key,
families, family_zip_info, universal_zip_info,
@@ -1406,7 +1406,7 @@
self.build_misc_json(sample_key_to_info, region_data)
if self.no_images:
- print 'skipping image output'
+ print('skipping image output')
else:
self.build_images(family_id_to_lang_scr_to_sample_key,
families, family_id_to_default_lang_scr,
@@ -1459,7 +1459,7 @@
repo_info[repo_name] = message
for rname, v in sorted(repo_info.iteritems()):
- print '--%s--\n%s' % (rname, v)
+ print('--%s--\n%s' % (rname, v))
if errors:
raise Exception('Some repos are not clean\n' + '\n'.join(errors))
return repo_info
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_website_2_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_website_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_website_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/generate_website_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/generate_website_data.py 2019-07-09 15:56:13.455054257 +0200
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
+# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All rights reserved.
#
@@ -378,7 +378,7 @@
if sample_text is not None:
return sample_text
- raise ValueError, 'language=%s script=%s' % (language, script)
+ raise ValueError('language=%s script=%s' % (language, script))
def xml_to_dict(element):
@@ -419,7 +419,7 @@
name = '%s (%s script)' % (
english_language_name[lang],
english_script_name[script])
- print "Constructing name '%s' for %s." % (name, lang_scr)
+ print("Constructing name '%s' for %s." % (name, lang_scr))
return name
used_in_regions = collections.defaultdict(set)
@@ -586,7 +586,7 @@
try:
script = find_likely_script(language)
except KeyError:
- print "no likely script for %s" % language
+ print("no likely script for %s" % language)
continue
lang_object['name'] = get_english_language_name(lang_scr)
@@ -716,11 +716,11 @@
def compress(filepath, compress_function):
- print 'Compressing %s.' % filepath
+ print('Compressing %s.' % filepath)
oldsize = os.stat(filepath).st_size
compress_function(filepath)
newsize = os.stat(filepath).st_size
- print 'Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize)
+ print('Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize))
zip_contents_cache = {}
@@ -890,7 +890,7 @@
'weight': css_weight(font.weight),
})
if len(font_list) not in [1, 2, 4, 7]:
- print key, font_list
+ print(key, font_list)
assert len(font_list) in [1, 2, 4, 7]
family_object['fonts'] = font_list
@@ -921,8 +921,8 @@
(SIL_LICENSE_LOC, 'LICENSE_CJK.txt')]
tool_utils.generate_zip_with_7za_from_filepairs(pairs, zip_path)
newsize = os.stat(zip_path).st_size
- print "Wrote " + zip_path
- print 'Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize)
+ print("Wrote " + zip_path)
+ print('Compressed from {0:,}B to {1:,}B.'.format(oldsize, newsize))
shutil.copy2(path.join(CJK_DIR, 'NotoSansCJK.ttc.zip'),
path.join(pkg_dir, 'NotoSansCJK.ttc.zip'))
@@ -932,7 +932,7 @@
for family_key in data_object['family']:
family_obj = data_object['family'][family_key]
font_family_name = family_obj['name']
- print 'Generating images for %s...' % font_family_name
+ print('Generating images for %s...' % font_family_name)
is_cjk_family = (
family_key.endswith('-hans') or
family_key.endswith('-hant') or
@@ -954,7 +954,7 @@
image_location = path.join(image_dir, image_file_name)
if path.isfile(image_location):
# Don't rebuild images when continuing.
- print "Continue: assuming image file '%s' is valid." % image_location
+ print("Continue: assuming image file '%s' is valid." % image_location)
continue
create_image.create_png(
sample_text,
@@ -992,7 +992,7 @@
if not args.continuing:
if path.exists(OUTPUT_DIR):
assert path.isdir(OUTPUT_DIR)
- print 'Removing the old website directory...'
+ print('Removing the old website directory...')
shutil.rmtree(OUTPUT_DIR)
os.mkdir(OUTPUT_DIR)
os.mkdir(path.join(OUTPUT_DIR, 'pkgs'))
@@ -1004,25 +1004,25 @@
os.mkdir(path.join(OUTPUT_DIR, 'images', 'samples'))
os.mkdir(path.join(OUTPUT_DIR, 'js'))
- print 'Finding all fonts...'
+ print('Finding all fonts...')
find_fonts()
- print 'Parsing CLDR data...'
+ print('Parsing CLDR data...')
parse_english_labels()
parse_supplemental_data()
for target_platform in ['windows', 'linux', 'other']:
- print 'Target platform %s:' % target_platform
+ print('Target platform %s:' % target_platform)
output_object = {}
- print 'Generating data objects and CSS...'
+ print('Generating data objects and CSS...')
output_object['region'] = create_regions_object()
output_object['lang'] = create_langs_object()
output_object['family'], all_font_files = create_families_object(
target_platform)
- print 'Creating comprehensive zip file...'
+ print('Creating comprehensive zip file...')
output_object['pkg'] = create_package_object(
all_font_files, target_platform)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: generate_website_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/grab_adobe_download.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/grab_adobe_download.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/grab_adobe_download.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/grab_adobe_download.py 2019-07-09 15:51:00.605350977 +0200
@@ -62,7 +62,7 @@
def unzip_to_directory_tree(drop_dir, filepath):
skip_re = re.compile('.*/OTF-Fallback/.*')
zf = zipfile.ZipFile(filepath, 'r')
- print 'extracting files from %s to %s' % (filepath, drop_dir)
+ print('extracting files from %s to %s' % (filepath, drop_dir))
count = 0
for name in zf.namelist():
# skip names representing portions of the path
@@ -75,14 +75,14 @@
try:
data = zf.read(name)
except KeyError:
- print 'did not find %s in zipfile' % name
+ print('did not find %s in zipfile' % name)
continue
dst_file = os.path.join(drop_dir, os.path.basename(name))
with open(dst_file, 'wb') as f:
f.write(data)
count += 1
- print 'extracted \'%s\'' % name
- print 'extracted %d files' % count
+ print('extracted \'%s\'' % name)
+ print('extracted %d files' % count)
def main():
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: grab_adobe_download.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/grab_download.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/grab_download.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/grab_download.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/grab_download.py 2019-07-09 15:49:26.580341788 +0200
@@ -47,13 +47,13 @@
for f in files:
if not os.path.exists(f):
- print 'file \'%s\' does not exist, skipping' % f
+ print('file \'%s\' does not exist, skipping' % f)
continue
filename = os.path.basename(f)
result = name_date_re.match(filename)
if not result:
- print 'could not parse %s, skipping' % f
+ print('could not parse %s, skipping' % f)
continue
name = result.group(1)
@@ -64,7 +64,7 @@
zip_filename = os.path.join(zip_dir, filename)
if os.path.exists(drop_dir):
if os.path.exists(zip_filename):
- print 'already have a %s drop and zip for %s' % (src_vendor, filename)
+ print('already have a %s drop and zip for %s' % (src_vendor, filename))
continue
else:
# clean up, assume needs rebuild
@@ -72,14 +72,14 @@
else:
if os.path.exists(zip_filename):
if os.path.realpath(f) != os.path.realpath(zip_filename):
- print 'already have a zip file named %s for %s' % (zip_filename, f)
+ print('already have a zip file named %s for %s' % (zip_filename, f))
continue
os.mkdir(drop_dir)
extract_fn(drop_dir, f)
if not os.path.exists(zip_filename):
- print 'writing %s to %s' % (f, zip_filename)
+ print('writing %s to %s' % (f, zip_filename))
shutil.copy2(f, zip_filename)
@@ -94,7 +94,7 @@
continue
filelist.append(path)
if not filelist:
- print "no files in %s matched '%s'" % (src, namere)
+ print("no files in %s matched '%s'" % (src, namere))
return filelist
@@ -120,13 +120,13 @@
The dest directory must exist and should have 'zips' and 'drops' subdirs."""
if not src_vendor:
- print 'must define src_vendor'
+ print('must define src_vendor')
return
if not name_date_re:
- print 'must define name_date_re'
+ print('must define name_date_re')
return
if not extract_fn:
- print 'must define extract_fn'
+ print('must define extract_fn')
return
default_srcdir = default_params.get('default_srcdir')
@@ -146,12 +146,12 @@
args = parser.parse_args()
if not os.path.exists(args.dstdir):
- print '%s does not exists or is not a directory' % args.dstdir
+ print('%s does not exist or is not a directory' % args.dstdir)
return
if not args.srcs:
if not os.path.isdir(args.srcdir):
- print '%s does not exist or is not a directory' % args.srcdir
+ print('%s does not exist or is not a directory' % args.srcdir)
return
filelist = matching_files_in_dir(args.srcdir, args.name)
else:
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: grab_download.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/grab_mt_download.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/grab_mt_download.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/grab_mt_download.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/grab_mt_download.py 2019-07-09 15:48:39.012843036 +0200
@@ -62,14 +62,14 @@
os.mkdir(dstdir)
with open(os.path.join(dstdir, filename), 'wb') as f:
f.write(data)
- print 'extracted \'%s\' into %s' % (filename, subdir)
+ print('extracted \'%s\' into %s' % (filename, subdir))
def unzip_to_directory_tree(drop_dir, filepath):
hint_rx = re.compile(r'_((?:un)?hinted)/(.+)')
plain_rx = re.compile(r'[^/]+')
zf = zipfile.ZipFile(filepath, 'r')
- print 'extracting files from %s to %s' % (filepath, drop_dir)
+ print('extracting files from %s to %s' % (filepath, drop_dir))
count = 0
mapped_names = []
unmapped = []
@@ -81,7 +81,7 @@
try:
data = zf.read(name)
except KeyError:
- print 'did not find %s in zipfile' % name
+ print('did not find %s in zipfile' % name)
continue
result = hint_rx.search(name)
@@ -95,7 +95,7 @@
result = plain_rx.match(name)
if not result:
- print "subdir structure without hint/unhint: '%s'" % name
+ print("subdir structure without hint/unhint: '%s'" % name)
continue
# we have to figure out where it goes.
@@ -131,7 +131,7 @@
write_data_to_file(data, drop_dir, subdir, name)
count += 1
- print 'extracted %d files' % count
+ print('extracted %d files' % count)
def main():
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: grab_mt_download.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/lang_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/lang_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/lang_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/lang_data.py 2019-07-09 15:47:30.001570365 +0200
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
+# -*- coding: utf-8 -*-
#
# Copyright 2015 Google Inc. All rights reserved.
#
@@ -40,7 +40,7 @@
# controls printing of debug/trace information
# normally disabled
def _log(msg):
- # print >> sys.stderr, '#lang_data: ' + msg
+ # sys.stderr.write('#lang_data: ' + msg + '\n')
pass
def is_excluded_script(script_code):
@@ -290,34 +290,34 @@
def main():
lang_data = _get_lang_data()
- print
- print '--------'
+ print()
+ print('--------')
langs_without_scripts = _langs_with_no_scripts(lang_data)
if langs_without_scripts:
- print 'langs without scripts: ' + ', '.join(sorted(langs_without_scripts))
+ print('langs without scripts: ' + ', '.join(sorted(langs_without_scripts)))
_remove_keys_from_dict(langs_without_scripts, lang_data)
- print
+ print()
- print 'lang data'
+ print('lang data')
for k in sorted(lang_data):
used, unused = lang_data[k]
used_msg = 'used: ' + ', '.join(sorted(used)) if used else None
unused_msg = 'unused: ' + ', '.join(sorted(unused)) if unused else None
msg = '; '.join([m for m in (used_msg, unused_msg) if m])
- print k, msg
+ print(k, msg)
- print
- print 'lang_script to names'
+ print()
+ print('lang_script to names')
lang_script_to_names = _get_lang_script_to_names()
for t in sorted(lang_script_to_names.iteritems()):
- print '%s: %s' % t
+ print('%s: %s' % t)
- print
- print 'script to default lang'
+ print()
+ print('script to default lang')
script_to_default_lang = _get_script_to_default_lang()
for t in sorted(script_to_default_lang.iteritems()):
- print '%s: %s' % t
+ print('%s: %s' % t)
if __name__ == '__main__':
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: lang_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/lint_cmap_reqs.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/lint_cmap_reqs.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/lint_cmap_reqs.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/lint_cmap_reqs.py 2019-07-09 15:45:37.397757323 +0200
@@ -115,7 +115,7 @@
needed_chars |= set([0, 0xd, 0x20])
if verbose:
- print >> sys.stderr, script,
+ sys.stderr.write(script + ' ')
needed_chars &= unicode_data.defined_characters(version=unicode_version)
@@ -156,7 +156,7 @@
if script[0] < 'A' or script[0] > 'Z':
bad_scripts.append(script)
if bad_scripts:
- print 'bad scripts: %s' % ', '.join(bad_scripts)
+ print('bad scripts: %s' % ', '.join(bad_scripts))
raise ValueError('bad scripts')
return set(scripts)
@@ -211,10 +211,10 @@
if args.outfile:
if args.outfile == '-default-':
args.outfile = 'lint_cmap_%s.xml' % args.unicode_version
- print >> sys.stderr, 'writing %s' % args.outfile
+ sys.stderr.write('writing %s\n' % args.outfile)
cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
else:
- print cmap_data.write_cmap_data(cmapdata, pretty=True)
+ print(cmap_data.write_cmap_data(cmapdata, pretty=True))
if __name__ == "__main__":
main()
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: lint_cmap_reqs.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/lint_config.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/lint_config.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/lint_config.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/lint_config.py 2019-07-09 18:07:40.238943767 +0200
@@ -156,7 +156,7 @@
def __init__(self, accept_if_in, intset):
self.accept_if_in = accept_if_in
self.intset = intset
- # print 'IntSetFilter %s %s' % ('only' if accept_if_in else 'except', intset)
+ # print('IntSetFilter %s %s' % ('only' if accept_if_in else 'except', intset))
def accept(self, cp):
return self.accept_if_in == (cp in self.intset)
@@ -468,7 +468,7 @@
arg_type = m.group(4)
comment = m.group(5)
- while line_indent <= indent[0]:
+ while len(line_indent) <= indent[0]:
if indent[2]:
indent = indent[2]
else:
@@ -479,8 +479,8 @@
else:
tag = tag_part
tag_data[tag] = (relation, arg_type, comment)
- if line_indent > indent[0]:
- indent = (line_indent, tag, indent)
+ if len(line_indent) > indent[0]:
+ indent = (len(line_indent), tag, indent)
return tag_data
tag_data = _process_data(data)
@@ -764,11 +764,11 @@
args = parser.parse_args()
if not (args.tags or args.comments or args.filters or args.spec or args.parsefile):
- print 'nothing to do.'
+ print('nothing to do.')
return
if args.spec:
- print spec_format
+ print(spec_format)
return
for tag in sorted(TestSpec.tag_set):
@@ -780,14 +780,14 @@
filter = None
show_tag = args.tags or comment or filter
if show_tag:
- print tag
+ print(tag)
if filter:
- print ' ' + filter
+ print(' ' + filter)
if comment:
- print ' -- ' + comment
+ print(' -- ' + comment)
if args.parsefile:
- print parse_spec_file(args.parsefile)
+ print(parse_spec_file(args.parsefile))
if __name__ == '__main__':
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: lint_config.py.~1~
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: lint_config.py.~2~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/match_font_names.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/match_font_names.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/match_font_names.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/match_font_names.py 2019-07-09 15:42:45.067573875 +0200
@@ -30,7 +30,7 @@
def match_files(src_dir, names):
matched_files = set()
src_dir = tool_utils.resolve_path(src_dir)
- print '# root: %s' % src_dir
+ print('# root: %s' % src_dir)
name_re = _build_regex(names)
for root, dirs, files in os.walk(src_dir):
effective_root = root[len(src_dir)+1:]
@@ -44,7 +44,7 @@
if not names:
return
for n in names:
- print n
+ print(n)
def _collect_names(names):
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: match_font_names.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/missing_coverage.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/missing_coverage.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/missing_coverage.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/missing_coverage.py 2019-07-09 15:39:59.796316020 +0200
@@ -38,18 +38,18 @@
def show_cps_by_block(cps):
- print '%d missing codepoints' % len(cps)
+ print('%d missing codepoints' % len(cps))
block = None
for cp in sorted(cps):
new_block = unicode_data.block(cp)
if new_block != block:
- print '# %s' % new_block
+ print('# %s' % new_block)
block = new_block
- print '%5s %s' % ('%04x' % cp, unicode_data.name(cp))
+ print('%5s %s' % ('%04x' % cp, unicode_data.name(cp)))
def display_missing(cmap_file):
- print 'Checking data in %s' % cmap_file
+ print('Checking data in %s' % cmap_file)
filename = tool_utils.resolve_path(cmap_file)
cps = _covered_cps(filename)
defined_cps = unicode_data.defined_characters(version=9.0)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: missing_coverage.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/mti_cmap_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/mti_cmap_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/mti_cmap_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/mti_cmap_data.py 2019-07-09 15:38:35.605203492 +0200
@@ -94,7 +94,7 @@
if v[-1] == '*':
xdata[i].add(int(v[:-1], 16))
elif v[-1] == '+':
- print '> %s added %s' % (header[i], v[:-1])
+ print('> %s added %s' % (header[i], v[:-1]))
data[i].add(int(v[:-1], 16))
else:
data[i].add(int(v, 16))
@@ -131,10 +131,10 @@
def csv_to_xml(csv_file, xml_file, scripts, exclude_scripts):
cmapdata = cmap_data_from_csv_file(csv_file, scripts, exclude_scripts)
if xml_file:
- print >> sys.stderr, 'writing %s' % xml_file
+ sys.stderr.write('writing %s\n' % xml_file)
cmap_data.write_cmap_data_file(cmapdata, xml_file, pretty=True)
else:
- print cmap_data.write_cmap_data(cmapdata, pretty=True)
+ print(cmap_data.write_cmap_data(cmapdata, pretty=True))
def _script_to_name(script):
@@ -183,7 +183,7 @@
num_cells += num_cols # headers are not empty
all_cells = num_cols * max_lines
fmt = 'Columns: %d\nRows: %d\nNon-empty cells: %d\nCells: %d'
- print >> sys.stderr, fmt % (num_cols, max_lines, num_cells, all_cells)
+ sys.stderr.write(fmt % (num_cols, max_lines, num_cells, all_cells) + '\n')
cmap_lines = []
cmap_lines.append(','.join(col[0] for col in cols))
for i in range(1, max_lines):
@@ -198,7 +198,7 @@
with open(csv_file, 'w') as f:
f.write(csv_data)
else:
- print csv_data
+ print(csv_data)
def _check_scripts(scripts):
@@ -210,7 +210,7 @@
['CJK', 'EXCL', 'LGC', 'MONO', 'MUSIC', 'SYM2', 'Zsye'])
for s in scripts:
if s not in all_scripts:
- print >> sys.stderr, 'unknown script:', s
+ sys.stderr.write('unknown script: %s\n' % s)
have_unknown = True
return not have_unknown
@@ -236,10 +236,10 @@
args = parser.parse_args()
if not _check_scripts(args.scripts):
- print >> sys.stderr, 'some scripts failed'
+ sys.stderr.write('some scripts failed\n')
return
if not _check_scripts(args.exclude_scripts):
- print >> sys.stderr, 'some exclude scripts failed'
+ sys.stderr.write('some exclude scripts failed\n')
return
if args.outfile == '-default-':
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: mti_cmap_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_cmap_reqs.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_cmap_reqs.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_cmap_reqs.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_cmap_reqs.py 2019-07-09 15:35:37.193084156 +0200
@@ -107,14 +107,14 @@
def _report(self, text):
if self._log_events:
- print text
+ print(text)
def _finish_block(self):
if self._block and self._log_events and not self._log_details:
for text in sorted(self._block_count):
- print '%s: %s' % (
+ print('%s: %s' % (
text, tool_utils.write_int_ranges(
- self._block_count[text]))
+ self._block_count[text])))
def _report_cp(self, cp, text, script):
if not self._log_events:
@@ -123,18 +123,18 @@
if cp_block != self._block:
self._finish_block()
self._block = cp_block
- print '# block: ' + self._block
+ print('# block: ' + self._block)
self._block_count = collections.defaultdict(set)
if self._log_details:
if not (
self._block in self._suppressed_blocks or
script in self._suppressed_scripts):
- print self._cp_info(cp), text
+ print(self._cp_info(cp), text)
else:
self._block_count[text].add(cp)
def _error(self, text):
- print >> sys.stderr, text
+ sys.stderr.write(text + '\n')
raise ValueError('failed')
def _verify_script_exists(self, script):
@@ -332,15 +332,15 @@
block_info = '%s %s' % (block, ', '.join('%s/%d' % t for t in info))
if block in assigned_primaries:
max_script = assigned_primaries[block]
- # print 'assigning primary', block_info, '->', max_script
+ # print('assigning primary', block_info, '->', max_script)
else:
- print >> sys.stderr, 'ERROR: no primary', block, block_info
+ sys.stderr.write('ERROR: no primary %s %s\n' % (block, block_info))
max_script = None
elif max_script == 'Zinh':
if block in inherited_primaries:
max_script = inherited_primaries[block]
else:
- print >> sys.stderr, 'ERROR: no inherited primary', block, block_info
+ sys.stderr.write('ERROR: no inherited primary %s %s\n' % (block, block_info))
max_script = None
block_to_script[block] = max_script
return block_to_script
@@ -390,9 +390,9 @@
for cp in cmap_ops.script_chars('Zinh'):
primary_script = _primary_script_for_block(unicode_data.block(cp))
if not primary_script:
- print >> sys.stderr, 'Error: no primary script for %04X' % cp
+ sys.stderr.write('Error: no primary script for %04X\n' % cp)
elif primary_script == 'Zinh':
- print >> sys.stderr, 'Error: primary script for %04X is Zinh' % cp
+ sys.stderr.write('Error: primary script for %04X is Zinh\n' % cp)
else:
cmap_ops.ensure_script(primary_script)
cmap_ops.add(cp, primary_script)
@@ -518,7 +518,7 @@
if block != last_block:
last_block = block
if block not in block_assignments:
- print >> sys.stderr, 'ERROR: no assignment for block %s' % block
+ sys.stderr.write('ERROR: no assignment for block %s\n' % block)
new_script = None
else:
new_script = block_assignments[block]
@@ -528,21 +528,21 @@
cmap_ops.remove(cp, 'Zyyy')
cmap_ops.add(cp, new_script)
else:
- print >> sys.stderr, ' could not assign %04x %s' % (
- cp, unicode_data.name(cp))
+ sys.stderr.write(' could not assign %04x %s\n' % (
+ cp, unicode_data.name(cp)))
if len(used_assignments) != len(block_assignments):
- print >> sys.stderr, 'ERROR: some block assignments unused'
+ sys.stderr.write('ERROR: some block assignments unused\n')
unused = set([block for block in block_assignments
if block not in used_assignments])
for block in unicode_data.block_names():
if block in unused:
- print >> sys.stderr, ' %s' % block
+ sys.stderr.write(' %s\n' % block)
unused.remove(block)
if unused:
- print >> sys.stderr, 'ERROR: unknown block names'
+ sys.stderr.write('ERROR: unknown block names\n')
for block in sorted(unused):
- print >> sys.stderr, ' %s' % block
+ sys.stderr.write(' %s\n' % block)
cmap_ops.delete_script('Zyyy')
@@ -595,8 +595,8 @@
if not unicode_data.is_defined(cp):
continue
if cp not in char_to_scripts and to_script != 'EXCL':
- print >> sys.stderr, 'reassign missing %04X %s' % (
- cp, unicode_data.name(cp, '<unnamed>'))
+ sys.stderr.write('reassign missing %04X %s\n' % (
+ cp, unicode_data.name(cp, '<unnamed>')))
continue
if all_scripts:
from_list = char_to_scripts[cp]
@@ -917,15 +917,15 @@
name = unicode_data.name(cp, '<unnamed">')
if cp not in chars:
if block == None:
- print "'%s': tool_utils.parse_int_ranges(\"\"\"" % script
+ print("'%s': tool_utils.parse_int_ranges(\"\"\"" % script)
cp_block = unicode_data.block(cp)
if cp_block != block:
block = cp_block
- print ' # %s' % block
- print ' %04X # %s' % (cp, name)
+ print(' # %s' % block)
+ print(' %04X # %s' % (cp, name))
chars.add(cp)
if block != None:
- print ' """),'
+ print(' """),')
# maintained using 'regen_script_required' fn
_SCRIPT_REQUIRED = [
@@ -2642,9 +2642,9 @@
except KeyError:
pass
script_name = script_name.replace(unichr(0x2019), "'")
- print ' # %s - %s' % (script, script_name)
+ print(' # %s - %s' % (script, script_name))
if script in script_to_comment_and_data:
- print " ('%s'," % script
+ print(" ('%s'," % script)
lines = []
comment, data = script_to_comment_and_data[script]
lines.append(' # Comment')
@@ -2665,8 +2665,8 @@
cp_name = unicode_data.name(cp, '<unnamed>')
lines.append('%04X # %s' % (cp, cp_name))
lines.append('"""),')
- print '\n '.join(lines)
- print
+ print('\n '.join(lines))
+ print()
def _assign_script_required(cmap_ops):
@@ -2760,11 +2760,11 @@
# ignore plane 2 and above
not_in_legacy -= set(range(0x20000, 0x120000))
if not_in_legacy:
- print 'not in legacy (%d):' % len(not_in_legacy)
+ print('not in legacy (%d):' % len(not_in_legacy))
compare_cmap_data._print_detailed(not_in_legacy)
not_in_new = legacy_cjk_chars - cjk_chars
if not_in_new:
- print 'not in new (%d):' % len(not_in_new)
+ print('not in new (%d):' % len(not_in_new))
compare_cmap_data._print_detailed(not_in_new)
@@ -2997,13 +2997,13 @@
try:
cps = tool_utils.parse_int_ranges(ranges)
except Exception as err:
- print >> sys.stderr, err
- print >> sys.stderr, cols[2]
- print >> sys.stderr, 'problem on %d "%s"' % (lineix, line)
+ sys.stderr.write('%s\n' % err)
+ sys.stderr.write(cols[2] + '\n')
+ sys.stderr.write('problem on %d "%s"\n' % (lineix, line))
raise err
if len(cps) > 50:
- print >> sys.stderr, 'large range (%d) on %d "%s"' % (
- len(cps), lineix, line)
+ sys.stderr.write('large range (%d) on %d "%s"\n' % (
+ len(cps), lineix, line))
cmap_ops.log('group: %s (%d)' % (name, len(cps)))
if add:
@@ -3269,7 +3269,7 @@
else:
xcmap = None # not a tuple, so probably no fallback data
else:
- print >> sys.stderr, 'no script %s found in %s' % (script, srcfile)
+ sys.stderr.write('no script %s found in %s\n' % (script, srcfile))
merged_cmap[script] = (cmap, xcmap)
return merged_cmap
@@ -3285,10 +3285,10 @@
for block in unicode_data.block_names():
block_range = unicode_data.block_range(block)
primary_script = _primary_script_for_block(block)
- print '%13s %6s %s' % (
+ print('%13s %6s %s' % (
'%04X-%04X' % block_range,
'\'%s\'' % primary_script if primary_script else '------',
- block)
+ block))
def main():
@@ -3323,9 +3323,9 @@
cmapdata = _get_cmap_data(script_to_chars, metadata)
if args.outfile:
cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
- print 'wrote %s' % args.outfile
+ print('wrote %s' % args.outfile)
else:
- print cmap_data.write_cmap_data(cmapdata, pretty=True)
+ print(cmap_data.write_cmap_data(cmapdata, pretty=True))
if __name__ == "__main__":
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_data.py 2019-07-10 07:26:49.014607637 +0200
@@ -205,7 +205,7 @@
return _char_set('0041..005a,0061..007a')
def char_range(start, end):
- return range(start, end+1)
+ return list(range(start, end+1))
COPTIC_EPACT = char_range(0x102E0, 0x102FB)
ARABIC_MATH = char_range(0x1EE00, 0x1EEF1)
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: noto_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_font_cmaps.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_font_cmaps.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_font_cmaps.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_font_cmaps.py 2019-07-09 14:54:31.436400443 +0200
@@ -143,7 +143,7 @@
if args.outfile:
cmap_data.write_cmap_data_file(cmapdata, args.outfile, pretty=True)
else:
- print cmap_data.write_cmap_data(cmapdata, pretty=True)
+ print(cmap_data.write_cmap_data(cmapdata, pretty=True))
if __name__ == "__main__":
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_font_coverage.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_font_coverage.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_font_coverage.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_font_coverage.py 2019-07-09 15:25:48.281314544 +0200
@@ -25,7 +25,7 @@
"""Write the names of the families in sorted order."""
family_names = [family.name for family in families.itervalues()]
for name in sorted(family_names):
- print name
+ print(name)
def check_cp(families, cp):
@@ -87,17 +87,17 @@
out_family_str = '\n '.join(sorted(out_families))
else:
out_family_str = '<no coverage>'
- print '%s:\n %s' % (to_ranges_str(out_cps), out_family_str)
+ print('%s:\n %s' % (to_ranges_str(out_cps), out_family_str))
cps = codepoints(args.each)
- print 'families that contain any of %s, by cp' % to_ranges_str(cps)
+ print('families that contain any of %s, by cp' % to_ranges_str(cps))
for family in families.itervalues():
family_cps = family.charset & cps
for cp in family_cps:
cp_to_families[cp].add(family.name)
if not cp_to_families:
- print 'no family supports any codepoint'
+ print('no family supports any codepoint')
else:
cp_list = sorted(cps)
cp = cp_list[0]
@@ -117,7 +117,7 @@
missing = set()
result = {}
cps = sorted(codepoints(args.any))
- print 'families that contain any of %s' % to_ranges_str(cps)
+ print('families that contain any of %s' % to_ranges_str(cps))
for cp in cps:
family_names = check_cp(families, cp)
if family_names:
@@ -130,21 +130,21 @@
missing.add(cp)
if result:
for k, v in sorted(result.iteritems()):
- print ' %s: %s' % (k, to_ranges_str(v))
+ print(' %s: %s' % (k, to_ranges_str(v)))
if missing:
- print ' not supported: %s' % to_ranges_str(missing)
+ print(' not supported: %s' % to_ranges_str(missing))
if args.all:
cps = sorted(codepoints(args.all))
- print 'families that contain all of %s' % to_ranges_str(cps)
+ print('families that contain all of %s' % to_ranges_str(cps))
result = set([family.name for family in families.itervalues()])
for cp in cps:
family_names = check_cp(families, cp)
result &= family_names
if result:
- print '\n'.join([' %s' % name for name in sorted(result)])
+ print('\n'.join([' %s' % name for name in sorted(result)]))
else:
- print 'no family contains all the codepoints'
+ print('no family contains all the codepoints')
def main():
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_fonts.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_fonts.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/noto_fonts.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/noto_fonts.py 2019-07-09 15:24:04.415417171 +0200
@@ -449,7 +449,7 @@
of these. 'paths' defaults to the standard noto font paths, using notoconfig."""
font_dirs = filter(None, [tool_utils.resolve_path(p) for p in paths])
- print 'Getting fonts from: %s' % font_dirs
+ print('Getting fonts from: %s' % font_dirs)
all_fonts = []
for font_dir in font_dirs:
@@ -460,8 +460,8 @@
filepath = path.join(font_dir, filename)
font = get_noto_font(filepath)
if not font:
- print >> sys.stderr, 'bad font filename in %s: \'%s\'.' % (
- (font_dir, filename))
+ sys.stderr.write('bad font filename in %s: \'%s\'.\n' % (
+ (font_dir, filename)))
continue
all_fonts.append(font)
@@ -577,8 +577,8 @@
familyname = fontname if ix == -1 else fontname[:ix]
wws_key = noto_font_to_wws_family_id(font)
if wws_key_to_family_name.get(wws_key, familyname) != familyname:
- print '!!! mismatching font names for key %s: %s and %s' % (
- wws_key, wws_key_to_family_name[wws_key], familyname)
+ print('!!! mismatching font names for key %s: %s and %s' % (
+ wws_key, wws_key_to_family_name[wws_key], familyname))
else:
wws_key_to_family_name[wws_key] = familyname
return wws_key_to_family_name
@@ -588,7 +588,7 @@
"""test name generation to make sure we match the font name from the wws id"""
wws_key_to_family_name = _all_noto_font_key_to_names(paths)
for key, val in sorted(wws_key_to_family_name.items()):
- print key, val
+ print(key, val)
name = ''.join(wws_family_id_to_name_parts(key))
if name != val:
raise Exception('!!! generated name %s does not match' % name)
@@ -617,9 +617,9 @@
args.dirs = NOTO_FONT_PATHS
fonts = get_noto_fonts(paths=args.dirs)
for font in fonts:
- print font.filepath
+ print(font.filepath)
for attr in font._fields:
- print ' %15s: %s' % (attr, getattr(font, attr))
+ print(' %15s: %s' % (attr, getattr(font, attr)))
if __name__ == "__main__":
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/sample_with_font.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/sample_with_font.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/sample_with_font.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/sample_with_font.py 2019-07-09 15:23:00.221098659 +0200
@@ -42,7 +42,7 @@
def _build_text(name_map, initial_text=''):
text = initial_text
- print 'build text using map of length %d' % len(name_map)
+ print('build text using map of length %d' % len(name_map))
while True:
line = raw_input('> ')
if not line:
@@ -53,12 +53,12 @@
_help()
continue
if line == 'names':
- print 'names:\n ' + '\n '.join(sorted(name_map.keys()))
+ print('names:\n ' + '\n '.join(sorted(name_map.keys())))
continue
if line == 'dump':
- print 'dump: \'%s\'' % text
+ print('dump: \'%s\'' % text)
for cp in text:
- print '%06x %s' % (ord(cp), unicode_data.name(ord(cp)))
+ print('%06x %s' % (ord(cp), unicode_data.name(ord(cp))))
continue
if line == 'clear':
text = ''
@@ -74,17 +74,17 @@
if line in name:
matches.append(name)
if not matches:
- print 'no match for "%s"'% line
+ print('no match for "%s"'% line)
continue
if len(matches) == 1:
- print matches[0]
+ print(matches[0])
text += unichr(name_map[matches[0]])
continue
# if we match a full line, then use that
if line in matches:
- print line
+ print(line)
text += unichr(name_map[line])
continue
@@ -95,14 +95,14 @@
# if we match a full word, and only one line has this full word, use that
if len(new_matches) == 1:
- print new_matches[0]
+ print(new_matches[0])
text += unichr(name_map[new_matches[0]])
continue
select_multiple = True
while select_multiple:
- print 'multiple matches:\n ' + '\n '.join(
- '[%2d] %s' % (i, n) for i, n in enumerate(matches))
+ print('multiple matches:\n ' + '\n '.join(
+ '[%2d] %s' % (i, n) for i, n in enumerate(matches)))
while True:
line = raw_input('0-%d or q to skip> ' % (len(matches) - 1))
if line == 'q':
@@ -118,13 +118,13 @@
break
if n < 0 or n >= len(matches):
- print '%d out of range' % n
+ print('%d out of range' % n)
continue
text += unichr(name_map[matches[n]])
select_multiple = False
- print 'done.'
+ print('done.')
return text
@@ -148,7 +148,7 @@
def _write_text(filename, text):
with codecs.open(filename, 'w', 'utf-8') as f:
f.write(text)
- print 'wrote %s' % filename
+ print('wrote %s' % filename)
def main():
@@ -175,7 +175,7 @@
charset = coverage.character_set(args.font)
name_map = _get_char_names(charset)
text = _build_text(name_map, text)
- print 'text: ' + text
+ print('text: ' + text)
else:
charset = None
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/spreadsheet.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/spreadsheet.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/spreadsheet.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/spreadsheet.py 2019-07-09 15:21:57.239767278 +0200
@@ -54,7 +54,7 @@
if not m:
m = re.match(r'Noto (Sans) (Myanmar) (UI)(.*)', font)
if not m:
- print 'could not parse Myanmar exception: "%s"' % font
+ print('could not parse Myanmar exception: "%s"' % font)
continue
style, script, ui, weight = m.groups()
@@ -82,15 +82,15 @@
script = 'Cuneiform'
fontname = ''.join(['Noto', style, script, ui, '-', weight, '.', ext])
- # print '%s:\n--> %s\n--> %s' % (
- # font, str((style, script, ui, weight)), fontname)
+ # print('%s:\n--> %s\n--> %s' % (
+ # font, str((style, script, ui, weight)), fontname))
if not hinting in [
'hinted',
'hinted (CFF)',
'unhinted']:
- print 'unrecognized hinting value \'%s\' on line %d (%s)' % (
- hinting, index, fontname)
+ print('unrecognized hinting value \'%s\' on line %d (%s)' % (
+ hinting, index, fontname))
continue
hinted = 'hinted' if hinting in ['hinted', 'hinted (CFF)'] else 'unhinted'
@@ -103,8 +103,8 @@
'Design approved',
'Design re-approved',
'Released']:
- print 'unrecognized status value \'%s\' on line %d (%s)' % (
- status, index, fontname)
+ print('unrecognized status value \'%s\' on line %d (%s)' % (
+ status, index, fontname))
continue
expect_font = status in [
@@ -129,11 +129,11 @@
spreadsheet_extra = spreadsheet_filenames - noto_filenames
spreadsheet_missing = noto_filenames - spreadsheet_filenames
if spreadsheet_extra:
- print 'spreadsheet extra:\n ' + '\n '.join(
- sorted(spreadsheet_extra))
+ print('spreadsheet extra:\n ' + '\n '.join(
+ sorted(spreadsheet_extra)))
if spreadsheet_missing:
- print 'spreadsheet missing:\n ' + '\n '.join(
- sorted(spreadsheet_missing))
+ print('spreadsheet missing:\n ' + '\n '.join(
+ sorted(spreadsheet_missing)))
spreadsheet_match = spreadsheet_filenames & noto_filenames
for filename in sorted(spreadsheet_match):
@@ -144,10 +144,10 @@
approved_version = data[4]
if approved_version:
warn = '!!!' if approved_version != font_version else ''
- print '%s%s version: %s approved: %s' % (
- warn, filename, font_version, approved_version)
+ print('%s%s version: %s approved: %s' % (
+ warn, filename, font_version, approved_version))
else:
- print '%s version: %s' % (filename, font_version)
+ print('%s version: %s' % (filename, font_version))
def main():
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/summary.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/summary.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/summary.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/summary.py 2019-07-09 15:20:31.022682550 +0200
@@ -112,7 +112,7 @@
line = [to_str(idx, val) for idx, val in enumerate(tup)
if not (short and (idx == 3 or idx == 6 or idx == 7))]
- print '\t'.join(line)
+ print('\t'.join(line))
def print_summary(summary_list, short):
labels = ('path', 'version', 'name', 'size', 'num_glyphs', 'num_chars', 'cmap', 'table_info')
@@ -130,10 +130,10 @@
args = parser.parse_args()
if not os.path.isdir(args.root):
- print '%s does not exist or is not a directory' % args.root
+ print('%s does not exist or is not a directory' % args.root)
else:
root = os.path.abspath(args.root)
- print "root: %s, name: %s" % (root, args.name if args.name else '[all]')
+ print("root: %s, name: %s" % (root, args.name if args.name else '[all]'))
print_summary(summarize(root, name=args.name), args.short)
if __name__ == "__main__":
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/swat_license.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/swat_license.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/swat_license.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/swat_license.py 2019-07-09 15:16:14.937401163 +0200
@@ -133,15 +133,15 @@
version = names[_VERSION_ID]
m = re.match(r'Version (\d{1,5})\.(\d{1,5})( uh)?(;.*)?', version)
if not m:
- print '! Could not match version string (%s)' % version
+ print('! Could not match version string (%s)' % version)
return None, None
major_version = m.group(1)
minor_version = m.group(2)
- print 'old version: "%s"' % version
+ print('old version: "%s"' % version)
if is_hinted == None:
is_hinted = not bool(m.group(3))
- print 'computed hinted = %s' % is_hinted
+ print('computed hinted = %s' % is_hinted)
version_remainder = m.group(4)
accuracy = len(minor_version)
@@ -155,8 +155,8 @@
# bump the minor version keeping significant digits:
new_minor_version = str(int(minor_version) + 1).zfill(accuracy)
new_revision = major_version + '.' + new_minor_version
- print 'Update revision from \'%s\' to \'%s\'' % (
- expected_revision, new_revision)
+ print('Update revision from \'%s\' to \'%s\'' % (
+ expected_revision, new_revision))
# double check we are going to properly round-trip this value
float_revision = float(new_revision)
fixed_revision = misc.fixedTools.floatToFixed(float_revision, 16)
@@ -184,10 +184,10 @@
filepath = noto_font.filepath
basename = path.basename(filepath)
if noto_font.is_cjk:
- print '# Skipping cjk font %s' % basename
+ print('# Skipping cjk font %s' % basename)
return
if noto_font.fmt == 'ttc':
- print '# Deferring ttc font %s' % basename
+ print('# Deferring ttc font %s' % basename)
_ttc_fonts[noto_font] = ttc_utils.ttcfile_filenames(filepath)
return
@@ -200,7 +200,7 @@
if not rel_filepath:
raise ValueError('Could not identify noto root of %s' % filepath)
- print '-----\nUpdating %s' % rel_filepath
+ print('-----\nUpdating %s' % rel_filepath)
dst_file = path.join(dst_root, rel_filepath)
@@ -208,10 +208,10 @@
new_revision, new_version_string = get_bumped_version(
ttfont, noto_font.is_hinted)
except ValueError as e:
- print e
+ print(e)
return
- print '%s: %s' % ('Would write' if dry_run else 'Writing', dst_file)
+ print('%s: %s' % ('Would write' if dry_run else 'Writing', dst_file))
new_trademark = "%s is a trademark of Google Inc." % noto_font.family
@@ -286,7 +286,7 @@
label = _NAME_ID_LABELS[name_id]
oldText = '\'%s\'' % old if old else 'None'
newText = newText or ('\'%s\'' % new)
- print '%s:\n old: %s\n new: %s' % (label, oldText, newText or new)
+ print('%s:\n old: %s\n new: %s' % (label, oldText, newText or new))
label_change = _changes.get(label)
if not label_change:
@@ -342,7 +342,7 @@
if not path.isdir(dst_dir):
os.makedirs(dst_dir)
ttfont.save(dst_file)
- print 'Wrote file.'
+ print('Wrote file.')
def _construct_ttc_fonts(fonts, dst_root, dry_run):
@@ -364,7 +364,7 @@
for ttcfont, components in sorted(_ttc_fonts.iteritems()):
rel_filepath = _noto_relative_path(ttcfont.filepath)
- print '-----\nBuilding %s' % rel_filepath
+ print('-----\nBuilding %s' % rel_filepath)
component_list = []
# note the component order must match the original ttc, so
@@ -372,8 +372,8 @@
for component in components:
possible_components = basename_to_fonts.get(component)
if not possible_components:
- print '! no match for component named %s in %s' % (
- component, rel_path)
+ print('! no match for component named %s in %s' % (
+ component, rel_path))
component_list = []
break
@@ -381,23 +381,23 @@
for possible_component in possible_components:
if possible_component.is_hinted == ttcfont.is_hinted:
if matched_possible_component:
- print '! already matched possible component %s for %s' % (
+ print('! already matched possible component %s for %s' % (
matched_possible_component.filename,
- possible_component_filename)
+ possible_component_filename))
matched_possible_component = None
break
matched_possible_component = possible_component
if not matched_possible_component:
- print 'no matched component named %s' % component
+ print('no matched component named %s' % component)
component_list = []
break
component_list.append(matched_possible_component)
if not component_list:
- print '! cannot generate ttc font %s' % rel_path
+ print('! cannot generate ttc font %s' % rel_path)
continue
- print 'components:\n ' + '\n '.join(
- _noto_relative_path(font.filepath) for font in component_list)
+ print('components:\n ' + '\n '.join(
+ _noto_relative_path(font.filepath) for font in component_list))
if dry_run:
continue
@@ -405,7 +405,7 @@
src_files = [path.join(dst_root, _noto_relative_path(font.filepath))
for font in component_list]
ttc_utils.build_ttc(dst_ttc, src_files)
- print 'Built %s' % dst_ttc
+ print('Built %s' % dst_ttc)
def main():
@@ -421,33 +421,33 @@
_swat_fonts(args.dst_root, args.dry_run)
- print '------\nchange summary\n'
+ print('------\nchange summary\n')
for name_key in sorted(_changes):
- print '%s:' % name_key
+ print('%s:' % name_key)
new_vals = _changes[name_key]
for new_val in sorted(new_vals):
- print ' change to \'%s\':' % new_val
+ print(' change to \'%s\':' % new_val)
old_vals = new_vals[new_val]
for old_val in sorted(old_vals):
- print ' from %s (%d files)%s' % (
+ print(' from %s (%d files)%s' % (
'\'%s\'' % old_val if old_val else 'None',
- len(old_vals[old_val]), ':' if args.details else '')
+ len(old_vals[old_val]), ':' if args.details else ''))
if args.details:
for file_name in sorted(old_vals[old_val]):
x = file_name.rfind('/')
if x > 0:
x = file_name.rfind('/', 0, x)
- print ' ' + file_name[x:]
+ print(' ' + file_name[x:])
- print '------\nautofix summary\n'
+ print('------\nautofix summary\n')
for fix_key in sorted(_autofix):
fixed_files = _autofix[fix_key]
- print '%s (%d):' % (fix_key, len(fixed_files))
+ print('%s (%d):' % (fix_key, len(fixed_files)))
for file_name in sorted(fixed_files):
x = file_name.rfind('/')
if x > 0:
x = file_name.rfind('/', 0, x)
- print ' ' + file_name[x:]
+ print(' ' + file_name[x:])
if __name__ == "__main__":
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/sync_repos.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/sync_repos.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/sync_repos.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/sync_repos.py 2019-07-09 15:13:08.125387956 +0200
@@ -37,8 +37,8 @@
errors.append(r)
if errors:
- print >> sys.stderr, '%s %s not clean' % (
- ' '.join(errors), 'is' if len(errors) == 1 else 'are')
+ sys.stderr.write('%s %s not clean\n' % (
+ ' '.join(errors), 'is' if len(errors) == 1 else 'are'))
return False
return True
@@ -54,7 +54,7 @@
for p in _REPO_PATHS:
tool_utils.git_checkout(p, 'master')
else:
- print 'would have checked out master in %s' % (', '.join(_REPOS))
+ print('would have checked out master in %s' % (', '.join(_REPOS)))
return True
@@ -84,7 +84,7 @@
failed_tags.append('%s: %s' % (r, t))
if failed_tags:
- print >> sys.stderr, 'failed to find:\n %s' % '\n '.join(failed_tags)
+ sys.stderr.write('failed to find:\n %s\n' % '\n '.join(failed_tags))
return False
if not dry_run:
@@ -92,9 +92,9 @@
tool_utils.git_checkout(p, t)
if verbose or dry_run:
- print '%schecked out:\n %s' % (
+ print('%schecked out:\n %s' % (
'would have ' if dry_run else '',
- '\n '.join('%s: %s' % (r, t) for r, t in zip(_REPOS, resolved_tags)))
+ '\n '.join('%s: %s' % (r, t) for r, t in zip(_REPOS, resolved_tags))))
return True
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/test_vertical_extents.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/test_vertical_extents.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/test_vertical_extents.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/test_vertical_extents.py 2019-07-09 14:52:32.871687740 +0200
@@ -159,7 +159,7 @@
sys.stdin, font_file_name, ymin, ymax, language)
for line_bounds, text_piece in exceeding_lines:
- print text_piece.encode('UTF-8'), line_bounds
+ print(text_piece, line_bounds)
# print test_all_combinations(3, font_file_name, ymin, ymax)
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/tool_utils.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/tool_utils.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/tool_utils.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/tool_utils.py 2019-07-09 15:10:57.975786710 +0200
@@ -127,7 +127,7 @@
if not os.path.isdir(path):
if os.path.exists(path):
raise ValueError('%s exists and is not a directory' % path)
- print "making '%s'" % path
+ print("making '%s'" % path)
os.makedirs(path)
elif clean:
shutil.rmtree(path)
@@ -195,7 +195,7 @@
result = subprocess.check_output(
['git', 'checkout', branch_or_tag], stderr=subprocess.STDOUT)
if verbose:
- print '%s:\n%s\n-----' % (repo, result)
+ print('%s:\n%s\n-----' % (repo, result))
def git_mv(repo, old, new):
@@ -270,13 +270,13 @@
protected_files.append(f)
if files_not_under_version_control:
- print >> sys.stderr, '%d files were not under version control:\n %s' % (
+ sys.stderr.write('%d files were not under version control:\n %s\n' % (
len(files_not_under_version_control),
- ', '.join(files_not_under_version_control))
+ ', '.join(files_not_under_version_control)))
if protected_files:
- print >> sys.stderr, '%d files protected:\n %s' % (
- len(protected_files), ', '.join(protected_files))
+ sys.stderr.write('%d files protected:\n %s\n' % (
+ len(protected_files), ', '.join(protected_files)))
return tool_generated_files
@@ -298,10 +298,10 @@
def dumplines(msg, text, limit):
if text:
lines = text.splitlines()
- print '%s (%d lines):\n %s' % (
- msg, len(lines), '\n '.join(lines[:limit]))
+ print('%s (%d lines):\n %s' % (
+ msg, len(lines), '\n '.join(lines[:limit])))
if len(lines) > limit:
- print ' ...'
+ print(' ...')
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
@@ -315,7 +315,7 @@
if subprocess.call(
['git', 'diff-files', '--quiet', '--ignore-submodules', '--']):
if (print_errors):
- print 'There are unstaged changes:'
+ print('There are unstaged changes:')
capture_and_show_errors(
['git', 'diff-files', '--name-status', '-r', '--ignore-submodules',
'--'])
@@ -324,7 +324,7 @@
['git', 'diff-index', '--cached', '--quiet', 'HEAD',
'--ignore-submodules', '--']):
if (print_errors):
- print 'There are uncommitted changes:'
+ print('There are uncommitted changes:')
capture_and_show_errors(
['git', 'diff-index', '--cached', '--name-status', '-r', 'HEAD',
'--ignore-submodules', '--'])
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/ttc_utils.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/ttc_utils.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/ttc_utils.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/ttc_utils.py 2019-07-09 15:08:27.247406673 +0200
@@ -125,17 +125,17 @@
table_map = {}
for font_index, font_entry in enumerate(ttc.fonts):
- print '[%2d] %s' % (font_index, names[font_index])
+ print('[%2d] %s' % (font_index, names[font_index]))
for table_index, table_entry in enumerate(font_entry.tables):
table = ttc.tables[table_entry]
if table_entry not in table_map:
table_map[table_entry] = (font_index, table_index)
- print ' [%2d] %s %8d %8d' % (
- table_index, table.tag, table.offset, table.length)
+ print(' [%2d] %s %8d %8d' % (
+ table_index, table.tag, table.offset, table.length))
else:
table_from = table_map[table_entry]
- print ' [%2d] %s @%d.%d' % (
- table_index, table.tag, table_from[0], table_from[1])
+ print(' [%2d] %s @%d.%d' % (
+ table_index, table.tag, table_from[0], table_from[1]))
def ttcfile_filenames(ttcfile):
@@ -289,7 +289,7 @@
if args.op == 'dump':
ttcfile_dump(args.ttcfile)
elif args.op == 'names':
- print '\n'.join(ttcfile_filenames(args.ttcfile))
+ print('\n'.join(ttcfile_filenames(args.ttcfile)))
elif args.op == 'extract':
ttcfile_extract_and_write_namesfile(args.ttcfile, args.filedir)
elif args.op=='build':
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/unicode_data.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/unicode_data.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/unicode_data.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/unicode_data.py 2019-07-09 17:58:19.876725080 +0200
@@ -148,7 +148,7 @@
def _char_to_int(char):
"""Converts a potential character to its scalar value."""
- if type(char) in [str, unicode]:
+ if type(char) in [str, type(u'')]:
return ord(char)
else:
return char
@@ -249,7 +249,7 @@
"""Returns a frozenset of the cps in the named block."""
load_data()
first, last = _block_range[block]
- return frozenset(xrange(first, last + 1))
+ return frozenset(range(first, last + 1))
def block_names():
@@ -417,7 +417,7 @@
_HARD_CODED_FOLDED_SCRIPT_NAME_TO_CODE = {
_folded_script_name(name): code for code, name in
- _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES.iteritems()
+ _HARD_CODED_HUMAN_READABLE_SCRIPT_NAMES.items()
}
def human_readable_script_name(code):
@@ -572,7 +572,7 @@
elif char_name.endswith("Last>"):
# Ignore surrogates
if "Surrogate" not in char_name:
- for char in xrange(last_range_opener, code+1):
+ for char in range(last_range_opener, code+1):
_general_category_data[char] = general_category
_combining_class_data[char] = combining_class
if bidi_mirroring:
@@ -599,7 +599,7 @@
for first, last, script_name in script_ranges:
folded_script_name = _folded_script_name(script_name)
script = _folded_script_name_to_code[folded_script_name]
- for char_code in xrange(first, last+1):
+ for char_code in range(first, last+1):
_script_data[char_code] = script
@@ -610,7 +610,7 @@
for first, last, script_names in script_extensions_ranges:
script_set = frozenset(script_names.split(' '))
- for character_code in xrange(first, last+1):
+ for character_code in range(first, last+1):
_script_extensions_data[character_code] = script_set
@@ -622,7 +622,7 @@
for first, last, block_name in block_ranges:
_block_names.append(block_name)
_block_range[block_name] = (first, last)
- for character_code in xrange(first, last + 1):
+ for character_code in range(first, last + 1):
_block_data[character_code] = block_name
@@ -632,7 +632,7 @@
age_ranges = _parse_code_ranges(derived_age_txt.read())
for first, last, char_age in age_ranges:
- for char_code in xrange(first, last+1):
+ for char_code in range(first, last+1):
_age_data[char_code] = char_age
@@ -642,7 +642,7 @@
dcp_ranges = _parse_code_ranges(dcp_txt.read())
for first, last, property_name in dcp_ranges:
- for character_code in xrange(first, last+1):
+ for character_code in range(first, last+1):
try:
_core_properties_data[property_name].add(character_code)
except KeyError:
@@ -680,13 +680,13 @@
with open_unicode_data_file("IndicPositionalCategory.txt") as inpc_txt:
positional_ranges = _parse_code_ranges(inpc_txt.read())
for first, last, char_position in positional_ranges:
- for char_code in xrange(first, last+1):
+ for char_code in range(first, last+1):
_indic_positional_data[char_code] = char_position
with open_unicode_data_file("IndicSyllabicCategory.txt") as insc_txt:
syllabic_ranges = _parse_code_ranges(insc_txt.read())
for first, last, char_syllabic_category in syllabic_ranges:
- for char_code in xrange(first, last+1):
+ for char_code in range(first, last+1):
_indic_syllabic_data[char_code] = char_syllabic_category
@@ -784,7 +784,7 @@
continue
# discourage lots of redundant copies of seq_type
- seq_type = intern(m.group(3).strip().encode('ascii'))
+ seq_type = m.group(3).strip()
seq = tuple(int(s, 16) for s in m.group(1).split())
name = m.group(4).strip()
age = float(m.group(5))
@@ -1003,7 +1003,7 @@
# maps from sequence to (name, age, type), we only need the name
seq_data = _read_emoji_data(_LEGACY_ANDROID_SEQUENCES.splitlines())
- seq_to_name = {k: v[0] for k, v in seq_data.iteritems()}
+ seq_to_name = {k: v[0] for k, v in seq_data.items()}
return _get_order_patch(_LEGACY_ANDROID_ORDER, seq_to_name)
@@ -1052,8 +1052,8 @@
group_list.extend(_read_emoji_test_data(_SUPPLEMENTAL_EMOJI_GROUP_DATA))
for i, (seq, group, subgroup, name) in enumerate(group_list):
if seq in _emoji_group_data:
- print 'seq %s alredy in group data as %s' % (seq_to_string(seq), _emoji_group_data[seq])
- print ' new value would be %s' % str((i, group, subgroup, name))
+ print('seq %s already in group data as %s' % (seq_to_string(seq), _emoji_group_data[seq]))
+ print(' new value would be %s' % str((i, group, subgroup, name)))
_emoji_group_data[seq] = (i, group, subgroup, name)
assert len(group_list) == len(_emoji_group_data)
@@ -1100,7 +1100,7 @@
exist, and an empty list if subgroup does not exist in group."""
_load_emoji_group_data()
result = None
- for seq, (index, g, sg, _) in _emoji_group_data.iteritems():
+ for seq, (index, g, sg, _) in _emoji_group_data.items():
if g == group:
if result == None:
result = []
@@ -1131,9 +1131,9 @@
_emoji_non_vs_to_canonical = {}
def add_data(data):
- for k, t in data.iteritems():
+ for k, t in data.items():
if k in _emoji_sequence_data:
- print 'already have data for sequence:', seq_to_string(k), t
+ print('already have data for sequence:', seq_to_string(k), t)
_emoji_sequence_data[k] = t
if EMOJI_VS in k:
_emoji_non_vs_to_canonical[strip_emoji_vs(k)] = k
@@ -1149,7 +1149,7 @@
# Get names for single emoji from the test data. We will prefer these over
# those in UnicodeData (e.g. prefer "one o'clock" to "clock face one oclock"),
# and if they're not in UnicodeData these are proposed new emoji.
- for seq, (_, _, _, emoji_name) in _emoji_group_data.iteritems():
+ for seq, (_, _, _, emoji_name) in _emoji_group_data.items():
non_vs_seq = strip_emoji_vs(seq)
if len(non_vs_seq) > 1:
continue
@@ -1596,10 +1596,10 @@
text_p += 1
else:
presentation = '<error>'
- print '%s%04x %5s %s' % (
- ' ' if cp < 0x10000 else '', cp, presentation, cp_name)
- print '%d total emoji, %d text presentation, %d emoji presentation' % (
- len(get_emoji()), text_p, emoji_p)
+ print('%s%04x %5s %s' % (
+ ' ' if cp < 0x10000 else '', cp, presentation, cp_name))
+ print('%d total emoji, %d text presentation, %d emoji presentation' % (
+ len(get_emoji()), text_p, emoji_p))
def _load_nameslist_data():
@@ -1670,16 +1670,16 @@
all_sequences = sorted(get_emoji_sequences());
for k in all_sequences:
if not get_emoji_group_data(k):
- print 'no data:', seq_to_string(k)
+ print('no data:', seq_to_string(k))
for group in get_emoji_groups():
- print 'group:', group
+ print('group:', group)
for subgroup in get_emoji_subgroups(group):
- print ' subgroup:', subgroup
- print ' %d items' % len(get_emoji_in_group(group, subgroup))
+ print(' subgroup:', subgroup)
+ print(' %d items' % len(get_emoji_in_group(group, subgroup)))
# dump some information for annotations
for k in get_sorted_emoji_sequences(all_sequences):
age = get_emoji_sequence_age(k)
if age == 12:
- print seq_to_string(k).replace('_', ' '), '#', get_emoji_sequence_name(k)
+ print(seq_to_string(k).replace('_', ' '), '#', get_emoji_sequence_name(k))
Only in nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools: unicode_data.py.~1~
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/unittests/font_tests.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/unittests/font_tests.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/unittests/font_tests.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/unittests/font_tests.py 2019-07-09 15:05:30.075310802 +0200
@@ -715,7 +715,7 @@
def test_individual_spacing_marks(self):
"""Tests that spacing marks are spacing by themselves."""
for font in self.font_files:
- print 'Testing %s for stand-alone spacing marks...' % font
+ print('Testing %s for stand-alone spacing marks...' % font)
for mark in self.marks_to_test:
mark = unichr(mark)
advances = layout.get_advances(mark, font)
@@ -725,13 +725,13 @@
def test_spacing_marks_in_combination(self):
"""Tests that spacing marks do not combine with base letters."""
for font in self.font_files:
- print 'Testing %s for spacing marks in combination...' % font
+ print('Testing %s for spacing marks in combination...' % font)
for base_letter in (u'A\u00C6BCDEFGHIJKLMNO\u00D8\u01A0PRST'
u'U\u01AFVWXYZ'
u'a\u00E6bcdefghi\u0131j\u0237klmn'
u'o\u00F8\u01A1prs\u017Ftu\u01B0vwxyz'
u'\u03D2'):
- print 'Testing %s combinations' % base_letter
+ print('Testing %s combinations' % base_letter)
for mark in self.marks_to_test:
if mark == 0x02DE:
# Skip rhotic hook, as it's perhaps OK for it to form
@@ -758,14 +758,14 @@
"""Tests that soft-dotted characters lose their dots when combined."""
for font in self.font_files:
- print 'Testing %s for soft-dotted combinations...' % font
+ print('Testing %s for soft-dotted combinations...' % font)
# TODO: replace the following list with actual derivation based on
# Unicode's soft-dotted property
for base_letter in (u'ij\u012F\u0249\u0268\u029D\u02B2\u03F3\u0456'
u'\u0458\u1D62\u1D96\u1DA4\u1DA8\u1E2D\u1ECB'
u'\u2071\u2C7C'):
- print 'Testing %s combinations' % base_letter.encode('UTF-8')
+ print('Testing %s combinations' % base_letter)
for mark in self.marks_to_test:
mark = unichr(mark)
letter_only = layout.get_glyphs(base_letter, font)
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/update_alpha.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/update_alpha.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/update_alpha.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/update_alpha.py 2019-07-09 15:04:19.898065031 +0200
@@ -89,7 +89,7 @@
font_paths.append(src_path)
if not font_paths:
- print 'All .ttf files compare identical. Exiting.'
+ print('All .ttf files compare identical. Exiting.')
return
# summarize fonts in this commit
@@ -132,8 +132,8 @@
# generate compare file to use as checkin log
checkin_msg_file = '/tmp/svn_checkin.txt'
with RedirectStdout(checkin_msg_file):
- print one_line_msg
- print
+ print(one_line_msg)
+ print()
compare_summary.compare_summary(
alphadir, srcdir, None, compare_summary.tuple_compare, True, False, False, False)
@@ -157,9 +157,9 @@
with open(checkin_msg_file) as f:
checkin_msg = f.read().strip();
- print '%s\n-----\n%s\n-----' % ('dry run' if dry_run else 'summary', checkin_msg)
+ print('%s\n-----\n%s\n-----' % ('dry run' if dry_run else 'summary', checkin_msg))
if not dry_run:
- print 'command to update: svn commit -F \'%s\'' % checkin_msg_file
+ print('command to update: svn commit -F \'%s\'' % checkin_msg_file)
def main():
@@ -175,11 +175,11 @@
args = parser.parse_args()
if not os.path.isdir(args.srcdir):
- print '%s does not exist or is not a directory' % args.srcdir
+ print('%s does not exist or is not a directory' % args.srcdir)
return
if not os.path.exists(args.alpha):
- print '%s does not exist or is not a directory' % args.alpha
+ print('%s does not exist or is not a directory' % args.alpha)
return
push_to_noto_alpha(args.alpha, args.srcdir, args.dry_run)
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/update_cldr.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/update_cldr.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/update_cldr.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/update_cldr.py 2019-07-09 15:02:17.092384887 +0200
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
+# -*- coding: utf-8 -*-
#
# Copyright 2015 Google Inc. All rights reserved.
#
@@ -60,7 +60,7 @@
tool_utils.check_dir_exists(cldr_repo)
if not tool_utils.git_is_clean(noto_repo):
- print 'Please fix'
+ print('Please fix')
return
if update:
@@ -79,13 +79,13 @@
for subdir in CLDR_SUBDIRS:
src = os.path.join(cldr_repo, subdir)
dst = os.path.join(noto_cldr, subdir)
- print 'replacing directory %s...' % subdir
+ print('replacing directory %s...' % subdir)
shutil.rmtree(dst)
shutil.copytree(src, dst)
# replace files
for f in CLDR_FILES:
- print 'replacing file %s...' % f
+ print('replacing file %s...' % f)
src = os.path.join(cldr_repo, f)
dst = os.path.join(noto_cldr, f)
shutil.copy(src, dst)
@@ -95,7 +95,7 @@
# print commit message
tag_string = (' tag %s' % cldr_tag) if cldr_tag else ''
- print 'Update CLDR data to SVN r%s%s.' % (cldr_version, tag_string)
+ print('Update CLDR data to SVN r%s%s.' % (cldr_version, tag_string))
def main():
@@ -113,13 +113,13 @@
args = parser.parse_args()
if not args.cldr or not args.noto:
- print "Missing either or both of cldr and noto locations."
+ print("Missing either or both of cldr and noto locations.")
return
if args.branch:
cur_branch = tool_utils.git_get_branch(args.noto)
if cur_branch != args.branch:
- print "Expected branch '%s' but %s is in branch '%s'." % (args.branch, args.noto, cur_branch)
+ print("Expected branch '%s' but %s is in branch '%s'." % (args.branch, args.noto, cur_branch))
return
update_cldr(args.noto, args.cldr, args.update_cldr, args.cldr_tag)
diff -ru nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/update_udhr_samples.py nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/update_udhr_samples.py
--- nototools-9c4375f07c9adc00c700c5d252df6a25d7425870.orig/nototools/update_udhr_samples.py 2019-03-20 03:30:53.000000000 +0100
+++ nototools-9c4375f07c9adc00c700c5d252df6a25d7425870/nototools/update_udhr_samples.py 2019-07-09 15:00:55.284264110 +0200
@@ -45,7 +45,7 @@
fetch_dir = tool_utils.ensure_dir_exists(fetch_dir)
dstfile = os.path.join(fetch_dir, UDHR_XML_ZIP_NAME)
result = urllib.urlretrieve(UDHR_XML_ZIP_URL, dstfile)
- print 'Fetched: ' + result[0]
+ print('Fetched: ' + result[0])
def update_udhr(udhr_dir, fetch_dir, in_repo):
@@ -71,7 +71,7 @@
date = datetime.datetime.now().strftime('%Y-%m-%d')
dst = 'in %s ' % udhr_dir if not in_repo else ''
- print 'Update UDHR files %sfrom %s as of %s.' % (dst, fetch_dir, date)
+ print('Update UDHR files %sfrom %s as of %s.' % (dst, fetch_dir, date))
def parse_index(src_dir):
@@ -99,7 +99,7 @@
bcp = e.attrib.get('bcp47')
if not bcp:
# don't know what to do with this, maybe we could supply a mapping.
- print 'no bcp for %s' % code
+ print('no bcp for %s' % code)
continue
script = e.attrib.get('iso15924')
@@ -304,9 +304,9 @@
result[new_bcp] = code
if errors:
- print 'fix_index had %d errors:' % len(errors)
+ print('fix_index had %d errors:' % len(errors))
for e in errors:
- print ' ', e
+ print(' ', e)
raise Exception('correct the fixes whitelist')
return result
@@ -340,7 +340,7 @@
# otherwise, we assume the 4-char value is a script, and leave it alone.
except KeyError:
# if we can't provide a script, it's no use for a script sample, so exclude it
- print 'no likely subtag (script) data for %s, excluding' % parts[0]
+ print('no likely subtag (script) data for %s, excluding' % parts[0])
continue
result[new_bcp] = code
return result
@@ -400,12 +400,12 @@
for lang_scr in sorted(options):
if lang_scr in bcp_to_code_attrib_sample:
- print '%s exists with variants %s' % (
- lang_scr, ', '.join(sorted(options[lang_scr])))
+ print('%s exists with variants %s' % (
+ lang_scr, ', '.join(sorted(options[lang_scr]))))
del options[lang_scr]
for lang_scr in sorted(options):
- print '%s options: %s' % (lang_scr, options[lang_scr])
+ print('%s options: %s' % (lang_scr, options[lang_scr]))
if not lang_scr in OPTION_MAP:
errors.append('%s missing from option map' % lang_scr)
elif not OPTION_MAP[lang_scr] in options[lang_scr]:
@@ -413,13 +413,13 @@
lang_scr, OPTION_MAP[lang_scr]))
else:
alias = OPTION_MAP[lang_scr]
- print 'adding %s (from %s)' % (lang_scr, alias)
+ print('adding %s (from %s)' % (lang_scr, alias))
bcp_to_code_attrib_sample[lang_scr] = bcp_to_code_attrib_sample[alias]
if errors:
- print 'add_default_lang_script encountered %d errors:' % len(errors)
+ print('add_default_lang_script encountered %d errors:' % len(errors))
for e in errors:
- print ' ', e
+ print(' ', e)
raise Exception('oops')
@@ -469,7 +469,7 @@
attr = code_to_attrib.get(ohchr)
if not attr:
attr = 'none'
- print '%s (%s) not in ohchr attribution data' % (code, ohchr)
+ print('%s (%s) not in ohchr attribution data' % (code, ohchr))
sample = bcp_to_sample[bcp]
bcp_to_code_attrib_sample[bcp] = (code, attr, sample)
@@ -479,10 +479,10 @@
def print_bcp_to_code_attrib_sample(bcp_to_code_attrib_sample):
- print 'index size: %s' % len(bcp_to_code_attrib_sample)
+ print('index size: %s' % len(bcp_to_code_attrib_sample))
for bcp, (code, attrib, sample) in sorted(
bcp_to_code_attrib_sample.iteritems()):
- print '%s: %s, %s\n "%s"' % (bcp, code, attrib, sample)
+ print('%s: %s, %s\n "%s"' % (bcp, code, attrib, sample))
def extract_para(src_path):
@@ -514,9 +514,9 @@
return sample
if new_sample == sample:
- print 'sample for %s was not changed by fix' % bcp
+ print('sample for %s was not changed by fix' % bcp)
else:
- print 'fixed sample for %s' % bcp
+ print('fixed sample for %s' % bcp)
return new_sample
@@ -526,7 +526,7 @@
src_path = os.path.join(udhr_dir, src_file)
sample = extract_para(src_path)
if not sample:
- print 'unable to get sample from %s' % src_file
+ print('unable to get sample from %s' % src_file)
return None
return sample
@@ -538,7 +538,7 @@
code = bcp_to_code[bcp]
sample = get_sample_for_code(src_dir, code)
if not sample:
- print 'bcp %s: no sample found (code %s)' % (bcp, code)
+ print('bcp %s: no sample found (code %s)' % (bcp, code))
else:
bcp_to_sample[bcp] = sample
return bcp_to_sample
@@ -622,9 +622,9 @@
del bcp_to_sample[bcp]
if errors:
- print 'found %d errors in samples' % len(errors)
+ print('found %d errors in samples' % len(errors))
for e in errors:
- print ' ', e
+ print(' ', e)
def update_samples(
@@ -643,8 +643,8 @@
if in_repo:
repo, subdir = os.path.split(sample_dir)
tool_samples = frozenset(tool_utils.get_tool_generated(repo, subdir))
- print 'allowing overwrite of %d files:\n %s' % (
- len(tool_samples), ', '.join(sorted(tool_samples)))
+ print('allowing overwrite of %d files:\n %s' % (
+ len(tool_samples), ', '.join(sorted(tool_samples))))
comments = [
'# Attributions for sample excerpts:',
@@ -660,13 +660,13 @@
dst_file = '%s_udhr.txt' % bcp
dst_path = os.path.join(sample_dir, dst_file)
if in_repo and os.path.isfile(dst_path) and dst_file not in tool_samples:
- print 'Not overwriting modified file %s' % dst_file
+ print('Not overwriting modified file %s' % dst_file)
else:
with codecs.open(dst_path, 'w', 'utf8') as f:
f.write(sample)
count += 1
sample_attrib_list.append('%s: %s' % (dst_file, attrib))
- print 'Created %d samples' % count
+ print('Created %d samples' % count)
# Some existing samples that we don't overwrite are not in
# bcp_to_code_attrib_sample, so they're not listed. Readers of the
@@ -685,7 +685,7 @@
# prefix of this sample commit message indicates that these were
# tool-generated
- print 'Updated by tool - sample files %sfrom %s as of %s.' % (dst, src, date)
+ print('Updated by tool - sample files %sfrom %s as of %s.' % (dst, src, date))
def get_scripts(text):
@@ -759,7 +759,7 @@
if required and required - scripts:
required_name = ', '.join(sorted([s for s in required]))
scripts_name = ', '.join(sorted([s for s in scripts]))
- print '%s requires %s but contains only %s' % (filename, required_name, scripts_name)
+ print('%s requires %s but contains only %s' % (filename, required_name, scripts_name))
errors += 1
else:
remainder = scripts
@@ -771,9 +771,9 @@
allowed_name = '<none>' if not allowed else ', '.join(
sorted([s for s in allowed]))
scripts_name = ', '.join(sorted([s for s in scripts]))
- print '%s allows %s but contains %s' % (filename, allowed_name, scripts_name)
+ print('%s allows %s but contains %s' % (filename, allowed_name, scripts_name))
errors += 1
- print 'Found %d errors in %d files tested.' % (errors, tested)
+ print('Found %d errors in %d files tested.' % (errors, tested))
def compare_samples(base_dir, trg_dir, trg_to_base_name=lambda x: x, opts=None):
@@ -782,15 +782,15 @@
file name to use in the comparisons."""
if not os.path.isdir(base_dir):
- print 'Original sample dir \'%s\' does not exist' % base_dir
+ print('Original sample dir \'%s\' does not exist' % base_dir)
return
if not os.path.isdir(trg_dir):
- print 'New sample dir \'%s\' does not exist' % trg_dir
+ print('New sample dir \'%s\' does not exist' % trg_dir)
return
- print 'Base (current) dir: %s' % base_dir
- print 'Target (new) dir: %s' % trg_dir
- print '[a/b] means "a" in base is replaced with "b" in target'
+ print('Base (current) dir: %s' % base_dir)
+ print('Target (new) dir: %s' % trg_dir)
+ print('[a/b] means "a" in base is replaced with "b" in target')
show_missing = opts and 'missing' in opts
show_diffs = opts and 'diffs' in opts
@@ -807,7 +807,7 @@
base_path = os.path.join(base_dir, base_name)
if not os.path.exists(base_path):
if show_missing:
- print 'base does not exist: %s' % base_name
+ print('base does not exist: %s' % base_name)
continue
base_text = None
@@ -817,13 +817,13 @@
with codecs.open(trg_path, 'r', 'utf8') as f:
trg_text = f.read()
if not base_text:
- print 'base text (%s) is empty' % k
+ print('base text (%s) is empty' % k)
continue
if not trg_text:
- print 'target text is empty: %s' % trg_path
+ print('target text is empty: %s' % trg_path)
continue
if base_text.find(trg_text) == -1:
- print 'target (%s) text not in base (%s)' % (base_name, trg_name)
+ print('target (%s) text not in base (%s)' % (base_name, trg_name))
if show_diffs:
# In scripts that use space for word break it might be better to compare
# word by word, but this suffices.
@@ -838,13 +838,13 @@
lines.append('[/%s]' % trg_text[j1:j2])
else:
lines.append('[%s/%s]' % (base_text[i1:i2], trg_text[j1:j2]))
- print ''.join(lines)
+ print(''.join(lines))
def update_repo(repo_samples, new_samples):
# Verify directory is clean.
if not tool_utils.git_is_clean(new_samples):
- print 'Please fix.'
+ print('Please fix.')
return
# Copy samples into git repo
@@ -858,7 +858,7 @@
tool_utils.git_add_all(new_samples)
# Sample commit message.
- print 'Update UDHR sample data.'
+ print('Update UDHR sample data.')
def main():
@@ -914,7 +914,7 @@
if not (args.fetch or args.update_udhr or args.update_sample or args.mapping
or args.base_sample_dir or args.test_script):
- print 'nothing to do.'
+ print('nothing to do.')
return
def fix_noto_prefix(argname):
@@ -963,7 +963,7 @@
if args.test_script:
test_sample_scripts(args.sample_dir)
except ValueError as e:
- print 'Error:', e
+ print('Error:', e)
if __name__ == '__main__':
main()
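
Note (not part of the upstream patch): the hunks above apply one repeated change, rewriting Python 2 print statements as Python 3 print() calls. A bulk rewrite of this shape can usually be reproduced with the standard-library lib2to3 "print" fixer; the sketch below is a hypothetical illustration, not how this patch was actually produced, and assumes lib2to3 is still available (it was deprecated in Python 3.9 and removed in 3.13). The directory name "nototools" stands in for the patched source tree.

    from lib2to3.main import main as run_2to3

    # Rewrite print statements in place; CLI equivalent: 2to3 -f print -w nototools
    run_2to3("lib2to3.fixes", ["-f", "print", "-w", "nototools"])

    # A patch like this one could then be regenerated against a pristine copy:
    #   diff -ru nototools.orig nototools > nototools-python3.patch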