LICENSE

MIT License

Copyright (c) 2021 Taneli Hukkinen

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

__init__.py

"""A lil' TOML parser."""

__all__ = ("loads", "load", "TOMLDecodeError")
__version__ = "1.2.0"  # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT

from ._parser import TOMLDecodeError, load, loads
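For orientation, the public surface exported above is just `loads`, `load` and `TOMLDecodeError`. The minimal usage sketch below is not part of the distribution; it assumes this vendored copy is importable as `tomli` (the package name the type-checking import in `_re.py` below also uses), and the file name is purely illustrative.

import tomli

data = tomli.loads('title = "example"\n[owner]\nname = "Tom"\n')
# -> {'title': 'example', 'owner': {'name': 'Tom'}}

with open("config.toml", "rb") as f:  # binary mode avoids the DeprecationWarning in load()
    config = tomli.load(f)

try:
    tomli.loads("key = ?")
except tomli.TOMLDecodeError as err:
    print(err)  # "Invalid value (at line 1, column 7)"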

_parser.py

import string
import warnings
from types import MappingProxyType
from typing import IO, Any, Callable, Dict, FrozenSet, Iterable, NamedTuple, Optional, Tuple

from ._re import (
    RE_DATETIME,
    RE_LOCALTIME,
    RE_NUMBER,
    match_to_datetime,
    match_to_localtime,
    match_to_number,
)

ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))

# Neither of these sets include quotation mark or backslash. They are
# currently handled as separate cases in the parser functions.
ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n\r")

ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ASCII_CTRL - frozenset("\t\n")

ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS

TOML_WS = frozenset(" \t")
TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
HEXDIGIT_CHARS = frozenset(string.hexdigits)

BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
    {
        "\\b": "\u0008",  # backspace
        "\\t": "\u0009",  # tab
        "\\n": "\u000A",  # linefeed
        "\\f": "\u000C",  # form feed
        "\\r": "\u000D",  # carriage return
        '\\"': "\u0022",  # quote
        "\\\\": "\u005C",  # backslash
    }
)

# Type annotations
ParseFloat = Callable[[str], Any]
Key = Tuple[str, ...]
Pos = int


class TOMLDecodeError(ValueError):
    """An error raised if a document is not valid TOML."""


def load(fp: IO, *, parse_float: ParseFloat = float) -> Dict[str, Any]:
    """Parse TOML from a file object."""
    s = fp.read()
    if isinstance(s, bytes):
        s = s.decode()
    else:
        warnings.warn(
            "Text file object support is deprecated in favor of binary file objects."
            ' Use `open("foo.toml", "rb")` to open the file in binary mode.',
            DeprecationWarning,
        )
    return loads(s, parse_float=parse_float)


def loads(s: str, *, parse_float: ParseFloat = float) -> Dict[str, Any]:  # noqa: C901
    """Parse TOML from a string."""

    # The spec allows converting "\r\n" to "\n", even in string
    # literals. Let's do so to simplify parsing.
    src = s.replace("\r\n", "\n")
    pos = 0
    out = Output(NestedDict(), Flags())
    header: Key = ()

    # Parse one statement at a time
    # (typically means one line in TOML source)
    while True:
        # 1. Skip line leading whitespace
        pos = skip_chars(src, pos, TOML_WS)

        # 2. Parse rules. Expect one of the following:
        #    - end of file
        #    - end of line
        #    - comment
        #    - key/value pair
        #    - append dict to list (and move to its namespace)
        #    - create dict (and move to its namespace)
        #    Skip trailing whitespace when applicable.
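        #    For example, the three-line document
        #        # server settings
        #        [server]
        #        port = 8080
        #    is consumed as: skip_comment for the comment line, create_dict_rule
        #    for "[server]", and key_value_rule for the port assignment.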
        try:
            char = src[pos]
        except IndexError:
            break
        if char == "\n":
            pos += 1
            continue
        if char in KEY_INITIAL_CHARS:
            pos = key_value_rule(src, pos, out, header, parse_float)
            pos = skip_chars(src, pos, TOML_WS)
        elif char == "[":
            try:
                second_char: Optional[str] = src[pos + 1]
            except IndexError:
                second_char = None
            if second_char == "[":
                pos, header = create_list_rule(src, pos, out)
            else:
                pos, header = create_dict_rule(src, pos, out)
            pos = skip_chars(src, pos, TOML_WS)
        elif char != "#":
            raise suffixed_err(src, pos, "Invalid statement")

        # 3. Skip comment
        pos = skip_comment(src, pos)

        # 4. Expect end of line or end of file
        try:
            char = src[pos]
        except IndexError:
            break
        if char != "\n":
            raise suffixed_err(src, pos, "Expected newline or end of document after a statement")
        pos += 1

    return out.data.dict


class Flags:
    """Flags that map to parsed keys/namespaces."""

    # Marks an immutable namespace (inline array or inline table).
    FROZEN = 0
    # Marks a nest that has been explicitly created and can no longer
    # be opened using the "[table]" syntax.
    EXPLICIT_NEST = 1

    def __init__(self) -> None:
        self._flags: Dict[str, dict] = {}

    def unset_all(self, key: Key) -> None:
        cont = self._flags
        for k in key[:-1]:
            if k not in cont:
                return
            cont = cont[k]["nested"]
        cont.pop(key[-1], None)

    def set_for_relative_key(self, head_key: Key, rel_key: Key, flag: int) -> None:
        cont = self._flags
        for k in head_key:
            if k not in cont:
                cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
            cont = cont[k]["nested"]
        for k in rel_key:
            if k in cont:
                cont[k]["flags"].add(flag)
            else:
                cont[k] = {"flags": {flag}, "recursive_flags": set(), "nested": {}}
            cont = cont[k]["nested"]

    def set(self, key: Key, flag: int, *, recursive: bool) -> None:  # noqa: A003
        cont = self._flags
        key_parent, key_stem = key[:-1], key[-1]
        for k in key_parent:
            if k not in cont:
                cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
            cont = cont[k]["nested"]
        if key_stem not in cont:
            cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
        cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)

    def is_(self, key: Key, flag: int) -> bool:
        if not key:
            return False  # document root has no flags
        cont = self._flags
        for k in key[:-1]:
            if k not in cont:
                return False
            inner_cont = cont[k]
            if flag in inner_cont["recursive_flags"]:
                return True
            cont = inner_cont["nested"]
        key_stem = key[-1]
        if key_stem in cont:
            cont = cont[key_stem]
            return flag in cont["flags"] or flag in cont["recursive_flags"]
        return False


class NestedDict:
    def __init__(self) -> None:
        # The parsed content of the TOML document
        self.dict: Dict[str, Any] = {}

    def get_or_create_nest(
        self,
        key: Key,
        *,
        access_lists: bool = True,
    ) -> dict:
        cont: Any = self.dict
        for k in key:
            if k not in cont:
                cont[k] = {}
            cont = cont[k]
            if access_lists and isinstance(cont, list):
                cont = cont[-1]
            if not isinstance(cont, dict):
                raise KeyError("There is no nest behind this key")
        return cont

    def append_nest_to_list(self, key: Key) -> None:
        cont = self.get_or_create_nest(key[:-1])
        last_key = key[-1]
        if last_key in cont:
            list_ = cont[last_key]
            if not isinstance(list_, list):
                raise KeyError("An object other than list found behind this key")
            list_.append({})
        else:
            cont[last_key] = [{}]


class Output(NamedTuple):
    data: NestedDict
    flags: Flags


def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
    try:
        while src[pos] in chars:
            pos += 1
    except IndexError:
        pass
    return pos


def skip_until(
    src: str,
    pos: Pos,
    expect: str,
    *,
    error_on: FrozenSet[str],
    error_on_eof: bool,
) -> Pos:
    try:
        new_pos = src.index(expect, pos)
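        # str.index raises ValueError when `expect` is absent; the except branch
        # below then treats the end of the document as the stopping point.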
    except ValueError:
        new_pos = len(src)
        if error_on_eof:
            raise suffixed_err(src, new_pos, f'Expected "{expect!r}"')

    if not error_on.isdisjoint(src[pos:new_pos]):
        while src[pos] not in error_on:
            pos += 1
        raise suffixed_err(src, pos, f'Found invalid character "{src[pos]!r}"')
    return new_pos


def skip_comment(src: str, pos: Pos) -> Pos:
    try:
        char: Optional[str] = src[pos]
    except IndexError:
        char = None
    if char == "#":
        return skip_until(src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False)
    return pos


def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
    while True:
        pos_before_skip = pos
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        pos = skip_comment(src, pos)
        if pos == pos_before_skip:
            return pos


def create_dict_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
    pos += 1  # Skip "["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Can not declare {key} twice")
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.get_or_create_nest(key)
    except KeyError:
        raise suffixed_err(src, pos, "Can not overwrite a value")

    if not src.startswith("]", pos):
        raise suffixed_err(src, pos, 'Expected "]" at the end of a table declaration')
    return pos + 1, key


def create_list_rule(src: str, pos: Pos, out: Output) -> Tuple[Pos, Key]:
    pos += 2  # Skip "[["
    pos = skip_chars(src, pos, TOML_WS)
    pos, key = parse_key(src, pos)

    if out.flags.is_(key, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
    # Free the namespace now that it points to another empty list item...
    out.flags.unset_all(key)
    # ...but this key precisely is still prohibited from table declaration
    out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
    try:
        out.data.append_nest_to_list(key)
    except KeyError:
        raise suffixed_err(src, pos, "Can not overwrite a value")

    if not src.startswith("]]", pos):
        raise suffixed_err(src, pos, 'Expected "]]" at the end of an array declaration')
    return pos + 2, key


def key_value_rule(src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat) -> Pos:
    pos, key, value = parse_key_value_pair(src, pos, parse_float)
    key_parent, key_stem = key[:-1], key[-1]
    abs_key_parent = header + key_parent

    if out.flags.is_(abs_key_parent, Flags.FROZEN):
        raise suffixed_err(src, pos, f"Can not mutate immutable namespace {abs_key_parent}")
    # Containers in the relative path can't be opened with the table syntax after this
    out.flags.set_for_relative_key(header, key, Flags.EXPLICIT_NEST)
    try:
        nest = out.data.get_or_create_nest(abs_key_parent)
    except KeyError:
        raise suffixed_err(src, pos, "Can not overwrite a value")
    if key_stem in nest:
        raise suffixed_err(src, pos, "Can not overwrite a value")
    # Mark inline table and array namespaces recursively immutable
    if isinstance(value, (dict, list)):
        out.flags.set(header + key, Flags.FROZEN, recursive=True)
    nest[key_stem] = value
    return pos


def parse_key_value_pair(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, Key, Any]:
    pos, key = parse_key(src, pos)
    try:
        char: Optional[str] = src[pos]
    except IndexError:
        char = None
    if char != "=":
        raise suffixed_err(src, pos, 'Expected "=" after a key in a key/value pair')
    pos += 1
    pos = skip_chars(src, pos, TOML_WS)
    pos, value = parse_value(src, pos, parse_float)
    return pos, key, value


def parse_key(src: str, pos: Pos) -> Tuple[Pos, Key]:
    pos, key_part = parse_key_part(src, pos)
    key: Key = (key_part,)
    pos = skip_chars(src, pos, TOML_WS)
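    # For example, the dotted key `fruit.apple."seeds.count"` is returned as the
    # tuple ("fruit", "apple", "seeds.count"); dots inside quoted parts are kept.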
    while True:
        try:
            char: Optional[str] = src[pos]
        except IndexError:
            char = None
        if char != ".":
            return pos, key
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)
        pos, key_part = parse_key_part(src, pos)
        key += (key_part,)
        pos = skip_chars(src, pos, TOML_WS)


def parse_key_part(src: str, pos: Pos) -> Tuple[Pos, str]:
    try:
        char: Optional[str] = src[pos]
    except IndexError:
        char = None
    if char in BARE_KEY_CHARS:
        start_pos = pos
        pos = skip_chars(src, pos, BARE_KEY_CHARS)
        return pos, src[start_pos:pos]
    if char == "'":
        return parse_literal_str(src, pos)
    if char == '"':
        return parse_one_line_basic_str(src, pos)
    raise suffixed_err(src, pos, "Invalid initial character for a key part")


def parse_one_line_basic_str(src: str, pos: Pos) -> Tuple[Pos, str]:
    pos += 1
    return parse_basic_str(src, pos, multiline=False)


def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, list]:
    pos += 1
    array: list = []

    pos = skip_comments_and_array_ws(src, pos)
    if src.startswith("]", pos):
        return pos + 1, array
    while True:
        pos, val = parse_value(src, pos, parse_float)
        array.append(val)
        pos = skip_comments_and_array_ws(src, pos)

        c = src[pos : pos + 1]
        if c == "]":
            return pos + 1, array
        if c != ",":
            raise suffixed_err(src, pos, "Unclosed array")
        pos += 1

        pos = skip_comments_and_array_ws(src, pos)
        if src.startswith("]", pos):
            return pos + 1, array


def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, dict]:
    pos += 1
    nested_dict = NestedDict()
    flags = Flags()

    pos = skip_chars(src, pos, TOML_WS)
    if src.startswith("}", pos):
        return pos + 1, nested_dict.dict
    while True:
        pos, key, value = parse_key_value_pair(src, pos, parse_float)
        key_parent, key_stem = key[:-1], key[-1]
        if flags.is_(key, Flags.FROZEN):
            raise suffixed_err(src, pos, f"Can not mutate immutable namespace {key}")
        try:
            nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
        except KeyError:
            raise suffixed_err(src, pos, "Can not overwrite a value")
        if key_stem in nest:
            raise suffixed_err(src, pos, f'Duplicate inline table key "{key_stem}"')
        nest[key_stem] = value
        pos = skip_chars(src, pos, TOML_WS)
        c = src[pos : pos + 1]
        if c == "}":
            return pos + 1, nested_dict.dict
        if c != ",":
            raise suffixed_err(src, pos, "Unclosed inline table")
        if isinstance(value, (dict, list)):
            flags.set(key, Flags.FROZEN, recursive=True)
        pos += 1
        pos = skip_chars(src, pos, TOML_WS)


def parse_basic_str_escape(  # noqa: C901
    src: str, pos: Pos, *, multiline: bool = False
) -> Tuple[Pos, str]:
    escape_id = src[pos : pos + 2]
    pos += 2
    if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
        # Skip whitespace until next non-whitespace character or end of
        # the doc. Error if non-whitespace is found before newline.
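        # For example, `"""one \` followed by a newline and an indented `two"""`
        # parses as "one two": the backslash consumes the newline and the leading
        # whitespace of the following line.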
        if escape_id != "\\\n":
            pos = skip_chars(src, pos, TOML_WS)
            try:
                char = src[pos]
            except IndexError:
                return pos, ""
            if char != "\n":
                raise suffixed_err(src, pos, 'Unescaped "\\" in a string')
            pos += 1
        pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
        return pos, ""
    if escape_id == "\\u":
        return parse_hex_char(src, pos, 4)
    if escape_id == "\\U":
        return parse_hex_char(src, pos, 8)
    try:
        return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
    except KeyError:
        if len(escape_id) != 2:
            raise suffixed_err(src, pos, "Unterminated string")
        raise suffixed_err(src, pos, 'Unescaped "\\" in a string')


def parse_basic_str_escape_multiline(src: str, pos: Pos) -> Tuple[Pos, str]:
    return parse_basic_str_escape(src, pos, multiline=True)


def parse_hex_char(src: str, pos: Pos, hex_len: int) -> Tuple[Pos, str]:
    hex_str = src[pos : pos + hex_len]
    if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
        raise suffixed_err(src, pos, "Invalid hex value")
    pos += hex_len
    hex_int = int(hex_str, 16)
    if not is_unicode_scalar_value(hex_int):
        raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
    return pos, chr(hex_int)


def parse_literal_str(src: str, pos: Pos) -> Tuple[Pos, str]:
    pos += 1  # Skip starting apostrophe
    start_pos = pos
    pos = skip_until(src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True)
    return pos + 1, src[start_pos:pos]  # Skip ending apostrophe


def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> Tuple[Pos, str]:
    pos += 3
    if src.startswith("\n", pos):
        pos += 1

    if literal:
        delim = "'"
        end_pos = skip_until(
            src,
            pos,
            "'''",
            error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
            error_on_eof=True,
        )
        result = src[pos:end_pos]
        pos = end_pos + 3
    else:
        delim = '"'
        pos, result = parse_basic_str(src, pos, multiline=True)

    # Add at maximum two extra apostrophes/quotes if the end sequence
    # is 4 or 5 chars long instead of just 3.
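    # For example, `a = '''x'''''` closes the literal at the first three
    # apostrophes and keeps the remaining two, so the value is "x''".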
    if not src.startswith(delim, pos):
        return pos, result
    pos += 1
    if not src.startswith(delim, pos):
        return pos, result + delim
    pos += 1
    return pos, result + (delim * 2)


def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> Tuple[Pos, str]:
    if multiline:
        error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape_multiline
    else:
        error_on = ILLEGAL_BASIC_STR_CHARS
        parse_escapes = parse_basic_str_escape
    result = ""
    start_pos = pos
    while True:
        try:
            char = src[pos]
        except IndexError:
            raise suffixed_err(src, pos, "Unterminated string")
        if char == '"':
            if not multiline:
                return pos + 1, result + src[start_pos:pos]
            if src.startswith('"""', pos):
                return pos + 3, result + src[start_pos:pos]
            pos += 1
            continue
        if char == "\\":
            result += src[start_pos:pos]
            pos, parsed_escape = parse_escapes(src, pos)
            result += parsed_escape
            start_pos = pos
            continue
        if char in error_on:
            raise suffixed_err(src, pos, f'Illegal character "{char!r}"')
        pos += 1


def parse_value(src: str, pos: Pos, parse_float: ParseFloat) -> Tuple[Pos, Any]:  # noqa: C901
    try:
        char: Optional[str] = src[pos]
    except IndexError:
        char = None

    # Basic strings
    if char == '"':
        if src.startswith('"""', pos):
            return parse_multiline_str(src, pos, literal=False)
        return parse_one_line_basic_str(src, pos)

    # Literal strings
    if char == "'":
        if src.startswith("'''", pos):
            return parse_multiline_str(src, pos, literal=True)
        return parse_literal_str(src, pos)

    # Booleans
    if char == "t":
        if src.startswith("true", pos):
            return pos + 4, True
    if char == "f":
        if src.startswith("false", pos):
            return pos + 5, False

    # Dates and times
    datetime_match = RE_DATETIME.match(src, pos)
    if datetime_match:
        try:
            datetime_obj = match_to_datetime(datetime_match)
        except ValueError:
            raise suffixed_err(src, pos, "Invalid date or datetime")
        return datetime_match.end(), datetime_obj
    localtime_match = RE_LOCALTIME.match(src, pos)
    if localtime_match:
        return localtime_match.end(), match_to_localtime(localtime_match)

    # Integers and "normal" floats.
    # The regex will greedily match any type starting with a decimal
    # char, so needs to be located after handling of dates and times.
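    # For example, for the input `1979-05-27` the number regex would happily match
    # the leading `1979`, so the datetime/localtime branches above must run first;
    # reaching this point means the token is a plain number such as `42`,
    # `0xDEADBEEF` or `6.626e-34`.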
    number_match = RE_NUMBER.match(src, pos)
    if number_match:
        return number_match.end(), match_to_number(number_match, parse_float)

    # Arrays
    if char == "[":
        return parse_array(src, pos, parse_float)

    # Inline tables
    if char == "{":
        return parse_inline_table(src, pos, parse_float)

    # Special floats
    first_three = src[pos : pos + 3]
    if first_three in {"inf", "nan"}:
        return pos + 3, parse_float(first_three)
    first_four = src[pos : pos + 4]
    if first_four in {"-inf", "+inf", "-nan", "+nan"}:
        return pos + 4, parse_float(first_four)

    raise suffixed_err(src, pos, "Invalid value")


def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
    """Return a `TOMLDecodeError` where error message is suffixed with coordinates in source."""

    def coord_repr(src: str, pos: Pos) -> str:
        if pos >= len(src):
            return "end of document"
        line = src.count("\n", 0, pos) + 1
        if line == 1:
            column = pos + 1
        else:
            column = pos - src.rindex("\n", 0, pos)
        return f"line {line}, column {column}"

    return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")


def is_unicode_scalar_value(codepoint: int) -> bool:
    return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)

_re.py

import re
from datetime import date, datetime, time, timedelta, timezone, tzinfo
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Optional, Union

if TYPE_CHECKING:
    from tomli._parser import ParseFloat

# E.g.
# - 00:32:00.999999
# - 00:32:00
_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"

RE_NUMBER = re.compile(
    r"""
0
(?:
    x[0-9A-Fa-f](?:_?[0-9A-Fa-f])*   # hex
    |
    b[01](?:_?[01])*                 # bin
    |
    o[0-7](?:_?[0-7])*               # oct
)
|
[+-]?(?:0|[1-9](?:_?[0-9])*)         # dec, integer part
(?P<floatpart>
    (?:\.[0-9](?:_?[0-9])*)?         # optional fractional part
    (?:[eE][+-]?[0-9](?:_?[0-9])*)?  # optional exponent part
)
""",
    flags=re.VERBOSE,
)
RE_LOCALTIME = re.compile(_TIME_RE_STR)
RE_DATETIME = re.compile(
    rf"""
([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])  # date, e.g. 1988-10-27
(?:
    [T ]
    {_TIME_RE_STR}
    (?:(Z)|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))?  # optional time offset
)?
""",
    flags=re.VERBOSE,
)


def match_to_datetime(match: "re.Match") -> Union[datetime, date]:
    """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.

    Raises ValueError if the match does not correspond to a valid date or datetime.
    """
    (
        year_str,
        month_str,
        day_str,
        hour_str,
        minute_str,
        sec_str,
        micros_str,
        zulu_time,
        offset_sign_str,
        offset_hour_str,
        offset_minute_str,
    ) = match.groups()
    year, month, day = int(year_str), int(month_str), int(day_str)
    if hour_str is None:
        return date(year, month, day)
    hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    if offset_sign_str:
        tz: Optional[tzinfo] = cached_tz(offset_hour_str, offset_minute_str, offset_sign_str)
    elif zulu_time:
        tz = timezone.utc
    else:  # local date-time
        tz = None
    return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)


@lru_cache(maxsize=None)
def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
    sign = 1 if sign_str == "+" else -1
    return timezone(
        timedelta(
            hours=sign * int(hour_str),
            minutes=sign * int(minute_str),
        )
    )


def match_to_localtime(match: "re.Match") -> time:
    hour_str, minute_str, sec_str, micros_str = match.groups()
    micros = int(micros_str.ljust(6, "0")) if micros_str else 0
    return time(int(hour_str), int(minute_str), int(sec_str), micros)


def match_to_number(match: "re.Match", parse_float: "ParseFloat") -> Any:
    if match.group("floatpart"):
        return parse_float(match.group())
    return int(match.group(), 0)

py.typed

# Marker file for PEP 561