__init__.py

# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "24.1"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD-2-Clause or Apache-2.0"
__copyright__ = "2014 %s" % __author__
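The metadata attributes defined in __init__.py above can be read at runtime. A minimal sketch, assuming the packaging distribution is importable (the copy in this listing is the one vendored inside pip, importable as pip._vendor.packaging):

    from packaging import __title__, __summary__, __version__

    # Prints: packaging 24.1: Core utilities for Python packages
    print(f"{__title__} {__version__}: {__summary__}")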
__pycache__/__init__.cpython-311.pyc
[compiled CPython 3.11 bytecode for __init__.py above; binary content omitted]

__pycache__/_manylinux.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted. Legible strings show this module detects glibc-based "manylinux" compatibility and yields manylinux platform tags.]

__pycache__/_musllinux.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted. Legible strings show this module detects the running musl libc version and yields musllinux platform tags.]

__pycache__/_structures.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted]

__pycache__/markers.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted. Legible strings show this module implements PEP 508 environment markers (Marker, InvalidMarker, default_environment).]

__pycache__/requirements.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted. Legible strings show this module parses requirement strings into name, extras, specifier, URL, and marker (Requirement, InvalidRequirement).]

__pycache__/specifiers.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted. Legible strings show this module implements version specifiers (Specifier, SpecifierSet, InvalidSpecifier), including containment checks and prerelease-aware filtering.]
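The SpecifierSet doctest fragments that remain legible in this entry show the intended use. A minimal sketch, assuming the normally installed packaging distribution (the vendored copy is importable as pip._vendor.packaging.specifiers):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    spec = SpecifierSet(">=1.0.0,!=1.0.1")

    print("1.2.3" in spec)                    # True
    print(spec.contains(Version("1.0.1")))    # False
    # Prereleases are only accepted when nothing else matches (PEP 440 rule).
    print(list(spec.filter(["1.0.1", "1.2.3", "1.3.0a1"])))   # ['1.2.3']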
__pycache__/tags.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted. Legible strings show this module computes wheel compatibility tags (Tag, parse_tag, cpython_tags, generic_tags, compatible_tags, and platform tags for macOS, iOS, and Linux).]

__pycache__/utils.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted. Legible strings show this module provides canonicalize_name, canonicalize_version, parse_wheel_filename, and parse_sdist_filename.]

__pycache__/version.cpython-311.pyc
[compiled CPython 3.11 bytecode; binary content omitted except for the legible strings quoted below. The module implements PEP 440 version handling: parse, Version, InvalidVersion.]
[The module's embedded VERSION_PATTERN constant survives legibly:]

    v?
    (?:
        (?:(?P<epoch>[0-9]+)!)?                           # epoch
        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P<pre>                                          # pre-release
            [-_\.]?
            (?P<pre_l>alpha|a|beta|b|preview|pre|c|rc)
            [-_\.]?
            (?P<pre_n>[0-9]+)?
        )?
        (?P<post>                                         # post release
            (?:-(?P<post_n1>[0-9]+))
            |
            (?:
                [-_\.]?
                (?P<post_l>post|rev|r)
                [-_\.]?
                (?P<post_n2>[0-9]+)?
            )
        )?
        (?P<dev>                                          # dev release
            [-_\.]?
            (?P<dev_l>dev)
            [-_\.]?
            (?P<dev_n>[0-9]+)?
        )?
    )
    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
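VERSION_PATTERN is part of the package's public API, so the pattern above can be compiled directly. A minimal sketch, assuming it is anchored and compiled with re.VERBOSE | re.IGNORECASE the same way the Version class in this entry does:

    import re
    from packaging.version import VERSION_PATTERN

    matcher = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)

    m = matcher.match("1.2.3rc1+ubuntu.1")
    print(m.group("release"))                  # 1.2.3
    print(m.group("pre_l"), m.group("pre_n"))  # rc 1
    print(m.group("local"))                    # ubuntu.1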
[The docstring of the Version class is also legible in this entry:]

    This class abstracts handling of a project's versions.

    A :class:`Version` instance is comparison aware and can be compared and
    sorted using the standard Python interfaces.

    >>> v1 = Version("1.0a5")
    >>> v2 = Version("1.0")
    >>> v1
    <Version('1.0a5')>
    >>> v2
    <Version('1.0')>
    >>> v1 < v2
    True
    >>> v1 == v2
    False
    >>> v1 > v2
    False
    >>> v1 >= v2
    False
    >>> v1 <= v2
    True
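A minimal sketch of the behaviour the docstring above describes, together with the parsed components exposed as attributes (again assuming the normally installed packaging distribution):

    from packaging.version import Version, InvalidVersion

    v = Version("1.2.3rc1")
    print(v.release, v.pre, v.is_prerelease)   # (1, 2, 3) ('rc', 1) True
    print(Version("1.0a5") < Version("1.0"))   # True: pre-releases sort before the final release

    try:
        Version("not a version")
    except InvalidVersion as exc:
        print(exc)                             # Invalid version: 'not a version'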
    z^\s*z\s*$CmpKeyr.r&r'r(Nonec|j|}|std|dt|dr"t|dndt
d|ddDt|d|d	t|d
|dp|dt|d
|dt|d|_
t|j
j|j
j
|j
j|j
j|j
j|j
j|_dS)aCInitialize a Version object.

        :param version:
            The string representation of a version which will be parsed and normalized
            before use.
        :raises InvalidVersion:
            If the ``version`` does not conform to PEP 440 in any way then this
            exception will be raised.
        zInvalid version: ''rrc34K|]}t|VdSr0)r.0is  r%	z#Version.__init__..s(LLQ#a&&LLLLLLr$r.pre_lpre_npost_lpost_n1post_n2dev_ldev_nr)rrrrrrN)_regexsearchrrgrouprtuplesplit_parse_letter_version_parse_local_version_version_cmpkeyrrrrrrr.)r3r&matchs   r%__init__zVersion.__init__s""7++	B !@g!@!@!@AAA!/4{{7/C/CJ#ekk'**+++LL%++i*@*@*F*Fs*K*KLLLLL%ekk'&:&:EKK>> Version('1.0.0')
        
        z
r#r2s r%__repr__zVersion.__repr__s&D%%%%r$cBg}|jdkr||jd|dd|jD|j7|dd|jD|j|d|j|j|d	|j|j|d
|jd|S)zA string representation of the version that can be rounded-tripped.

        >>> str(Version("1.0a5"))
        '1.0a5'
        r!rUc34K|]}t|VdSr0r'rRxs  r%rTz"Version.__str__..(;;c!ff;;;;;;r$Nc34K|]}t|VdSr0rmrns  r%rTz"Version.__str__..s( : :AQ : : : : : :r$z.postz.dev+)rappendjoinrrrrrr3partss  r%__str__zVersion.__str__s":??LLDJ)))***	SXX;;dl;;;;;<<<8LL : : : : :::;;;9 LL,,,---8LL***+++:!LL)TZ))***wwu~~r$rc|jjS)zThe epoch of the version.

        >>> Version("2.0.0").epoch
        0
        >>> Version("1!2.0.0").epoch
        1
        )rdrr2s r%rz
Version.epochs}""r$rc|jjS)adThe components of the "release" segment of the version.

        >>> Version("1.2.3").release
        (1, 2, 3)
        >>> Version("2.0.0").release
        (2, 0, 0)
        >>> Version("1!2.0.0.post0").release
        (2, 0, 0)

        Includes trailing zeroes but not the epoch or any pre-release / development /
        post-release suffixes.
        )rdrr2s r%rzVersion.releases}$$r$rc|jjS)aThe pre-release segment of the version.

        >>> print(Version("1.2.3").pre)
        None
        >>> Version("1.2.3a1").pre
        ('a', 1)
        >>> Version("1.2.3b1").pre
        ('b', 1)
        >>> Version("1.2.3rc1").pre
        ('rc', 1)
        )rdrr2s r%rzVersion.pre&s}  r$
int | NonecB|jjr|jjdndS)zThe post-release number of the version.

        >>> print(Version("1.2.3").post)
        None
        >>> Version("1.2.3.post1").post
        1
        r
N)rdrr2s r%rzVersion.post5s#)-
(:Dt}!!$$Dr$cB|jjr|jjdndS)zThe development number of the version.

        >>> print(Version("1.2.3").dev)
        None
        >>> Version("1.2.3.dev1").dev
        1
        r
N)rdrr2s r%rzVersion.dev@s#(,}'8Bt} ##dBr$
str | Nonecp|jjr)dd|jjDSdS)zThe local version segment of the version.

        >>> print(Version("1.2.3").local)
        None
        >>> Version("1.2.3+abc").local
        'abc'
        rUc34K|]}t|VdSr0rmrns  r%rTz Version.local..Us(@@qCFF@@@@@@r$N)rdrrur2s r%rz
Version.localKs<=	88@@DM,?@@@@@@4r$cTt|dddS)zThe public portion of the version.

        >>> Version("1.2.3").public
        '1.2.3'
        >>> Version("1.2.3+abc").public
        '1.2.3'
        >>> Version("1.2.3+abc.dev1").public
        '1.2.3'
        rsr
r)r'rar2s r%publiczVersion.publicYs#4yysA&&q))r$cg}|jdkr||jd|dd|jDd|S)a]The "base version" of the version.

        >>> Version("1.2.3").base_version
        '1.2.3'
        >>> Version("1.2.3+abc").base_version
        '1.2.3'
        >>> Version("1!1.2.3+abc.dev1").base_version
        '1!1.2.3'

        The "base version" is the public version of the project without any pre or post
        release markers.
        rrkrUc34K|]}t|VdSr0rmrns  r%rTz'Version.base_version..{rpr$rq)rrtrurrvs  r%base_versionzVersion.base_versionfst:??LLDJ)))***	SXX;;dl;;;;;<<<wwu~~r$r6c&|jdup|jduS)aTWhether this version is a pre-release.

        >>> Version("1.2.3").is_prerelease
        False
        >>> Version("1.2.3a1").is_prerelease
        True
        >>> Version("1.2.3b1").is_prerelease
        True
        >>> Version("1.2.3rc1").is_prerelease
        True
        >>> Version("1.2.3dev1").is_prerelease
        True
        N)rrr2s r%
is_prereleasezVersion.is_prereleasesxt#;txt';;r$c|jduS)zWhether this version is a post-release.

        >>> Version("1.2.3").is_postrelease
        False
        >>> Version("1.2.3.post1").is_postrelease
        True
        N)rr2s r%is_postreleasezVersion.is_postreleasesy$$r$c|jduS)zWhether this version is a development release.

        >>> Version("1.2.3").is_devrelease
        False
        >>> Version("1.2.3.dev1").is_devrelease
        True
        N)rr2s r%
is_devreleasezVersion.is_devreleasesxt##r$cPt|jdkr
|jdndS)zqThe first item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").major
        1
        r
rlenrr2s r%majorz
Version.majors(#&dl"3"3q"8"8t|Aa?r$cPt|jdkr
|jdndS)zThe second item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").minor
        2
        >>> Version("1").minor
        0
        r
rrr2s r%minorz
Version.minor(#&dl"3"3q"8"8t|Aa?r$cPt|jdkr
|jdndS)zThe third item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").micro
        3
        >>> Version("1").micro
        0
        rrrr2s r%microz
Version.microrr$N)r&r'r(rM)r(r'rJ)r(r)r(r)r(r|)r(r)r(r6)rr r!r+recompilerVERBOSE
IGNORECASEr]r"rgrirxpropertyrrrrrrrrrrrrrrr#r$r%rrse.RZ/1G;RZ"-=W
X
XFLLL$
$
$
$
L&&&&B###X#
%
%
%X
%!!!X!EEEXECCCXCX
*
*
*X
*X0<<<X< %%%X%$$$X$@@@X@@@@X@@@@X@@@r$letterrnumber str | bytes | SupportsInt | Nonerc|rH|d}|}|dkrd}n|dkrd}n
|dvrd}n|dvrd	}|t|fS|s|rd	}|t|fSdS)
Nralphaabetab)crpreviewrc)revrr)lowerr)rrs  r%rbrbs#>F
WFF
v

FF
.
.
.FF
|
#
#Fs6{{""#f#s6{{""4r$z[\._-]rrcl|1tdt|DSdS)zR
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    Nc3K|];}|s|nt|V.sU

!%=DJJLLLCII





r$)r`_local_version_separatorsra)rs r%rcrcsM


177>>




	
4r$rrrrrrrrLcPttttjdt|}||
|t
}n|t}n|}|t
}n|}|t}	n|}	|t
}
ntd|D}
|||||	|
fS)Nc|dkS)Nrr#)ros r%z_cmpkey..s
AFr$c3ZK|]&}t|tr|dfnt|fV'dS)rqN)r9rr
rQs  r%rTz_cmpkey../sP

IJz!S))DQGG0@!/D





r$)r`reversedlist	itertools	dropwhiler
r)rrrrrr_release_pre_post_dev_locals           r%reresi)*:*:HWrs#"""""				GGGGGGGGGGGGGGGGWWWWWWWWWWWW
C
C
C%S/3&'	,(#	b@b@b@b@b@lb@b@b@J	    F'BJy11				;6;6;6;6;6;6r$__pycache__/_elffile.cpython-311.pyc000064400000012544150044141310013222 0ustar00

]bgdZddlmZddlZddlZddlZddlmZGddeZ	Gddej
ZGd	d
ej
ZGddej
Z
Gd
dZdS)a;
ELF file parser.

This provides a class ``ELFFile`` that parses an ELF executable in a similar
interface to ``ZipFile``. Only the read interface is implemented.

Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
)annotationsN)IOceZdZdS)
ELFInvalidN)__name__
__module____qualname__o/builddir/build/BUILD/cloudlinux-venv-1.0.7/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_elffile.pyrrsDrrceZdZdZdZdS)EIClassN)rrr	C32C64r
rrrr
C
CCCrrceZdZdZdZdS)EIDatarrN)rrr	LsbMsbr
rrrrrrrc"eZdZdZdZdZdZdZdS)EMachine(>N)rrr	I386S390ArmX8664AArc64r
rrrr!s'D
D
CE
FFFrrc:eZdZdZddZdd
ZeddZd
S)ELFFilez.
    Representation of an ELF executable.
    f	IO[bytes]returnNonec
||_	|d}n"#tj$rt	dwxYwt|dd}|dkrt	d||d|_|d|_	ddd	d
d|j|jf\}|_|_	n.#t$r!t	d|jd
|jdwxYw	||\
}|_}}|_}|_
}|_|_dS#tj$r}t	d|d}~wwxYw)N16Bzunable to parse identificationsELFzinvalid magic: )zHHIIIIIHHHz	>IIIIIIIIr.)zHHIQQQIHHHz	>IIQQQQQQr/))rr)rr)rr)rrzunrecognized capacity (z) or encoding ()z/unable to parse machine and section information)_f_readstructerrorrbytescapacityencoding_p_fmt_p_idxKeyErrormachine_e_phoffflags_e_phentsize_e_phnum)selfr&identmagice_fmt_es       r__init__zELFFile.__init__.s	?JJu%%EE|	?	?	?=>>>	?eBQBi  J8u88999a
a
	
@???	//
}dm,/.+E4;			.$-..!]...
		W

5!!


!


|	W	W	WNOOUVV	Ws,>'B33+C"9DE,D<<Efmtstrtuple[int, ...]ctj||jtj|S)N)r3unpackr1readcalcsize)r@rGs  rr2z
ELFFile._read\s,}S$',,vs/C/C"D"DEEEr
str | Nonect|jD]}|j|j|j|zz	||j}n#tj	$rYYwxYw||j
ddkru|j||j
dtj|j
||j
ddcSdS)zH
        The path recorded in the ``PT_INTERP`` section header.
        rrrrN)ranger?r1seekr<r>r2r8r3r4r9osfsdecoderLstrip)r@indexdatas   rinterpreterzELFFile.interpreter_s
4=))		O		OEGLL):U)BBCCC
zz$+..<



DKN#q((GLLdk!n-...;tw||DQ,@AABBHHNNNNNtsAA/.A/N)r&r'r(r))rGrHr(rI)r(rN)rrr	__doc__rFr2propertyrXr
rrr%r%)so,W,W,W,W\FFFFXrr%)rY
__future__renumrSr3typingr
ValueErrorrIntEnumrrrr%r
rrr`s:#"""""				



								dl
T\
t|EEEEEEEEEEr__pycache__/_parser.cpython-311.pyc000064400000037657150044141310013124 0ustar00

]bg'dZddlmZddlZddlmZmZmZmZddl	m
Z
mZGddZGd	d
eZ
GddeZGd
deZee
efZeeeefZeeedfZeedeefZGddeZd2dZd3dZd4dZd5dZd6d!Zd6d"Zd7d#Zd7d$Zd8d%Zd9d&Zd9d'Z d:d(Z!d;d*Z"dd0Z%d?d1Z&dS)@zHandwritten parser of dependency specifiers.

The docstring for each __parse_* function contains EBNF-inspired grammar representing
the implementation.
)annotationsN)
NamedTupleSequenceTupleUnion)
DEFAULT_RULES	Tokenizerc.eZdZd
dZddZddZddZd	S)NodevaluestrreturnNonec||_dSNr
)selfr
s  n/builddir/build/BUILD/cloudlinux-venv-1.0.7/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_parser.py__init__z
Node.__init__s



c|jSrrrs r__str__zNode.__str__s
zrc(d|jjd|dS)N)	__class____name__rs r__repr__z
Node.__repr__s 74>*77d7777rctr)NotImplementedErrorrs r	serializezNode.serializes!!rN)r
rrrrr)r
__module____qualname__rrrr"rrrrsd8888""""""rrceZdZddZdS)Variablerrc t|Srrrs rr"zVariable.serialize4yyrNr#rr$r%r"r&rrr(r((rr(ceZdZddZdS)Valuerrcd|dS)N"r&rs rr"zValue.serialize#s4{{{rNr#r,r&rrr/r/"s(rr/ceZdZddZdS)Oprrc t|Srr*rs rr"zOp.serialize(r+rNr#r,r&rrr3r3'r-rr3
MarkerAtom
MarkerListcBeZdZUded<ded<ded<ded<ded<d	S)
ParsedRequirementrnameurl	list[str]extras	specifierzMarkerList | NonemarkerN)rr$r%__annotations__r&rrr8r82sE
IIIHHHNNNrr8sourcerrcHtt|tSN)rules)_parse_requirementr
r	r@s rparse_requirementrF=imDDDEEEr	tokenizerr
cT|d|dd}|j}|dt|}|dt	|\}}}|ddt|||||S)zI
    requirement = WS? IDENTIFIER WS? extras WS? requirement_details
    WS
IDENTIFIERz1package name at the start of dependency specifierexpectedENDzend of dependency specifier)consumeexpecttext
_parse_extras_parse_requirement_detailsr8)rH
name_tokenr9r<r:r=r>s       rrDrDAsd!!R"J?D
d
9
%
%F
d7	BBCF
U%BCCCT3	6BBBr"tuple[str, str, MarkerList | None]cXd}d}d}|dr||d|j}|ddj}|dd	
r|||fS|dd|dd	
r|||fSt
||d
}n]|j}t|}|d|dd	
r|||fSt
|||rdnd
}|||fS)z~
    requirement_details = AT URL (WS requirement_marker?)?
                        | specifier WS? (requirement_marker)?
    NATrJURLzURL after @rLrNTpeekzwhitespace after URLzURL and whitespace)
span_startafterversion specifierz#name and no valid version specifier)checkreadrOpositionrPrQ_parse_requirement_marker_parse_specifier)rHr=r:r>	url_startspecifier_starts      rrSrSVs~I
C
Ft"
$&	u}==B??5t?,,	,F++(>?????5t?,,	,F++*)3G


$,$Y//	$??5t?,,	,F++*&;##:




F##rr\intr]c|ds|d|d||t|}|d|S)z3
    requirement_marker = SEMICOLON marker WS?
    	SEMICOLONz!Expected end or semicolon (after ))r\rJ)r_raise_syntax_errorr`
_parse_markerrO)rHr\r]r>s    rrbrbs~??;''
$$8888!	%	
	
	
NN
9
%
%F
dMrr;c
|ddsgS|ddd5|dt|}|ddddn#1swxYwY|S)	zG
    extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
    LEFT_BRACKETTrZ
RIGHT_BRACKETr<aroundrJN)r_enclosing_tokensrO_parse_extras_list)rHr<s  rrRrRs??>?55			#	#
$

  
	$#I..$               Ms:A88A<?A<cg}|ds|S||j	|d|ddr|dn|dsn[||d|dd}||j|S)	z>
    extras_list = identifier (wsp* ',' wsp* identifier)*
    rKTrJrZz"Expected comma between extra namesCOMMAzextra name after commarL)r_appendr`rQrOrjrP)rHr<extra_tokens   rrrrrsF??<((

MM)..""'(((($??V&WW

k&'''(Mrc|ddd5|dt|}|ddddn#1swxYwY|S)zr
    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
              | WS? version_many WS?
    LEFT_PARENTHESISRIGHT_PARENTHESISr^rorJN)rqrO_parse_version_many)rHparsed_specifierss  rrcrcs

	#	#"
$

  
	$/	::$               s:AA#&A#c2d}|dr|j}||jz
}|ddr |d||jdz|d	dr|d
||j|d|dsnF||jz
}|d|d|S)
z@
    version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
    rW	SPECIFIERVERSION_PREFIX_TRAILTrZz6.* suffix can only be used with `==` or `!=` operatorsr)r\span_endVERSION_LOCAL_LABEL_TRAILz@Local version label can only be used with `==` or `!=` operatorsrJrt)r_rar`rQrjrO)rHr{r\s   rrzrzs@
//+
&
& '
Y^^--22??1?==	((H%"+a/
)



??6T?BB	((R%"+
)



	$w''	Y^^--22$'//+
&
& *rcHtt|tSrB)_parse_full_markerr
r	rEs rparse_markerrrGrcRt|}|dd|S)NrNzend of marker expressionrL)rkrP)rHretvals  rrrs.
9
%
%F
U%?@@@Mrct|g}|drT|}t|}||j|f|dT|S)z4
    marker = marker_atom (BOOLOP marker_atom)+
    BOOLOP)_parse_marker_atomr_r`extendrQ)rH
expressiontoken
expr_rights    rrkrks}%Y//0J
//(
#
#4  '	22
5:z2333//(
#
#4rcz|d|ddrj|ddd5|dt|}|ddddn#1swxYwYnt	|}|d|S)	zw
    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
                | WS? marker_item WS?
    rJrxTrZryzmarker expressionroN)rOr_rqrk_parse_marker_item)rHr>s  rrrs
d)55
/

'
'&(

	$	$

d###!.y!9!9Fd###	$	$	$	$	$	$	$	$	$	$	$	$	$	$	$$I..
dMs:BBB
MarkerItemc|dt|}|dt|}|dt|}|d|||fS)zG
    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
    rJ)rO_parse_marker_var_parse_marker_op)rHmarker_var_left	marker_opmarker_var_rights    rrr(sd'	22O
d ++I
d(33
dY(899r	MarkerVarcF|dr:t|jddS|dr&t|jS|ddS)z/
    marker_var = VARIABLE | QUOTED_STRING
    VARIABLE._
QUOTED_STRINGz+Expected a marker variable or quoted string)messageN)r_process_env_varr`rQreplaceprocess_python_strrjrHs rrr6sz""
y~~//4<=, >, ~=, ===, in, not in)r_r`r3rPrQrjrs rrrPst
$xx			
(@AAA(:;;;(||			
)..""'(((++
8

	
r)r@rrr8)rHr
rr8)rHr
rrU)rHr
r\rfr]rrr6)rHr
rr;)rHr
rr)r@rrr6)rHr
rr6)rHr
rr5)rHr
rr)rHr
rr)rrrr()rrrr/)rHr
rr3)'__doc__
__future__rrtypingrrrr
_tokenizerr	r
rr(r/r3rrr5rr6r8rFrDrSrbrRrrrcrzrrrkrrrrrrr&rrrs#"""""



55555555555500000000""""""""t
D


(E/"	
9b)+
,

:x55
6

eL*c9:
;

FFFFCCCC*0$0$0$0$f(&6"@FFFF				,::::



!!!!






r__pycache__/_tokenizer.cpython-311.pyc000064400000020570150044141310013624 0ustar00

]bgUddlmZddlZddlZddlmZddlmZmZddl	m
Z
eGddZGd	d
eZ
iddd
ddddddddddejdejdddddddd d!ejd"ejd#eje
je
jzejejzd$d%d&d'd(d)d*d+d,d-d.d/Zd0ed1<Gd2d3ZdS)4)annotationsN)	dataclass)IteratorNoReturn)	Specifierc.eZdZUded<ded<ded<dS)TokenstrnametextintpositionN)__name__
__module____qualname____annotations__q/builddir/build/BUILD/cloudlinux-venv-1.0.7/venv/lib/python3.11/site-packages/pip/_vendor/packaging/_tokenizer.pyr
r
s+
III
IIIMMMMMrr
c,eZdZdZdfd	Zdd
ZxZS)
ParserSyntaxErrorz7The provided source text could not be parsed correctly.messagersourcespantuple[int, int]returnNonecr||_||_||_tdS)N)rrrsuper__init__)selfrrr	__class__s    rr!zParserSyntaxError.__init__s6	
rcd|jdzd|jd|jdz
zzdz}d|j|j|gS)N r~r^z
    )rjoinrr)r"markers  r__str__zParserSyntaxError.__str__"sOty|#cTYq\DIaL-H&IICO}}dlDK@AAAr)rrrrrrrr)rr)rrr__doc__r!r*
__classcell__)r#s@rrrs_AABBBBBBBBrrLEFT_PARENTHESISz\(RIGHT_PARENTHESISz\)LEFT_BRACKETz\[
RIGHT_BRACKETz\]	SEMICOLON;COMMA,
QUOTED_STRINGzk
            (
                ('[^']*')
                |
                ("[^"]*")
            )
        OPz(===|==|~=|!=|<=|>=|<|>)BOOLOPz\b(or|and)\bINz\bin\bNOTz\bnot\bVARIABLEa
            \b(
                python_version
                |python_full_version
                |os[._]name
                |sys[._]platform
                |platform_(release|system)
                |platform[._](version|machine|python_implementation)
                |python_implementation
                |implementation_(name|version)
                |extra
            )\b
        	SPECIFIERATz\@URLz[^ \t]+
IDENTIFIERz\b[a-zA-Z0-9][a-zA-Z0-9._-]*\bVERSION_PREFIX_TRAILz\.\*z\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*z[ \t]+$)VERSION_LOCAL_LABEL_TRAILWSEND dict[str, str | re.Pattern[str]]
DEFAULT_RULEScreZdZdZd!dZd"d
Zddd#dZd$dZd%dZdddd&dZ	e
jd'd ZdS)(	TokenizerzContext-sensitive token parsing.

    Provides methods to examine the input stream to check whether the next token
    matches.
    rrrulesrDrrcv||_d|D|_d|_d|_dS)Nc>i|]\}}|tj|Sr)recompile).0rpatterns   r
z&Tokenizer.__init__..hs52
2
2
*7$D"*W%%2
2
2
rr)ritemsrH
next_tokenr)r"rrHs   rr!zTokenizer.__init__asG2
2
;@;;==2
2
2

)-


rrc\||r|dSdS)z8Move beyond provided token name, if at current position.N)checkread)r"rs  rconsumezTokenizer.consumens1::d	IIKKKKK		rF)peekrVboolc|jJd|d|j||jvs
Jd||j|}||j|j}|dS|s!t||d|j|_dS)zCheck whether the next token has the provided name.

        By default, if the check succeeds, the token *must* be read before
        another check. If `peek` is set to `True`, the token is not loaded and
        would need to be checked again.
        NzCannot check for z, already have zUnknown token name: FrT)rQrHmatchrrr
)r"rrV
expressionrYs     rrSzTokenizer.checkss
O##ItIIdoII
$##tz!!!#B$#B#B!!!Z%
  dm<<=5	C#D%(DMBBDOtrexpectedr
c||s|d||S)zsExpect a certain token name next, failing with a syntax error otherwise.

        The token is *not* read.
        z	Expected )rSraise_syntax_errorrT)r"rr[s   rexpectzTokenizer.expectsD
zz$	B))*@h*@*@AAAyy{{rcn|j}|J|xjt|jz
c_d|_|S)z%Consume the next token and return it.N)rQrlenr
)r"tokens  rrTzTokenizer.reads9   

UZ(

rN)
span_startspan_endrrb
int | Nonercrc\||jn|||jn|f}t||j|)z.Raise ParserSyntaxError at the given position.N)rr)rrr)r"rrbrcrs     rr]zTokenizer.raise_syntax_errorsK(/DMMZ%-DMM8
 ;


	
r
open_tokenclose_tokenaroundIterator[None]c#K||r|j}|nd}dV|dS||s |d|d|d|||dS)NzExpected matching z for z, after )rb)rSrrTr])r"rfrgrh
open_positions     renclosing_tokenszTokenizer.enclosing_tokenss::j!!	! MMIIKKKK M
 Fzz+&&	##S[SSzSS6SS(
$



	
		r)rrrHrDrr)rrrr)rrrVrWrrW)rrr[rrr
)rr
)rrrbrdrcrdrr)rfrrgrrhrrri)
rrrr+r!rUrSr^rTr]
contextlibcontextmanagerrlrrrrGrGZs
05*"&#





$rrG)
__future__rrmrKdataclassesrtypingrr
specifiersrr
	ExceptionrrLVERBOSE_operator_regex_str_version_regex_str
IGNORECASErErrGrrrrxsv"""""""				!!!!!!%%%%%%%%!!!!!!BBBBB	BBB*030303E03U	03
03T
03ZRZ		
		03"	
%#03$
o%03&	)'03(
:)03*

		
+03J%	(DD

R]"K03R	%S03T
:U03V3W03XGY03Z"D
_030303
0000fhhhhhhhhhhr__pycache__/metadata.cpython-311.pyc000064400000067617150044141310013250 0ustar00

]bg]~
ddlmZddlZddlZddlZddlZddlZddlZddlm	Z	m
Z
mZmZm
Z
mZddlmZmZmZddlmZejdZ	eeZn#e$rGdd	eZYnwxYwGd
deZGdd
e
dZhdZhdZdhZdMdZ dNdZ!dOdZ"iddd d!d"d#d$d$d%d&d'd(d)d)d*d+d,d,d-d-d.d.d/d0d1d2d3d3d4d4d5d6d7d8dd9d:d;dd?d@dAdBdCZ#dDe#$DZ%dPdFZ&e'Z(gdGZ)edGZ*e+gdHZ,GdIdJeeZ-GdKdLZ.dS)Q)annotationsN)AnyCallableGenericLiteral	TypedDictcast)requirements
specifiersutils)versionTc8eZdZUdZded<ded<ddZdd	Zd
S)
ExceptionGroupzA minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11.

        If :external:exc:`ExceptionGroup` is already defined by Python itself,
        that version is used instead.
        strmessagelist[Exception]
exceptionsreturnNonec"||_||_dSN)rr)selfrrs   o/builddir/build/BUILD/cloudlinux-venv-1.0.7/venv/lib/python3.11/site-packages/pip/_vendor/packaging/metadata.py__init__zExceptionGroup.__init__&s"DL(DOOOc@|jjd|jd|jdS)N(z, ))	__class____name__rr)rs r__repr__zExceptionGroup.__repr__*s*n-VVVV$/VVVVrN)rrrrrr)rr)r"
__module____qualname____doc____annotations__rr#rrrrse			####	)	)	)	)	W	W	W	W	W	Wrrc2eZdZUdZded<	dfdZxZS)	InvalidMetadataz'A metadata field contains invalid data.rfieldrrrcX||_t|dSr)r+superr)rr+rr!s   rrzInvalidMetadata.__init__7s(

!!!!!r)r+rrrrr)r"r$r%r&r'r
__classcell__)r!s@rr*r*1sQ11JJJ;""""""""""rr*c,eZdZUdZded<ded<ded<ded<ded<ded	<ded
<ded<ded<ded
<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded<ded <d!S)"RawMetadataaA dictionary of raw core metadata.

    Each field in core metadata maps to a key of this dictionary (when data is
    provided). The key is lower-case and underscores are used instead of dashes
    compared to the equivalent core metadata field. Any core metadata field that
    can be specified multiple times or can hold multiple values in a single
    field have a key with a plural name. See :class:`Metadata` whose attributes
    match the keys of this dictionary.

    Core metadata fields that can be specified multiple times are stored as a
    list or dict depending on which is appropriate for the field. Any fields
    which hold multiple values in a single field are stored as a list.

    rmetadata_versionnamer	list[str]	platformssummarydescriptionkeywords	home_pageauthorauthor_emaillicensesupported_platformsdownload_urlclassifiersrequiresprovides	obsoletes
maintainermaintainer_email
requires_dist
provides_distobsoletes_distrequires_pythonrequires_externaldict[str, str]project_urlsdescription_content_typeprovides_extradynamicN)r"r$r%r&r'r(rrr0r0@sa

 
IIILLLLLLNNNKKKLLL#"""OOO        "!!!rr0F)total>r2r9r;r5rr8rBr6r:r=rGrCr1rK>rMr@r?rAr4r>rErDrFrLrHr<rJdatarrr3c@d|dDS)zCSplit a string of comma-separate keyboards into a list of keywords.c6g|]}|Sr(strip).0ks  r
z#_parse_keywords..s ///!AGGII///r,)split)rOs r_parse_keywordsrYs //tzz#////rrIci}|D]v}d|ddD}|dgtddt|z
z|\}}||vrt	d|||<w|S)z?Parse a list of label/URL string pairings separated by a comma.c6g|]}|Sr(rR)rTps  rrVz'_parse_project_urls..s 777q777rrWr
rz duplicate labels in project urls)rXextendmaxlenKeyError)rOurlspairpartslabelurls      r_parse_project_urlsrhs
D  $87DJJsA$6$6777
bTSAE

N334555
sD===>>>UKrmsgemail.message.Messagesourcebytes | strct|tr|}|S|d}	|ddS#t$rtdwxYw)zGet the body of the message.Tdecodeutf8strictzpayload in an invalid encoding)
isinstancerget_payloadroUnicodeDecodeError
ValueError)rirkpayloadbpayloads    r_get_payloadrxs&#
?((///66	???68444!	?	?	?=>>>	?sAA3r9zauthor-emailr:
classifierr>r6zdescription-content-typerKzdownload-urlr=rMz	home-pager8r7r;rBzmaintainer-emailrCmetadata-versionr1r2rAzobsoletes-distrFplatformr4r@rErLr?rDrHrGr5r<r)zproject-urlr@z
provides-distzprovides-extrar?z
requires-distzrequires-externalzrequires-pythonr5zsupported-platformrci|]\}}||	Sr(r()rTemailraws   r
rsTTT
seTTTr(tuple[RawMetadata, dict[str, list[str]]]c	i}i}t|trCtjtjj|}nBtjtjj	|}t|D]}|}|
|pg}g}d}|D]}t|tjjtfsJt|tjjrg}	tj|D]J\}
}	|
ddd}n#t$$rd}d}YnwxYw|	|
|fK|ttj|	||
|s|||<Jt*|}||||<m|t.vr t1|dkr
|d	||<|t2vr|||<|d
kr-t1|dkrt5|d	||<|dkr+	t7|||<#t8$r	|||<YwxYw|||<	t;||}
|
r}d|vrTt=t|d}| dg!||
gnd|vr|d|
nf|
|d<n`#tD$rS| dg|#t|tH
YnwxYwt=tJ||fS)a
Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).

    This function returns a two-item tuple of dicts. The first dict is of
    recognized fields from the core metadata specification. Fields that can be
    parsed and translated into Python's built-in types are converted
    appropriately. All other fields are left as-is. Fields that are allowed to
    appear multiple times are stored as lists.

    The second dict contains all other fields from the metadata. This includes
    any unrecognized fields. It also includes any fields which are expected to
    be parsed into a built-in type but were not formatted appropriately. Finally,
    any fields that are expected to appear only once but are repeated are
    included in this dict.

    )policyTrprqlatin1FNr
rr7rJr6rn)&rrrr}parserParserrcompat32parsestrBytesParser
parsebytes	frozensetkeyslowerget_allheaderHeader
decode_headerrortappendmake_header_EMAIL_TO_RAW_MAPPINGget_STRING_FIELDSra_LIST_FIELDSrYrhrbrxr	pop
setdefaultr_rursbytesr0)rOr~unparsedparsedr2headersvaluevalid_encodinghchunksbinencodingraw_namervdescription_headers               rparse_emailrs/ 8:C%'H$Y$$EL,A$BBKKDQQ))1F)GGRRSWXX
&++--((t#t#zz||
..&&,"#	 #	 Aa%,"5s!;<<<<<!U\011
 :<%*\%?%?%B%B	3	3MC*

68444$*.///#+)./MM3/2222
S!9!9&!A!ABBCCCCQ
	"HTN(,,T22#HTN~%%#e**//!!HCMM
%
%!CMM
#
#E

a+E!H55CMM
'
'
' 3E : :H


'
'
'!&
'
#HTNN-vt,,	-##%)#sww}/E/E%F%F"##M266=='1(**'..w7777%,M"!


M2..55ju&=&=>>	
	
	
	
	

,S!!8++s7%E>>F	F	 J44KKM""AN?>N?)1.01.11.22.12.2z2.3)r1r2rceZdZUdZded<ded<ded<ddd(dZd)dZd*dZ	d+d,dZd-dZ	d.dZ
d/dZd.dZd.dZ
d0d!Zd1d#Zd2d%Zd3d'ZdS)4
_ValidatoravValidate a metadata field.

    All _process_*() methods correspond to a core metadata field. The method is
    called with the field's raw value. If the raw value is valid it is returned
    in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
    If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
    as appropriate).
    rr2r_MetadataVersionaddedrrrrc||_dSrr)rrs  rrz_Validator.__init__s



r_ownerMetadatac8||_t||_dSr)r2_RAW_TO_EMAIL_MAPPINGr)rrr2s   r__set_name__z_Validator.__set_name__s	-d3


rinstancetype[Metadata]rcT|j}|j|j}|jtvs|5	t|d|j}||}n#t$rYnwxYw|||j<	|j|j=n#t$rYnwxYwtt|S)N	_process_)
__dict___rawrr2_REQUIRED_ATTRSgetattrAttributeErrorrbr	r)rrrcacher	converters      r__get__z_Validator.__get__s!
!!$),,9''5+<
)07>U$)>U>U0V0V	"	%(("




!di	
di((			D	Au~~s#A
A)(A)7
B
BBNricauseException | Noner*c	t|j|dt|ji}||_|S)Nr+)r*r
format_maprepr	__cause__)rrirexcs    r_invalid_metadataz_Validator._invalid_metadatasCM3>>7D4G4G*HII



rrcn|tvr||dtt|S)Nz  is not a valid metadata version)_VALID_METADATA_VERSIONSrr	rrrs  r_process_metadata_versionz$_Validator._process_metadata_versions<000((E)U)U)UVVV$e,,,rc|s|d	tj|d|S#tj$r}||d|d}~wwxYw)N{field} is a required fieldTvalidate is invalid for {field}r)rr
canonicalize_nameInvalidNamerrrs   r
_process_namez_Validator._process_names	H(()FGGG	#ED9999L 			((555S)
	s1AAAversion_module.Versionc|s|d	tj|S#tj$r}||d|d}~wwxYw)Nrrr)rversion_moduleparseInvalidVersionrs   r_process_versionz_Validator._process_versions	H(()FGGG	!'...,			((555S)
	s-AAAc8d|vr|d|S)z%Check the field contains no newlines.
z{field} must be a single line)rrs  r_process_summaryz_Validator._process_summary(s%5==(()HIIIrcXhd}tj}||d<||dj}}||vs||vr(|dt|d||dd}|dkr%|dt|dd	h}|d
d}|dkr,||vr(|dt|d||S)
N>
text/plain
text/x-rst
text/markdownzcontent-typez{field} must be one of z, not charsetzUTF-8z0{field} can only specify the UTF-8 charset, not GFM
CommonMarkvariantrz(valid Markdown variants for {field} are )	r}rEmailMessageget_content_typerparamsrlistr)	rr
content_typesrcontent_type
parametersrmarkdown_variantsrs	         r!_process_description_content_typez,_Validator._process_description_content_type.spEEE
-,,.."'
$$&&,,..N#*!},,EKKMM0Q0Q((PD,?,?PPuPP
..G44g((TT']]TT
#L1..E22?**w>O/O/O((#TBS=T=T####
rr3cttj|D]?}|dvr||d|tvr||d@tttj|S)N>r2rrzz" is not allowed as a dynamic fieldz is not a valid dynamic field)maprrrrr)rr
dynamic_fields   r_process_dynamicz_Validator._process_dynamicNs E22	X	XM GGG,,BBB&;;;,,-V-V-VWWW<C	5))***rlist[utils.NormalizedName]cg}	|D]+}|tj|d,	|S#tj$r}||d|d}~wwxYw)NTrrr)rr
rrr)rrnormalized_namesr2rs     r_process_provides_extraz"_Validator._process_provides_extraXs	$
V
V ''(?t(T(T(TUUUU
V$# 			((444C)
	s.5A#AA#specifiers.SpecifierSetc	tj|S#tj$r}||d|d}~wwxYwNrr)rSpecifierSetInvalidSpecifierrrs   r_process_requires_pythonz#_Validator._process_requires_pythongse	*5111*			((555S)
	sA?Alist[requirements.Requirement]cg}	|D])}|tj|*	|S#tj$r}||d|d}~wwxYwr)rrRequirementInvalidRequirementr)rrreqsreqrs     r_process_requires_distz!_Validator._process_requires_distos	
;
;L4S99::::
;
K.	Y	Y	Y((C)L)L)LTW(XXX	Ys,3A!AA!)rrrr)rrr2rrr)rrrrrrr)rirrrrr*)rrrr)rrrr)rrrr)rr3rr3)rr3rr)rrrr)rr3rr)r"r$r%r&r'rrrrrrrrrrrrrr(rrrrs^IIIMMM
#(4444837----@++++
$
$
$
$rrceZdZUdZded<eddd8d
Zeddd9dZeZ	d
ed<	eZ
ded<	eZded<	edZded<	eZ
ded<	edZded<	eZded<	eZded<	edZded<	eZded<	eZded <	edZded!<	eZded"<	eZded#<	ed$Zded%<	ed$Zded&<	eZded'<	edZded(<	ed$Zd)ed*<	ed$Zd+ed,<	ed$Zded-<	ed$Zd.ed/<	edZd0ed1<	ed$Z ded2<	ed$Z!ded3<	edZ"ded4<	edZ#ded5<	edZ$ded6<d7S):raJRepresentation of distribution metadata.

    Compared to :class:`RawMetadata`, this class provides objects representing
    metadata fields instead of only using built-in types. Any invalid metadata
    will cause :exc:`InvalidMetadata` to be raised (with a
    :py:attr:`~BaseException.__cause__` attribute as appropriate).
    r0rTrrOrboolrc&|}||_|rjg}	|j}t|}n.#t
$r!}||d}Yd}~nd}~wwxYwt|jtz}|dhz}|D]}		|r	|j	|	j
}
n8#t$r+t|	d|	}||YMwxYwt|
}||kr3t|	}
t|
d}||t||	#t
$r}||Yd}~d}~wwxYw|rtd||S)zCreate an instance from :class:`RawMetadata`.

        If *validate* is true, all metadata will be validated. All exceptions
        related to validation will be gathered and raised as an :class:`ExceptionGroup`.
        Nr1unrecognized field: zW{field} introduced in metadata version {field_metadata_version}, not {metadata_version}zinvalid metadata)copyrr1rindexr*rrrrrrbrrr)clsrOrinsrr1metadata_agemetadata_version_excfields_to_checkkeyfield_metadata_versionr	field_ager+s              rfrom_rawzMetadata.from_rawscee99;;+	E*,J
(# 7==>NOO"
(
(
(!!"6777#'      
((11OCO 233O&
+
++'%%58\#5F5L22'%%%"1#7Uc7U7U"V"VC&--c222$H%%=$B$B2%%	%|33$9#$>E"1 %!S##C
'--c222$C%%%%&+++%%c********+
E$%7DDD
sZ!A
A7A22A7!E$B76E72C,)E+C,,AEE
E<E77E<rlcJt|\}}|rPg}|D]9}|tvr|d}nd|}|t||:|rt	d|	|||S#t$r}t	d|jdd}~wwxYw)zParse metadata from email headers.

        If *validate* is true, the metadata will be validated. All exceptions
        related to validation will be gathered and raised as an :class:`ExceptionGroup`.
        z has invalid datar
rrzinvalid or unparsed metadataN)rrrr*rrr)	r
rOrr~rrunparsed_keyr	exc_groups	         r
from_emailzMetadata.from_emails$D))
X
	=*,J (
J
J#888!-BBBGGE\EEG!!/,"H"HIIII
=$Z<<<	<<h<777			 .	0D
	s&A==
B"BB"z_Validator[_MetadataVersion]r1z_Validator[str]r2z"_Validator[version_module.Version]rrrz_Validator[list[str] | None]rMr4rr<z_Validator[str | None]r5r6rrKr7r8r=r9r:rrBrCr;r>z1_Validator[list[requirements.Requirement] | None]rDz*_Validator[specifiers.SpecifierSet | None]rGrHz!_Validator[dict[str, str] | None]rJz-_Validator[list[utils.NormalizedName] | None]rLrErFr?r@rAN)rOr0rrrr)rOrlrrrr)%r"r$r%r&r'classmethodrrrr1r2rrMr4r<r5r6rKr7r8r=r9r:rBrCr;r>rDrGrHrJrLrErFr?r@rAr(rrrr}s2=A66666[6p?C[66@Z\\AAAA<&JLLD((((3=*,,G>>>>:,6J---GD.8jllI::::08B
8O8O8OOOOO:&0jllG2222R*4*,,K666637Az7N7N7NNNNNL-7Z\\H99990(2
I44441+5:E+B+B+BLBBBB4%/Z\\F1111.+5:<r)s""""""



.---------''''''FN3$N*$NN)WWWWWWWWWWWWWW."""""j"""=====)5====H"


 
0000
%%%%P????:hN-=	
 :N
y
y,**F &!"#$"$&$,(/9<UT6K6Q6Q6S6STTTm,m,m,m,`VXX
FEECD)CCCDDllllllll^g%g%g%g%g%g%g%g%g%g%sAA+*A+_manylinux.py000064400000022562150044141310007301 0ustar00from __future__ import annotations

import collections
import contextlib
import functools
import os
import re
import sys
import warnings
from typing import Generator, Iterator, NamedTuple, Sequence

from ._elffile import EIClass, EIData, ELFFile, EMachine

EF_ARM_ABIMASK = 0xFF000000
EF_ARM_ABI_VER5 = 0x05000000
EF_ARM_ABI_FLOAT_HARD = 0x00000400


# `os.PathLike` not a generic type until Python 3.9, so sticking with `str`
# as the type for `path` until then.
@contextlib.contextmanager
def _parse_elf(path: str) -> Generator[ELFFile | None, None, None]:
    try:
        with open(path, "rb") as f:
            yield ELFFile(f)
    except (OSError, TypeError, ValueError):
        yield None


def _is_linux_armhf(executable: str) -> bool:
    # hard-float ABI can be detected from the ELF header of the running
    # process
    # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
    with _parse_elf(executable) as f:
        return (
            f is not None
            and f.capacity == EIClass.C32
            and f.encoding == EIData.Lsb
            and f.machine == EMachine.Arm
            and f.flags & EF_ARM_ABIMASK == EF_ARM_ABI_VER5
            and f.flags & EF_ARM_ABI_FLOAT_HARD == EF_ARM_ABI_FLOAT_HARD
        )


def _is_linux_i686(executable: str) -> bool:
    with _parse_elf(executable) as f:
        return (
            f is not None
            and f.capacity == EIClass.C32
            and f.encoding == EIData.Lsb
            and f.machine == EMachine.I386
        )


def _have_compatible_abi(executable: str, archs: Sequence[str]) -> bool:
    if "armv7l" in archs:
        return _is_linux_armhf(executable)
    if "i686" in archs:
        return _is_linux_i686(executable)
    allowed_archs = {
        "x86_64",
        "aarch64",
        "ppc64",
        "ppc64le",
        "s390x",
        "loongarch64",
        "riscv64",
    }
    return any(arch in allowed_archs for arch in archs)


# If glibc ever changes its major version, we need to know what the last
# minor version was, so we can build the complete list of all versions.
# For now, guess what the highest minor version might be, assume it will
# be 50 for testing. Once this actually happens, update the dictionary
# with the actual value.
_LAST_GLIBC_MINOR: dict[int, int] = collections.defaultdict(lambda: 50)


class _GLibCVersion(NamedTuple):
    major: int
    minor: int


def _glibc_version_string_confstr() -> str | None:
    """
    Primary implementation of glibc_version_string using os.confstr.
    """
    # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
    # to be broken or missing. This strategy is used in the standard library
    # platform module.
    # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
    try:
        # Should be a string like "glibc 2.17".
        version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION")
        assert version_string is not None
        _, version = version_string.rsplit()
    except (AssertionError, AttributeError, OSError, ValueError):
        # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
        return None
    return version


def _glibc_version_string_ctypes() -> str | None:
    """
    Fallback implementation of glibc_version_string using ctypes.
    """
    try:
        import ctypes
    except ImportError:
        return None

    # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
    # manpage says, "If filename is NULL, then the returned handle is for the
    # main program". This way we can let the linker do the work to figure out
    # which libc our process is actually using.
    #
    # We must also handle the special case where the executable is not a
    # dynamically linked executable. This can occur when using musl libc,
    # for example. In this situation, dlopen() will error, leading to an
    # OSError. Interestingly, at least in the case of musl, there is no
    # errno set on the OSError. The single string argument used to construct
    # OSError comes from libc itself and is therefore not portable to
    # hard code here. In any case, failure to call dlopen() means we
    # can proceed, so we bail on our attempt.
    try:
        process_namespace = ctypes.CDLL(None)
    except OSError:
        return None

    try:
        gnu_get_libc_version = process_namespace.gnu_get_libc_version
    except AttributeError:
        # Symbol doesn't exist -> therefore, we are not linked to
        # glibc.
        return None

    # Call gnu_get_libc_version, which returns a string like "2.5"
    gnu_get_libc_version.restype = ctypes.c_char_p
    version_str: str = gnu_get_libc_version()
    # py2 / py3 compatibility:
    if not isinstance(version_str, str):
        version_str = version_str.decode("ascii")

    return version_str


def _glibc_version_string() -> str | None:
    """Returns glibc version string, or None if not using glibc."""
    return _glibc_version_string_confstr() or _glibc_version_string_ctypes()


def _parse_glibc_version(version_str: str) -> tuple[int, int]:
    """Parse glibc version.

    We use a regexp instead of str.split because we want to discard any
    random junk that might come after the minor version -- this might happen
    in patched/forked versions of glibc (e.g. Linaro's version of glibc
    uses version strings like "2.20-2014.11"). See gh-3588.
    """
    m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str)
    if not m:
        warnings.warn(
            f"Expected glibc version with 2 components major.minor,"
            f" got: {version_str}",
            RuntimeWarning,
        )
        return -1, -1
    return int(m.group("major")), int(m.group("minor"))


@functools.lru_cache
def _get_glibc_version() -> tuple[int, int]:
    version_str = _glibc_version_string()
    if version_str is None:
        return (-1, -1)
    return _parse_glibc_version(version_str)
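

# Illustrative sketch, not part of the vendored module (_demo_glibc_detection is
# a hypothetical name): it shows why _parse_glibc_version() uses a regexp that
# tolerates trailing junk, and how _get_glibc_version() reports the running
# interpreter's libc.
def _demo_glibc_detection() -> None:  # pragma: no cover
    # Patched glibc builds may report strings like "2.20-2014.11"; only the
    # leading major.minor pair is kept.
    assert _parse_glibc_version("2.20-2014.11") == (2, 20)
    # Unparseable strings fall back to (-1, -1) (with a RuntimeWarning).
    assert _parse_glibc_version("not-a-version") == (-1, -1)
    print("glibc of this interpreter:", _get_glibc_version())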


# From PEP 513, PEP 600
def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
    sys_glibc = _get_glibc_version()
    if sys_glibc < version:
        return False
    # Check for presence of _manylinux module.
    try:
        import _manylinux
    except ImportError:
        return True
    if hasattr(_manylinux, "manylinux_compatible"):
        result = _manylinux.manylinux_compatible(version[0], version[1], arch)
        if result is not None:
            return bool(result)
        return True
    if version == _GLibCVersion(2, 5):
        if hasattr(_manylinux, "manylinux1_compatible"):
            return bool(_manylinux.manylinux1_compatible)
    if version == _GLibCVersion(2, 12):
        if hasattr(_manylinux, "manylinux2010_compatible"):
            return bool(_manylinux.manylinux2010_compatible)
    if version == _GLibCVersion(2, 17):
        if hasattr(_manylinux, "manylinux2014_compatible"):
            return bool(_manylinux.manylinux2014_compatible)
    return True


_LEGACY_MANYLINUX_MAP = {
    # CentOS 7 w/ glibc 2.17 (PEP 599)
    (2, 17): "manylinux2014",
    # CentOS 6 w/ glibc 2.12 (PEP 571)
    (2, 12): "manylinux2010",
    # CentOS 5 w/ glibc 2.5 (PEP 513)
    (2, 5): "manylinux1",
}


def platform_tags(archs: Sequence[str]) -> Iterator[str]:
    """Generate manylinux tags compatible to the current platform.

    :param archs: Sequence of compatible architectures.
        The first one shall be the closest to the actual architecture and be the part of
        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
        be manylinux-compatible.

    :returns: An iterator of compatible manylinux tags.
    """
    if not _have_compatible_abi(sys.executable, archs):
        return
    # Oldest glibc to be supported regardless of architecture is (2, 17).
    too_old_glibc2 = _GLibCVersion(2, 16)
    if set(archs) & {"x86_64", "i686"}:
        # On x86/i686 also oldest glibc to be supported is (2, 5).
        too_old_glibc2 = _GLibCVersion(2, 4)
    current_glibc = _GLibCVersion(*_get_glibc_version())
    glibc_max_list = [current_glibc]
    # We can assume compatibility across glibc major versions.
    # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
    #
    # Build a list of maximum glibc versions so that we can
    # output the canonical list of all glibc from current_glibc
    # down to too_old_glibc2, including all intermediary versions.
    for glibc_major in range(current_glibc.major - 1, 1, -1):
        glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
        glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
    for arch in archs:
        for glibc_max in glibc_max_list:
            if glibc_max.major == too_old_glibc2.major:
                min_minor = too_old_glibc2.minor
            else:
                # For other glibc major versions oldest supported is (x, 0).
                min_minor = -1
            for glibc_minor in range(glibc_max.minor, min_minor, -1):
                glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
                tag = "manylinux_{}_{}".format(*glibc_version)
                if _is_compatible(arch, glibc_version):
                    yield f"{tag}_{arch}"
                # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
                if glibc_version in _LEGACY_MANYLINUX_MAP:
                    legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
                    if _is_compatible(arch, glibc_version):
                        yield f"{legacy_tag}_{arch}"
_musllinux.py000064400000005206150044141310007311 0ustar00"""PEP 656 support.

This module implements logic to detect if the currently running Python is
linked against musl, and what musl version is used.
"""

from __future__ import annotations

import functools
import re
import subprocess
import sys
from typing import Iterator, NamedTuple, Sequence

from ._elffile import ELFFile


class _MuslVersion(NamedTuple):
    major: int
    minor: int


def _parse_musl_version(output: str) -> _MuslVersion | None:
    lines = [n for n in (n.strip() for n in output.splitlines()) if n]
    if len(lines) < 2 or lines[0][:4] != "musl":
        return None
    m = re.match(r"Version (\d+)\.(\d+)", lines[1])
    if not m:
        return None
    return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))


@functools.lru_cache
def _get_musl_version(executable: str) -> _MuslVersion | None:
    """Detect currently-running musl runtime version.

    This is done by checking the specified executable's dynamic linking
    information, and invoking the loader to parse its output for a version
    string. If the loader is musl, the output would be something like::

        musl libc (x86_64)
        Version 1.2.2
        Dynamic Program Loader
    """
    try:
        with open(executable, "rb") as f:
            ld = ELFFile(f).interpreter
    except (OSError, TypeError, ValueError):
        return None
    if ld is None or "musl" not in ld:
        return None
    proc = subprocess.run([ld], stderr=subprocess.PIPE, text=True)
    return _parse_musl_version(proc.stderr)
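

# Illustrative sketch, not part of the vendored module (_demo_parse_musl is a
# hypothetical name): _parse_musl_version() consumes loader output of the form
# shown in the docstring above.
def _demo_parse_musl() -> None:  # pragma: no cover
    sample = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
    assert _parse_musl_version(sample) == _MuslVersion(major=1, minor=2)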


def platform_tags(archs: Sequence[str]) -> Iterator[str]:
    """Generate musllinux tags compatible to the current platform.

    :param archs: Sequence of compatible architectures.
        The first one shall be the closest to the actual architecture and be the part of
        platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
        The ``linux_`` prefix is assumed as a prerequisite for the current platform to
        be musllinux-compatible.

    :returns: An iterator of compatible musllinux tags.
    """
    sys_musl = _get_musl_version(sys.executable)
    if sys_musl is None:  # Python not dynamically linked against musl.
        return
    for arch in archs:
        for minor in range(sys_musl.minor, -1, -1):
            yield f"musllinux_{sys_musl.major}_{minor}_{arch}"


if __name__ == "__main__":  # pragma: no cover
    import sysconfig

    plat = sysconfig.get_platform()
    assert plat.startswith("linux-"), "not linux"

    print("plat:", plat)
    print("musl:", _get_musl_version(sys.executable))
    print("tags:", end=" ")
    for t in platform_tags([re.sub(r"[.-]", "_", plat.split("-", 1)[-1])]):
        print(t, end="\n      ")
_structures.py000064400000002627150044141310007500 0ustar00# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.


class InfinityType:
    def __repr__(self) -> str:
        return "Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __lt__(self, other: object) -> bool:
        return False

    def __le__(self, other: object) -> bool:
        return False

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return True

    def __ge__(self, other: object) -> bool:
        return True

    def __neg__(self: object) -> "NegativeInfinityType":
        return NegativeInfinity


Infinity = InfinityType()


class NegativeInfinityType:
    def __repr__(self) -> str:
        return "-Infinity"

    def __hash__(self) -> int:
        return hash(repr(self))

    def __lt__(self, other: object) -> bool:
        return True

    def __le__(self, other: object) -> bool:
        return True

    def __eq__(self, other: object) -> bool:
        return isinstance(other, self.__class__)

    def __gt__(self, other: object) -> bool:
        return False

    def __ge__(self, other: object) -> bool:
        return False

    def __neg__(self: object) -> InfinityType:
        return Infinity


NegativeInfinity = NegativeInfinityType()
markers.py000064400000024657150044141310006571 0ustar00# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import annotations

import operator
import os
import platform
import sys
from typing import Any, Callable, TypedDict, cast

from ._parser import MarkerAtom, MarkerList, Op, Value, Variable
from ._parser import parse_marker as _parse_marker
from ._tokenizer import ParserSyntaxError
from .specifiers import InvalidSpecifier, Specifier
from .utils import canonicalize_name

__all__ = [
    "InvalidMarker",
    "UndefinedComparison",
    "UndefinedEnvironmentName",
    "Marker",
    "default_environment",
]

Operator = Callable[[str, str], bool]


class InvalidMarker(ValueError):
    """
    An invalid marker was found, users should refer to PEP 508.
    """


class UndefinedComparison(ValueError):
    """
    An invalid operation was attempted on a value that doesn't support it.
    """


class UndefinedEnvironmentName(ValueError):
    """
    A name was attempted to be used that does not exist inside of the
    environment.
    """


class Environment(TypedDict):
    implementation_name: str
    """The implementation's identifier, e.g. ``'cpython'``."""

    implementation_version: str
    """
    The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or
    ``'7.3.13'`` for PyPy3.10 v7.3.13.
    """

    os_name: str
    """
    The value of :py:data:`os.name`. The name of the operating system dependent module
    imported, e.g. ``'posix'``.
    """

    platform_machine: str
    """
    Returns the machine type, e.g. ``'i386'``.

    An empty string if the value cannot be determined.
    """

    platform_release: str
    """
    The system's release, e.g. ``'2.2.0'`` or ``'NT'``.

    An empty string if the value cannot be determined.
    """

    platform_system: str
    """
    The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``.

    An empty string if the value cannot be determined.
    """

    platform_version: str
    """
    The system's release version, e.g. ``'#3 on degas'``.

    An empty string if the value cannot be determined.
    """

    python_full_version: str
    """
    The Python version as string ``'major.minor.patchlevel'``.

    Note that unlike the Python :py:data:`sys.version`, this value will always include
    the patchlevel (it defaults to 0).
    """

    platform_python_implementation: str
    """
    A string identifying the Python implementation, e.g. ``'CPython'``.
    """

    python_version: str
    """The Python version as string ``'major.minor'``."""

    sys_platform: str
    """
    This string contains a platform identifier that can be used to append
    platform-specific components to :py:data:`sys.path`, for instance.

    For Unix systems, except on Linux and AIX, this is the lowercased OS name as
    returned by ``uname -s`` with the first part of the version as returned by
    ``uname -r`` appended, e.g. ``'sunos5'`` or ``'freebsd8'``, at the time when Python
    was built.
    """


def _normalize_extra_values(results: Any) -> Any:
    """
    Normalize extra values.
    """
    if isinstance(results[0], tuple):
        lhs, op, rhs = results[0]
        if isinstance(lhs, Variable) and lhs.value == "extra":
            normalized_extra = canonicalize_name(rhs.value)
            rhs = Value(normalized_extra)
        elif isinstance(rhs, Variable) and rhs.value == "extra":
            normalized_extra = canonicalize_name(lhs.value)
            lhs = Value(normalized_extra)
        results[0] = lhs, op, rhs
    return results


def _format_marker(
    marker: list[str] | MarkerAtom | str, first: bool | None = True
) -> str:
    assert isinstance(marker, (list, tuple, str))

    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself a list. In that case we want to skip
    # the rest of this function so that we don't get extraneous () on the
    # outside.
    if (
        isinstance(marker, list)
        and len(marker) == 1
        and isinstance(marker[0], (list, tuple))
    ):
        return _format_marker(marker[0])

    if isinstance(marker, list):
        inner = (_format_marker(m, first=False) for m in marker)
        if first:
            return " ".join(inner)
        else:
            return "(" + " ".join(inner) + ")"
    elif isinstance(marker, tuple):
        return " ".join([m.serialize() for m in marker])
    else:
        return marker


_operators: dict[str, Operator] = {
    "in": lambda lhs, rhs: lhs in rhs,
    "not in": lambda lhs, rhs: lhs not in rhs,
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
    try:
        spec = Specifier("".join([op.serialize(), rhs]))
    except InvalidSpecifier:
        pass
    else:
        return spec.contains(lhs, prereleases=True)

    oper: Operator | None = _operators.get(op.serialize())
    if oper is None:
        raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")

    return oper(lhs, rhs)


def _normalize(*values: str, key: str) -> tuple[str, ...]:
    # PEP 685 – Comparison of extra names for optional distribution dependencies
    # https://peps.python.org/pep-0685/
    # > When comparing extra names, tools MUST normalize the names being
    # > compared using the semantics outlined in PEP 503 for names
    if key == "extra":
        return tuple(canonicalize_name(v) for v in values)

    # other environment markers don't have such standards
    return values


def _evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool:
    groups: list[list[bool]] = [[]]

    for marker in markers:
        assert isinstance(marker, (list, tuple, str))

        if isinstance(marker, list):
            groups[-1].append(_evaluate_markers(marker, environment))
        elif isinstance(marker, tuple):
            lhs, op, rhs = marker

            if isinstance(lhs, Variable):
                environment_key = lhs.value
                lhs_value = environment[environment_key]
                rhs_value = rhs.value
            else:
                lhs_value = lhs.value
                environment_key = rhs.value
                rhs_value = environment[environment_key]

            lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
            groups[-1].append(_eval_op(lhs_value, op, rhs_value))
        else:
            assert marker in ["and", "or"]
            if marker == "or":
                groups.append([])

    return any(all(item) for item in groups)


def format_full_version(info: sys._version_info) -> str:
    version = "{0.major}.{0.minor}.{0.micro}".format(info)
    kind = info.releaselevel
    if kind != "final":
        version += kind[0] + str(info.serial)
    return version


def default_environment() -> Environment:
    iver = format_full_version(sys.implementation.version)
    implementation_name = sys.implementation.name
    return {
        "implementation_name": implementation_name,
        "implementation_version": iver,
        "os_name": os.name,
        "platform_machine": platform.machine(),
        "platform_release": platform.release(),
        "platform_system": platform.system(),
        "platform_version": platform.version(),
        "python_full_version": platform.python_version(),
        "platform_python_implementation": platform.python_implementation(),
        "python_version": ".".join(platform.python_version_tuple()[:2]),
        "sys_platform": sys.platform,
    }


class Marker:
    def __init__(self, marker: str) -> None:
        # Note: We create a Marker object without calling this constructor in
        #       packaging.requirements.Requirement. If any additional logic is
        #       added here, make sure to mirror/adapt Requirement.
        try:
            self._markers = _normalize_extra_values(_parse_marker(marker))
            # The attribute `_markers` can be described in terms of a recursive type:
            # MarkerList = List[Union[Tuple[Node, ...], str, MarkerList]]
            #
            # For example, the following expression:
            # python_version > "3.6" or (python_version == "3.6" and os_name == "unix")
            #
            # is parsed into:
            # [
            #     (<Variable('python_version')>, <Op('>')>, <Value('3.6')>),
            #     'and',
            #     [
            #         (<Variable('python_version')>, <Op('==')>, <Value('3.6')>),
            #         'or',
            #         (<Variable('os_name')>, <Op('==')>, <Value('unix')>)
            #     ]
            # ]
        except ParserSyntaxError as e:
            raise InvalidMarker(str(e)) from e

    def __str__(self) -> str:
        return _format_marker(self._markers)

    def __repr__(self) -> str:
        return f""

    def __hash__(self) -> int:
        return hash((self.__class__.__name__, str(self)))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Marker):
            return NotImplemented

        return str(self) == str(other)

    def evaluate(self, environment: dict[str, str] | None = None) -> bool:
        """Evaluate a marker.

        Return the boolean from evaluating the given marker against the
        environment. environment is an optional argument to override all or
        part of the determined environment.

        The environment is determined from the current Python process.
        """
        current_environment = cast("dict[str, str]", default_environment())
        current_environment["extra"] = ""
        # Work around platform.python_version() returning something that is not PEP 440
        # compliant for non-tagged Python builds. We preserve default_environment()'s
        # behavior of returning platform.python_version() verbatim, and leave it to the
        # caller to provide a syntactically valid version if they want to override it.
        if current_environment["python_full_version"].endswith("+"):
            current_environment["python_full_version"] += "local"
        if environment is not None:
            current_environment.update(environment)
            # The API used to allow setting extra to None. We need to handle this
            # case for backwards compatibility.
            if current_environment["extra"] is None:
                current_environment["extra"] = ""

        return _evaluate_markers(self._markers, current_environment)
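

# Illustrative usage sketch, not part of the vendored module
# (_demo_marker_evaluation is a hypothetical name): evaluating a marker against
# the detected environment and against a partial override.
def _demo_marker_evaluation() -> None:  # pragma: no cover
    marker = Marker('python_version >= "3.8" and os_name == "posix"')
    print(marker.evaluate())                   # depends on the running interpreter
    print(marker.evaluate({"os_name": "nt"}))  # override part of the environment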
requirements.py000064400000005603150044141310007636 0ustar00# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import annotations

from typing import Any, Iterator

from ._parser import parse_requirement as _parse_requirement
from ._tokenizer import ParserSyntaxError
from .markers import Marker, _normalize_extra_values
from .specifiers import SpecifierSet
from .utils import canonicalize_name


class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.
    """


class Requirement:
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string: str) -> None:
        try:
            parsed = _parse_requirement(requirement_string)
        except ParserSyntaxError as e:
            raise InvalidRequirement(str(e)) from e

        self.name: str = parsed.name
        self.url: str | None = parsed.url or None
        self.extras: set[str] = set(parsed.extras or [])
        self.specifier: SpecifierSet = SpecifierSet(parsed.specifier)
        self.marker: Marker | None = None
        if parsed.marker is not None:
            self.marker = Marker.__new__(Marker)
            self.marker._markers = _normalize_extra_values(parsed.marker)

    def _iter_parts(self, name: str) -> Iterator[str]:
        yield name

        if self.extras:
            formatted_extras = ",".join(sorted(self.extras))
            yield f"[{formatted_extras}]"

        if self.specifier:
            yield str(self.specifier)

        if self.url:
            yield f"@ {self.url}"
            if self.marker:
                yield " "

        if self.marker:
            yield f"; {self.marker}"

    def __str__(self) -> str:
        return "".join(self._iter_parts(self.name))

    def __repr__(self) -> str:
        return f""

    def __hash__(self) -> int:
        return hash(
            (
                self.__class__.__name__,
                *self._iter_parts(canonicalize_name(self.name)),
            )
        )

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, Requirement):
            return NotImplemented

        return (
            canonicalize_name(self.name) == canonicalize_name(other.name)
            and self.extras == other.extras
            and self.specifier == other.specifier
            and self.url == other.url
            and self.marker == other.marker
        )
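

# Illustrative usage sketch, not part of the vendored module
# (_demo_requirement_parsing is a hypothetical name): the parts a PEP 508
# requirement string is split into.
def _demo_requirement_parsing() -> None:  # pragma: no cover
    req = Requirement('name[extra1,extra2]>=1.0,<2.0; python_version >= "3.8"')
    print(req.name)            # name
    print(sorted(req.extras))  # ['extra1', 'extra2']
    print(str(req.specifier))  # <2.0,>=1.0 (specifiers are re-ordered)
    print(req.marker)          # python_version >= "3.8"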
specifiers.py000064400000115472150044141310007255 0ustar00# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::

    from pip._vendor.packaging.specifiers import Specifier, SpecifierSet, InvalidSpecifier
    from pip._vendor.packaging.version import Version
"""

from __future__ import annotations

import abc
import itertools
import re
from typing import Callable, Iterable, Iterator, TypeVar, Union

from .utils import canonicalize_version
from .version import Version

UnparsedVersion = Union[Version, str]
UnparsedVersionVar = TypeVar("UnparsedVersionVar", bound=UnparsedVersion)
CallableOperator = Callable[[Version, str], bool]


def _coerce_version(version: UnparsedVersion) -> Version:
    if not isinstance(version, Version):
        version = Version(version)
    return version


class InvalidSpecifier(ValueError):
    """
    Raised when attempting to create a :class:`Specifier` with a specifier
    string that is invalid.

    >>> Specifier("lolwat")
    Traceback (most recent call last):
        ...
    packaging.specifiers.InvalidSpecifier: Invalid specifier: 'lolwat'
    """


class BaseSpecifier(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def __str__(self) -> str:
        """
        Returns the str representation of this Specifier-like object. This
        should be representative of the Specifier itself.
        """

    @abc.abstractmethod
    def __hash__(self) -> int:
        """
        Returns a hash value for this Specifier-like object.
        """

    @abc.abstractmethod
    def __eq__(self, other: object) -> bool:
        """
        Returns a boolean representing whether or not the two Specifier-like
        objects are equal.

        :param other: The other object to check against.
        """

    @property
    @abc.abstractmethod
    def prereleases(self) -> bool | None:
        """Whether or not pre-releases as a whole are allowed.

        This can be set to either ``True`` or ``False`` to explicitly enable or disable
        prereleases or it can be set to ``None`` (the default) to use default semantics.
        """

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        """Setter for :attr:`prereleases`.

        :param value: The value to set.
        """

    @abc.abstractmethod
    def contains(self, item: str, prereleases: bool | None = None) -> bool:
        """
        Determines if the given item is contained within this specifier.
        """

    @abc.abstractmethod
    def filter(
        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
    ) -> Iterator[UnparsedVersionVar]:
        """
        Takes an iterable of items and filters them so that only items which
        are contained within this specifier are allowed in it.
        """


class Specifier(BaseSpecifier):
    """This class abstracts handling of version specifiers.

    .. tip::

        It is generally not required to instantiate this manually. You should instead
        prefer to work with :class:`SpecifierSet`, which can parse
        comma-separated version specifiers (which is what package metadata contains).
    """

    _operator_regex_str = r"""
        (?P<operator>(~=|==|!=|<=|>=|<|>|===))
        """
    _version_regex_str = r"""
        (?P<version>
            (?:
                # The identity operators allow for an escape hatch that will
                # do an exact string match of the version you wish to install.
                # This will not be parsed by PEP 440 and we cannot determine
                # any semantic meaning from it. This operator is discouraged
                # but included entirely as an escape hatch.
                (?<====)  # Only match for the identity operator
                \s*
                [^\s;)]*  # The arbitrary version can be just about anything,
                          # we match everything except for whitespace, a
                          # semi-colon for marker support, and a closing paren
                          # since versions can be enclosed in them.
            )
            |
            (?:
                # The (non)equality operators allow for wild card and local
                # versions to be specified so we have to define these two
                # operators separately to enable that.
                (?<===|!=)            # Only match for equals and not equals

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)*   # release

                # You cannot use a wild card and a pre-release, post-release, a dev or
                # local version together so group them with a | and make them optional.
                (?:
                    \.\*  # Wild card syntax of .*
                    |
                    (?:                                  # pre release
                        [-_\.]?
                        (alpha|beta|preview|pre|a|b|c|rc)
                        [-_\.]?
                        [0-9]*
                    )?
                    (?:                                  # post release
                        (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                    )?
                    (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
                    (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
                )?
            )
            |
            (?:
                # The compatible operator requires at least two digits in the
                # release segment.
                (?<=~=)               # Only match for the compatible operator

                \s*
                v?
                (?:[0-9]+!)?          # epoch
                [0-9]+(?:\.[0-9]+)+   # release  (We have a + instead of a *)
                (?:                   # pre release
                    [-_\.]?
                    (alpha|beta|preview|pre|a|b|c|rc)
                    [-_\.]?
                    [0-9]*
                )?
                (?:                                   # post release
                    (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
                )?
                (?:[-_\.]?dev[-_\.]?[0-9]*)?          # dev release
            )
            |
            (?:
                # All other operators only allow a sub set of what the
                # (non)equality operators do. Specifically they do not allow
                # local versions to be specified nor do they allow the prefix
                # matching wild cards.
                (?=": "greater_than_equal",
        "<": "less_than",
        ">": "greater_than",
        "===": "arbitrary",
    }

    def __init__(self, spec: str = "", prereleases: bool | None = None) -> None:
        """Initialize a Specifier instance.

        :param spec:
            The string representation of a specifier which will be parsed and
            normalized before use.
        :param prereleases:
            This tells the specifier if it should accept prerelease versions if
            applicable or not. The default of ``None`` will autodetect it from the
            given specifiers.
        :raises InvalidSpecifier:
            If the given specifier is invalid (i.e. bad syntax).
        """
        match = self._regex.search(spec)
        if not match:
            raise InvalidSpecifier(f"Invalid specifier: '{spec}'")

        self._spec: tuple[str, str] = (
            match.group("operator").strip(),
            match.group("version").strip(),
        )

        # Store whether or not this Specifier should accept prereleases
        self._prereleases = prereleases

    # https://github.com/python/mypy/pull/13475#pullrequestreview-1079784515
    @property  # type: ignore[override]
    def prereleases(self) -> bool:
        # If there is an explicit prereleases set for this, then we'll just
        # blindly use that.
        if self._prereleases is not None:
            return self._prereleases

        # Look at all of our specifiers and determine if they are inclusive
        # operators, and if they are, whether they include an explicit
        # prerelease.
        operator, version = self._spec
        if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*, if it does we
            # want to remove before parsing.
            if operator == "==" and version.endswith(".*"):
                version = version[:-2]

            # Parse the version, and if it is a pre-release then this
            # specifier allows pre-releases.
            if Version(version).is_prerelease:
                return True

        return False

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    @property
    def operator(self) -> str:
        """The operator of this specifier.

        >>> Specifier("==1.2.3").operator
        '=='
        """
        return self._spec[0]

    @property
    def version(self) -> str:
        """The version of this specifier.

        >>> Specifier("==1.2.3").version
        '1.2.3'
        """
        return self._spec[1]

    def __repr__(self) -> str:
        """A representation of the Specifier that shows all internal state.

        >>> Specifier('>=1.0.0')
        <Specifier('>=1.0.0')>
        >>> Specifier('>=1.0.0', prereleases=False)
        <Specifier('>=1.0.0', prereleases=False)>
        >>> Specifier('>=1.0.0', prereleases=True)
        <Specifier('>=1.0.0', prereleases=True)>
        """
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return f"<{self.__class__.__name__}({str(self)!r}{pre})>"

    def __str__(self) -> str:
        """A string representation of the Specifier that can be round-tripped.

        >>> str(Specifier('>=1.0.0'))
        '>=1.0.0'
        >>> str(Specifier('>=1.0.0', prereleases=False))
        '>=1.0.0'
        """
        return "{}{}".format(*self._spec)

    @property
    def _canonical_spec(self) -> tuple[str, str]:
        canonical_version = canonicalize_version(
            self._spec[1],
            strip_trailing_zero=(self._spec[0] != "~="),
        )
        return self._spec[0], canonical_version

    def __hash__(self) -> int:
        return hash(self._canonical_spec)

    def __eq__(self, other: object) -> bool:
        """Whether or not the two Specifier-like objects are equal.

        :param other: The other object to check against.

        The value of :attr:`prereleases` is ignored.

        >>> Specifier("==1.2.3") == Specifier("== 1.2.3.0")
        True
        >>> (Specifier("==1.2.3", prereleases=False) ==
        ...  Specifier("==1.2.3", prereleases=True))
        True
        >>> Specifier("==1.2.3") == "==1.2.3"
        True
        >>> Specifier("==1.2.3") == Specifier("==1.2.4")
        False
        >>> Specifier("==1.2.3") == Specifier("~=1.2.3")
        False
        """
        if isinstance(other, str):
            try:
                other = self.__class__(str(other))
            except InvalidSpecifier:
                return NotImplemented
        elif not isinstance(other, self.__class__):
            return NotImplemented

        return self._canonical_spec == other._canonical_spec

    def _get_operator(self, op: str) -> CallableOperator:
        operator_callable: CallableOperator = getattr(
            self, f"_compare_{self._operators[op]}"
        )
        return operator_callable

    def _compare_compatible(self, prospective: Version, spec: str) -> bool:
        # Compatible releases have an equivalent combination of >= and ==. That
        # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.

        # We want everything but the last item in the version, but we want to
        # ignore suffix segments.
        prefix = _version_join(
            list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
        )

        # Add the prefix notation to the end of our string
        prefix += ".*"

        return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
            prospective, prefix
        )
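
    # A quick illustration of the equivalence described above (a sketch, not
    # part of the original control flow): ~=2.2 accepts anything matching
    # >=2.2 within the 2.* series.
    #
    #     >>> "2.5" in Specifier("~=2.2")
    #     True
    #     >>> "3.0" in Specifier("~=2.2")
    #     False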

    def _compare_equal(self, prospective: Version, spec: str) -> bool:
        # We need special logic to handle prefix matching
        if spec.endswith(".*"):
            # In the case of prefix matching we want to ignore local segment.
            normalized_prospective = canonicalize_version(
                prospective.public, strip_trailing_zero=False
            )
            # Get the normalized version string ignoring the trailing .*
            normalized_spec = canonicalize_version(spec[:-2], strip_trailing_zero=False)
            # Split the spec out by bangs and dots, and pretend that there is
            # an implicit dot in between a release segment and a pre-release segment.
            split_spec = _version_split(normalized_spec)

            # Split the prospective version out by bangs and dots, and pretend
            # that there is an implicit dot in between a release segment and
            # a pre-release segment.
            split_prospective = _version_split(normalized_prospective)

            # 0-pad the prospective version before shortening it to get the correct
            # shortened version.
            padded_prospective, _ = _pad_version(split_prospective, split_spec)

            # Shorten the prospective version to be the same length as the spec
            # so that we can determine if the specifier is a prefix of the
            # prospective version or not.
            shortened_prospective = padded_prospective[: len(split_spec)]

            return shortened_prospective == split_spec
        else:
            # Convert our spec string into a Version
            spec_version = Version(spec)

            # If the specifier does not have a local segment, then we want to
            # act as if the prospective version also does not have a local
            # segment.
            if not spec_version.local:
                prospective = Version(prospective.public)

            return prospective == spec_version
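
    # Illustrative behaviour of both branches above (added sketch):
    #
    #     >>> "1.4.5" in Specifier("==1.4.*")        # prefix matching
    #     True
    #     >>> "1.5.0" in Specifier("==1.4.*")
    #     False
    #     >>> "1.4.5+local" in Specifier("==1.4.5")  # local segment ignored
    #     True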

    def _compare_not_equal(self, prospective: Version, spec: str) -> bool:
        return not self._compare_equal(prospective, spec)

    def _compare_less_than_equal(self, prospective: Version, spec: str) -> bool:
        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) <= Version(spec)

    def _compare_greater_than_equal(self, prospective: Version, spec: str) -> bool:
        # NB: Local version identifiers are NOT permitted in the version
        # specifier, so local version labels can be universally removed from
        # the prospective version.
        return Version(prospective.public) >= Version(spec)

    def _compare_less_than(self, prospective: Version, spec_str: str) -> bool:
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is less than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective < spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1 should
        # not match 3.1.dev0, but should match 3.0.dev0).
        if not spec.is_prerelease and prospective.is_prerelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that the prospective version is both
        # less than the spec version *and* it's not a pre-release of the same
        # version in the spec.
        return True

    def _compare_greater_than(self, prospective: Version, spec_str: str) -> bool:
        # Convert our spec to a Version instance, since we'll want to work with
        # it as a version.
        spec = Version(spec_str)

        # Check to see if the prospective version is greater than the spec
        # version. If it's not we can short circuit and just return False now
        # instead of doing extra unneeded work.
        if not prospective > spec:
            return False

        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept
        # post-release versions for the version mentioned in the specifier
        # (e.g. >3.1 should not match 3.1.post0, but should match 3.2.post0).
        if not spec.is_postrelease and prospective.is_postrelease:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # Ensure that we do not allow a local version of the version mentioned
        # in the specifier, which is technically greater than, to match.
        if prospective.local is not None:
            if Version(prospective.base_version) == Version(spec.base_version):
                return False

        # If we've gotten to here, it means that the prospective version is both
        # greater than the spec version *and* it's not a post-release or local
        # version of the same version in the spec.
        return True

    def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
        return str(prospective).lower() == str(spec).lower()

    def __contains__(self, item: str | Version) -> bool:
        """Return whether or not the item is contained in this specifier.

        :param item: The item to check for.

        This is used for the ``in`` operator and behaves the same as
        :meth:`contains` with no ``prereleases`` argument passed.

        >>> "1.2.3" in Specifier(">=1.2.3")
        True
        >>> Version("1.2.3") in Specifier(">=1.2.3")
        True
        >>> "1.0.0" in Specifier(">=1.2.3")
        False
        >>> "1.3.0a1" in Specifier(">=1.2.3")
        False
        >>> "1.3.0a1" in Specifier(">=1.2.3", prereleases=True)
        True
        """
        return self.contains(item)

    def contains(self, item: UnparsedVersion, prereleases: bool | None = None) -> bool:
        """Return whether or not the item is contained in this specifier.

        :param item:
            The item to check for, which can be a version string or a
            :class:`Version` instance.
        :param prereleases:
            Whether or not to match prereleases with this Specifier. If set to
            ``None`` (the default), it uses :attr:`prereleases` to determine
            whether or not prereleases are allowed.

        >>> Specifier(">=1.2.3").contains("1.2.3")
        True
        >>> Specifier(">=1.2.3").contains(Version("1.2.3"))
        True
        >>> Specifier(">=1.2.3").contains("1.0.0")
        False
        >>> Specifier(">=1.2.3").contains("1.3.0a1")
        False
        >>> Specifier(">=1.2.3", prereleases=True).contains("1.3.0a1")
        True
        >>> Specifier(">=1.2.3").contains("1.3.0a1", prereleases=True)
        True
        """

        # Determine if prereleases are to be allowed or not.
        if prereleases is None:
            prereleases = self.prereleases

        # Normalize item to a Version, this allows us to have a shortcut for
        # "2.0" in Specifier(">=2")
        normalized_item = _coerce_version(item)

        # Determine if we should be supporting prereleases in this specifier
        # or not; if we do not support prereleases then we can short-circuit
        # the logic if this version is a prerelease.
        if normalized_item.is_prerelease and not prereleases:
            return False

        # Actually do the comparison to determine if this item is contained
        # within this Specifier or not.
        operator_callable: CallableOperator = self._get_operator(self.operator)
        return operator_callable(normalized_item, self.version)

    def filter(
        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
    ) -> Iterator[UnparsedVersionVar]:
        """Filter items in the given iterable, that match the specifier.

        :param iterable:
            An iterable that can contain version strings and :class:`Version` instances.
            The items in the iterable will be filtered according to the specifier.
        :param prereleases:
            Whether or not to allow prereleases in the returned iterator. If set to
            ``None`` (the default), it will intelligently decide whether to allow
            prereleases or not (based on the :attr:`prereleases` attribute, and
            whether the only versions matching are prereleases).

        This method is smarter than just ``filter(Specifier().contains, [...])``
        because it implements the rule from :pep:`440` that a prerelease item
        SHOULD be accepted if no other versions match the given specifier.

        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
        ['1.3']
        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.2.3", "1.3", Version("1.4")]))
        ['1.2.3', '1.3', <Version('1.4')>]
        >>> list(Specifier(">=1.2.3").filter(["1.2", "1.5a1"]))
        ['1.5a1']
        >>> list(Specifier(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
        ['1.3', '1.5a1']
        >>> list(Specifier(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
        ['1.3', '1.5a1']
        """

        yielded = False
        found_prereleases = []

        kw = {"prereleases": prereleases if prereleases is not None else True}

        # Attempt to iterate over all the values in the iterable and if any of
        # them match, yield them.
        for version in iterable:
            parsed_version = _coerce_version(version)

            if self.contains(parsed_version, **kw):
                # If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
                # else matches this specifier.
                if parsed_version.is_prerelease and not (
                    prereleases or self.prereleases
                ):
                    found_prereleases.append(version)
                # Either this is not a prerelease, or we should have been
                # accepting prereleases from the beginning.
                else:
                    yielded = True
                    yield version

        # Now that we've iterated over everything, determine if we've yielded
        # any values, and if we have not and we have any prereleases stored up
        # then we will go ahead and yield the prereleases.
        if not yielded and found_prereleases:
            for version in found_prereleases:
                yield version


_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")


def _version_split(version: str) -> list[str]:
    """Split version into components.

    The split components are intended for version comparison. The logic does
    not attempt to retain the original version string, so joining the
    components back with :func:`_version_join` may not produce the original
    version string.
    """
    result: list[str] = []

    epoch, _, rest = version.rpartition("!")
    result.append(epoch or "0")

    for item in rest.split("."):
        match = _prefix_regex.search(item)
        if match:
            result.extend(match.groups())
        else:
            result.append(item)
    return result
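
# A rough illustration of the split (added sketch): note the implicit "0"
# epoch and the pre-release suffix separated from its numeric prefix.
#
#     >>> _version_split("1.0.2")
#     ['0', '1', '0', '2']
#     >>> _version_split("2!1.2.3rc1")
#     ['2', '1', '2', '3', 'rc1']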


def _version_join(components: list[str]) -> str:
    """Join split version components into a version string.

    This function assumes the input came from :func:`_version_split`, where the
    first component must be the epoch (either empty or numeric), and all other
    components numeric.
    """
    epoch, *rest = components
    return f"{epoch}!{'.'.join(rest)}"
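
# Illustrative round-trip of the joiner (added sketch); the epoch is always
# rendered, even when it is "0", which is why joining may not reproduce the
# original string.
#
#     >>> _version_join(['0', '1', '0', '2'])
#     '0!1.0.2'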


def _is_not_suffix(segment: str) -> bool:
    return not any(
        segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
    )


def _pad_version(left: list[str], right: list[str]) -> tuple[list[str], list[str]]:
    left_split, right_split = [], []

    # Get the release segment of our versions
    left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
    right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))

    # Get the rest of our versions
    left_split.append(left[len(left_split[0]) :])
    right_split.append(right[len(right_split[0]) :])

    # Insert our padding
    left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
    right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))

    return (
        list(itertools.chain.from_iterable(left_split)),
        list(itertools.chain.from_iterable(right_split)),
    )
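
# A small example of the padding behaviour (added sketch); only the leading
# release segments are padded with zeros, never the suffix segments.
#
#     >>> _pad_version(['1', '2'], ['1', '2', '3'])
#     (['1', '2', '0'], ['1', '2', '3'])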


class SpecifierSet(BaseSpecifier):
    """This class abstracts handling of a set of version specifiers.

    It can be passed a single specifier (``>=3.0``), a comma-separated list of
    specifiers (``>=3.0,!=3.1``), or no specifier at all.
    """

    def __init__(self, specifiers: str = "", prereleases: bool | None = None) -> None:
        """Initialize a SpecifierSet instance.

        :param specifiers:
            The string representation of a specifier or a comma-separated list of
            specifiers which will be parsed and normalized before use.
        :param prereleases:
            This tells the SpecifierSet if it should accept prerelease versions if
            applicable or not. The default of ``None`` will autodetect it from the
            given specifiers.

        :raises InvalidSpecifier:
            If the given ``specifiers`` are not parseable, then this exception will be
            raised.
        """

        # Split on `,` to break each individual specifier into its own item, and
        # strip each item to remove leading/trailing whitespace.
        split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]

        # Make each individual specifier a Specifier and save in a frozen set for later.
        self._specs = frozenset(map(Specifier, split_specifiers))

        # Store our prereleases value so we can use it later to determine if
        # we accept prereleases or not.
        self._prereleases = prereleases

    @property
    def prereleases(self) -> bool | None:
        # If we have been given an explicit prerelease modifier, then we'll
        # pass that through here.
        if self._prereleases is not None:
            return self._prereleases

        # If we don't have any specifiers, and we don't have a forced value,
        # then we'll just return None since we don't know if this should have
        # pre-releases or not.
        if not self._specs:
            return None

        # Otherwise we'll see if any of the given specifiers accept
        # prereleases, if any of them do we'll return True, otherwise False.
        return any(s.prereleases for s in self._specs)

    @prereleases.setter
    def prereleases(self, value: bool) -> None:
        self._prereleases = value

    def __repr__(self) -> str:
        """A representation of the specifier set that shows all internal state.

        Note that the ordering of the individual specifiers within the set may not
        match the input string.

        >>> SpecifierSet('>=1.0.0,!=2.0.0')
        <SpecifierSet('!=2.0.0,>=1.0.0')>
        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
        >>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
        <SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
        """
        pre = (
            f", prereleases={self.prereleases!r}"
            if self._prereleases is not None
            else ""
        )

        return f""

    def __str__(self) -> str:
        """A string representation of the specifier set that can be round-tripped.

        Note that the ordering of the individual specifiers within the set may not
        match the input string.

        >>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
        '!=1.0.1,>=1.0.0'
        >>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
        '!=1.0.1,>=1.0.0'
        """
        return ",".join(sorted(str(s) for s in self._specs))

    def __hash__(self) -> int:
        return hash(self._specs)

    def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
        """Return a SpecifierSet which is a combination of the two sets.

        :param other: The other object to combine with.

        >>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
        >>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
        <SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
        """
        if isinstance(other, str):
            other = SpecifierSet(other)
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        specifier = SpecifierSet()
        specifier._specs = frozenset(self._specs | other._specs)

        if self._prereleases is None and other._prereleases is not None:
            specifier._prereleases = other._prereleases
        elif self._prereleases is not None and other._prereleases is None:
            specifier._prereleases = self._prereleases
        elif self._prereleases == other._prereleases:
            specifier._prereleases = self._prereleases
        else:
            raise ValueError(
                "Cannot combine SpecifierSets with True and False prerelease "
                "overrides."
            )

        return specifier

    def __eq__(self, other: object) -> bool:
        """Whether or not the two SpecifierSet-like objects are equal.

        :param other: The other object to check against.

        The value of :attr:`prereleases` is ignored.

        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
        True
        >>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
        ...  SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
        False
        >>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
        False
        """
        if isinstance(other, (str, Specifier)):
            other = SpecifierSet(str(other))
        elif not isinstance(other, SpecifierSet):
            return NotImplemented

        return self._specs == other._specs

    def __len__(self) -> int:
        """Returns the number of specifiers in this specifier set."""
        return len(self._specs)

    def __iter__(self) -> Iterator[Specifier]:
        """
        Returns an iterator over all the underlying :class:`Specifier` instances
        in this specifier set.

        >>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
        [<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
        """
        return iter(self._specs)

    def __contains__(self, item: UnparsedVersion) -> bool:
        """Return whether or not the item is contained in this specifier.

        :param item: The item to check for.

        This is used for the ``in`` operator and behaves the same as
        :meth:`contains` with no ``prereleases`` argument passed.

        >>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
        True
        >>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
        True
        >>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
        False
        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
        False
        >>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
        True
        """
        return self.contains(item)

    def contains(
        self,
        item: UnparsedVersion,
        prereleases: bool | None = None,
        installed: bool | None = None,
    ) -> bool:
        """Return whether or not the item is contained in this SpecifierSet.

        :param item:
            The item to check for, which can be a version string or a
            :class:`Version` instance.
        :param prereleases:
            Whether or not to match prereleases with this SpecifierSet. If set to
            ``None`` (the default), it uses :attr:`prereleases` to determine
            whether or not prereleases are allowed.

        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
        False
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
        False
        >>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True).contains("1.3.0a1")
        True
        >>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
        True
        """
        # Ensure that our item is a Version instance.
        if not isinstance(item, Version):
            item = Version(item)

        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # We can determine if we're going to allow pre-releases by looking to
        # see if any of the underlying items supports them. If none of them do
        # and this item is a pre-release then we do not allow it and we can
        # short circuit that here.
        # Note: This means that 1.0.dev1 would not be contained in something
        #       like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
        if not prereleases and item.is_prerelease:
            return False

        if installed and item.is_prerelease:
            item = Version(item.base_version)

        # We simply dispatch to the underlying specs here to make sure that the
        # given version is contained within all of them.
        # Note: This use of all() here means that an empty set of specifiers
        #       will always return True, this is an explicit design decision.
        return all(s.contains(item, prereleases=prereleases) for s in self._specs)

    def filter(
        self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
    ) -> Iterator[UnparsedVersionVar]:
        """Filter items in the given iterable, that match the specifiers in this set.

        :param iterable:
            An iterable that can contain version strings and :class:`Version` instances.
            The items in the iterable will be filtered according to the specifier.
        :param prereleases:
            Whether or not to allow prereleases in the returned iterator. If set to
            ``None`` (the default), it will intelligently decide whether to allow
            prereleases or not (based on the :attr:`prereleases` attribute, and
            whether the only versions matching are prereleases).

        This method is smarter than just ``filter(SpecifierSet(...).contains, [...])``
        because it implements the rule from :pep:`440` that a prerelease item
        SHOULD be accepted if no other versions match the given specifier.

        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
        ['1.3']
        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
        ['1.3', <Version('1.4')>]
        >>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
        []
        >>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
        ['1.3', '1.5a1']
        >>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
        ['1.3', '1.5a1']

        An "empty" SpecifierSet will filter items based on the presence of prerelease
        versions in the set.

        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
        ['1.3']
        >>> list(SpecifierSet("").filter(["1.5a1"]))
        ['1.5a1']
        >>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
        ['1.3', '1.5a1']
        >>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
        ['1.3', '1.5a1']
        """
        # Determine if we're forcing a prerelease or not, if we're not forcing
        # one for this particular filter call, then we'll use whatever the
        # SpecifierSet thinks for whether or not we should support prereleases.
        if prereleases is None:
            prereleases = self.prereleases

        # If we have any specifiers, then we want to wrap our iterable in the
        # filter method for each one, this will act as a logical AND amongst
        # each specifier.
        if self._specs:
            for spec in self._specs:
                iterable = spec.filter(iterable, prereleases=bool(prereleases))
            return iter(iterable)
        # If we do not have any specifiers, then we need to have a rough filter
        # which will filter out any pre-releases, unless there are no final
        # releases.
        else:
            filtered: list[UnparsedVersionVar] = []
            found_prereleases: list[UnparsedVersionVar] = []

            for item in iterable:
                parsed_version = _coerce_version(item)

                # Store any item which is a pre-release for later unless we've
                # already found a final version or we are accepting prereleases
                if parsed_version.is_prerelease and not prereleases:
                    if not filtered:
                        found_prereleases.append(item)
                else:
                    filtered.append(item)

            # If we've found no items except for pre-releases, then we'll go
            # ahead and use the pre-releases
            if not filtered and found_prereleases and prereleases is None:
                return iter(found_prereleases)

            return iter(filtered)
tags.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import annotations

import logging
import platform
import re
import struct
import subprocess
import sys
import sysconfig
from importlib.machinery import EXTENSION_SUFFIXES
from typing import (
    Iterable,
    Iterator,
    Sequence,
    Tuple,
    cast,
)

from . import _manylinux, _musllinux

logger = logging.getLogger(__name__)

PythonVersion = Sequence[int]
AppleVersion = Tuple[int, int]

INTERPRETER_SHORT_NAMES: dict[str, str] = {
    "python": "py",  # Generic.
    "cpython": "cp",
    "pypy": "pp",
    "ironpython": "ip",
    "jython": "jy",
}


_32_BIT_INTERPRETER = struct.calcsize("P") == 4


class Tag:
    """
    A representation of the tag triple for a wheel.

    Instances are considered immutable and thus are hashable. Equality checking
    is also supported.
    """

    __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]

    def __init__(self, interpreter: str, abi: str, platform: str) -> None:
        self._interpreter = interpreter.lower()
        self._abi = abi.lower()
        self._platform = platform.lower()
        # The __hash__ of every single element in a Set[Tag] will be evaluated each time
        # that a set calls its `.isdisjoint()` method, which may be called hundreds of
        # times when scanning a page of links for packages with tags matching that
        # Set[Tag]. Pre-computing the value here produces significant speedups for
        # downstream consumers.
        self._hash = hash((self._interpreter, self._abi, self._platform))

    @property
    def interpreter(self) -> str:
        return self._interpreter

    @property
    def abi(self) -> str:
        return self._abi

    @property
    def platform(self) -> str:
        return self._platform

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Tag):
            return NotImplemented

        return (
            (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
            and (self._platform == other._platform)
            and (self._abi == other._abi)
            and (self._interpreter == other._interpreter)
        )

    def __hash__(self) -> int:
        return self._hash

    def __str__(self) -> str:
        return f"{self._interpreter}-{self._abi}-{self._platform}"

    def __repr__(self) -> str:
        return f"<{self} @ {id(self)}>"


def parse_tag(tag: str) -> frozenset[Tag]:
    """
    Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.

    Returning a set is required due to the possibility that the tag is a
    compressed tag set.
    """
    tags = set()
    interpreters, abis, platforms = tag.split("-")
    for interpreter in interpreters.split("."):
        for abi in abis.split("."):
            for platform_ in platforms.split("."):
                tags.add(Tag(interpreter, abi, platform_))
    return frozenset(tags)
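
# An illustrative expansion of a compressed tag set (added sketch):
#
#     >>> sorted(str(t) for t in parse_tag("cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64"))
#     ['cp310-cp310-manylinux2014_x86_64', 'cp310-cp310-manylinux_2_17_x86_64']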


def _get_config_var(name: str, warn: bool = False) -> int | str | None:
    value: int | str | None = sysconfig.get_config_var(name)
    if value is None and warn:
        logger.debug(
            "Config variable '%s' is unset, Python ABI tag may be incorrect", name
        )
    return value


def _normalize_string(string: str) -> str:
    return string.replace(".", "_").replace("-", "_").replace(" ", "_")


def _is_threaded_cpython(abis: list[str]) -> bool:
    """
    Determine if the ABI corresponds to a threaded (`--disable-gil`) build.

    The threaded builds are indicated by a "t" in the abiflags.
    """
    if len(abis) == 0:
        return False
    # expect e.g., cp313
    m = re.match(r"cp\d+(.*)", abis[0])
    if not m:
        return False
    abiflags = m.group(1)
    return "t" in abiflags


def _abi3_applies(python_version: PythonVersion, threading: bool) -> bool:
    """
    Determine if the Python version supports abi3.

    PEP 384 was first implemented in Python 3.2. The threaded (`--disable-gil`)
    builds do not support abi3.
    """
    return len(python_version) > 1 and tuple(python_version) >= (3, 2) and not threading


def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> list[str]:
    py_version = tuple(py_version)  # To allow for version comparison.
    abis = []
    version = _version_nodot(py_version[:2])
    threading = debug = pymalloc = ucs4 = ""
    with_debug = _get_config_var("Py_DEBUG", warn)
    has_refcount = hasattr(sys, "gettotalrefcount")
    # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
    # extension modules is the best option.
    # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
    has_ext = "_d.pyd" in EXTENSION_SUFFIXES
    if with_debug or (with_debug is None and (has_refcount or has_ext)):
        debug = "d"
    if py_version >= (3, 13) and _get_config_var("Py_GIL_DISABLED", warn):
        threading = "t"
    if py_version < (3, 8):
        with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
        if with_pymalloc or with_pymalloc is None:
            pymalloc = "m"
        if py_version < (3, 3):
            unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
            if unicode_size == 4 or (
                unicode_size is None and sys.maxunicode == 0x10FFFF
            ):
                ucs4 = "u"
    elif debug:
        # Debug builds can also load "normal" extension modules.
        # We can also assume no UCS-4 or pymalloc requirement.
        abis.append(f"cp{version}{threading}")
    abis.insert(0, f"cp{version}{threading}{debug}{pymalloc}{ucs4}")
    return abis
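
# On a typical release build (no debug, no free-threading) this collapses to a
# single ABI tag; an added sketch assuming such a build of CPython 3.11:
#
#     >>> _cpython_abis((3, 11))
#     ['cp311']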


def cpython_tags(
    python_version: PythonVersion | None = None,
    abis: Iterable[str] | None = None,
    platforms: Iterable[str] | None = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a CPython interpreter.

    The tags consist of:
    - cp<python_version>-<abi>-<platform>
    - cp<python_version>-abi3-<platform>
    - cp<python_version>-none-<platform>
    - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.

    If python_version only specifies a major version, then user-provided ABIs and
    the 'none' ABI tag will be used.

    If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
    their normal position and not at the beginning.
    """
    if not python_version:
        python_version = sys.version_info[:2]

    interpreter = f"cp{_version_nodot(python_version[:2])}"

    if abis is None:
        if len(python_version) > 1:
            abis = _cpython_abis(python_version, warn)
        else:
            abis = []
    abis = list(abis)
    # 'abi3' and 'none' are explicitly handled later.
    for explicit_abi in ("abi3", "none"):
        try:
            abis.remove(explicit_abi)
        except ValueError:
            pass

    platforms = list(platforms or platform_tags())
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)

    threading = _is_threaded_cpython(abis)
    use_abi3 = _abi3_applies(python_version, threading)
    if use_abi3:
        yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
    yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)

    if use_abi3:
        for minor_version in range(python_version[1] - 1, 1, -1):
            for platform_ in platforms:
                interpreter = "cp{version}".format(
                    version=_version_nodot((python_version[0], minor_version))
                )
                yield Tag(interpreter, "abi3", platform_)


def _generic_abi() -> list[str]:
    """
    Return the ABI tag based on EXT_SUFFIX.
    """
    # The following are examples of `EXT_SUFFIX`.
    # We want to keep the parts which are related to the ABI and remove the
    # parts which are related to the platform:
    # - linux:   '.cpython-310-x86_64-linux-gnu.so' => cp310
    # - mac:     '.cpython-310-darwin.so'           => cp310
    # - win:     '.cp310-win_amd64.pyd'             => cp310
    # - win:     '.pyd'                             => cp37 (uses _cpython_abis())
    # - pypy:    '.pypy38-pp73-x86_64-linux-gnu.so' => pypy38_pp73
    # - graalpy: '.graalpy-38-native-x86_64-darwin.dylib'
    #                                               => graalpy_38_native

    ext_suffix = _get_config_var("EXT_SUFFIX", warn=True)
    if not isinstance(ext_suffix, str) or ext_suffix[0] != ".":
        raise SystemError("invalid sysconfig.get_config_var('EXT_SUFFIX')")
    parts = ext_suffix.split(".")
    if len(parts) < 3:
        # CPython 3.7 and earlier use ".pyd" on Windows.
        return _cpython_abis(sys.version_info[:2])
    soabi = parts[1]
    if soabi.startswith("cpython"):
        # non-windows
        abi = "cp" + soabi.split("-")[1]
    elif soabi.startswith("cp"):
        # windows
        abi = soabi.split("-")[0]
    elif soabi.startswith("pypy"):
        abi = "-".join(soabi.split("-")[:2])
    elif soabi.startswith("graalpy"):
        abi = "-".join(soabi.split("-")[:3])
    elif soabi:
        # pyston, ironpython, others?
        abi = soabi
    else:
        return []
    return [_normalize_string(abi)]


def generic_tags(
    interpreter: str | None = None,
    abis: Iterable[str] | None = None,
    platforms: Iterable[str] | None = None,
    *,
    warn: bool = False,
) -> Iterator[Tag]:
    """
    Yields the tags for a generic interpreter.

    The tags consist of:
    - <interpreter>-<abi>-<platform>

    The "none" ABI will be added if it was not explicitly provided.
    """
    if not interpreter:
        interp_name = interpreter_name()
        interp_version = interpreter_version(warn=warn)
        interpreter = "".join([interp_name, interp_version])
    if abis is None:
        abis = _generic_abi()
    else:
        abis = list(abis)
    platforms = list(platforms or platform_tags())
    if "none" not in abis:
        abis.append("none")
    for abi in abis:
        for platform_ in platforms:
            yield Tag(interpreter, abi, platform_)


def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
    """
    Yields Python versions in descending order.

    After the latest version, the major-only version will be yielded, and then
    all previous versions of that major version.
    """
    if len(py_version) > 1:
        yield f"py{_version_nodot(py_version[:2])}"
    yield f"py{py_version[0]}"
    if len(py_version) > 1:
        for minor in range(py_version[1] - 1, -1, -1):
            yield f"py{_version_nodot((py_version[0], minor))}"


def compatible_tags(
    python_version: PythonVersion | None = None,
    interpreter: str | None = None,
    platforms: Iterable[str] | None = None,
) -> Iterator[Tag]:
    """
    Yields the sequence of tags that are compatible with a specific version of Python.

    The tags consist of:
    - py*-none-<platform>
    - <interpreter>-none-any  # ... if `interpreter` is provided.
    - py*-none-any
    """
    if not python_version:
        python_version = sys.version_info[:2]
    platforms = list(platforms or platform_tags())
    for version in _py_interpreter_range(python_version):
        for platform_ in platforms:
            yield Tag(version, "none", platform_)
    if interpreter:
        yield Tag(interpreter, "none", "any")
    for version in _py_interpreter_range(python_version):
        yield Tag(version, "none", "any")


def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
    if not is_32bit:
        return arch

    if arch.startswith("ppc"):
        return "ppc"

    return "i386"


def _mac_binary_formats(version: AppleVersion, cpu_arch: str) -> list[str]:
    formats = [cpu_arch]
    if cpu_arch == "x86_64":
        if version < (10, 4):
            return []
        formats.extend(["intel", "fat64", "fat32"])

    elif cpu_arch == "i386":
        if version < (10, 4):
            return []
        formats.extend(["intel", "fat32", "fat"])

    elif cpu_arch == "ppc64":
        # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
        if version > (10, 5) or version < (10, 4):
            return []
        formats.append("fat64")

    elif cpu_arch == "ppc":
        if version > (10, 6):
            return []
        formats.extend(["fat32", "fat"])

    if cpu_arch in {"arm64", "x86_64"}:
        formats.append("universal2")

    if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
        formats.append("universal")

    return formats
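
# Two representative outputs (added sketch): arm64 only gains "universal2",
# while x86_64 picks up the legacy fat/universal formats as well.
#
#     >>> _mac_binary_formats((11, 0), "arm64")
#     ['arm64', 'universal2']
#     >>> _mac_binary_formats((10, 15), "x86_64")
#     ['x86_64', 'intel', 'fat64', 'fat32', 'universal2', 'universal']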


def mac_platforms(
    version: AppleVersion | None = None, arch: str | None = None
) -> Iterator[str]:
    """
    Yields the platform tags for a macOS system.

    The `version` parameter is a two-item tuple specifying the macOS version to
    generate platform tags for. The `arch` parameter is the CPU architecture to
    generate platform tags for. Both parameters default to the appropriate value
    for the current system.
    """
    version_str, _, cpu_arch = platform.mac_ver()
    if version is None:
        version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
        if version == (10, 16):
            # When built against an older macOS SDK, Python will report macOS 10.16
            # instead of the real version.
            version_str = subprocess.run(
                [
                    sys.executable,
                    "-sS",
                    "-c",
                    "import platform; print(platform.mac_ver()[0])",
                ],
                check=True,
                env={"SYSTEM_VERSION_COMPAT": "0"},
                stdout=subprocess.PIPE,
                text=True,
            ).stdout
            version = cast("AppleVersion", tuple(map(int, version_str.split(".")[:2])))
    else:
        version = version
    if arch is None:
        arch = _mac_arch(cpu_arch)
    else:
        arch = arch

    if (10, 0) <= version and version < (11, 0):
        # Prior to Mac OS 11, each yearly release of Mac OS bumped the
        # "minor" version number.  The major version was always 10.
        for minor_version in range(version[1], -1, -1):
            compat_version = 10, minor_version
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=10, minor=minor_version, binary_format=binary_format
                )

    if version >= (11, 0):
        # Starting with Mac OS 11, each yearly release bumps the major version
        # number.   The minor versions are now the midyear updates.
        for major_version in range(version[0], 10, -1):
            compat_version = major_version, 0
            binary_formats = _mac_binary_formats(compat_version, arch)
            for binary_format in binary_formats:
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=major_version, minor=0, binary_format=binary_format
                )

    if version >= (11, 0):
        # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
        # Arm64 support was introduced in 11.0, so no Arm binaries from previous
        # releases exist.
        #
        # However, the "universal2" binary format can have a
        # macOS version earlier than 11.0 when the x86_64 part of the binary supports
        # that version of macOS.
        if arch == "x86_64":
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_formats = _mac_binary_formats(compat_version, arch)
                for binary_format in binary_formats:
                    yield "macosx_{major}_{minor}_{binary_format}".format(
                        major=compat_version[0],
                        minor=compat_version[1],
                        binary_format=binary_format,
                    )
        else:
            for minor_version in range(16, 3, -1):
                compat_version = 10, minor_version
                binary_format = "universal2"
                yield "macosx_{major}_{minor}_{binary_format}".format(
                    major=compat_version[0],
                    minor=compat_version[1],
                    binary_format=binary_format,
                )


def ios_platforms(
    version: AppleVersion | None = None, multiarch: str | None = None
) -> Iterator[str]:
    """
    Yields the platform tags for an iOS system.

    :param version: A two-item tuple specifying the iOS version to generate
        platform tags for. Defaults to the current iOS version.
    :param multiarch: The CPU architecture+ABI to generate platform tags for -
        (the value used by `sys.implementation._multiarch` e.g.,
        `arm64_iphoneos` or `x86_64_iphonesimulator`). Defaults to the current
        multiarch value.
    """
    if version is None:
        # if iOS is the current platform, ios_ver *must* be defined. However,
        # it won't exist for CPython versions before 3.13, which causes a mypy
        # error.
        _, release, _, _ = platform.ios_ver()  # type: ignore[attr-defined]
        version = cast("AppleVersion", tuple(map(int, release.split(".")[:2])))

    if multiarch is None:
        multiarch = sys.implementation._multiarch
    multiarch = multiarch.replace("-", "_")

    ios_platform_template = "ios_{major}_{minor}_{multiarch}"

    # Consider any iOS major.minor version from the version requested, down to
    # 12.0. 12.0 is the first iOS version that is known to have enough features
    # to support CPython. Consider every possible minor release up to X.9. The
    # highest the minor version has ever gone is 8 (14.8 and 15.8), but having
    # some extra candidates that won't ever match doesn't really hurt, and it
    # saves us from having to keep an explicit list of known iOS versions in the
    # code. Return the results in descending order of version number.

    # If the requested major version is less than 12, there won't be any matches.
    if version[0] < 12:
        return

    # Consider the actual X.Y version that was requested.
    yield ios_platform_template.format(
        major=version[0], minor=version[1], multiarch=multiarch
    )

    # Consider every minor version from X.0 to the minor version prior to the
    # version requested by the platform.
    for minor in range(version[1] - 1, -1, -1):
        yield ios_platform_template.format(
            major=version[0], minor=minor, multiarch=multiarch
        )

    for major in range(version[0] - 1, 11, -1):
        for minor in range(9, -1, -1):
            yield ios_platform_template.format(
                major=major, minor=minor, multiarch=multiarch
            )


def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
    linux = _normalize_string(sysconfig.get_platform())
    if not linux.startswith("linux_"):
        # we should never be here, just yield the sysconfig one and return
        yield linux
        return
    if is_32bit:
        if linux == "linux_x86_64":
            linux = "linux_i686"
        elif linux == "linux_aarch64":
            linux = "linux_armv8l"
    _, arch = linux.split("_", 1)
    archs = {"armv8l": ["armv8l", "armv7l"]}.get(arch, [arch])
    yield from _manylinux.platform_tags(archs)
    yield from _musllinux.platform_tags(archs)
    for arch in archs:
        yield f"linux_{arch}"


def _generic_platforms() -> Iterator[str]:
    yield _normalize_string(sysconfig.get_platform())


def platform_tags() -> Iterator[str]:
    """
    Provides the platform tags for this installation.
    """
    if platform.system() == "Darwin":
        return mac_platforms()
    elif platform.system() == "iOS":
        return ios_platforms()
    elif platform.system() == "Linux":
        return _linux_platforms()
    else:
        return _generic_platforms()


def interpreter_name() -> str:
    """
    Returns the name of the running interpreter.

    Some implementations have a reserved, two-letter abbreviation which will
    be returned when appropriate.
    """
    name = sys.implementation.name
    return INTERPRETER_SHORT_NAMES.get(name) or name


def interpreter_version(*, warn: bool = False) -> str:
    """
    Returns the version of the running interpreter.
    """
    version = _get_config_var("py_version_nodot", warn=warn)
    if version:
        version = str(version)
    else:
        version = _version_nodot(sys.version_info[:2])
    return version


def _version_nodot(version: PythonVersion) -> str:
    return "".join(map(str, version))


def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
    """
    Returns the sequence of tag triples for the running interpreter.

    The order of the sequence corresponds to priority order for the
    interpreter, from most to least important.
    """

    interp_name = interpreter_name()
    if interp_name == "cp":
        yield from cpython_tags(warn=warn)
    else:
        yield from generic_tags()

    if interp_name == "pp":
        interp = "pp3"
    elif interp_name == "cp":
        interp = "cp" + interpreter_version(warn=warn)
    else:
        interp = None
    yield from compatible_tags(interpreter=interp)
utils.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import annotations

import re
from typing import NewType, Tuple, Union, cast

from .tags import Tag, parse_tag
from .version import InvalidVersion, Version

BuildTag = Union[Tuple[()], Tuple[int, str]]
NormalizedName = NewType("NormalizedName", str)


class InvalidName(ValueError):
    """
    An invalid distribution name; users should refer to the packaging user guide.
    """


class InvalidWheelFilename(ValueError):
    """
    An invalid wheel filename was found; users should refer to PEP 427.
    """


class InvalidSdistFilename(ValueError):
    """
    An invalid sdist filename was found; users should refer to the packaging user guide.
    """


# Core metadata spec for `Name`
_validate_regex = re.compile(
    r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
)
_canonicalize_regex = re.compile(r"[-_.]+")
_normalized_regex = re.compile(r"^([a-z0-9]|[a-z0-9]([a-z0-9-](?!--))*[a-z0-9])$")
# PEP 427: The build number must start with a digit.
_build_tag_regex = re.compile(r"(\d+)(.*)")


def canonicalize_name(name: str, *, validate: bool = False) -> NormalizedName:
    if validate and not _validate_regex.match(name):
        raise InvalidName(f"name is invalid: {name!r}")
    # This is taken from PEP 503.
    value = _canonicalize_regex.sub("-", name).lower()
    return cast(NormalizedName, value)
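
# PEP 503 normalization in one line (added sketch): runs of ".", "_", and "-"
# collapse to a single "-", and the result is lower-cased.
#
#     >>> canonicalize_name("Foo.Bar_Baz")
#     'foo-bar-baz'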


def is_normalized_name(name: str) -> bool:
    return _normalized_regex.match(name) is not None


def canonicalize_version(
    version: Version | str, *, strip_trailing_zero: bool = True
) -> str:
    """
    This is very similar to Version.__str__, but has one subtle difference
    with the way it handles the release segment.
    """
    if isinstance(version, str):
        try:
            parsed = Version(version)
        except InvalidVersion:
            # Legacy versions cannot be normalized
            return version
    else:
        parsed = version

    parts = []

    # Epoch
    if parsed.epoch != 0:
        parts.append(f"{parsed.epoch}!")

    # Release segment
    release_segment = ".".join(str(x) for x in parsed.release)
    if strip_trailing_zero:
        # NB: This strips trailing '.0's to normalize
        release_segment = re.sub(r"(\.0)+$", "", release_segment)
    parts.append(release_segment)

    # Pre-release
    if parsed.pre is not None:
        parts.append("".join(str(x) for x in parsed.pre))

    # Post-release
    if parsed.post is not None:
        parts.append(f".post{parsed.post}")

    # Development release
    if parsed.dev is not None:
        parts.append(f".dev{parsed.dev}")

    # Local version segment
    if parsed.local is not None:
        parts.append(f"+{parsed.local}")

    return "".join(parts)


def parse_wheel_filename(
    filename: str,
) -> tuple[NormalizedName, Version, BuildTag, frozenset[Tag]]:
    if not filename.endswith(".whl"):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (extension must be '.whl'): {filename}"
        )

    filename = filename[:-4]
    dashes = filename.count("-")
    if dashes not in (4, 5):
        raise InvalidWheelFilename(
            f"Invalid wheel filename (wrong number of parts): {filename}"
        )

    parts = filename.split("-", dashes - 2)
    name_part = parts[0]
    # See PEP 427 for the rules on escaping the project name.
    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
        raise InvalidWheelFilename(f"Invalid project name: {filename}")
    name = canonicalize_name(name_part)

    try:
        version = Version(parts[1])
    except InvalidVersion as e:
        raise InvalidWheelFilename(
            f"Invalid wheel filename (invalid version): {filename}"
        ) from e

    if dashes == 5:
        build_part = parts[2]
        build_match = _build_tag_regex.match(build_part)
        if build_match is None:
            raise InvalidWheelFilename(
                f"Invalid build number: {build_part} in '{filename}'"
            )
        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
    else:
        build = ()
    tags = parse_tag(parts[-1])
    return (name, version, build, tags)
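
# An illustrative parse of an unbuilt wheel filename (added sketch; the
# filename is hypothetical):
#
#     >>> name, ver, build, tags = parse_wheel_filename("pip-24.1-py3-none-any.whl")
#     >>> name, str(ver), build, sorted(str(t) for t in tags)
#     ('pip', '24.1', (), ['py3-none-any'])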


def parse_sdist_filename(filename: str) -> tuple[NormalizedName, Version]:
    if filename.endswith(".tar.gz"):
        file_stem = filename[: -len(".tar.gz")]
    elif filename.endswith(".zip"):
        file_stem = filename[: -len(".zip")]
    else:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
            f" {filename}"
        )

    # We are requiring a PEP 440 version, which cannot contain dashes,
    # so we split on the last dash.
    name_part, sep, version_part = file_stem.rpartition("-")
    if not sep:
        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")

    name = canonicalize_name(name_part)

    try:
        version = Version(version_part)
    except InvalidVersion as e:
        raise InvalidSdistFilename(
            f"Invalid sdist filename (invalid version): {filename}"
        ) from e

    return (name, version)
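
# Usage sketch (illustration only): the name is canonicalized and the version is
# parsed into a Version object.
#
#     >>> parse_sdist_filename("packaging-24.1.tar.gz")
#     ('packaging', <Version('24.1')>)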
version.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
"""
.. testsetup::

    from pip._vendor.packaging.version import parse, Version
"""

from __future__ import annotations

import itertools
import re
from typing import Any, Callable, NamedTuple, SupportsInt, Tuple, Union

from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType

__all__ = ["VERSION_PATTERN", "parse", "Version", "InvalidVersion"]

LocalType = Tuple[Union[int, str], ...]

CmpPrePostDevType = Union[InfinityType, NegativeInfinityType, Tuple[str, int]]
CmpLocalType = Union[
    NegativeInfinityType,
    Tuple[Union[Tuple[int, str], Tuple[NegativeInfinityType, Union[int, str]]], ...],
]
CmpKey = Tuple[
    int,
    Tuple[int, ...],
    CmpPrePostDevType,
    CmpPrePostDevType,
    CmpPrePostDevType,
    CmpLocalType,
]
VersionComparisonMethod = Callable[[CmpKey, CmpKey], bool]


class _Version(NamedTuple):
    epoch: int
    release: tuple[int, ...]
    dev: tuple[str, int] | None
    pre: tuple[str, int] | None
    post: tuple[str, int] | None
    local: LocalType | None


def parse(version: str) -> Version:
    """Parse the given version string.

    >>> parse('1.0.dev1')
    <Version('1.0.dev1')>

    :param version: The version string to parse.
    :raises InvalidVersion: When the version string is not a valid version.
    """
    return Version(version)


class InvalidVersion(ValueError):
    """Raised when a version string is not a valid version.

    >>> Version("invalid")
    Traceback (most recent call last):
        ...
    packaging.version.InvalidVersion: Invalid version: 'invalid'
    """


class _BaseVersion:
    _key: tuple[Any, ...]

    def __hash__(self) -> int:
        return hash(self._key)

    # Please keep the duplicated `isinstance` check
    # in the six comparisons hereunder
    # unless you find a way to avoid adding overhead function calls.
    def __lt__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key < other._key

    def __le__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key <= other._key

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key == other._key

    def __ge__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key >= other._key

    def __gt__(self, other: _BaseVersion) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key > other._key

    def __ne__(self, other: object) -> bool:
        if not isinstance(other, _BaseVersion):
            return NotImplemented

        return self._key != other._key


# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
_VERSION_PATTERN = r"""
    v?
    (?:
        (?:(?P[0-9]+)!)?                           # epoch
        (?P[0-9]+(?:\.[0-9]+)*)                  # release segment
        (?P
                                          # pre-release
            [-_\.]?
            (?Palpha|a|beta|b|preview|pre|c|rc)
            [-_\.]?
            (?P[0-9]+)?
        )?
        (?P                                         # post release
            (?:-(?P[0-9]+))
            |
            (?:
                [-_\.]?
                (?Ppost|rev|r)
                [-_\.]?
                (?P[0-9]+)?
            )
        )?
        (?P                                          # dev release
            [-_\.]?
            (?Pdev)
            [-_\.]?
            (?P[0-9]+)?
        )?
    )
    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
"""

VERSION_PATTERN = _VERSION_PATTERN
"""
A string containing the regular expression used to match a valid version.

The pattern is not anchored at either end, and is intended for embedding in larger
expressions (for example, matching a version number as part of a file name). The
regular expression should be compiled with the ``re.VERBOSE`` and ``re.IGNORECASE``
flags set.

:meta hide-value:
"""


class Version(_BaseVersion):
    """This class abstracts handling of a project's versions.

    A :class:`Version` instance is comparison aware and can be compared and
    sorted using the standard Python interfaces.

    >>> v1 = Version("1.0a5")
    >>> v2 = Version("1.0")
    >>> v1
    <Version('1.0a5')>
    >>> v2
    <Version('1.0')>
    >>> v1 < v2
    True
    >>> v1 == v2
    False
    >>> v1 > v2
    False
    >>> v1 >= v2
    False
    >>> v1 <= v2
    True
    """

    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
    _key: CmpKey

    def __init__(self, version: str) -> None:
        """Initialize a Version object.

        :param version:
            The string representation of a version which will be parsed and normalized
            before use.
        :raises InvalidVersion:
            If the ``version`` does not conform to PEP 440 in any way then this
            exception will be raised.
        """

        # Validate the version and parse it into pieces
        match = self._regex.search(version)
        if not match:
            raise InvalidVersion(f"Invalid version: '{version}'")

        # Store the parsed out pieces of the version
        self._version = _Version(
            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
            release=tuple(int(i) for i in match.group("release").split(".")),
            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
            post=_parse_letter_version(
                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
            ),
            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
            local=_parse_local_version(match.group("local")),
        )

        # Generate a key which will be used for sorting
        self._key = _cmpkey(
            self._version.epoch,
            self._version.release,
            self._version.pre,
            self._version.post,
            self._version.dev,
            self._version.local,
        )

    def __repr__(self) -> str:
        """A representation of the Version that shows all internal state.

        >>> Version('1.0.0')
        <Version('1.0.0')>
        """
        return f""

    def __str__(self) -> str:
        """A string representation of the version that can be rounded-tripped.

        >>> str(Version("1.0a5"))
        '1.0a5'
        """
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        # Pre-release
        if self.pre is not None:
            parts.append("".join(str(x) for x in self.pre))

        # Post-release
        if self.post is not None:
            parts.append(f".post{self.post}")

        # Development release
        if self.dev is not None:
            parts.append(f".dev{self.dev}")

        # Local version segment
        if self.local is not None:
            parts.append(f"+{self.local}")

        return "".join(parts)

    @property
    def epoch(self) -> int:
        """The epoch of the version.

        >>> Version("2.0.0").epoch
        0
        >>> Version("1!2.0.0").epoch
        1
        """
        return self._version.epoch

    @property
    def release(self) -> tuple[int, ...]:
        """The components of the "release" segment of the version.

        >>> Version("1.2.3").release
        (1, 2, 3)
        >>> Version("2.0.0").release
        (2, 0, 0)
        >>> Version("1!2.0.0.post0").release
        (2, 0, 0)

        Includes trailing zeroes but not the epoch or any pre-release / development /
        post-release suffixes.
        """
        return self._version.release

    @property
    def pre(self) -> tuple[str, int] | None:
        """The pre-release segment of the version.

        >>> print(Version("1.2.3").pre)
        None
        >>> Version("1.2.3a1").pre
        ('a', 1)
        >>> Version("1.2.3b1").pre
        ('b', 1)
        >>> Version("1.2.3rc1").pre
        ('rc', 1)
        """
        return self._version.pre

    @property
    def post(self) -> int | None:
        """The post-release number of the version.

        >>> print(Version("1.2.3").post)
        None
        >>> Version("1.2.3.post1").post
        1
        """
        return self._version.post[1] if self._version.post else None

    @property
    def dev(self) -> int | None:
        """The development number of the version.

        >>> print(Version("1.2.3").dev)
        None
        >>> Version("1.2.3.dev1").dev
        1
        """
        return self._version.dev[1] if self._version.dev else None

    @property
    def local(self) -> str | None:
        """The local version segment of the version.

        >>> print(Version("1.2.3").local)
        None
        >>> Version("1.2.3+abc").local
        'abc'
        """
        if self._version.local:
            return ".".join(str(x) for x in self._version.local)
        else:
            return None

    @property
    def public(self) -> str:
        """The public portion of the version.

        >>> Version("1.2.3").public
        '1.2.3'
        >>> Version("1.2.3+abc").public
        '1.2.3'
        >>> Version("1.2.3+abc.dev1").public
        '1.2.3'
        """
        return str(self).split("+", 1)[0]

    @property
    def base_version(self) -> str:
        """The "base version" of the version.

        >>> Version("1.2.3").base_version
        '1.2.3'
        >>> Version("1.2.3+abc").base_version
        '1.2.3'
        >>> Version("1!1.2.3+abc.dev1").base_version
        '1!1.2.3'

        The "base version" is the public version of the project without any pre or post
        release markers.
        """
        parts = []

        # Epoch
        if self.epoch != 0:
            parts.append(f"{self.epoch}!")

        # Release segment
        parts.append(".".join(str(x) for x in self.release))

        return "".join(parts)

    @property
    def is_prerelease(self) -> bool:
        """Whether this version is a pre-release.

        >>> Version("1.2.3").is_prerelease
        False
        >>> Version("1.2.3a1").is_prerelease
        True
        >>> Version("1.2.3b1").is_prerelease
        True
        >>> Version("1.2.3rc1").is_prerelease
        True
        >>> Version("1.2.3dev1").is_prerelease
        True
        """
        return self.dev is not None or self.pre is not None

    @property
    def is_postrelease(self) -> bool:
        """Whether this version is a post-release.

        >>> Version("1.2.3").is_postrelease
        False
        >>> Version("1.2.3.post1").is_postrelease
        True
        """
        return self.post is not None

    @property
    def is_devrelease(self) -> bool:
        """Whether this version is a development release.

        >>> Version("1.2.3").is_devrelease
        False
        >>> Version("1.2.3.dev1").is_devrelease
        True
        """
        return self.dev is not None

    @property
    def major(self) -> int:
        """The first item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").major
        1
        """
        return self.release[0] if len(self.release) >= 1 else 0

    @property
    def minor(self) -> int:
        """The second item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").minor
        2
        >>> Version("1").minor
        0
        """
        return self.release[1] if len(self.release) >= 2 else 0

    @property
    def micro(self) -> int:
        """The third item of :attr:`release` or ``0`` if unavailable.

        >>> Version("1.2.3").micro
        3
        >>> Version("1").micro
        0
        """
        return self.release[2] if len(self.release) >= 3 else 0


def _parse_letter_version(
    letter: str | None, number: str | bytes | SupportsInt | None
) -> tuple[str, int] | None:
    if letter:
        # We consider there to be an implicit 0 in a pre-release if there is
        # not a numeral associated with it.
        if number is None:
            number = 0

        # We normalize any letters to their lower case form
        letter = letter.lower()

        # We consider some words to be alternate spellings of other words and
        # in those cases we want to normalize the spellings to our preferred
        # spelling.
        if letter == "alpha":
            letter = "a"
        elif letter == "beta":
            letter = "b"
        elif letter in ["c", "pre", "preview"]:
            letter = "rc"
        elif letter in ["rev", "r"]:
            letter = "post"

        return letter, int(number)
    if not letter and number:
        # We assume that if we are given a number but not a letter, then this
        # is using the implicit post release syntax (e.g. 1.0-1).
        letter = "post"

        return letter, int(number)

    return None
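
# Usage sketch (illustration only): alternate spellings are normalized and a
# missing number is treated as an implicit zero.
#
#     >>> _parse_letter_version("alpha", None)
#     ('a', 0)
#     >>> _parse_letter_version("rev", "4")
#     ('post', 4)
#     >>> _parse_letter_version(None, "1")  # implicit post release, e.g. "1.0-1"
#     ('post', 1)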


_local_version_separators = re.compile(r"[\._-]")


def _parse_local_version(local: str | None) -> LocalType | None:
    """
    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
    """
    if local is not None:
        return tuple(
            part.lower() if not part.isdigit() else int(part)
            for part in _local_version_separators.split(local)
        )
    return None


def _cmpkey(
    epoch: int,
    release: tuple[int, ...],
    pre: tuple[str, int] | None,
    post: tuple[str, int] | None,
    dev: tuple[str, int] | None,
    local: LocalType | None,
) -> CmpKey:
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
    # zeros until we come to something non-zero, then take the rest, re-reverse
    # it back into the correct order, and make that tuple our sorting key.
    _release = tuple(
        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
    )

    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
    # We'll do this by abusing the pre segment, but we _only_ want to do this
    # if there is not a pre or a post segment. If we have one of those then
    # the normal sorting rules will handle this case correctly.
    if pre is None and post is None and dev is not None:
        _pre: CmpPrePostDevType = NegativeInfinity
    # Versions without a pre-release (except as noted above) should sort after
    # those with one.
    elif pre is None:
        _pre = Infinity
    else:
        _pre = pre

    # Versions without a post segment should sort before those with one.
    if post is None:
        _post: CmpPrePostDevType = NegativeInfinity

    else:
        _post = post

    # Versions without a development segment should sort after those with one.
    if dev is None:
        _dev: CmpPrePostDevType = Infinity

    else:
        _dev = dev

    if local is None:
        # Versions without a local segment should sort before those with one.
        _local: CmpLocalType = NegativeInfinity
    else:
        # Versions with a local segment need that segment parsed to implement
        # the sorting rules in PEP440.
        # - Alpha numeric segments sort before numeric segments
        # - Alpha numeric segments sort lexicographically
        # - Numeric segments sort numerically
        # - Shorter versions sort before longer versions when the prefixes
        #   match exactly
        _local = tuple(
            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
        )

    return epoch, _release, _pre, _post, _dev, _local
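
# Usage sketch (illustration only): the key built above is what makes dev releases
# sort before pre-releases of the same version, and local versions sort after the
# corresponding public release.
#
#     >>> sorted([Version("1.0"), Version("1.0a1"), Version("1.0.dev1"), Version("1.0+local")])
#     [<Version('1.0.dev1')>, <Version('1.0a1')>, <Version('1.0')>, <Version('1.0+local')>]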
py.typed
_elffile.py
"""
ELF file parser.

This provides a class ``ELFFile`` that parses an ELF executable in a similar
interface to ``ZipFile``. Only the read interface is implemented.

Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
"""

from __future__ import annotations

import enum
import os
import struct
from typing import IO


class ELFInvalid(ValueError):
    pass


class EIClass(enum.IntEnum):
    C32 = 1
    C64 = 2


class EIData(enum.IntEnum):
    Lsb = 1
    Msb = 2


class EMachine(enum.IntEnum):
    I386 = 3
    S390 = 22
    Arm = 40
    X8664 = 62
    AArc64 = 183


class ELFFile:
    """
    Representation of an ELF executable.
    """

    def __init__(self, f: IO[bytes]) -> None:
        self._f = f

        try:
            ident = self._read("16B")
        except struct.error:
            raise ELFInvalid("unable to parse identification")
        magic = bytes(ident[:4])
        if magic != b"\x7fELF":
            raise ELFInvalid(f"invalid magic: {magic!r}")

        self.capacity = ident[4]  # Format for program header (bitness).
        self.encoding = ident[5]  # Data structure encoding (endianness).

        try:
            # e_fmt: Format for program header.
            # p_fmt: Format for section header.
            # p_idx: Indexes to find p_type, p_offset, and p_filesz.
            e_fmt, self._p_fmt, self._p_idx = {
                (1, 1): ("HHIIIIIHHH", ">IIIIIIII", (0, 1, 4)),  # 32-bit MSB.
                (2, 1): ("HHIQQQIHHH", ">IIQQQQQQ", (0, 2, 5)),  # 64-bit MSB.
            }[(self.capacity, self.encoding)]
        except KeyError:
            raise ELFInvalid(
                f"unrecognized capacity ({self.capacity}) or "
                f"encoding ({self.encoding})"
            )

        try:
            (
                _,
                self.machine,  # Architecture type.
                _,
                _,
                self._e_phoff,  # Offset of program header.
                _,
                self.flags,  # Processor-specific flags.
                _,
                self._e_phentsize,  # Size of section.
                self._e_phnum,  # Number of sections.
            ) = self._read(e_fmt)
        except struct.error as e:
            raise ELFInvalid("unable to parse machine and section information") from e

    def _read(self, fmt: str) -> tuple[int, ...]:
        return struct.unpack(fmt, self._f.read(struct.calcsize(fmt)))

    @property
    def interpreter(self) -> str | None:
        """
        The path recorded in the ``PT_INTERP`` section header.
        """
        for index in range(self._e_phnum):
            self._f.seek(self._e_phoff + self._e_phentsize * index)
            try:
                data = self._read(self._p_fmt)
            except struct.error:
                continue
            if data[self._p_idx[0]] != 3:  # Not PT_INTERP.
                continue
            self._f.seek(data[self._p_idx[1]])
            return os.fsdecode(self._f.read(data[self._p_idx[2]])).strip("\0")
        return None
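
# Usage sketch (illustration only, assuming a glibc Linux host; the path and the
# reported loader vary per system):
#
#     >>> with open("/bin/sh", "rb") as f:
#     ...     ELFFile(f).interpreter
#     '/lib64/ld-linux-x86-64.so.2'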
_parser.py
"""Handwritten parser of dependency specifiers.

The docstring for each _parse_* function contains EBNF-inspired grammar representing
the implementation.
"""

from __future__ import annotations

import ast
from typing import NamedTuple, Sequence, Tuple, Union

from ._tokenizer import DEFAULT_RULES, Tokenizer


class Node:
    def __init__(self, value: str) -> None:
        self.value = value

    def __str__(self) -> str:
        return self.value

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}('{self}')>"

    def serialize(self) -> str:
        raise NotImplementedError


class Variable(Node):
    def serialize(self) -> str:
        return str(self)


class Value(Node):
    def serialize(self) -> str:
        return f'"{self}"'


class Op(Node):
    def serialize(self) -> str:
        return str(self)


MarkerVar = Union[Variable, Value]
MarkerItem = Tuple[MarkerVar, Op, MarkerVar]
MarkerAtom = Union[MarkerItem, Sequence["MarkerAtom"]]
MarkerList = Sequence[Union["MarkerList", MarkerAtom, str]]


class ParsedRequirement(NamedTuple):
    name: str
    url: str
    extras: list[str]
    specifier: str
    marker: MarkerList | None


# --------------------------------------------------------------------------------------
# Recursive descent parser for dependency specifier
# --------------------------------------------------------------------------------------
def parse_requirement(source: str) -> ParsedRequirement:
    return _parse_requirement(Tokenizer(source, rules=DEFAULT_RULES))
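
# Usage sketch (illustration only): the parser splits a dependency specifier into
# its name, extras, version specifier, and token-level marker.
#
#     >>> r = parse_requirement("requests[security]>=2.8.1; python_version > '3.8'")
#     >>> (r.name, r.extras, r.specifier)
#     ('requests', ['security'], '>=2.8.1')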


def _parse_requirement(tokenizer: Tokenizer) -> ParsedRequirement:
    """
    requirement = WS? IDENTIFIER WS? extras WS? requirement_details
    """
    tokenizer.consume("WS")

    name_token = tokenizer.expect(
        "IDENTIFIER", expected="package name at the start of dependency specifier"
    )
    name = name_token.text
    tokenizer.consume("WS")

    extras = _parse_extras(tokenizer)
    tokenizer.consume("WS")

    url, specifier, marker = _parse_requirement_details(tokenizer)
    tokenizer.expect("END", expected="end of dependency specifier")

    return ParsedRequirement(name, url, extras, specifier, marker)


def _parse_requirement_details(
    tokenizer: Tokenizer,
) -> tuple[str, str, MarkerList | None]:
    """
    requirement_details = AT URL (WS requirement_marker?)?
                        | specifier WS? (requirement_marker)?
    """

    specifier = ""
    url = ""
    marker = None

    if tokenizer.check("AT"):
        tokenizer.read()
        tokenizer.consume("WS")

        url_start = tokenizer.position
        url = tokenizer.expect("URL", expected="URL after @").text
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        tokenizer.expect("WS", expected="whitespace after URL")

        # The input might end after whitespace.
        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer, span_start=url_start, after="URL and whitespace"
        )
    else:
        specifier_start = tokenizer.position
        specifier = _parse_specifier(tokenizer)
        tokenizer.consume("WS")

        if tokenizer.check("END", peek=True):
            return (url, specifier, marker)

        marker = _parse_requirement_marker(
            tokenizer,
            span_start=specifier_start,
            after=(
                "version specifier"
                if specifier
                else "name and no valid version specifier"
            ),
        )

    return (url, specifier, marker)


def _parse_requirement_marker(
    tokenizer: Tokenizer, *, span_start: int, after: str
) -> MarkerList:
    """
    requirement_marker = SEMICOLON marker WS?
    """

    if not tokenizer.check("SEMICOLON"):
        tokenizer.raise_syntax_error(
            f"Expected end or semicolon (after {after})",
            span_start=span_start,
        )
    tokenizer.read()

    marker = _parse_marker(tokenizer)
    tokenizer.consume("WS")

    return marker


def _parse_extras(tokenizer: Tokenizer) -> list[str]:
    """
    extras = (LEFT_BRACKET wsp* extras_list? wsp* RIGHT_BRACKET)?
    """
    if not tokenizer.check("LEFT_BRACKET", peek=True):
        return []

    with tokenizer.enclosing_tokens(
        "LEFT_BRACKET",
        "RIGHT_BRACKET",
        around="extras",
    ):
        tokenizer.consume("WS")
        extras = _parse_extras_list(tokenizer)
        tokenizer.consume("WS")

    return extras


def _parse_extras_list(tokenizer: Tokenizer) -> list[str]:
    """
    extras_list = identifier (wsp* ',' wsp* identifier)*
    """
    extras: list[str] = []

    if not tokenizer.check("IDENTIFIER"):
        return extras

    extras.append(tokenizer.read().text)

    while True:
        tokenizer.consume("WS")
        if tokenizer.check("IDENTIFIER", peek=True):
            tokenizer.raise_syntax_error("Expected comma between extra names")
        elif not tokenizer.check("COMMA"):
            break

        tokenizer.read()
        tokenizer.consume("WS")

        extra_token = tokenizer.expect("IDENTIFIER", expected="extra name after comma")
        extras.append(extra_token.text)

    return extras


def _parse_specifier(tokenizer: Tokenizer) -> str:
    """
    specifier = LEFT_PARENTHESIS WS? version_many WS? RIGHT_PARENTHESIS
              | WS? version_many WS?
    """
    with tokenizer.enclosing_tokens(
        "LEFT_PARENTHESIS",
        "RIGHT_PARENTHESIS",
        around="version specifier",
    ):
        tokenizer.consume("WS")
        parsed_specifiers = _parse_version_many(tokenizer)
        tokenizer.consume("WS")

    return parsed_specifiers


def _parse_version_many(tokenizer: Tokenizer) -> str:
    """
    version_many = (SPECIFIER (WS? COMMA WS? SPECIFIER)*)?
    """
    parsed_specifiers = ""
    while tokenizer.check("SPECIFIER"):
        span_start = tokenizer.position
        parsed_specifiers += tokenizer.read().text
        if tokenizer.check("VERSION_PREFIX_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                ".* suffix can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position + 1,
            )
        if tokenizer.check("VERSION_LOCAL_LABEL_TRAIL", peek=True):
            tokenizer.raise_syntax_error(
                "Local version label can only be used with `==` or `!=` operators",
                span_start=span_start,
                span_end=tokenizer.position,
            )
        tokenizer.consume("WS")
        if not tokenizer.check("COMMA"):
            break
        parsed_specifiers += tokenizer.read().text
        tokenizer.consume("WS")

    return parsed_specifiers


# --------------------------------------------------------------------------------------
# Recursive descent parser for marker expression
# --------------------------------------------------------------------------------------
def parse_marker(source: str) -> MarkerList:
    return _parse_full_marker(Tokenizer(source, rules=DEFAULT_RULES))
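
# Usage sketch (illustration only): a marker parses into a flat list of
# (variable, op, value) triples joined by "and"/"or" strings, with nesting only
# where parentheses appear in the source.
#
#     >>> parse_marker("python_version > '3.8'")
#     [(<Variable('python_version')>, <Op('>')>, <Value('3.8')>)]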


def _parse_full_marker(tokenizer: Tokenizer) -> MarkerList:
    retval = _parse_marker(tokenizer)
    tokenizer.expect("END", expected="end of marker expression")
    return retval


def _parse_marker(tokenizer: Tokenizer) -> MarkerList:
    """
    marker = marker_atom (BOOLOP marker_atom)+
    """
    expression = [_parse_marker_atom(tokenizer)]
    while tokenizer.check("BOOLOP"):
        token = tokenizer.read()
        expr_right = _parse_marker_atom(tokenizer)
        expression.extend((token.text, expr_right))
    return expression


def _parse_marker_atom(tokenizer: Tokenizer) -> MarkerAtom:
    """
    marker_atom = WS? LEFT_PARENTHESIS WS? marker WS? RIGHT_PARENTHESIS WS?
                | WS? marker_item WS?
    """

    tokenizer.consume("WS")
    if tokenizer.check("LEFT_PARENTHESIS", peek=True):
        with tokenizer.enclosing_tokens(
            "LEFT_PARENTHESIS",
            "RIGHT_PARENTHESIS",
            around="marker expression",
        ):
            tokenizer.consume("WS")
            marker: MarkerAtom = _parse_marker(tokenizer)
            tokenizer.consume("WS")
    else:
        marker = _parse_marker_item(tokenizer)
    tokenizer.consume("WS")
    return marker


def _parse_marker_item(tokenizer: Tokenizer) -> MarkerItem:
    """
    marker_item = WS? marker_var WS? marker_op WS? marker_var WS?
    """
    tokenizer.consume("WS")
    marker_var_left = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    marker_op = _parse_marker_op(tokenizer)
    tokenizer.consume("WS")
    marker_var_right = _parse_marker_var(tokenizer)
    tokenizer.consume("WS")
    return (marker_var_left, marker_op, marker_var_right)


def _parse_marker_var(tokenizer: Tokenizer) -> MarkerVar:
    """
    marker_var = VARIABLE | QUOTED_STRING
    """
    if tokenizer.check("VARIABLE"):
        return process_env_var(tokenizer.read().text.replace(".", "_"))
    elif tokenizer.check("QUOTED_STRING"):
        return process_python_str(tokenizer.read().text)
    else:
        tokenizer.raise_syntax_error(
            message="Expected a marker variable or quoted string"
        )


def process_env_var(env_var: str) -> Variable:
    if env_var in ("platform_python_implementation", "python_implementation"):
        return Variable("platform_python_implementation")
    else:
        return Variable(env_var)


def process_python_str(python_str: str) -> Value:
    value = ast.literal_eval(python_str)
    return Value(str(value))


def _parse_marker_op(tokenizer: Tokenizer) -> Op:
    """
    marker_op = IN | NOT IN | OP
    """
    if tokenizer.check("IN"):
        tokenizer.read()
        return Op("in")
    elif tokenizer.check("NOT"):
        tokenizer.read()
        tokenizer.expect("WS", expected="whitespace after 'not'")
        tokenizer.expect("IN", expected="'in' after 'not'")
        return Op("not in")
    elif tokenizer.check("OP"):
        return Op(tokenizer.read().text)
    else:
        return tokenizer.raise_syntax_error(
            "Expected marker operator, one of "
            "<=, <, !=, ==, >=, >, ~=, ===, in, not in"
        )
_tokenizer.py
from __future__ import annotations

import contextlib
import re
from dataclasses import dataclass
from typing import Iterator, NoReturn

from .specifiers import Specifier


@dataclass
class Token:
    name: str
    text: str
    position: int


class ParserSyntaxError(Exception):
    """The provided source text could not be parsed correctly."""

    def __init__(
        self,
        message: str,
        *,
        source: str,
        span: tuple[int, int],
    ) -> None:
        self.span = span
        self.message = message
        self.source = source

        super().__init__()

    def __str__(self) -> str:
        marker = " " * self.span[0] + "~" * (self.span[1] - self.span[0]) + "^"
        return "\n    ".join([self.message, self.source, marker])


DEFAULT_RULES: dict[str, str | re.Pattern[str]] = {
    "LEFT_PARENTHESIS": r"\(",
    "RIGHT_PARENTHESIS": r"\)",
    "LEFT_BRACKET": r"\[",
    "RIGHT_BRACKET": r"\]",
    "SEMICOLON": r";",
    "COMMA": r",",
    "QUOTED_STRING": re.compile(
        r"""
            (
                ('[^']*')
                |
                ("[^"]*")
            )
        """,
        re.VERBOSE,
    ),
    "OP": r"(===|==|~=|!=|<=|>=|<|>)",
    "BOOLOP": r"\b(or|and)\b",
    "IN": r"\bin\b",
    "NOT": r"\bnot\b",
    "VARIABLE": re.compile(
        r"""
            \b(
                python_version
                |python_full_version
                |os[._]name
                |sys[._]platform
                |platform_(release|system)
                |platform[._](version|machine|python_implementation)
                |python_implementation
                |implementation_(name|version)
                |extra
            )\b
        """,
        re.VERBOSE,
    ),
    "SPECIFIER": re.compile(
        Specifier._operator_regex_str + Specifier._version_regex_str,
        re.VERBOSE | re.IGNORECASE,
    ),
    "AT": r"\@",
    "URL": r"[^ \t]+",
    "IDENTIFIER": r"\b[a-zA-Z0-9][a-zA-Z0-9._-]*\b",
    "VERSION_PREFIX_TRAIL": r"\.\*",
    "VERSION_LOCAL_LABEL_TRAIL": r"\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*",
    "WS": r"[ \t]+",
    "END": r"$",
}


class Tokenizer:
    """Context-sensitive token parsing.

    Provides methods to examine the input stream to check whether the next token
    matches.
    """

    def __init__(
        self,
        source: str,
        *,
        rules: dict[str, str | re.Pattern[str]],
    ) -> None:
        self.source = source
        self.rules: dict[str, re.Pattern[str]] = {
            name: re.compile(pattern) for name, pattern in rules.items()
        }
        self.next_token: Token | None = None
        self.position = 0

    def consume(self, name: str) -> None:
        """Move beyond provided token name, if at current position."""
        if self.check(name):
            self.read()

    def check(self, name: str, *, peek: bool = False) -> bool:
        """Check whether the next token has the provided name.

        By default, if the check succeeds, the token *must* be read before
        another check. If `peek` is set to `True`, the token is not loaded and
        would need to be checked again.
        """
        assert (
            self.next_token is None
        ), f"Cannot check for {name!r}, already have {self.next_token!r}"
        assert name in self.rules, f"Unknown token name: {name!r}"

        expression = self.rules[name]

        match = expression.match(self.source, self.position)
        if match is None:
            return False
        if not peek:
            self.next_token = Token(name, match[0], self.position)
        return True

    def expect(self, name: str, *, expected: str) -> Token:
        """Expect a certain token name next, failing with a syntax error otherwise.

        The matched token is read and returned.
        """
        if not self.check(name):
            raise self.raise_syntax_error(f"Expected {expected}")
        return self.read()

    def read(self) -> Token:
        """Consume the next token and return it."""
        token = self.next_token
        assert token is not None

        self.position += len(token.text)
        self.next_token = None

        return token

    def raise_syntax_error(
        self,
        message: str,
        *,
        span_start: int | None = None,
        span_end: int | None = None,
    ) -> NoReturn:
        """Raise ParserSyntaxError at the given position."""
        span = (
            self.position if span_start is None else span_start,
            self.position if span_end is None else span_end,
        )
        raise ParserSyntaxError(
            message,
            source=self.source,
            span=span,
        )

    @contextlib.contextmanager
    def enclosing_tokens(
        self, open_token: str, close_token: str, *, around: str
    ) -> Iterator[None]:
        if self.check(open_token):
            open_position = self.position
            self.read()
        else:
            open_position = None

        yield

        if open_position is None:
            return

        if not self.check(close_token):
            self.raise_syntax_error(
                f"Expected matching {close_token} for {open_token}, after {around}",
                span_start=open_position,
            )

        self.read()
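
# Usage sketch (illustration only): the tokenizer is normally driven by the
# recursive descent parser in _parser.py, but can be exercised directly.
#
#     >>> t = Tokenizer("name >= 1.0", rules=DEFAULT_RULES)
#     >>> t.expect("IDENTIFIER", expected="package name").text
#     'name'
#     >>> t.consume("WS")
#     >>> t.check("SPECIFIER")
#     True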
metadata.py
from __future__ import annotations

import email.feedparser
import email.header
import email.message
import email.parser
import email.policy
import typing
from typing import (
    Any,
    Callable,
    Generic,
    Literal,
    TypedDict,
    cast,
)

from . import requirements, specifiers, utils
from . import version as version_module

T = typing.TypeVar("T")


try:
    ExceptionGroup
except NameError:  # pragma: no cover

    class ExceptionGroup(Exception):
        """A minimal implementation of :external:exc:`ExceptionGroup` from Python 3.11.

        If :external:exc:`ExceptionGroup` is already defined by Python itself,
        that version is used instead.
        """

        message: str
        exceptions: list[Exception]

        def __init__(self, message: str, exceptions: list[Exception]) -> None:
            self.message = message
            self.exceptions = exceptions

        def __repr__(self) -> str:
            return f"{self.__class__.__name__}({self.message!r}, {self.exceptions!r})"

else:  # pragma: no cover
    ExceptionGroup = ExceptionGroup


class InvalidMetadata(ValueError):
    """A metadata field contains invalid data."""

    field: str
    """The name of the field that contains invalid data."""

    def __init__(self, field: str, message: str) -> None:
        self.field = field
        super().__init__(message)


# The RawMetadata class attempts to make as few assumptions about the underlying
# serialization formats as possible. The idea is that as long as a serialization
# format offers some very basic primitives in *some* way, then we can support
# serializing to and from that format.
class RawMetadata(TypedDict, total=False):
    """A dictionary of raw core metadata.

    Each field in core metadata maps to a key of this dictionary (when data is
    provided). The key is lower-case and underscores are used instead of dashes
    compared to the equivalent core metadata field. Any core metadata field that
    can be specified multiple times or can hold multiple values in a single
    field has a key with a plural name. See :class:`Metadata`, whose attributes
    match the keys of this dictionary.

    Core metadata fields that can be specified multiple times are stored as a
    list or dict depending on which is appropriate for the field. Any fields
    which hold multiple values in a single field are stored as a list.

    """

    # Metadata 1.0 - PEP 241
    metadata_version: str
    name: str
    version: str
    platforms: list[str]
    summary: str
    description: str
    keywords: list[str]
    home_page: str
    author: str
    author_email: str
    license: str

    # Metadata 1.1 - PEP 314
    supported_platforms: list[str]
    download_url: str
    classifiers: list[str]
    requires: list[str]
    provides: list[str]
    obsoletes: list[str]

    # Metadata 1.2 - PEP 345
    maintainer: str
    maintainer_email: str
    requires_dist: list[str]
    provides_dist: list[str]
    obsoletes_dist: list[str]
    requires_python: str
    requires_external: list[str]
    project_urls: dict[str, str]

    # Metadata 2.0
    # PEP 426 attempted to completely revamp the metadata format
    # but got stuck without ever being able to build consensus on
    # it and ultimately ended up withdrawn.
    #
    # However, a number of tools had started emitting METADATA with
    # `2.0` Metadata-Version, so for historical reasons, this version
    # was skipped.

    # Metadata 2.1 - PEP 566
    description_content_type: str
    provides_extra: list[str]

    # Metadata 2.2 - PEP 643
    dynamic: list[str]

    # Metadata 2.3 - PEP 685
    # No new fields were added in PEP 685; some edge cases were just
    # tightened up to provide better interoperability.


_STRING_FIELDS = {
    "author",
    "author_email",
    "description",
    "description_content_type",
    "download_url",
    "home_page",
    "license",
    "maintainer",
    "maintainer_email",
    "metadata_version",
    "name",
    "requires_python",
    "summary",
    "version",
}

_LIST_FIELDS = {
    "classifiers",
    "dynamic",
    "obsoletes",
    "obsoletes_dist",
    "platforms",
    "provides",
    "provides_dist",
    "provides_extra",
    "requires",
    "requires_dist",
    "requires_external",
    "supported_platforms",
}

_DICT_FIELDS = {
    "project_urls",
}


def _parse_keywords(data: str) -> list[str]:
    """Split a string of comma-separate keyboards into a list of keywords."""
    return [k.strip() for k in data.split(",")]


def _parse_project_urls(data: list[str]) -> dict[str, str]:
    """Parse a list of label/URL string pairings separated by a comma."""
    urls = {}
    for pair in data:
        # Our logic is slightly tricky here as we want to try and do
        # *something* reasonable with malformed data.
        #
        # The main thing that we have to worry about, is data that does
        # not have a ',' at all to split the label from the value. There
        # isn't a singular right answer here, and we will fail validation
        # later on (if the caller is validating) so it doesn't *really*
        # matter, but since the missing value has to be an empty str
        # and our return value is dict[str, str], if we let the key
        # be the missing value, then they'd have multiple '' values that
        # overwrite each other in an accumulating dict.
        #
        # The other potential issue is that it's possible to have the
        # same label multiple times in the metadata, with no solid "right"
        # answer with what to do in that case. As such, we'll do the only
        # thing we can, which is treat the field as unparseable and add it
        # to our list of unparsed fields.
        parts = [p.strip() for p in pair.split(",", 1)]
        parts.extend([""] * (max(0, 2 - len(parts))))  # Ensure 2 items

        # TODO: The spec doesn't say anything about if the keys should be
        #       considered case sensitive or not... logically they should
        #       be case-preserving and case-insensitive, but doing that
        #       would open up more cases where we might have duplicate
        #       entries.
        label, url = parts
        if label in urls:
            # The label already exists in our set of urls, so this field
            # is unparseable, and we can just add the whole thing to our
            # unparseable data and stop processing it.
            raise KeyError("duplicate labels in project urls")
        urls[label] = url

    return urls
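
# Usage sketch (illustration only, with made-up labels and URL): each "label, URL"
# string becomes one mapping entry; a repeated label raises KeyError so the caller
# can treat the whole field as unparsed.
#
#     >>> _parse_project_urls(["Homepage, https://example.com", "Docs,"])
#     {'Homepage': 'https://example.com', 'Docs': ''}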


def _get_payload(msg: email.message.Message, source: bytes | str) -> str:
    """Get the body of the message."""
    # If our source is a str, then our caller has managed encodings for us,
    # and we don't need to deal with it.
    if isinstance(source, str):
        payload: str = msg.get_payload()
        return payload
    # If our source is a bytes, then we're managing the encoding and we need
    # to deal with it.
    else:
        bpayload: bytes = msg.get_payload(decode=True)
        try:
            return bpayload.decode("utf8", "strict")
        except UnicodeDecodeError:
            raise ValueError("payload in an invalid encoding")


# The various parse_FORMAT functions here are intended to be as lenient as
# possible in their parsing, while still returning a correctly typed
# RawMetadata.
#
# To aid in this, we also generally want to do as little touching of the
# data as possible, except where there are possibly some historic holdovers
# that make valid data awkward to work with.
#
# While this is a lower level, intermediate format than our ``Metadata``
# class, some light touch ups can make a massive difference in usability.

# Map METADATA fields to RawMetadata.
_EMAIL_TO_RAW_MAPPING = {
    "author": "author",
    "author-email": "author_email",
    "classifier": "classifiers",
    "description": "description",
    "description-content-type": "description_content_type",
    "download-url": "download_url",
    "dynamic": "dynamic",
    "home-page": "home_page",
    "keywords": "keywords",
    "license": "license",
    "maintainer": "maintainer",
    "maintainer-email": "maintainer_email",
    "metadata-version": "metadata_version",
    "name": "name",
    "obsoletes": "obsoletes",
    "obsoletes-dist": "obsoletes_dist",
    "platform": "platforms",
    "project-url": "project_urls",
    "provides": "provides",
    "provides-dist": "provides_dist",
    "provides-extra": "provides_extra",
    "requires": "requires",
    "requires-dist": "requires_dist",
    "requires-external": "requires_external",
    "requires-python": "requires_python",
    "summary": "summary",
    "supported-platform": "supported_platforms",
    "version": "version",
}
_RAW_TO_EMAIL_MAPPING = {raw: email for email, raw in _EMAIL_TO_RAW_MAPPING.items()}


def parse_email(data: bytes | str) -> tuple[RawMetadata, dict[str, list[str]]]:
    """Parse a distribution's metadata stored as email headers (e.g. from ``METADATA``).

    This function returns a two-item tuple of dicts. The first dict is of
    recognized fields from the core metadata specification. Fields that can be
    parsed and translated into Python's built-in types are converted
    appropriately. All other fields are left as-is. Fields that are allowed to
    appear multiple times are stored as lists.

    The second dict contains all other fields from the metadata. This includes
    any unrecognized fields. It also includes any fields which are expected to
    be parsed into a built-in type but were not formatted appropriately. Finally,
    any fields that are expected to appear only once but are repeated are
    included in this dict.

    """
    raw: dict[str, str | list[str] | dict[str, str]] = {}
    unparsed: dict[str, list[str]] = {}

    if isinstance(data, str):
        parsed = email.parser.Parser(policy=email.policy.compat32).parsestr(data)
    else:
        parsed = email.parser.BytesParser(policy=email.policy.compat32).parsebytes(data)

    # We have to wrap parsed.keys() in a set, because in the case of multiple
    # values for a key (a list), the key will appear multiple times in the
    # list of keys, but we're avoiding that by using get_all().
    for name in frozenset(parsed.keys()):
        # Header names in RFC are case insensitive, so we'll normalize to all
        # lower case to make comparisons easier.
        name = name.lower()

        # We use get_all() here, even for fields that aren't multiple use,
        # because otherwise someone could have e.g. two Name fields, and we
        # would just silently ignore it rather than doing something about it.
        headers = parsed.get_all(name) or []

        # The way the email module works when parsing bytes is that it
        # unconditionally decodes the bytes as ascii using the surrogateescape
        # handler. When you pull that data back out (such as with get_all() ),
        # it looks to see if the str has any surrogate escapes, and if it does
        # it wraps it in a Header object instead of returning the string.
        #
        # As such, we'll look for those Header objects, and fix up the encoding.
        value = []
        # Flag if we have run into any issues processing the headers, thus
        # signalling that the data belongs in 'unparsed'.
        valid_encoding = True
        for h in headers:
            # It's unclear if this can return more types than just a Header or
            # a str, so we'll just assert here to make sure.
            assert isinstance(h, (email.header.Header, str))

            # If it's a header object, we need to do our little dance to get
            # the real data out of it. In cases where there is invalid data
            # we're going to end up with mojibake, but there's no obvious, good
            # way around that without reimplementing parts of the Header object
            # ourselves.
            #
            # That should be fine since, if mojibake happens, this key is
            # going into the unparsed dict anyways.
            if isinstance(h, email.header.Header):
                # The Header object stores its data as chunks, and each chunk
                # can be independently encoded, so we'll need to check each
                # of them.
                chunks: list[tuple[bytes, str | None]] = []
                for bin, encoding in email.header.decode_header(h):
                    try:
                        bin.decode("utf8", "strict")
                    except UnicodeDecodeError:
                        # Enable mojibake.
                        encoding = "latin1"
                        valid_encoding = False
                    else:
                        encoding = "utf8"
                    chunks.append((bin, encoding))

                # Turn our chunks back into a Header object, then let that
                # Header object do the right thing to turn them into a
                # string for us.
                value.append(str(email.header.make_header(chunks)))
            # This is already a string, so just add it.
            else:
                value.append(h)

        # We've processed all of our values to get them into a list of str,
        # but we may have mojibake data, in which case this is an unparsed
        # field.
        if not valid_encoding:
            unparsed[name] = value
            continue

        raw_name = _EMAIL_TO_RAW_MAPPING.get(name)
        if raw_name is None:
            # This is a bit of a weird situation: we've encountered a key that
            # we don't recognize, so we don't know whether it's meant
            # to be a list or not.
            #
            # Since we can't really tell one way or another, we'll just leave it
            # as a list, even though it may be a single item list, because that's
            # what makes the most sense for email headers.
            unparsed[name] = value
            continue

        # If this is one of our string fields, then we'll check to see if our
        # value is a list of a single item. If it is then we'll assume that
        # it was emitted as a single string, and unwrap the str from inside
        # the list.
        #
        # If it's any other kind of data, then we haven't the faintest clue
        # what we should parse it as, and we have to just add it to our list
        # of unparsed stuff.
        if raw_name in _STRING_FIELDS and len(value) == 1:
            raw[raw_name] = value[0]
        # If this is one of our list of string fields, then we can just assign
        # the value, since email *only* has strings, and our get_all() call
        # above ensures that this is a list.
        elif raw_name in _LIST_FIELDS:
            raw[raw_name] = value
        # Special Case: Keywords
        # The keywords field is implemented in the metadata spec as a str,
        # but it conceptually is a list of strings, and is serialized using
        # ", ".join(keywords), so we'll do some light data massaging to turn
        # this into what it logically is.
        elif raw_name == "keywords" and len(value) == 1:
            raw[raw_name] = _parse_keywords(value[0])
        # Special Case: Project-URL
        # The project urls is implemented in the metadata spec as a list of
        # specially-formatted strings that represent a key and a value, which
        # is fundamentally a mapping, however the email format doesn't support
        # mappings in a sane way, so it was crammed into a list of strings
        # instead.
        #
        # We will do a little light data massaging to turn this into a map as
        # it logically should be.
        elif raw_name == "project_urls":
            try:
                raw[raw_name] = _parse_project_urls(value)
            except KeyError:
                unparsed[name] = value
        # Nothing that we've done has managed to parse this, so it'll just
        # throw it in our unparseable data and move on.
        else:
            unparsed[name] = value

    # We need to support getting the Description from the message payload in
    # addition to getting it from the headers. This does mean, though, there
    # is the possibility of it being set both ways, in which case we put both
    # in 'unparsed' since we don't know which is right.
    try:
        payload = _get_payload(parsed, data)
    except ValueError:
        unparsed.setdefault("description", []).append(
            parsed.get_payload(decode=isinstance(data, bytes))
        )
    else:
        if payload:
            # Check to see if we've already got a description; if so, then both
            # it and this body move to the unparsed dict.
            if "description" in raw:
                description_header = cast(str, raw.pop("description"))
                unparsed.setdefault("description", []).extend(
                    [description_header, payload]
                )
            elif "description" in unparsed:
                unparsed["description"].append(payload)
            else:
                raw["description"] = payload

    # We need to cast our `raw` to a RawMetadata, because a TypedDict only
    # supports literal key names, whereas we're computing our key names on
    # purpose. The way this function is implemented guarantees that our
    # `TypedDict` can only end up with valid key names.
    return cast(RawMetadata, raw), unparsed
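
# Usage sketch (illustration only): a minimal METADATA document split into
# recognized ("raw") fields and leftover unparsed ones.
#
#     >>> raw, unparsed = parse_email(
#     ...     "Metadata-Version: 2.1\nName: example\nVersion: 1.0\nUnknown: x\n"
#     ... )
#     >>> (raw["name"], raw["version"])
#     ('example', '1.0')
#     >>> unparsed
#     {'unknown': ['x']}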


_NOT_FOUND = object()


# Keep the two values in sync.
_VALID_METADATA_VERSIONS = ["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]
_MetadataVersion = Literal["1.0", "1.1", "1.2", "2.1", "2.2", "2.3"]

_REQUIRED_ATTRS = frozenset(["metadata_version", "name", "version"])


class _Validator(Generic[T]):
    """Validate a metadata field.

    All _process_*() methods correspond to a core metadata field. The method is
    called with the field's raw value. If the raw value is valid it is returned
    in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
    If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
    as appropriate).
    """

    name: str
    raw_name: str
    added: _MetadataVersion

    def __init__(
        self,
        *,
        added: _MetadataVersion = "1.0",
    ) -> None:
        self.added = added

    def __set_name__(self, _owner: Metadata, name: str) -> None:
        self.name = name
        self.raw_name = _RAW_TO_EMAIL_MAPPING[name]

    def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:
        # With Python 3.8, the caching can be replaced with functools.cached_property().
        # No need to check the cache as attribute lookup will resolve into the
        # instance's __dict__ before __get__ is called.
        cache = instance.__dict__
        value = instance._raw.get(self.name)

        # To make the _process_* methods easier, we'll check if the value is None
        # and if this field is NOT a required attribute, and if both of those
        # things are true, we'll skip the converter. This will mean that the
        # converters never have to deal with the None union.
        if self.name in _REQUIRED_ATTRS or value is not None:
            try:
                converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}")
            except AttributeError:
                pass
            else:
                value = converter(value)

        cache[self.name] = value
        try:
            del instance._raw[self.name]  # type: ignore[misc]
        except KeyError:
            pass

        return cast(T, value)

    def _invalid_metadata(
        self, msg: str, cause: Exception | None = None
    ) -> InvalidMetadata:
        exc = InvalidMetadata(
            self.raw_name, msg.format_map({"field": repr(self.raw_name)})
        )
        exc.__cause__ = cause
        return exc

    def _process_metadata_version(self, value: str) -> _MetadataVersion:
        # Implicitly makes Metadata-Version required.
        if value not in _VALID_METADATA_VERSIONS:
            raise self._invalid_metadata(f"{value!r} is not a valid metadata version")
        return cast(_MetadataVersion, value)

    def _process_name(self, value: str) -> str:
        if not value:
            raise self._invalid_metadata("{field} is a required field")
        # Validate the name as a side-effect.
        try:
            utils.canonicalize_name(value, validate=True)
        except utils.InvalidName as exc:
            raise self._invalid_metadata(
                f"{value!r} is invalid for {{field}}", cause=exc
            )
        else:
            return value

    def _process_version(self, value: str) -> version_module.Version:
        if not value:
            raise self._invalid_metadata("{field} is a required field")
        try:
            return version_module.parse(value)
        except version_module.InvalidVersion as exc:
            raise self._invalid_metadata(
                f"{value!r} is invalid for {{field}}", cause=exc
            )

    def _process_summary(self, value: str) -> str:
        """Check the field contains no newlines."""
        if "\n" in value:
            raise self._invalid_metadata("{field} must be a single line")
        return value

    def _process_description_content_type(self, value: str) -> str:
        content_types = {"text/plain", "text/x-rst", "text/markdown"}
        message = email.message.EmailMessage()
        message["content-type"] = value

        content_type, parameters = (
            # Defaults to `text/plain` if parsing failed.
            message.get_content_type().lower(),
            message["content-type"].params,
        )
        # Check that the content type is an accepted value and that it was not
        # silently defaulted to `text/plain` because the raw value was unparseable.
        if content_type not in content_types or content_type not in value.lower():
            raise self._invalid_metadata(
                f"{{field}} must be one of {list(content_types)}, not {value!r}"
            )

        charset = parameters.get("charset", "UTF-8")
        if charset != "UTF-8":
            raise self._invalid_metadata(
                f"{{field}} can only specify the UTF-8 charset, not {list(charset)}"
            )

        markdown_variants = {"GFM", "CommonMark"}
        variant = parameters.get("variant", "GFM")  # Use an acceptable default.
        if content_type == "text/markdown" and variant not in markdown_variants:
            raise self._invalid_metadata(
                f"valid Markdown variants for {{field}} are {list(markdown_variants)}, "
                f"not {variant!r}",
            )
        return value
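    # Illustrative values for _process_description_content_type above (sketch, not
    # exhaustive): "text/markdown; charset=UTF-8; variant=GFM" is accepted, while
    # "text/html" or any charset other than UTF-8 is rejected with InvalidMetadata.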

    def _process_dynamic(self, value: list[str]) -> list[str]:
        for dynamic_field in map(str.lower, value):
            if dynamic_field in {"name", "version", "metadata-version"}:
                raise self._invalid_metadata(
                    f"{value!r} is not allowed as a dynamic field"
                )
            elif dynamic_field not in _EMAIL_TO_RAW_MAPPING:
                raise self._invalid_metadata(f"{value!r} is not a valid dynamic field")
        return list(map(str.lower, value))
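    # Illustrative values for _process_dynamic above (hypothetical): ["Requires-Dist"]
    # is accepted and lowercased to ["requires-dist"], whereas ["Name"] is rejected
    # because Name, Version, and Metadata-Version may never be declared dynamic.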

    def _process_provides_extra(
        self,
        value: list[str],
    ) -> list[utils.NormalizedName]:
        normalized_names = []
        try:
            for name in value:
                normalized_names.append(utils.canonicalize_name(name, validate=True))
        except utils.InvalidName as exc:
            raise self._invalid_metadata(
                f"{name!r} is invalid for {{field}}", cause=exc
            )
        else:
            return normalized_names

    def _process_requires_python(self, value: str) -> specifiers.SpecifierSet:
        try:
            return specifiers.SpecifierSet(value)
        except specifiers.InvalidSpecifier as exc:
            raise self._invalid_metadata(
                f"{value!r} is invalid for {{field}}", cause=exc
            )
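    # Illustrative values for _process_requires_python above (hypothetical):
    #     ">=3.8"  -> specifiers.SpecifierSet(">=3.8")
    #     "3.8"    -> InvalidMetadata (no comparison operator, so not a valid specifier)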

    def _process_requires_dist(
        self,
        value: list[str],
    ) -> list[requirements.Requirement]:
        reqs = []
        try:
            for req in value:
                reqs.append(requirements.Requirement(req))
        except requirements.InvalidRequirement as exc:
            raise self._invalid_metadata(f"{req!r} is invalid for {{field}}", cause=exc)
        else:
            return reqs
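    # Illustrative values for _process_requires_dist above (hypothetical):
    #     'requests>=2.0; extra == "socks"' -> requirements.Requirement(...)
    #     "requests >="                     -> InvalidMetadata (malformed requirement)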


class Metadata:
    """Representation of distribution metadata.

    Compared to :class:`RawMetadata`, this class provides objects representing
    metadata fields instead of only using built-in types. Any invalid metadata
    will cause :exc:`InvalidMetadata` to be raised (with a
    :py:attr:`~BaseException.__cause__` attribute as appropriate).
    """

    _raw: RawMetadata

    @classmethod
    def from_raw(cls, data: RawMetadata, *, validate: bool = True) -> Metadata:
        """Create an instance from :class:`RawMetadata`.

        If *validate* is true, all metadata will be validated. All exceptions
        related to validation will be gathered and raised as an :class:`ExceptionGroup`.
        """
        ins = cls()
        ins._raw = data.copy()  # Mutations occur due to caching enriched values.

        if validate:
            exceptions: list[Exception] = []
            try:
                metadata_version = ins.metadata_version
                metadata_age = _VALID_METADATA_VERSIONS.index(metadata_version)
            except InvalidMetadata as metadata_version_exc:
                exceptions.append(metadata_version_exc)
                metadata_version = None

            # Check the fields that are present as well as the required fields
            # (so that missing required fields can be reported).
            fields_to_check = frozenset(ins._raw) | _REQUIRED_ATTRS
            # Remove fields that have already been checked.
            fields_to_check -= {"metadata_version"}

            for key in fields_to_check:
                try:
                    if metadata_version:
                        # Can't use getattr() here, as that would trigger the
                        # descriptor protocol, which fails when there is no instance.
                        try:
                            field_metadata_version = cls.__dict__[key].added
                        except KeyError:
                            exc = InvalidMetadata(key, f"unrecognized field: {key!r}")
                            exceptions.append(exc)
                            continue
                        field_age = _VALID_METADATA_VERSIONS.index(
                            field_metadata_version
                        )
                        if field_age > metadata_age:
                            field = _RAW_TO_EMAIL_MAPPING[key]
                            exc = InvalidMetadata(
                                field,
                                "{field} introduced in metadata version "
                                "{field_metadata_version}, not {metadata_version}",
                            )
                            exceptions.append(exc)
                            continue
                    getattr(ins, key)
                except InvalidMetadata as exc:
                    exceptions.append(exc)

            if exceptions:
                raise ExceptionGroup("invalid metadata", exceptions)

        return ins

    @classmethod
    def from_email(cls, data: bytes | str, *, validate: bool = True) -> Metadata:
        """Parse metadata from email headers.

        If *validate* is true, the metadata will be validated. All exceptions
        related to validation will be gathered and raised as an :class:`ExceptionGroup`.
        """
        raw, unparsed = parse_email(data)

        if validate:
            exceptions: list[Exception] = []
            for unparsed_key in unparsed:
                if unparsed_key in _EMAIL_TO_RAW_MAPPING:
                    message = f"{unparsed_key!r} has invalid data"
                else:
                    message = f"unrecognized field: {unparsed_key!r}"
                exceptions.append(InvalidMetadata(unparsed_key, message))

            if exceptions:
                raise ExceptionGroup("unparsed", exceptions)

        try:
            return cls.from_raw(raw, validate=validate)
        except ExceptionGroup as exc_group:
            raise ExceptionGroup(
                "invalid or unparsed metadata", exc_group.exceptions
            ) from None
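    # Hedged usage sketch (file name and variable names are illustrative only):
    #
    #     raw_text = pathlib.Path("PKG-INFO").read_text(encoding="utf-8")
    #     meta = Metadata.from_email(raw_text)
    #     meta.name, meta.version, meta.requires_dist
    #
    # Invalid or unparsed input raises an ExceptionGroup collecting every
    # InvalidMetadata error rather than stopping at the first one.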

    metadata_version: _Validator[_MetadataVersion] = _Validator()
    """:external:ref:`core-metadata-metadata-version`
    (required; validated to be a valid metadata version)"""
    name: _Validator[str] = _Validator()
    """:external:ref:`core-metadata-name`
    (required; validated using :func:`~packaging.utils.canonicalize_name` and its
    *validate* parameter)"""
    version: _Validator[version_module.Version] = _Validator()
    """:external:ref:`core-metadata-version` (required)"""
    dynamic: _Validator[list[str] | None] = _Validator(
        added="2.2",
    )
    """:external:ref:`core-metadata-dynamic`
    (validated against core metadata field names and lowercased)"""
    platforms: _Validator[list[str] | None] = _Validator()
    """:external:ref:`core-metadata-platform`"""
    supported_platforms: _Validator[list[str] | None] = _Validator(added="1.1")
    """:external:ref:`core-metadata-supported-platform`"""
    summary: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-summary` (validated to contain no newlines)"""
    description: _Validator[str | None] = _Validator()  # TODO 2.1: can be in body
    """:external:ref:`core-metadata-description`"""
    description_content_type: _Validator[str | None] = _Validator(added="2.1")
    """:external:ref:`core-metadata-description-content-type` (validated)"""
    keywords: _Validator[list[str] | None] = _Validator()
    """:external:ref:`core-metadata-keywords`"""
    home_page: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-home-page`"""
    download_url: _Validator[str | None] = _Validator(added="1.1")
    """:external:ref:`core-metadata-download-url`"""
    author: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-author`"""
    author_email: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-author-email`"""
    maintainer: _Validator[str | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-maintainer`"""
    maintainer_email: _Validator[str | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-maintainer-email`"""
    license: _Validator[str | None] = _Validator()
    """:external:ref:`core-metadata-license`"""
    classifiers: _Validator[list[str] | None] = _Validator(added="1.1")
    """:external:ref:`core-metadata-classifier`"""
    requires_dist: _Validator[list[requirements.Requirement] | None] = _Validator(
        added="1.2"
    )
    """:external:ref:`core-metadata-requires-dist`"""
    requires_python: _Validator[specifiers.SpecifierSet | None] = _Validator(
        added="1.2"
    )
    """:external:ref:`core-metadata-requires-python`"""
    # Because `Requires-External` allows for non-PEP 440 version specifiers, we
    # don't do any processing on the values.
    requires_external: _Validator[list[str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-requires-external`"""
    project_urls: _Validator[dict[str, str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-project-url`"""
    # PEP 685 lets us raise an error if an extra doesn't pass `Name` validation
    # regardless of metadata version.
    provides_extra: _Validator[list[utils.NormalizedName] | None] = _Validator(
        added="2.1",
    )
    """:external:ref:`core-metadata-provides-extra`"""
    provides_dist: _Validator[list[str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-provides-dist`"""
    obsoletes_dist: _Validator[list[str] | None] = _Validator(added="1.2")
    """:external:ref:`core-metadata-obsoletes-dist`"""
    requires: _Validator[list[str] | None] = _Validator(added="1.1")
    """``Requires`` (deprecated)"""
    provides: _Validator[list[str] | None] = _Validator(added="1.1")
    """``Provides`` (deprecated)"""
    obsoletes: _Validator[list[str] | None] = _Validator(added="1.1")
    """``Obsoletes`` (deprecated)"""