"""Parse (absolute and relative) URLs.

The urlparse module is based upon the following RFC specifications.

RFC 3986 (STD66): "Uniform Resource Identifiers" by T. Berners-Lee, R. Fielding
and L. Masinter, January 2005.

RFC 2732: "Format for Literal IPv6 Addresses in URL's" by R. Hinden, B. Carpenter
and L. Masinter, December 1999.

RFC 2396: "Uniform Resource Identifiers (URI)": Generic Syntax by T.
Berners-Lee, R. Fielding, and L. Masinter, August 1998.

RFC 2368: "The mailto URL scheme", by P. Hoffman, L. Masinter, J. Zawinski, July 1998.

RFC 1808: "Relative Uniform Resource Locators", by R. Fielding, UC Irvine, June
1995.

RFC 1738: "Uniform Resource Locators (URL)" by T. Berners-Lee, L. Masinter, M.
McCahill, December 1994

RFC 3986 is considered the current standard and any future changes to the
urlparse module should conform with it.  The urlparse module is
currently not entirely compliant with this RFC due to de facto
scenarios for parsing, and for backward compatibility purposes, some
parsing quirks from older RFCs are retained.  The testcases in
test_urlparse.py provide a good indicator of parsing behavior.

The WHATWG URL Parser spec should also be considered.  We are not compliant with
it either due to existing user code API behavior expectations (Hyrum's Law).
It serves as a useful guide when making changes.
"""

from collections import namedtuple
import functools
import re
import sys
import types
import warnings
import ipaddress

__all__ = ["urlparse", "urlunparse", "urljoin", "urldefrag",
           "urlsplit", "urlunsplit", "urlencode", "parse_qs",
           "parse_qsl", "quote", "quote_plus", "quote_from_bytes",
           "unquote", "unquote_plus", "unquote_to_bytes",
           "DefragResult", "ParseResult", "SplitResult",
           "DefragResultBytes", "ParseResultBytes", "SplitResultBytes"]

# A classification of schemes.
# The empty string classifies URLs with no scheme specified,
# being the default value returned by “urlsplit” and “urlparse”.

uses_relative = ['', 'ftp', 'http', 'gopher', 'nntp', 'imap',
                 'wais', 'file', 'https', 'shttp', 'mms',
                 'prospero', 'rtsp', 'rtspu', 'sftp',
                 'svn', 'svn+ssh', 'ws', 'wss']

uses_netloc = ['', 'ftp', 'http', 'gopher', 'nntp', 'telnet',
               'imap', 'wais', 'file', 'mms', 'https', 'shttp',
               'snews', 'prospero', 'rtsp', 'rtspu', 'rsync',
               'svn', 'svn+ssh', 'sftp', 'nfs', 'git', 'git+ssh',
               'ws', 'wss']

uses_params = ['', 'ftp', 'hdl', 'prospero', 'http', 'imap',
               'https', 'shttp', 'rtsp', 'rtspu', 'sip', 'sips',
               'mms', 'sftp', 'tel']

# These are not actually used anymore, but should stay for backwards
# compatibility.  (They are undocumented, but have a public-looking name.)

non_hierarchical = ['gopher', 'hdl', 'mailto', 'news',
                    'telnet', 'wais', 'imap', 'snews', 'sip', 'sips']

uses_query = ['', 'http', 'wais', 'imap', 'https', 'shttp', 'mms',
              'gopher', 'rtsp', 'rtspu', 'sip', 'sips']

uses_fragment = ['', 'ftp', 'hdl', 'http', 'gopher', 'news',
                 'nntp', 'wais', 'https', 'shttp', 'snews',
                 'file', 'prospero']

# Characters valid in scheme names
scheme_chars = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789'
                '+-.')
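
# An illustrative consequence of the classification above (urljoin() is
# defined further down in this module):
#
#     >>> urljoin('http://a/b/c', 'd')              # 'http' is in uses_relative
#     'http://a/b/d'
#     >>> urljoin('mailto:user@example.com', 'd')   # 'mailto' is not
#     'd'
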
# Leading and trailing C0 control and space to be stripped per WHATWG spec.
# == "".join([chr(i) for i in range(0, 0x20 + 1)])
_WHATWG_C0_CONTROL_OR_SPACE = '\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f '

# Unsafe bytes to be removed per WHATWG spec
_UNSAFE_URL_BYTES_TO_REMOVE = ['\t', '\r', '\n']

def clear_cache():
    """Clear internal performance caches. Undocumented; some tests want it."""
    urlsplit.cache_clear()
    _byte_quoter_factory.cache_clear()

# Helpers for bytes handling
# For 3.2, we deliberately require applications that
# handle improperly quoted URLs to do their own
# decoding and encoding. If valid use cases are
# presented, we may relax this by using latin-1
# decoding internally for 3.3
_implicit_encoding = 'ascii'
_implicit_errors = 'strict'

def _noop(obj):
    return obj

def _encode_result(obj, encoding=_implicit_encoding,
                        errors=_implicit_errors):
    return obj.encode(encoding, errors)

def _decode_args(args, encoding=_implicit_encoding,
                       errors=_implicit_errors):
    return tuple(x.decode(encoding, errors) if x else '' for x in args)

def _coerce_args(*args):
    # Invokes decode if necessary to create str args
    # and returns the coerced inputs along with
    # an appropriate result coercion function
    #   - noop for str inputs
    #   - encoding function otherwise
    str_input = isinstance(args[0], str)
    for arg in args[1:]:
        # We special-case the empty string to support the
        # "scheme=''" default argument to some functions
        if arg and isinstance(arg, str) != str_input:
            raise TypeError("Cannot mix str and non-str arguments")
    if str_input:
        return args + (_noop,)
    return _decode_args(args) + (_encode_result,)

# Result objects are more helpful than simple tuples
class _ResultMixinStr(object):
    """Standard approach to encoding parsed results from str to bytes"""
    __slots__ = ()

    def encode(self, encoding='ascii', errors='strict'):
        return self._encoded_counterpart(*(x.encode(encoding, errors) for x in self))


class _ResultMixinBytes(object):
    """Standard approach to decoding parsed results from bytes to str"""
    __slots__ = ()

    def decode(self, encoding='ascii', errors='strict'):
        return self._decoded_counterpart(*(x.decode(encoding, errors) for x in self))


class _NetlocResultMixinBase(object):
    """Shared methods for the parsed result objects containing a netloc element"""
    __slots__ = ()

    @property
    def username(self):
        return self._userinfo[0]

    @property
    def password(self):
        return self._userinfo[1]

    @property
    def hostname(self):
        hostname = self._hostinfo[0]
        if not hostname:
            return None
        # Scoped IPv6 address may have zone info, which must not be lowercased
        # like http://[fe80::822a:a8ff:fe49:470c%tESt]:1234/keys
        separator = '%' if isinstance(hostname, str) else b'%'
        hostname, percent, zone = hostname.partition(separator)
        return hostname.lower() + percent + zone

    @property
    def port(self):
        port = self._hostinfo[1]
        if port is not None:
            if port.isdigit() and port.isascii():
                port = int(port)
            else:
                raise ValueError(f"Port could not be cast to integer value as {port!r}")
            if not (0 <= port <= 65535):
                raise ValueError("Port out of range 0-65535")
        return port

    __class_getitem__ = classmethod(types.GenericAlias)
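
# Illustrative use of the properties above, via urlsplit() (defined later in
# this module):
#
#     >>> r = urlsplit('http://user:secret@WWW.Example.COM:8042/path')
#     >>> r.username, r.password, r.hostname, r.port
#     ('user', 'secret', 'www.example.com', 8042)
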
class _NetlocResultMixinStr(_NetlocResultMixinBase, _ResultMixinStr):
    __slots__ = ()

    @property
    def _userinfo(self):
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition('@')
        if have_info:
            username, have_password, password = userinfo.partition(':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition('@')
        _, have_open_br, bracketed = hostinfo.partition('[')
        if have_open_br:
            hostname, _, port = bracketed.partition(']')
            _, _, port = port.partition(':')
        else:
            hostname, _, port = hostinfo.partition(':')
        if not port:
            port = None
        return hostname, port


class _NetlocResultMixinBytes(_NetlocResultMixinBase, _ResultMixinBytes):
    __slots__ = ()

    @property
    def _userinfo(self):
        netloc = self.netloc
        userinfo, have_info, hostinfo = netloc.rpartition(b'@')
        if have_info:
            username, have_password, password = userinfo.partition(b':')
            if not have_password:
                password = None
        else:
            username = password = None
        return username, password

    @property
    def _hostinfo(self):
        netloc = self.netloc
        _, _, hostinfo = netloc.rpartition(b'@')
        _, have_open_br, bracketed = hostinfo.partition(b'[')
        if have_open_br:
            hostname, _, port = bracketed.partition(b']')
            _, _, port = port.partition(b':')
        else:
            hostname, _, port = hostinfo.partition(b':')
        if not port:
            port = None
        return hostname, port


_DefragResultBase = namedtuple('DefragResult', 'url fragment')
_SplitResultBase = namedtuple(
    'SplitResult', 'scheme netloc path query fragment')
_ParseResultBase = namedtuple(
    'ParseResult', 'scheme netloc path params query fragment')

_DefragResultBase.__doc__ = """
DefragResult(url, fragment)

A 2-tuple that contains the url without fragment identifier and the fragment
identifier as a separate argument.
"""

_DefragResultBase.url.__doc__ = """The URL with no fragment identifier."""

_DefragResultBase.fragment.__doc__ = """
Fragment identifier separated from URL, that allows indirect identification of a
secondary resource by reference to a primary resource and additional identifying
information.
"""

_SplitResultBase.__doc__ = """
SplitResult(scheme, netloc, path, query, fragment)

A 5-tuple that contains the different components of a URL. Similar to
ParseResult, but does not split params.
"""

_SplitResultBase.scheme.__doc__ = """Specifies URL scheme for the request."""

_SplitResultBase.netloc.__doc__ = """
Network location where the request is made to.
"""

_SplitResultBase.path.__doc__ = """
The hierarchical path, such as the path to a file to download.
"""

_SplitResultBase.query.__doc__ = """
The query component, that contains non-hierarchical data, that along with data
in path component, identifies a resource in the scope of URI's scheme and
network location.
"""

_SplitResultBase.fragment.__doc__ = """
Fragment identifier, that allows indirect identification of a secondary resource
by reference to a primary resource and additional identifying information.
"""
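
# The practical difference between the SplitResult and ParseResult families
# documented here is the params field:
#
#     >>> urlparse('http://host/file;type=a?x=1')[2:4]
#     ('/file', 'type=a')
#     >>> urlsplit('http://host/file;type=a?x=1').path
#     '/file;type=a'
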
_ParseResultBase.__doc__ = """
ParseResult(scheme, netloc, path, params, query, fragment)

A 6-tuple that contains components of a parsed URL.
"""

_ParseResultBase.scheme.__doc__ = _SplitResultBase.scheme.__doc__
_ParseResultBase.netloc.__doc__ = _SplitResultBase.netloc.__doc__
_ParseResultBase.path.__doc__ = _SplitResultBase.path.__doc__
_ParseResultBase.params.__doc__ = """
Parameters for last path element used to dereference the URI in order to provide
access to perform some operation on the resource.
"""

_ParseResultBase.query.__doc__ = _SplitResultBase.query.__doc__
_ParseResultBase.fragment.__doc__ = _SplitResultBase.fragment.__doc__


# For backwards compatibility, alias _NetlocResultMixinStr
# ResultBase is no longer part of the documented API, but it is
# retained since deprecating it isn't worth the hassle
ResultBase = _NetlocResultMixinStr

# Structured result objects for string data
class DefragResult(_DefragResultBase, _ResultMixinStr):
    __slots__ = ()
    def geturl(self):
        if self.fragment:
            return self.url + '#' + self.fragment
        else:
            return self.url

class SplitResult(_SplitResultBase, _NetlocResultMixinStr):
    __slots__ = ()
    def geturl(self):
        return urlunsplit(self)

class ParseResult(_ParseResultBase, _NetlocResultMixinStr):
    __slots__ = ()
    def geturl(self):
        return urlunparse(self)

# Structured result objects for bytes data
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    __slots__ = ()
    def geturl(self):
        if self.fragment:
            return self.url + b'#' + self.fragment
        else:
            return self.url

class SplitResultBytes(_SplitResultBase, _NetlocResultMixinBytes):
    __slots__ = ()
    def geturl(self):
        return urlunsplit(self)

class ParseResultBytes(_ParseResultBase, _NetlocResultMixinBytes):
    __slots__ = ()
    def geturl(self):
        return urlunparse(self)

# Set up the encode/decode result pairs
def _fix_result_transcoding():
    _result_pairs = (
        (DefragResult, DefragResultBytes),
        (SplitResult, SplitResultBytes),
        (ParseResult, ParseResultBytes),
    )
    for _decoded, _encoded in _result_pairs:
        _decoded._encoded_counterpart = _encoded
        _encoded._decoded_counterpart = _decoded

_fix_result_transcoding()
del _fix_result_transcoding

def urlparse(url, scheme='', allow_fragments=True):
    """Parse a URL into 6 components:
    <scheme>://<netloc>/<path>;<params>?<query>#<fragment>

    The result is a named 6-tuple with fields corresponding to the
    above. It is either a ParseResult or ParseResultBytes object,
    depending on the type of the url parameter.

    The username, password, hostname, and port sub-components of netloc
    can also be accessed as attributes of the returned object.

    The scheme argument provides the default value of the scheme
    component when no scheme is found in url.

    If allow_fragments is False, no attempt is made to separate the
    fragment component from the previous component, which can be either
    path or query.

    Note that % escapes are not expanded.
    """
    url, scheme, _coerce_result = _coerce_args(url, scheme)
    splitresult = urlsplit(url, scheme, allow_fragments)
    scheme, netloc, url, query, fragment = splitresult
    if scheme in uses_params and ';' in url:
        url, params = _splitparams(url)
    else:
        params = ''
    result = ParseResult(scheme, netloc, url, params, query, fragment)
    return _coerce_result(result)
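
# An illustrative call, showing how the six components are populated:
#
#     >>> urlparse('http://host/p;x?q=1#f')
#     ParseResult(scheme='http', netloc='host', path='/p', params='x', query='q=1', fragment='f')
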
def _splitparams(url):
    if '/' in url:
        i = url.find(';', url.rfind('/'))
        if i < 0:
            return url, ''
    else:
        i = url.find(';')
    return url[:i], url[i+1:]

def _splitnetloc(url, start=0):
    delim = len(url)   # position of end of domain part of url, default is end
    for c in '/?#':    # look for delimiters; the order is NOT important
        wdelim = url.find(c, start)        # find first of this delim
        if wdelim >= 0:                    # if found
            delim = min(delim, wdelim)     # use earliest delim position
    return url[start:delim], url[delim:]   # return (domain, rest)

def _checknetloc(netloc):
    if not netloc or netloc.isascii():
        return
    # looking for characters like \u2100 that expand to 'a/c'
    # IDNA uses NFKC equivalence, so normalize for this check
    import unicodedata
    n = netloc.replace('@', '')   # ignore characters already included
    n = n.replace(':', '')        # but not the surrounding text
    n = n.replace('#', '')
    n = n.replace('?', '')
    netloc2 = unicodedata.normalize('NFKC', n)
    if n == netloc2:
        return
    for c in '/?#@:':
        if c in netloc2:
            raise ValueError("netloc '" + netloc + "' contains invalid " +
                             "characters under NFKC normalization")

# Valid bracketed hosts are defined in
# https://www.rfc-editor.org/rfc/rfc3986#page-49 and https://url.spec.whatwg.org/
def _check_bracketed_host(hostname):
    if hostname.startswith('v'):
        if not re.match(r"\Av[a-fA-F0-9]+\..+\Z", hostname):
            raise ValueError(f"IPvFuture address is invalid")
    else:
        ip = ipaddress.ip_address(hostname)  # Throws ValueError if not IPv6 or IPv4
        if isinstance(ip, ipaddress.IPv4Address):
            raise ValueError(f"An IPv4 address cannot be in brackets")

# typed=True avoids BytesWarnings being emitted during cache key
# comparison since this API supports both bytes and str input.
@functools.lru_cache(typed=True)
def urlsplit(url, scheme='', allow_fragments=True):
    """Parse a URL into 5 components:
    <scheme>://<netloc>/<path>?<query>#<fragment>

    The result is a named 5-tuple with fields corresponding to the
    above. It is either a SplitResult or SplitResultBytes object,
    depending on the type of the url parameter.

    The username, password, hostname, and port sub-components of netloc
    can also be accessed as attributes of the returned object.

    The scheme argument provides the default value of the scheme
    component when no scheme is found in url.

    If allow_fragments is False, no attempt is made to separate the
    fragment component from the previous component, which can be either
    path or query.

    Note that % escapes are not expanded.
    """

    url, scheme, _coerce_result = _coerce_args(url, scheme)
    # Only lstrip url as some applications rely on preserving trailing space.
    # (https://url.spec.whatwg.org/#concept-basic-url-parser would strip both)
    url = url.lstrip(_WHATWG_C0_CONTROL_OR_SPACE)
    scheme = scheme.strip(_WHATWG_C0_CONTROL_OR_SPACE)

    for b in _UNSAFE_URL_BYTES_TO_REMOVE:
        url = url.replace(b, "")
        scheme = scheme.replace(b, "")

    allow_fragments = bool(allow_fragments)
    netloc = query = fragment = ''
    i = url.find(':')
    if i > 0 and url[0].isascii() and url[0].isalpha():
        for c in url[:i]:
            if c not in scheme_chars:
                break
        else:
            scheme, url = url[:i].lower(), url[i+1:]
    if url[:2] == '//':
        netloc, url = _splitnetloc(url, 2)
        if (('[' in netloc and ']' not in netloc) or
                (']' in netloc and '[' not in netloc)):
            raise ValueError("Invalid IPv6 URL")
        if '[' in netloc and ']' in netloc:
            bracketed_host = netloc.partition('[')[2].partition(']')[0]
            _check_bracketed_host(bracketed_host)
    if allow_fragments and '#' in url:
        url, fragment = url.split('#', 1)
    if '?' in url:
        url, query = url.split('?', 1)
    _checknetloc(netloc)
    v = SplitResult(scheme, netloc, url, query, fragment)
    return _coerce_result(v)

def urlunparse(components):
    """Put a parsed URL back together again.  This may result in a
    slightly different, but equivalent URL, if the URL that was parsed
    originally had redundant delimiters, e.g. a ? with an empty query
    (the draft states that these are equivalent)."""
    scheme, netloc, url, params, query, fragment, _coerce_result = (
                                                  _coerce_args(*components))
    if params:
        url = "%s;%s" % (url, params)
    return _coerce_result(urlunsplit((scheme, netloc, url, query, fragment)))

def urlunsplit(components):
    """Combine the elements of a tuple as returned by urlsplit() into a
    complete URL as a string. The data argument can be any five-item iterable.
    This may result in a slightly different, but equivalent URL, if the URL that
    was parsed originally had unnecessary delimiters (for example, a ? with an
    empty query; the RFC states that these are equivalent)."""
    scheme, netloc, url, query, fragment, _coerce_result = (
                                          _coerce_args(*components))
    if netloc or (scheme and scheme in uses_netloc and url[:2] != '//'):
        if url and url[:1] != '/': url = '/' + url
        url = '//' + (netloc or '') + url
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return _coerce_result(url)
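
# Illustrative round trip between urlsplit() and urlunsplit():
#
#     >>> urlsplit('https://www.example.com/path?q=1#top')
#     SplitResult(scheme='https', netloc='www.example.com', path='/path', query='q=1', fragment='top')
#     >>> urlunsplit(('https', 'www.example.com', '/path', 'q=1', 'top'))
#     'https://www.example.com/path?q=1#top'
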
def urljoin(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter."""
    if not base:
        return url
    if not url:
        return base

    base, url, _coerce_result = _coerce_args(base, url)
    bscheme, bnetloc, bpath, bparams, bquery, bfragment = \
            urlparse(base, '', allow_fragments)
    scheme, netloc, path, params, query, fragment = \
            urlparse(url, bscheme, allow_fragments)

    if scheme != bscheme or scheme not in uses_relative:
        return _coerce_result(url)
    if scheme in uses_netloc:
        if netloc:
            return _coerce_result(urlunparse((scheme, netloc, path,
                                              params, query, fragment)))
        netloc = bnetloc

    if not path and not params:
        path = bpath
        params = bparams
        if not query:
            query = bquery
        return _coerce_result(urlunparse((scheme, netloc, path,
                                          params, query, fragment)))

    base_parts = bpath.split('/')
    if base_parts[-1] != '':
        # the last item is not a directory, so will not be taken into account
        # in resolving the relative path
        del base_parts[-1]

    # for rfc3986, ignore all base path should the first character be root.
    if path[:1] == '/':
        segments = path.split('/')
    else:
        segments = base_parts + path.split('/')
        # filter out elements that would cause redundant slashes on re-joining
        # the resolved_path
        segments[1:-1] = filter(None, segments[1:-1])

    resolved_path = []

    for seg in segments:
        if seg == '..':
            try:
                resolved_path.pop()
            except IndexError:
                # ignore any .. segments that would otherwise cause an IndexError
                # when popped from resolved_path if resolving for rfc3986
                pass
        elif seg == '.':
            continue
        else:
            resolved_path.append(seg)

    if segments[-1] in ('.', '..'):
        # do some post-processing here. if the last segment was a relative dir,
        # then we need to append the trailing '/'
        resolved_path.append('')

    return _coerce_result(urlunparse((scheme, netloc, '/'.join(
        resolved_path) or '/', params, query, fragment)))


def urldefrag(url):
    """Removes any existing fragment from URL.

    Returns a tuple of the defragmented URL and the fragment.  If
    the URL contained no fragments, the second element is the
    empty string.
    """
    url, _coerce_result = _coerce_args(url)
    if '#' in url:
        s, n, p, a, q, frag = urlparse(url)
        defrag = urlunparse((s, n, p, a, q, ''))
    else:
        frag = ''
        defrag = url
    return _coerce_result(DefragResult(defrag, frag))
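
# Illustrative calls; the urljoin() case is the '../g' reference-resolution
# example from RFC 3986:
#
#     >>> urljoin('http://a/b/c/d;p?q', '../g')
#     'http://a/b/g'
#     >>> urldefrag('http://example.com/page#top')
#     DefragResult(url='http://example.com/page', fragment='top')
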
_hexdig = '0123456789ABCDEFabcdef'
_hextobyte = None

def unquote_to_bytes(string):
    """unquote_to_bytes('abc%20def') -> b'abc def'."""
    # Note: strings are encoded as UTF-8. This is only an issue if it contains
    # unescaped non-ASCII characters, which URIs should not.
    if not string:
        # Is it a string-like object?
        string.split
        return b''
    if isinstance(string, str):
        string = string.encode('utf-8')
    bits = string.split(b'%')
    if len(bits) == 1:
        return string
    res = [bits[0]]
    append = res.append
    # Delay the initialization of the table to not waste memory
    # if the function is never called
    global _hextobyte
    if _hextobyte is None:
        _hextobyte = {(a + b).encode(): bytes.fromhex(a + b)
                      for a in _hexdig for b in _hexdig}
    for item in bits[1:]:
        try:
            append(_hextobyte[item[:2]])
            append(item[2:])
        except KeyError:
            append(b'%')
            append(item)
    return b''.join(res)

_asciire = re.compile('([\x00-\x7f]+)')

def unquote(string, encoding='utf-8', errors='replace'):
    """Replace %xx escapes by their single-character equivalent. The optional
    encoding and errors parameters specify how to decode percent-encoded
    sequences into Unicode characters, as accepted by the bytes.decode()
    method.
    By default, percent-encoded sequences are decoded with UTF-8, and invalid
    sequences are replaced by a placeholder character.

    unquote('abc%20def') -> 'abc def'.
    """
    if isinstance(string, bytes):
        return unquote_to_bytes(string).decode(encoding, errors)
    if '%' not in string:
        # Is it a string-like object?
        string.split
        return string
    if encoding is None:
        encoding = 'utf-8'
    if errors is None:
        errors = 'replace'
    bits = _asciire.split(string)
    res = [bits[0]]
    append = res.append
    for i in range(1, len(bits), 2):
        append(unquote_to_bytes(bits[i]).decode(encoding, errors))
        append(bits[i + 1])
    return ''.join(res)


def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
             encoding='utf-8', errors='replace', max_num_fields=None,
             separator='&'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as
        blank strings.  The default false value indicates that
        blank values are to be ignored and treated as if they were
        not included.

    strict_parsing: flag indicating what to do with parsing errors.
        If false (the default), errors are silently ignored.
        If true, errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    max_num_fields: int. If set, then throws a ValueError if there
        are more than n fields read by parse_qsl().

    separator: str. The symbol to use for separating the query arguments.
        Defaults to &.

    Returns a dictionary.
    """
    parsed_result = {}
    pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
                      encoding=encoding, errors=errors,
                      max_num_fields=max_num_fields, separator=separator)
    for name, value in pairs:
        if name in parsed_result:
            parsed_result[name].append(value)
        else:
            parsed_result[name] = [value]
    return parsed_result
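
# An illustrative call, showing repeated names collected into one list:
#
#     >>> parse_qs('a=1&a=2&b=%20')
#     {'a': ['1', '2'], 'b': [' ']}
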
def parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
              encoding='utf-8', errors='replace', max_num_fields=None,
              separator='&'):
    """Parse a query given as a string argument.

    Arguments:

    qs: percent-encoded query string to be parsed

    keep_blank_values: flag indicating whether blank values in
        percent-encoded queries should be treated as blank strings.
        A true value indicates that blanks should be retained as blank
        strings.  The default false value indicates that blank values
        are to be ignored and treated as if they were not included.

    strict_parsing: flag indicating what to do with parsing errors. If
        false (the default), errors are silently ignored. If true,
        errors raise a ValueError exception.

    encoding and errors: specify how to decode percent-encoded sequences
        into Unicode characters, as accepted by the bytes.decode() method.

    max_num_fields: int. If set, then throws a ValueError
        if there are more than n fields read by parse_qsl().

    separator: str. The symbol to use for separating the query arguments.
        Defaults to &.

    Returns a list of (name, value) pairs.
    """
    qs, _coerce_result = _coerce_args(qs)
    separator, _ = _coerce_args(separator)

    if not separator or (not isinstance(separator, (str, bytes))):
        raise ValueError("Separator must be of type string or bytes.")

    # If max_num_fields is defined then check that the number of fields
    # is less than max_num_fields. This prevents a memory exhaustion DOS
    # attack via post bodies with many fields.
    if max_num_fields is not None:
        num_fields = 1 + qs.count(separator) if qs else 0
        if max_num_fields < num_fields:
            raise ValueError('Max number of fields exceeded')

    r = []
    query_args = qs.split(separator) if qs else []
    for name_value in query_args:
        if not name_value and not strict_parsing:
            continue
        nv = name_value.split('=', 1)
        if len(nv) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (name_value,))
            # Handle case of a control-name with no equal sign
            if keep_blank_values:
                nv.append('')
            else:
                continue
        if len(nv[1]) or keep_blank_values:
            name = nv[0].replace('+', ' ')
            name = unquote(name, encoding=encoding, errors=errors)
            name = _coerce_result(name)
            value = nv[1].replace('+', ' ')
            value = unquote(value, encoding=encoding, errors=errors)
            value = _coerce_result(value)
            r.append((name, value))
    return r

def unquote_plus(string, encoding='utf-8', errors='replace'):
    """Like unquote(), but also replace plus signs by spaces, as required for
    unquoting HTML form values.

    unquote_plus('%7e/abc+def') -> '~/abc def'
    """
    string = string.replace('+', ' ')
    return unquote(string, encoding, errors)

_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                         b'abcdefghijklmnopqrstuvwxyz'
                         b'0123456789'
                         b'_.-~')
_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)

def __getattr__(name):
    if name == 'Quoter':
        warnings.warn('Deprecated in 3.11. '
                      'urllib.parse.Quoter will be removed in Python 3.14. '
                      'It was not intended to be a public API.',
                      DeprecationWarning, stacklevel=2)
        return _Quoter
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')
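
# An illustrative call, showing '+' and '%xx' decoding in values:
#
#     >>> parse_qsl('key=val+1&key=val%202')
#     [('key', 'val 1'), ('key', 'val 2')]
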
class _Quoter(dict):
    """A mapping from bytes numbers (in range(0,256)) to strings.

    String values are percent-encoded byte values, unless the key < 128, and
    in either of the specified safe set, or the always safe set.
    """

    # Keeps a cache internally, via __missing__, for efficiency (lookups
    # of cached keys don't call Python code at all).
    def __init__(self, safe):
        """safe: bytes object."""
        self.safe = _ALWAYS_SAFE.union(safe)

    def __repr__(self):
        return f"<Quoter {dict(self)!r}>"

    def __missing__(self, b):
        # Handle a cache miss. Store quoted string in cache and return.
        res = chr(b) if b in self.safe else '%{:02X}'.format(b)
        self[b] = res
        return res

def quote(string, safe='/', encoding=None, errors=None):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted. The
    quote function offers a cautious (not minimal) way to quote a
    string for most of these parts.

    RFC 3986 Uniform Resource Identifier (URI): Generic Syntax lists
    the following (un)reserved characters.

    unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
    reserved      = gen-delims / sub-delims
    gen-delims    = ":" / "/" / "?" / "#" / "[" / "]" / "@"
    sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
                  / "*" / "+" / "," / ";" / "="

    Each of the reserved characters is reserved in some component of a URL,
    but not necessarily in all of them.

    The quote function %-escapes all characters that are neither in the
    unreserved chars ("always safe") nor the additional chars set via the
    safe arg.

    The default for the safe arg is '/'. The character is reserved, but in
    typical usage the quote function is being called on a path where the
    existing slash characters are to be preserved.

    Python 3.7 updates from using RFC 2396 to RFC 3986 to quote URL strings.
    Now, "~" is included in the set of unreserved characters.

    string and safe may be either str or bytes objects. encoding and errors
    must not be specified if string is a bytes object.

    The optional encoding and errors parameters specify how to deal with
    non-ASCII characters, as accepted by the str.encode method.
    By default, encoding='utf-8' (characters are encoded with UTF-8), and
    errors='strict' (unsupported characters raise a UnicodeEncodeError).
    """
    if isinstance(string, str):
        if not string:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'strict'
        string = string.encode(encoding, errors)
    else:
        if encoding is not None:
            raise TypeError("quote() doesn't support 'encoding' for bytes")
        if errors is not None:
            raise TypeError("quote() doesn't support 'errors' for bytes")
    return quote_from_bytes(string, safe)

def quote_plus(string, safe='', encoding=None, errors=None):
    """Like quote(), but also replace ' ' with '+', as required for quoting
    HTML form values. Plus signs in the original string are escaped unless
    they are included in safe. It also does not have safe default to '/'.
    """
    # Check if ' ' in string, where string may either be a str or bytes.  If
    # there are no spaces, the regular quote will produce the right answer.
    if ((isinstance(string, str) and ' ' not in string) or
            (isinstance(string, bytes) and b' ' not in string)):
        return quote(string, safe, encoding, errors)
    if isinstance(safe, str):
        space = ' '
    else:
        space = b' '
    string = quote(string, safe + space, encoding, errors)
    return string.replace(' ', '+')
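
# Illustrative calls, showing the differing treatment of ' ' and '/':
#
#     >>> quote('abc def/ghi')
#     'abc%20def/ghi'
#     >>> quote_plus('abc def/ghi')
#     'abc+def%2Fghi'
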
# Expectation: A typical program is unlikely to create more than 5 of these.
@functools.lru_cache
def _byte_quoter_factory(safe):
    return _Quoter(safe).__getitem__

def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return ''
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = safe.encode('ascii', 'ignore')
    else:
        # List comprehensions are faster than generator expressions.
        safe = bytes([c for c in safe if c < 128])
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        return bs.decode()
    quoter = _byte_quoter_factory(safe)
    return ''.join([quoter(char) for char in bs])
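
# An illustrative call ('&' is escaped, '/' is safe by default):
#
#     >>> quote_from_bytes(b'a&b/c')
#     'a%26b/c'
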
def urlencode(query, doseq=False, safe='', encoding=None, errors=None,
              quote_via=quote_plus):
    """Encode a dict or sequence of two-element tuples into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.

    The components of a query arg may each be either a string or a bytes type.

    The safe, encoding, and errors parameters are passed down to the function
    specified by quote_via (encoding and errors only if a component is a str).
    """

    if hasattr(query, "items"):
        query = query.items()
    else:
        # It's a bother at times that strings and string-like objects are
        # sequences.
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # Zero-length sequences of all types will get here and succeed,
            # but that's a minor nit.  Since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError as err:
            raise TypeError("not a valid non-string sequence "
                            "or mapping object") from err

    l = []
    if not doseq:
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_via(v, safe)
            else:
                v = quote_via(str(v), safe, encoding, errors)
            l.append(k + '=' + v)
    else:
        for k, v in query:
            if isinstance(k, bytes):
                k = quote_via(k, safe)
            else:
                k = quote_via(str(k), safe, encoding, errors)

            if isinstance(v, bytes):
                v = quote_via(v, safe)
                l.append(k + '=' + v)
            elif isinstance(v, str):
                v = quote_via(v, safe, encoding, errors)
                l.append(k + '=' + v)
            else:
                try:
                    # Is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_via(str(v), safe, encoding, errors)
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence
                    for elt in v:
                        if isinstance(elt, bytes):
                            elt = quote_via(elt, safe)
                        else:
                            elt = quote_via(str(elt), safe, encoding, errors)
                        l.append(k + '=' + elt)
    return '&'.join(l)
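
# Illustrative calls, with and without doseq:
#
#     >>> urlencode({'a': 1, 'b': 'x y'})
#     'a=1&b=x+y'
#     >>> urlencode({'k': [1, 2]}, doseq=True)
#     'k=1&k=2'
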
def to_bytes(url):
    warnings.warn("urllib.parse.to_bytes() is deprecated as of 3.8",
                  DeprecationWarning, stacklevel=2)
    return _to_bytes(url)


def _to_bytes(url):
    """to_bytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed.
    # XXX get rid of to_bytes()
    if isinstance(url, str):
        try:
            url = url.encode("ASCII").decode()
        except UnicodeError:
            raise UnicodeError("URL " + repr(url) +
                               " contains non-ASCII characters")
    return url


def unwrap(url):
    """Transform a string like '<URL:scheme://host/path>' into 'scheme://host/path'.

    The string is returned unchanged if it's not a wrapped URL.
    """
    url = str(url).strip()
    if url[:1] == '<' and url[-1:] == '>':
        url = url[1:-1].strip()
    if url[:4] == 'URL:':
        url = url[4:].strip()
    return url


def splittype(url):
    warnings.warn("urllib.parse.splittype() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splittype(url)


_typeprog = None
def _splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        _typeprog = re.compile('([^/:]+):(.*)', re.DOTALL)

    match = _typeprog.match(url)
    if match:
        scheme, data = match.groups()
        return scheme.lower(), data
    return None, url


def splithost(url):
    warnings.warn("urllib.parse.splithost() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splithost(url)


_hostprog = None
def _splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        _hostprog = re.compile('//([^/#?]*)(.*)', re.DOTALL)

    match = _hostprog.match(url)
    if match:
        host_port, path = match.groups()
        if path and path[0] != '/':
            path = '/' + path
        return host_port, path
    return None, url


def splituser(host):
    warnings.warn("urllib.parse.splituser() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splituser(host)


def _splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    user, delim, host = host.rpartition('@')
    return (user if delim else None), host


def splitpasswd(user):
    warnings.warn("urllib.parse.splitpasswd() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splitpasswd(user)


def _splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    user, delim, passwd = user.partition(':')
    return user, (passwd if delim else None)


def splitport(host):
    warnings.warn("urllib.parse.splitport() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splitport(host)


_portprog = None
def _splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        _portprog = re.compile('(.*):([0-9]*)', re.DOTALL)

    match = _portprog.fullmatch(host)
    if match:
        host, port = match.groups()
        if port:
            return host, port
    return host, None


def splitnport(host, defport=-1):
    warnings.warn("urllib.parse.splitnport() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splitnport(host, defport)


def _splitnport(host, defport=-1):
    """Split host and port, returning numeric port.
    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number is found after ':'.
    Return None if ':' but not a valid number."""
    host, delim, port = host.rpartition(':')
    if not delim:
        host = port
    elif port:
        if port.isdigit() and port.isascii():
            nport = int(port)
        else:
            nport = None
        return host, nport
    return host, defport


def splitquery(url):
    warnings.warn("urllib.parse.splitquery() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splitquery(url)


def _splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'."""
    path, delim, query = url.rpartition('?')
    if delim:
        return path, query
    return url, None


def splittag(url):
    warnings.warn("urllib.parse.splittag() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splittag(url)


def _splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    path, delim, tag = url.rpartition('#')
    if delim:
        return path, tag
    return url, None


def splitattr(url):
    warnings.warn("urllib.parse.splitattr() is deprecated as of 3.8, "
                  "use urllib.parse.urlparse() instead",
                  DeprecationWarning, stacklevel=2)
    return _splitattr(url)


def _splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
        '/path', ['attr1=value1', 'attr2=value2', ...]."""
    words = url.split(';')
    return words[0], words[1:]


def splitvalue(attr):
    warnings.warn("urllib.parse.splitvalue() is deprecated as of 3.8, "
                  "use urllib.parse.parse_qsl() instead",
                  DeprecationWarning, stacklevel=2)
    return _splitvalue(attr)


def _splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    attr, delim, value = attr.partition('=')
    return attr, (value if delim else None)
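
# Illustrative calls to two of the helpers above:
#
#     >>> unwrap('<URL:http://example.com/>')
#     'http://example.com/'
#     >>> _splitnport('example.com:8080')
#     ('example.com', 8080)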