fix: 포트 충돌 회피 — note_bridge 8098, intent_service 8099
Jellyfin(8096), OrbStack(8097) 포트 충돌로 변경. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2020 TAHRI Ahmed R.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -0,0 +1,157 @@
|
||||
"""
|
||||
Kiss-Headers
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
Kiss-Headers is a headers, HTTP or IMAP4 _(message, email)_ flavour, utility, written in pure Python, for humans.
|
||||
Object oriented headers. Keep it sweet and simple.
|
||||
Basic usage:
|
||||
|
||||
>>> import requests
|
||||
>>> from kiss_headers import parse_it
|
||||
>>> r = requests.get('https://www.python.org')
|
||||
>>> headers = parse_it(r)
|
||||
>>> 'charset' in headers.content_type
|
||||
True
|
||||
>>> headers.content_type.charset
|
||||
'utf-8'
|
||||
>>> 'text/html' in headers.content_type
|
||||
True
|
||||
>>> headers.content_type == 'text/html'
|
||||
True
|
||||
>>> headers -= 'content-type'
|
||||
>>> 'Content-Type' in headers
|
||||
False
|
||||
|
||||
... or from a raw IMAP4 message:
|
||||
|
||||
>>> message = requests.get("https://gist.githubusercontent.com/Ousret/8b84b736c375bb6aa3d389e86b5116ec/raw/21cb2f7af865e401c37d9b053fb6fe1abf63165b/sample-message.eml").content
|
||||
>>> headers = parse_it(message)
|
||||
>>> 'Sender' in headers
|
||||
True
|
||||
|
||||
Others methods and usages are available - see the full documentation
|
||||
at <https://github.com/jawah/kiss-headers>.
|
||||
|
||||
:copyright: (c) 2020 by Ahmed TAHRI
|
||||
:license: MIT, see LICENSE for more details.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from .api import dumps, explain, get_polymorphic, parse_it
|
||||
from .builder import (
|
||||
Accept,
|
||||
AcceptEncoding,
|
||||
AcceptLanguage,
|
||||
Allow,
|
||||
AltSvc,
|
||||
Authorization,
|
||||
BasicAuthorization,
|
||||
CacheControl,
|
||||
Connection,
|
||||
ContentDisposition,
|
||||
ContentEncoding,
|
||||
ContentLength,
|
||||
ContentRange,
|
||||
ContentSecurityPolicy,
|
||||
ContentType,
|
||||
CrossOriginResourcePolicy,
|
||||
CustomHeader,
|
||||
Date,
|
||||
Digest,
|
||||
Dnt,
|
||||
Etag,
|
||||
Expires,
|
||||
Forwarded,
|
||||
From,
|
||||
Host,
|
||||
IfMatch,
|
||||
IfModifiedSince,
|
||||
IfNoneMatch,
|
||||
IfUnmodifiedSince,
|
||||
KeepAlive,
|
||||
LastModified,
|
||||
Location,
|
||||
ProxyAuthorization,
|
||||
Referer,
|
||||
ReferrerPolicy,
|
||||
RetryAfter,
|
||||
Server,
|
||||
SetCookie,
|
||||
StrictTransportSecurity,
|
||||
TransferEncoding,
|
||||
UpgradeInsecureRequests,
|
||||
UserAgent,
|
||||
Vary,
|
||||
WwwAuthenticate,
|
||||
XContentTypeOptions,
|
||||
XDnsPrefetchControl,
|
||||
XFrameOptions,
|
||||
XXssProtection,
|
||||
)
|
||||
from .models import Attributes, Header, Headers, lock_output_type
|
||||
from .serializer import decode, encode
|
||||
from .version import VERSION, __version__
|
||||
|
||||
__all__ = (
|
||||
"dumps",
|
||||
"explain",
|
||||
"get_polymorphic",
|
||||
"parse_it",
|
||||
"Attributes",
|
||||
"Header",
|
||||
"Headers",
|
||||
"lock_output_type",
|
||||
"decode",
|
||||
"encode",
|
||||
"VERSION",
|
||||
"__version__",
|
||||
"Accept",
|
||||
"AcceptEncoding",
|
||||
"AcceptLanguage",
|
||||
"Allow",
|
||||
"AltSvc",
|
||||
"Authorization",
|
||||
"BasicAuthorization",
|
||||
"CacheControl",
|
||||
"Connection",
|
||||
"ContentDisposition",
|
||||
"ContentEncoding",
|
||||
"ContentLength",
|
||||
"ContentRange",
|
||||
"ContentSecurityPolicy",
|
||||
"ContentType",
|
||||
"CrossOriginResourcePolicy",
|
||||
"CustomHeader",
|
||||
"Date",
|
||||
"Digest",
|
||||
"Dnt",
|
||||
"Etag",
|
||||
"Expires",
|
||||
"Forwarded",
|
||||
"From",
|
||||
"Host",
|
||||
"IfMatch",
|
||||
"IfModifiedSince",
|
||||
"IfNoneMatch",
|
||||
"IfUnmodifiedSince",
|
||||
"KeepAlive",
|
||||
"LastModified",
|
||||
"Location",
|
||||
"ProxyAuthorization",
|
||||
"Referer",
|
||||
"ReferrerPolicy",
|
||||
"RetryAfter",
|
||||
"Server",
|
||||
"SetCookie",
|
||||
"StrictTransportSecurity",
|
||||
"TransferEncoding",
|
||||
"UpgradeInsecureRequests",
|
||||
"UserAgent",
|
||||
"Vary",
|
||||
"WwwAuthenticate",
|
||||
"XContentTypeOptions",
|
||||
"XDnsPrefetchControl",
|
||||
"XFrameOptions",
|
||||
"XXssProtection",
|
||||
)
|
||||
@@ -0,0 +1,209 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from copy import deepcopy
|
||||
from email.message import Message
|
||||
from email.parser import HeaderParser
|
||||
from io import BufferedReader, RawIOBase
|
||||
from json import dumps as json_dumps
|
||||
from json import loads as json_loads
|
||||
from typing import Any, Iterable, Mapping, TypeVar
|
||||
|
||||
from .builder import CustomHeader
|
||||
from .models import Header, Headers
|
||||
from .serializer import decode, encode
|
||||
from .structures import CaseInsensitiveDict
|
||||
from .utils import (
|
||||
class_to_header_name,
|
||||
decode_partials,
|
||||
extract_class_name,
|
||||
extract_encoded_headers,
|
||||
header_content_split,
|
||||
header_name_to_class,
|
||||
is_content_json_object,
|
||||
is_legal_header_name,
|
||||
normalize_str,
|
||||
transform_possible_encoded,
|
||||
)
|
||||
|
||||
T = TypeVar("T", bound=CustomHeader, covariant=True)
|
||||
|
||||
|
||||
def parse_it(raw_headers: Any) -> Headers:
    """
    Just decode anything that could contain headers. That simple PERIOD.
    If passed with a Headers instance, return a deep copy of it.

    :param raw_headers: Accept bytes, str, fp, dict, JSON, email.Message, requests.Response, niquests.Response, urllib3.HTTPResponse and httpx.Response.
    :raises:
        TypeError: If passed argument cannot be parsed to extract headers from it.
    """

    if isinstance(raw_headers, Headers):
        return deepcopy(raw_headers)

    # Raw (name, content) pairs extracted from the input; stays None when the
    # input type is unsupported, which triggers the TypeError below.
    headers: Iterable[tuple[str | bytes, str | bytes]] | None = None

    if isinstance(raw_headers, str):
        # A string shaped like a JSON object is assumed to be our own
        # serialized form (see serializer.encode / decode).
        if raw_headers.startswith("{") and raw_headers.endswith("}"):
            return decode(json_loads(raw_headers))
        headers = HeaderParser().parsestr(raw_headers, headersonly=True).items()
    elif (
        isinstance(raw_headers, bytes)
        or isinstance(raw_headers, RawIOBase)
        or isinstance(raw_headers, BufferedReader)
    ):
        # Keep only the UTF-8 decodable header section and re-parse it as str;
        # the undecodable remainder (message body) is discarded here.
        decoded, not_decoded = extract_encoded_headers(
            raw_headers if isinstance(raw_headers, bytes) else raw_headers.read() or b""
        )
        return parse_it(decoded)
    elif isinstance(raw_headers, Mapping) or isinstance(raw_headers, Message):
        headers = raw_headers.items()
    else:
        # Duck-type third-party response objects by fully qualified class name
        # so none of these libraries become hard dependencies.
        r = extract_class_name(type(raw_headers))

        if r:
            if r in [
                "requests.models.Response",
                "niquests.models.Response",
                "niquests.models.AsyncResponse",
            ]:
                # Read the low-level urllib3 headers so duplicated entries
                # (e.g. several Set-Cookie) are preserved.
                headers = []
                for header_name in raw_headers.raw.headers:
                    for header_content in raw_headers.raw.headers.getlist(header_name):
                        headers.append((header_name, header_content))
            elif r in [
                "httpx._models.Response",
                "urllib3.response.HTTPResponse",
                "urllib3._async.response.AsyncHTTPResponse",
                "urllib3_future.response.HTTPResponse",
                "urllib3_future._async.response.AsyncHTTPResponse",
            ]:  # pragma: no cover
                headers = raw_headers.headers.items()

    if headers is None:
        raise TypeError(  # pragma: no cover
            f"Cannot parse type {type(raw_headers)} as it is not supported by kiss-header."
        )

    # Decode RFC 2047 encoded-word fragments and coerce every pair to (str, str).
    revised_headers: list[tuple[str, str]] = decode_partials(
        transform_possible_encoded(headers)
    )

    # Sometime raw content does not begin with headers. If that is the case, search for the next line.
    if (
        len(revised_headers) == 0
        and len(raw_headers) > 0
        and (isinstance(raw_headers, bytes) or isinstance(raw_headers, str))
    ):
        next_iter = raw_headers.split(
            b"\n" if isinstance(raw_headers, bytes) else "\n",  # type: ignore[arg-type]
            maxsplit=1,
        )

        if len(next_iter) >= 2:
            return parse_it(next_iter[-1])

    # Prepare Header objects
    list_of_headers: list[Header] = []

    for head, content in revised_headers:
        # We should ignore when a illegal name is considered as an header. We avoid ValueError (in __init__ of Header)
        if is_legal_header_name(head) is False:
            continue

        is_json_obj: bool = is_content_json_object(content)
        entries: list[str]

        if is_json_obj is False:
            entries = header_content_split(content, ",")
        else:
            # A JSON payload must stay whole: comas inside it are not separators.
            entries = [content]

        # Multiple entries are detected in one content at the only exception that its not IMAP header "Subject".
        if len(entries) > 1 and normalize_str(head) != "subject":
            for entry in entries:
                list_of_headers.append(Header(head, entry))
        else:
            list_of_headers.append(Header(head, content))

    return Headers(*list_of_headers)
|
||||
|
||||
|
||||
def explain(headers: Headers) -> CaseInsensitiveDict:
    """
    Return a brief explanation of each header present in headers if available.

    Explanations are taken from the docstring of the matching ``CustomHeader``
    subclass and keyed case-insensitively by header name.
    """
    # Header subclasses get registered by importing the public package.
    if not Header.__subclasses__():
        raise LookupError(  # pragma: no cover
            "You cannot use explain() function without properly importing the public package."
        )

    explanations: CaseInsensitiveDict = CaseInsensitiveDict()

    for header in headers:
        # Duplicated headers share a single explanation; keep the first.
        if header.name in explanations:
            continue

        try:
            target_class = header_name_to_class(header.name, Header.__subclasses__()[0])
        except TypeError:
            explanations[header.name] = "Unknown explanation."
            continue

        # Flatten the class docstring to a single line.
        # NOTE(review): the replace(" ", " ") below looks like a collapsed
        # multi-space normalization — confirm against upstream source.
        explanations[header.name] = (
            target_class.__doc__.replace("\n", "").lstrip().replace(" ", " ").rstrip()
            if target_class.__doc__
            else "Missing docstring."
        )

    return explanations
|
||||
|
||||
|
||||
def get_polymorphic(
    target: Headers | Header, desired_output: type[T]
) -> T | list[T] | None:
    """Experimental. Transform a Header or Headers object to its target `CustomHeader` subclass
    to access more ready-to-use methods. eg. You have a Header object named 'Set-Cookie' and you wish
    to extract the expiration date as a datetime.
    >>> header = Header("Set-Cookie", "1P_JAR=2020-03-16-21; expires=Wed, 15-Apr-2020 21:27:31 GMT")
    >>> header["expires"]
    'Wed, 15-Apr-2020 21:27:31 GMT'
    >>> from kiss_headers import SetCookie
    >>> set_cookie = get_polymorphic(header, SetCookie)
    >>> set_cookie.get_expire()
    datetime.datetime(2020, 4, 15, 21, 27, 31, tzinfo=datetime.timezone.utc)

    :raises TypeError: If desired_output is not a Header subclass, if it does
        not match the target header's class, or if target is neither a Header
        nor a Headers.
    """

    if not issubclass(desired_output, Header):
        raise TypeError(
            f"The desired output should be a subclass of Header not {desired_output}."
        )

    desired_output_header_name: str = class_to_header_name(desired_output)

    if isinstance(target, Headers):
        # May yield a single Header or a list of Header when the name is
        # present more than once in target.
        r = target.get(desired_output_header_name)

        if r is None:
            return None

    elif isinstance(target, Header):
        # The concrete header must map to the requested subclass.
        if header_name_to_class(target.name, Header) != desired_output:
            raise TypeError(
                f"The target class does not match the desired output class. {target.__class__} != {desired_output}."
            )
        r = target
    else:
        raise TypeError(f"Unable to apply get_polymorphic on type {target.__class__}.")

    # Change __class__ attribute.
    # In-place downcast: no copy is made, the same object(s) are retyped.
    if not isinstance(r, list):
        r.__class__ = desired_output
    else:
        for header in r:
            header.__class__ = desired_output

    return r  # type: ignore
|
||||
|
||||
|
||||
def dumps(headers: Headers, **kwargs: Any | None) -> str:
    """Serialize *headers* to a JSON string; kwargs are forwarded to json.dumps."""
    serialized = encode(headers)
    return json_dumps(serialized, **kwargs)  # type: ignore
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,64 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .models import Header, Headers
|
||||
|
||||
|
||||
def encode(headers: Headers) -> dict[str, list[dict]]:
    """
    Provide an opinionated but reliable way to encode headers to dict for serialization purposes.
    """
    result: dict[str, list[dict]] = {}

    for header in headers:
        encoded: dict[str, str | None | list[str]] = {}

        for attribute, value in header:
            if attribute not in encoded:
                # First occurrence is stored as-is (str or None).
                encoded[attribute] = value
                continue

            previous = encoded[attribute]

            if isinstance(previous, list) is False:
                # Second occurrence: promote the scalar to a list.
                previous = [previous]  # type: ignore
                encoded[attribute] = previous

            previous.append(value)  # type: ignore

        result.setdefault(header.name, []).append(encoded)

    return result
|
||||
|
||||
|
||||
def decode(encoded_headers: dict[str, list[dict]]) -> Headers:
    """
    Decode any previously encoded headers to a Headers object.

    Expects the layout produced by encode(): header name mapped to a list of
    attribute dicts, where each value is a str, None (valueless member) or a
    list of str (repeated member).

    :raises ValueError: If the given structure does not match that layout.
    """
    headers: Headers = Headers()

    for header_name, encoded_header_list in encoded_headers.items():
        if not isinstance(encoded_header_list, list):
            raise ValueError("Decode require first level values to be List")

        for encoded_header in encoded_header_list:
            if not isinstance(encoded_header, dict):
                raise ValueError("Decode require each list element to be Dict")

            # Start from an empty content and rebuild it member by member.
            header = Header(header_name, "")

            for attr, value in encoded_header.items():
                # None encodes a valueless member (e.g. "HttpOnly").
                if value is None:
                    header += attr
                    continue
                if isinstance(value, str):
                    header[attr] = value
                    continue

                # A list encodes a repeated attribute; append each occurrence.
                for sub_value in value:
                    header.insert(-1, **{attr: sub_value})

            headers += header

    return headers
|
||||
@@ -0,0 +1,95 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import OrderedDict
|
||||
from collections.abc import Mapping, MutableMapping
|
||||
from typing import (
|
||||
Any,
|
||||
Iterator,
|
||||
List,
|
||||
Optional,
|
||||
Tuple,
|
||||
)
|
||||
from typing import (
|
||||
MutableMapping as MutableMappingType,
|
||||
)
|
||||
|
||||
from .utils import normalize_str
|
||||
|
||||
"""
|
||||
Disclaimer : CaseInsensitiveDict has been borrowed from `psf/requests`.
|
||||
Minors changes has been made.
|
||||
"""
|
||||
|
||||
|
||||
class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data: Mapping | None = None, **kwargs: Any):
        # Maps normalized key (lowercased, '-' -> '_') to (actual key, value).
        self._store: OrderedDict = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key: str, value: Any) -> None:
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[normalize_str(key)] = (key, value)

    def __getitem__(self, key: str) -> Any:
        return self._store[normalize_str(key)][1]

    def __delitem__(self, key: str) -> None:
        del self._store[normalize_str(key)]

    def __iter__(self) -> Iterator[str]:
        # Yields the originally-cased keys only; values are discarded here.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self) -> int:
        return len(self._store)

    def lower_items(self) -> Iterator[tuple[str, Any]]:
        """Like iteritems(), but with all lowercase keys."""
        return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())

    def __eq__(self, other: object) -> bool:
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self) -> CaseInsensitiveDict:
        return CaseInsensitiveDict(dict(self._store.values()))

    def __repr__(self) -> str:
        return str(dict(self.items()))
|
||||
|
||||
|
||||
AttributeDescription = Tuple[List[Optional[str]], List[int]]
|
||||
AttributeBag = MutableMappingType[str, AttributeDescription]
|
||||
@@ -0,0 +1,487 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from email.header import decode_header
|
||||
from json import dumps
|
||||
from re import findall, search, sub
|
||||
from typing import Any, Iterable
|
||||
|
||||
# Python reserved keywords in their underscore-suffixed form, as they appear
# when used as attribute/property names; unpack_protected_keyword() strips
# the trailing underscore when a name is found in this set.
RESERVED_KEYWORD: set[str] = {
    "and_",
    "assert_",
    "in_",
    "not_",
    "pass_",
    "finally_",
    "while_",
    "yield_",
    "is_",
    "as_",
    "break_",
    "return_",
    "elif_",
    "except_",
    "def_",
    "from_",
    "for_",
}
|
||||
|
||||
|
||||
def normalize_str(string: str) -> str:
    """
    Normalize a string by applying on it lowercase and replacing '-' to '_'.
    >>> normalize_str("Content-Type")
    'content_type'
    >>> normalize_str("X-content-type")
    'x_content_type'
    """
    return string.replace("-", "_").lower()
|
||||
|
||||
|
||||
def normalize_list(strings: list[str]) -> list[str]:
    """Apply normalize_str to every element of *strings* and return the result."""
    return [normalize_str(entry) for entry in strings]
|
||||
|
||||
|
||||
def unpack_protected_keyword(name: str) -> str:
    """
    By choice, this project aims to allow developers to access header or attribute in header by using the property
    notation. Some keywords are protected by the language itself. So :
    When starting by a number, prepend an underscore to it. When using a protected keyword, append an underscore to it.
    This function reverses that packing.
    >>> unpack_protected_keyword("_3to1")
    '3to1'
    >>> unpack_protected_keyword("from_")
    'from'
    >>> unpack_protected_keyword("_from")
    '_from'
    >>> unpack_protected_keyword("3")
    '3'
    >>> unpack_protected_keyword("FroM_")
    'FroM_'
    """
    if len(name) < 2:
        return name

    unpacked = name

    # "_<digit>..." was packed from a name that starts with a digit.
    if unpacked[0] == "_" and unpacked[1].isdigit():
        unpacked = unpacked[1:]

    # "keyword_" was packed from a reserved keyword (case-sensitive match).
    if unpacked in RESERVED_KEYWORD:
        unpacked = unpacked[:-1]

    return unpacked
|
||||
|
||||
|
||||
def extract_class_name(type_: type) -> str | None:
    """
    Typically extract a class name from a Type.
    Returns the dotted path found in the type's repr, or None when it
    does not look like ``<class '...'>``.
    """
    matches = findall(r"<class '([a-zA-Z0-9._]+)'>", str(type_))

    if not matches:
        return None

    return matches[0]
|
||||
|
||||
|
||||
def header_content_split(string: str, delimiter: str) -> list[str]:
    """
    Take a string and split it according to the passed delimiter.
    It will ignore delimiter if inside between double quote, inside a value, or in parenthesis.
    The input string is considered perfectly formed. This function does not split coma on a day
    when attached, see "RFC 7231, section 7.1.1.2: Date".
    >>> header_content_split("Wed, 15-Apr-2020 21:27:31 GMT, Fri, 01-Jan-2038 00:00:00 GMT", ",")
    ['Wed, 15-Apr-2020 21:27:31 GMT', 'Fri, 01-Jan-2038 00:00:00 GMT']
    >>> header_content_split('quic=":443"; ma=2592000; v="46,43", h3-Q050=":443"; ma=2592000, h3-Q049=":443"; ma=2592000', ",")
    ['quic=":443"; ma=2592000; v="46,43"', 'h3-Q050=":443"; ma=2592000', 'h3-Q049=":443"; ma=2592000']
    >>> header_content_split("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:50.0) Gecko/20100101 Firefox/50.0", ";")
    ['Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:50.0) Gecko/20100101 Firefox/50.0']
    >>> header_content_split("text/html; charset=UTF-8", ";")
    ['text/html', 'charset=UTF-8']
    >>> header_content_split('text/html; charset="UTF-\\\"8"', ";")
    ['text/html', 'charset="UTF-"8"']

    :raises ValueError: If delimiter is not one of ';', ',' or ' '.
    """
    if len(delimiter) != 1 or delimiter not in {";", ",", " "}:
        raise ValueError("Delimiter should be either semi-colon, a coma or a space.")

    # Scanner state for the single left-to-right pass.
    in_double_quote: bool = False
    in_parenthesis: bool = False
    in_value: bool = False  # inside the right-hand side of a key=value member
    is_on_a_day: bool = False  # the 3 chars before index spell a weekday abbrev

    result: list[str] = [""]

    for letter, index in zip(string, range(0, len(string))):
        if letter == '"':
            in_double_quote = not in_double_quote

            # A closing quote also terminates a quoted value.
            if in_value and not in_double_quote:
                in_value = False

        elif letter == "(" and not in_parenthesis:
            in_parenthesis = True
        elif letter == ")" and in_parenthesis:
            in_parenthesis = False
        else:
            # A coma right after "Mon".."Sun" belongs to a date, not a split point.
            is_on_a_day = index >= 3 and string[index - 3 : index] in {
                "Mon",
                "Tue",
                "Wed",
                "Thu",
                "Fri",
                "Sat",
                "Sun",
            }

        if not in_double_quote:
            if not in_value and letter == "=":
                in_value = True
            elif letter == ";" and in_value:
                in_value = False

            # The delimiter itself ends an unquoted value (unless part of a date).
            if in_value and letter == delimiter and not is_on_a_day:
                in_value = False

            if letter == delimiter and (
                (in_value or in_double_quote or in_parenthesis or is_on_a_day) is False
            ):
                # Split point: trim the finished chunk and start a new one.
                result[-1] = result[-1].lstrip().rstrip()
                result.append("")

                continue

        result[-1] += letter

    if result:
        result[-1] = result[-1].lstrip().rstrip()

    return result
|
||||
|
||||
|
||||
def class_to_header_name(type_: type) -> str:
    """
    Take a type and infer its header name.
    A class may declare ``__override__`` to force a specific header name;
    otherwise the name is derived from the CamelCase class name.
    >>> from kiss_headers.builder import ContentType, XContentTypeOptions, BasicAuthorization
    >>> class_to_header_name(ContentType)
    'Content-Type'
    >>> class_to_header_name(XContentTypeOptions)
    'X-Content-Type-Options'
    >>> class_to_header_name(BasicAuthorization)
    'Authorization'
    """
    override = getattr(type_, "__override__", None)

    if override is not None:
        return override

    # Last dotted component of the repr, e.g. "<class 'pkg.mod.ContentType'>".
    raw_name: str = str(type_).split("'")[-2].split(".")[-1]

    # Strip at most one packing underscore at each end (see unpack rules).
    if raw_name.endswith("_"):
        raw_name = raw_name[:-1]

    if raw_name.startswith("_"):
        raw_name = raw_name[1:]

    pieces: list[str] = []

    for char in raw_name:
        # Every uppercase letter after the first char starts a new dashed word.
        if char.isupper() and pieces:
            pieces.append("-")
        pieces.append(char)

    return "".join(pieces)
|
||||
|
||||
|
||||
def header_name_to_class(name: str, root_type: type) -> type:
    """
    The opposite of class_to_header_name function. Will raise TypeError if no corresponding entry is found.
    Do it recursively from the root type.
    >>> from kiss_headers.builder import CustomHeader, ContentType, XContentTypeOptions, LastModified, Date
    >>> header_name_to_class("Content-Type", CustomHeader)
    <class 'kiss_headers.builder.ContentType'>
    >>> header_name_to_class("Last-Modified", CustomHeader)
    <class 'kiss_headers.builder.LastModified'>

    :raises TypeError: When no subclass of root_type matches the header name.
    """

    # Compare on a fully flattened form: lowercase, no dash or underscore.
    normalized_name = normalize_str(name).replace("_", "")

    for subclass in root_type.__subclasses__():
        class_name = extract_class_name(subclass)

        if class_name is None:
            continue

        # Classes carrying an explicit __override__ do not map by their
        # own class name (e.g. BasicAuthorization -> "Authorization").
        if (
            not (
                hasattr(subclass, "__override__") and subclass.__override__ is not None
            )
            and normalize_str(class_name.split(".")[-1]) == normalized_name
        ):
            return subclass

        # Depth-first descent into deeper subclasses; a failed branch is
        # skipped and the search continues with the next sibling.
        if subclass.__subclasses__():
            try:
                return header_name_to_class(name, subclass)
            except TypeError:
                continue

    raise TypeError(f"Cannot find a class matching header named '{name}'.")
|
||||
|
||||
|
||||
def prettify_header_name(name: str) -> str:
    """
    Take a header name and prettify it.
    >>> prettify_header_name("x-hEllo-wORLD")
    'X-Hello-World'
    >>> prettify_header_name("server")
    'Server'
    >>> prettify_header_name("contEnt-TYPE")
    'Content-Type'
    >>> prettify_header_name("content_type")
    'Content-Type'
    """
    dashed = name.replace("_", "-")
    return "-".join(word.capitalize() for word in dashed.split("-"))
|
||||
|
||||
|
||||
def decode_partials(items: Iterable[tuple[str, Any]]) -> list[tuple[str, str]]:
    """
    This function takes a list of tuples, representing headers by key, value. Where value is bytes or string containing
    (RFC 2047 encoded) partials fragments like the following :
    >>> decode_partials([("Subject", "=?iso-8859-1?q?p=F6stal?=")])
    [('Subject', 'pöstal')]
    """
    revised: list[tuple[str, str]] = []

    for name, raw_content in items:
        # decode_header yields (fragment, charset) pairs; fragments may be
        # already-decoded str or raw bytes with their declared charset.
        fragments: list[str] = []

        for fragment, fragment_charset in decode_header(raw_content):
            if isinstance(fragment, str):
                fragments.append(fragment)
            if isinstance(fragment, bytes):
                # Fall back to utf-8 and drop undecodable bytes.
                fragments.append(
                    fragment.decode(
                        fragment_charset if fragment_charset is not None else "utf-8",
                        errors="ignore",
                    )
                )

        revised.append((name, "".join(fragments)))

    return revised
|
||||
|
||||
|
||||
def unquote(string: str) -> str:
    """
    Remove simple quote or double quote around a string if any.

    Only a matching pair of quotes around a string of length >= 2 is removed;
    a lone quote character or an unmatched quote is returned unchanged.

    >>> unquote('"hello"')
    'hello'
    >>> unquote('"hello')
    '"hello'
    >>> unquote('"a"')
    'a'
    >>> unquote('""')
    ''
    >>> unquote("'")
    "'"
    """
    # Bugfix: the previous condition mixed `and`/`or` without parentheses, so
    # the len(string) >= 2 guard did not apply to the single-quote branch and
    # unquote("'") wrongly returned ''.
    if len(string) >= 2 and (
        (string.startswith('"') and string.endswith('"'))
        or (string.startswith("'") and string.endswith("'"))
    ):
        return string[1:-1]

    return string
|
||||
|
||||
|
||||
def quote(string: str) -> str:
    """
    Surround string by a double quote char.
    Existing surrounding quotes are removed first, so the result is never
    doubly quoted.
    >>> quote("hello")
    '"hello"'
    >>> quote('"hello')
    '""hello"'
    >>> quote('"hello"')
    '"hello"'
    """
    return f'"{unquote(string)}"'
|
||||
|
||||
|
||||
def count_leftover_space(content: str) -> int:
    """
    Count trailing white space at the end of the given string.
    >>> count_leftover_space("hello   ")
    3
    >>> count_leftover_space("byebye ")
    1
    >>> count_leftover_space(" hello ")
    1
    >>> count_leftover_space("  hello    ")
    4
    """
    # Equivalent to the recursive definition: the difference in length once
    # trailing spaces are stripped is exactly their count.
    return len(content) - len(content.rstrip(" "))
|
||||
|
||||
|
||||
def header_strip(content: str, elem: str) -> str:
    """
    Remove a member for a given header content and take care of the unneeded leftover semi-colon.
    >>> header_strip("text/html; charset=UTF-8; format=flowed", "charset=UTF-8")
    'text/html; format=flowed'
    """
    next_semi_colon_index: int | None = None

    try:
        elem_index: int = content.index(elem)
    except ValueError:
        # If the target element in not found within the content, just return the unmodified content.
        return content

    elem_end_index: int = elem_index + len(elem)

    # Re-attach the spaces that preceded the member so they vanish with it.
    elem = (" " * count_leftover_space(content[:elem_index])) + elem

    try:
        next_semi_colon_index = elem_end_index + content[elem_end_index:].index(";")
    except ValueError:
        # The member is the last one: no trailing semi-colon to swallow.
        pass

    # Remove the member plus everything up to (and including) the next
    # semi-colon, then trim surrounding spaces.
    content = (
        content.replace(
            elem
            + (
                content[elem_end_index:next_semi_colon_index] + ";"
                if next_semi_colon_index is not None
                else ""
            ),
            "",
        )
        .rstrip(" ")
        .lstrip(" ")
    )

    # Drop a leftover separator at either end after the removal.
    if content.startswith(";"):
        content = content[1:]

    if content.endswith(";"):
        content = content[:-1]

    return content
|
||||
|
||||
|
||||
def is_legal_header_name(name: str) -> bool:
    """
    Verify if a provided header name is valid.
    A legal name is non-empty, contains only visible ASCII (0x21-0x7E) and
    none of the separator characters forbidden in HTTP field names.
    >>> is_legal_header_name(":hello")
    False
    >>> is_legal_header_name("hello")
    True
    >>> is_legal_header_name("Content-Type")
    True
    >>> is_legal_header_name("Hello;")
    False
    """
    if name == "":
        return False

    # One regex pass: anything outside printable ASCII, or any forbidden
    # separator character, makes the name illegal.
    return search(r"[^\x21-\x7F]|[:;(),<>=@?\[\]\r\n\t &{}\"\\]", name) is None
|
||||
|
||||
|
||||
def extract_comments(content: str) -> list[str]:
    """
    Extract parts of content that are considered as comments. Between parenthesis.
    >>> extract_comments("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:50.0) Gecko/20100101 Firefox/50.0 (hello) llll (abc)")
    ['Macintosh; Intel Mac OS X 10.9; rv:50.0', 'hello', 'abc']
    """
    comment_pattern = r"\(([^)]+)\)"
    return findall(comment_pattern, content)
|
||||
|
||||
|
||||
def unfold(content: str) -> str:
    r"""Some header content may have folded content (CRLF + n spaces) in it, making your job at reading them a little more difficult.
    This function undoes the folding in the given content.
    >>> unfold("___utmvbtouVBFmB=gZg\r\n XbNOjalT: Lte; path=/; Max-Age=900")
    '___utmvbtouVBFmB=gZg XbNOjalT: Lte; path=/; Max-Age=900'
    """
    folding_marker = r"\r\n[ ]+"
    return sub(folding_marker, " ", content)
|
||||
|
||||
|
||||
def extract_encoded_headers(payload: bytes) -> tuple[str, bytes]:
    """This function's purpose is to extract lines that can be decoded using the UTF-8 decoder.
    Returns (decoded header section ending with CRLF, remaining raw bytes).
    >>> extract_encoded_headers("Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n\\r\\n".encode("utf-8"))
    ('Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n', b'')
    >>> extract_encoded_headers("Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n\\r\\nThat IS totally random.".encode("utf-8"))
    ('Host: developer.mozilla.org\\r\\nX-Hello-World: 死の漢字\\r\\n', b'That IS totally random.')
    """
    result: str = ""
    lines: list[bytes] = payload.splitlines()
    index: int = 0

    for line, index in zip(lines, range(0, len(lines))):
        # A blank line terminates the header section; the rest is the body.
        if line == b"":
            return result, b"\r\n".join(lines[index + 1 :])

        try:
            # Lines are re-joined with CRLF regardless of the original ending.
            result += line.decode("utf-8") + "\r\n"
        except UnicodeDecodeError:
            # NOTE(review): on a decode failure, the offending line itself is
            # excluded from both return values by the index + 1 below —
            # confirm this is the intended behavior.
            break

    return result, b"\r\n".join(lines[index + 1 :])
|
||||
|
||||
|
||||
def unescape_double_quote(content: str) -> str:
    r"""
    Replace escaped double quote in content by removing the backslash.
    >>> unescape_double_quote(r'UTF\"-8')
    'UTF"-8'
    >>> unescape_double_quote(r'UTF"-8')
    'UTF"-8'
    """
    return content.replace('\\"', '"')
|
||||
|
||||
|
||||
def escape_double_quote(content: str) -> str:
    r"""
    Replace not escaped double quote in content by adding a backslash beforehand.
    Already-escaped quotes are normalized first so none end up double-escaped.
    >>> escape_double_quote(r'UTF\"-8')
    'UTF\\"-8'
    >>> escape_double_quote(r'UTF"-8')
    'UTF\\"-8'
    """
    normalized = unescape_double_quote(content)
    return normalized.replace('"', '\\"')
|
||||
|
||||
|
||||
def is_content_json_object(content: str) -> bool:
    """
    Sometime, you may receive a header that hold a JSON list or object.
    This function detect it.
    """
    stripped = content.strip()

    looks_like_object = stripped.startswith("{") and stripped.endswith("}")
    looks_like_array = stripped.startswith("[") and stripped.endswith("]")

    return looks_like_object or looks_like_array
|
||||
|
||||
|
||||
def transform_possible_encoded(
    headers: Iterable[tuple[str | bytes, str | bytes]],
) -> Iterable[tuple[str, str]]:
    """Coerce every (name, value) pair to (str, str); pairs with a None value are dropped."""
    normalized: list[tuple[str, str]] = []

    for name, value in headers:
        # we shall discard it if set to None.
        if value is None:
            continue

        if isinstance(name, bytes):
            name = name.decode("utf_8")

        if isinstance(value, bytes):
            value = value.decode("utf_8")
        elif not isinstance(value, str):
            # JSON-serialize containers; fall back to str() for anything else.
            value = dumps(value) if isinstance(value, (dict, list)) else str(value)

        normalized.append((name, value))

    return normalized
|
||||
@@ -0,0 +1,8 @@
|
||||
"""
|
||||
Expose version
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
__version__ = "2.5.0"
|
||||
VERSION = __version__.split(".")
|
||||
Reference in New Issue
Block a user