fix: 포트 충돌 회피 — note_bridge 8098, intent_service 8099

Jellyfin(8096), OrbStack(8097) 포트 충돌로 변경.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Hyungi Ahn
2026-03-19 13:53:55 +09:00
parent dc08d29509
commit c2257d3a86
2709 changed files with 619549 additions and 10 deletions

View File

@@ -0,0 +1,39 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from ._configuration import QuicTLSConfig
from .protocols import (
HTTP1Protocol,
HTTP2Protocol,
HTTP3Protocol,
HTTPOverQUICProtocol,
HTTPOverTCPProtocol,
HTTPProtocol,
HTTPProtocolFactory,
)
# Public API of this package: the QUIC TLS configuration plus the protocol
# interfaces re-exported from the ``.protocols`` subpackage.
__all__ = (
    "QuicTLSConfig",
    "HTTP1Protocol",
    "HTTP2Protocol",
    "HTTP3Protocol",
    "HTTPOverQUICProtocol",
    "HTTPOverTCPProtocol",
    "HTTPProtocol",
    "HTTPProtocolFactory",
)

View File

@@ -0,0 +1,59 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import dataclasses
from typing import Any, Mapping
@dataclasses.dataclass
class QuicTLSConfig:
    """
    Client TLS configuration.
    """

    #: Allows to proceed for server without valid TLS certificates.
    insecure: bool = False
    #: File with CA certificates to trust for server verification
    cafile: str | None = None
    #: Directory with CA certificates to trust for server verification
    capath: str | None = None
    #: Blob with CA certificates to trust for server verification
    cadata: bytes | None = None
    #: If provided, will trigger an additional load_cert_chain() upon the QUIC Configuration
    certfile: str | bytes | None = None
    #: Private key matching ``certfile`` (path or raw bytes).
    keyfile: str | bytes | None = None
    #: Password to decrypt ``keyfile`` when it is encrypted.
    keypassword: str | bytes | None = None
    #: The QUIC session ticket which should be used for session resumption
    session_ticket: Any | None = None
    #: NOTE(review): presumably pins the server certificate to a digest
    #: instead of full chain verification — digest format is enforced by the
    #: consumer of this config, confirm before relying on it.
    cert_fingerprint: str | None = None
    #: Fall back to matching the certificate subject common name —
    #: presumably for certificates lacking subjectAltName; verify against
    #: the TLS backend in use.
    cert_use_common_name: bool = False
    #: Whether the peer hostname must be checked against the certificate.
    verify_hostname: bool = True
    #: Hostname override used during verification when it differs from the
    #: one dialed (similar in spirit to urllib3's ``assert_hostname``).
    assert_hostname: str | None = None
    #: Optional cipher restriction; entries are mappings whose schema is
    #: defined by the QUIC backend — TODO confirm expected keys.
    ciphers: list[Mapping[str, Any]] | None = None
    #: Idle timeout in seconds before the connection may be dropped.
    idle_timeout: float = 300.0

View File

@@ -0,0 +1,151 @@
from __future__ import annotations
import typing
from collections import deque
from .events import Event
class StreamMatrix:
    """Efficient way to store events for concurrent streams."""

    __slots__ = (
        "_matrix",
        "_count",
        "_event_cursor_id",
    )

    def __init__(self) -> None:
        # One deque of pending events per stream id. The ``None`` key holds
        # connection-level events that are not tied to a single stream.
        self._matrix: dict[int | None, deque[Event]] = {}
        # Total number of queued events across every deque.
        self._count: int = 0
        # Monotonic counter stamped onto each event (``Event._id``) so global
        # arrival order can be compared across deques.
        self._event_cursor_id: int = 0

    def __len__(self) -> int:
        # Number of events currently stored, across all streams.
        return self._count

    def __bool__(self) -> bool:
        # Truthy while at least one event is queued.
        return self._count > 0

    @property
    def streams(self) -> list[int]:
        # Sorted ids of streams that currently have queued events
        # (the ``None`` connection-level bucket is excluded).
        return sorted(i for i in self._matrix.keys() if i is not None)

    def append(self, event: Event) -> None:
        # Queue ``event`` at the tail of its stream bucket. Events without a
        # ``stream_id`` attribute go into the connection-level bucket.
        matrix_idx = getattr(event, "stream_id", None)

        # Stamp arrival order so popleft() can interleave buckets.
        event._id = self._event_cursor_id
        self._event_cursor_id += 1

        if matrix_idx not in self._matrix:
            self._matrix[matrix_idx] = deque()

        self._matrix[matrix_idx].append(event)
        self._count += 1

    def extend(self, events: typing.Iterable[Event]) -> None:
        # Bulk variant of append(): stamp and triage everything first, then
        # extend each target deque once (fewer dict lookups overall).
        triaged_events: dict[int | None, list[Event]] = {}

        for event in events:
            matrix_idx = getattr(event, "stream_id", None)

            event._id = self._event_cursor_id
            self._event_cursor_id += 1
            self._count += 1

            if matrix_idx not in triaged_events:
                triaged_events[matrix_idx] = []

            triaged_events[matrix_idx].append(event)

        for k, v in triaged_events.items():
            if k not in self._matrix:
                self._matrix[k] = deque()
            self._matrix[k].extend(v)

    def appendleft(self, event: Event) -> None:
        # Requeue ``event`` at the head of its bucket (used to put back an
        # event that was popped but could not be consumed).
        # NOTE(review): the event is re-stamped with a brand new (higher)
        # ``_id``, so its global ordering relative to the ``None`` bucket is
        # not restored — confirm this is intended.
        matrix_idx = getattr(event, "stream_id", None)

        event._id = self._event_cursor_id
        self._event_cursor_id += 1

        if matrix_idx not in self._matrix:
            self._matrix[matrix_idx] = deque()

        self._matrix[matrix_idx].appendleft(event)
        self._count += 1

    def popleft(self, stream_id: int | None = None) -> Event | None:
        # Pop the next event. With ``stream_id=None`` the caller accepts any
        # event: the first stream bucket (dict insertion order) is chosen,
        # but a pending connection-level event wins if it arrived earlier
        # (compared via ``_id``). With an explicit ``stream_id``, a
        # connection-level event may still be returned instead, because it
        # concerns every stream.
        if self._count == 0:
            return None

        # True when the connection-level bucket exists and is non-empty.
        have_global_event: bool = None in self._matrix and bool(self._matrix[None])
        # True when at least one per-stream bucket exists.
        any_stream_event: bool = (
            bool(self._matrix) if not have_global_event else len(self._matrix) > 1
        )

        if stream_id is None and any_stream_event:
            # No preference given: take the first stream bucket, skipping
            # over the connection-level key if it comes first.
            matrix_dict_iter = self._matrix.__iter__()
            stream_id = next(matrix_dict_iter)

            if stream_id is None:
                stream_id = next(matrix_dict_iter)

        if (
            stream_id is not None
            and have_global_event
            and stream_id in self._matrix
            and self._matrix[None][0]._id < self._matrix[stream_id][0]._id
        ):
            # The connection-level event predates the stream's next event.
            stream_id = None
        elif have_global_event is True and stream_id not in self._matrix:
            # Requested stream has nothing queued; fall back to the
            # connection-level bucket.
            stream_id = None

        if stream_id not in self._matrix:
            return None

        ev = self._matrix[stream_id].popleft()

        if ev is not None:
            self._count -= 1
            # Drop emptied per-stream buckets so iteration stays cheap; the
            # ``None`` bucket is deliberately kept alive.
            if stream_id is not None and not self._matrix[stream_id]:
                del self._matrix[stream_id]

        return ev

    def count(
        self,
        stream_id: int | None = None,
        excl_event: tuple[type[Event], ...] | None = None,
    ) -> int:
        # Number of queued events, optionally restricted to one stream and
        # optionally ignoring the given event types.
        if stream_id is None:
            return self._count
        if stream_id not in self._matrix:
            return 0
        return len(
            self._matrix[stream_id]
            if excl_event is None
            else [e for e in self._matrix[stream_id] if not isinstance(e, excl_event)]
        )

    def has(
        self,
        stream_id: int | None = None,
        excl_event: tuple[type[Event], ...] | None = None,
    ) -> bool:
        # Fast check for pending events.
        # NOTE(review): when ``stream_id`` is None the ``excl_event`` filter
        # is ignored and any queued event counts — confirm this asymmetry
        # with count() is intended.
        if stream_id is None:
            return True if self._count else False
        if stream_id not in self._matrix:
            return False
        if excl_event is not None:
            return any(
                e for e in self._matrix[stream_id] if not isinstance(e, excl_event)
            )
        return True if self._matrix[stream_id] else False

View File

@@ -0,0 +1,25 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Sequence, Tuple
# A single HTTP header as raw bytes: (name, value).
HeaderType = Tuple[bytes, bytes]
# An ordered collection of headers.
HeadersType = Sequence[HeaderType]
# A network peer address: (host, port).
AddressType = Tuple[str, int]
# A UDP datagram payload together with its peer address.
DatagramType = Tuple[bytes, AddressType]

View File

@@ -0,0 +1,43 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from ._events import (
ConnectionTerminated,
DataReceived,
EarlyHeadersReceived,
Event,
GoawayReceived,
HandshakeCompleted,
HeadersReceived,
StreamEvent,
StreamReset,
StreamResetReceived,
)
# Public event types re-exported from the private ``._events`` module.
__all__ = (
    "Event",
    "ConnectionTerminated",
    "GoawayReceived",
    "StreamEvent",
    "StreamReset",
    "StreamResetReceived",
    "HeadersReceived",
    "DataReceived",
    "HandshakeCompleted",
    "EarlyHeadersReceived",
)

View File

@@ -0,0 +1,202 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import typing
from dataclasses import dataclass, field
from .._typing import HeadersType
class Event:
    """
    Base class for HTTP events.

    This is an abstract base class that should not be initialized.
    """

    #: Monotonically increasing id stamped by the event queue (StreamMatrix)
    #: to preserve global arrival order across streams.
    _id: int
#
# Connection events
#
@dataclass
class ConnectionTerminated(Event):
    """
    Connection was terminated.

    Extends :class:`.Event`.
    """

    #: Reason for closing the connection.
    error_code: int = 0
    #: Optional message with more information
    message: str | None = field(default=None, compare=False)

    def __repr__(self) -> str:  # Defensive: debug purposes only
        return "{}(error_code={!r}, message={!r})".format(
            type(self).__name__, self.error_code, self.message
        )
@dataclass
class GoawayReceived(Event):
    """
    GOAWAY frame was received

    Extends :class:`.Event`.
    """

    #: Highest stream ID that could be processed.
    last_stream_id: int
    #: Reason for closing the connection.
    error_code: int = 0

    def __repr__(self) -> str:  # Defensive: debug purposes only
        return "%s(last_stream_id=%r, error_code=%r)" % (
            type(self).__name__,
            self.last_stream_id,
            self.error_code,
        )
#
# Stream events
#
@dataclass
class StreamEvent(Event):
    """
    Event on one HTTP stream.

    This is an abstract base class that should not be used directly.

    Extends :class:`.Event`.
    """

    #: Stream ID
    stream_id: int
@dataclass
class StreamReset(StreamEvent):
    """
    One stream of an HTTP connection was reset.

    When a stream is reset, it must no longer be used, but the parent
    connection and other streams are unaffected.

    This is an abstract base class that should not be used directly.
    More specific subclasses (StreamResetSent or StreamResetReceived)
    should be emitted.

    Extends :class:`.StreamEvent`.
    """

    #: Reason for closing the stream.
    error_code: int = 0
    #: Whether the reset terminates the stream (defaults to True).
    end_stream: bool = True

    def __repr__(self) -> str:  # Defensive: debug purposes only
        return "{}(stream_id={!r}, error_code={!r})".format(
            type(self).__name__, self.stream_id, self.error_code
        )
@dataclass
class StreamResetReceived(StreamReset):
    """
    One stream of an HTTP connection was reset by the peer.

    This probably means that we did something that the peer does not like.

    Extends :class:`.StreamReset`.
    """
@dataclass
class HandshakeCompleted(Event):
    """The transport handshake finished; the negotiated ALPN is known."""

    #: ALPN protocol negotiated during the handshake, or None.
    alpn_protocol: str | None

    def __repr__(self) -> str:  # Defensive: debug purposes only
        return "{}(alpn={})".format(type(self).__name__, self.alpn_protocol)
@dataclass
class HeadersReceived(StreamEvent):
    """
    A frame with HTTP headers was received.

    Extends :class:`.StreamEvent`.
    """

    #: The received HTTP headers
    headers: HeadersType
    #: Signals that data will not be sent by the peer over the stream.
    end_stream: bool = False

    def __repr__(self) -> str:  # Defensive: debug purposes only
        return "{}(stream_id={!r}, len(headers)={}, end_stream={!r})".format(
            type(self).__name__, self.stream_id, len(self.headers), self.end_stream
        )
@dataclass
class DataReceived(StreamEvent):
    """
    A frame with HTTP data was received.

    Extends :class:`.StreamEvent`.
    """

    #: The received data.
    data: bytes
    #: Signals that no more data will be sent by the peer over the stream.
    end_stream: bool = False

    def __repr__(self) -> str:  # Defensive: debug purposes only
        return "{}(stream_id={!r}, len(data)={}, end_stream={!r})".format(
            type(self).__name__, self.stream_id, len(self.data), self.end_stream
        )
@dataclass
class EarlyHeadersReceived(StreamEvent):
    """
    Early (informational) HTTP headers were received; the stream stays open
    and the final headers are still to come.

    Extends :class:`.StreamEvent`.
    """

    #: The received HTTP headers
    headers: HeadersType

    def __repr__(self) -> str:  # Defensive: debug purposes only
        return "{}(stream_id={!r}, len(headers)={}, end_stream=False)".format(
            type(self).__name__, self.stream_id, len(self.headers)
        )

    @property
    def end_stream(self) -> typing.Literal[False]:
        # Early headers can never terminate the stream.
        return False

View File

@@ -0,0 +1,37 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from ._factories import HTTPProtocolFactory
from ._protocols import (
HTTP1Protocol,
HTTP2Protocol,
HTTP3Protocol,
HTTPOverQUICProtocol,
HTTPOverTCPProtocol,
HTTPProtocol,
)
# Public protocol interfaces and the factory, re-exported from the private
# ``._factories`` and ``._protocols`` modules.
__all__ = (
    "HTTP1Protocol",
    "HTTP2Protocol",
    "HTTP3Protocol",
    "HTTPOverQUICProtocol",
    "HTTPOverTCPProtocol",
    "HTTPProtocol",
    "HTTPProtocolFactory",
)

View File

@@ -0,0 +1,90 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
HTTP factories create HTTP protools based on defined set of arguments.
We define the :class:`HTTPProtocol` interface to allow interchange
HTTP versions and protocol implementations. But constructors of
the class is not part of the interface. Every implementation
can use a different options to init instances.
Factories unify access to the creation of the protocol instances,
so that clients and servers can swap protocol implementations,
delegating the initialization to factories.
"""
from __future__ import annotations
import importlib
import inspect
from abc import ABCMeta
from typing import Any
from ._protocols import HTTPOverQUICProtocol, HTTPOverTCPProtocol, HTTPProtocol
class HTTPProtocolFactory(metaclass=ABCMeta):
    """Instantiate a concrete protocol state-machine for a given HTTP version."""

    @staticmethod
    def new(
        type_protocol: type[HTTPProtocol],
        implementation: str | None = None,
        **kwargs: Any,
    ) -> HTTPOverQUICProtocol | HTTPOverTCPProtocol:
        """Create a new state-machine that target given protocol type."""
        # NOTE(review): ``assert`` is stripped under ``python -O``; an
        # explicit raise would make this argument check unconditional.
        assert type_protocol != HTTPProtocol, (
            "HTTPProtocol is ambiguous and cannot be requested in the factory."
        )

        # Root package name — anchors the relative import below and is
        # stripped from the class repr so its own digits cannot leak into
        # the version extraction.
        package_name: str = __name__.split(".")[0]

        # Extract the HTTP major version digit(s) from the class repr,
        # e.g. "<class '....HTTP3Protocol'>" -> "3".
        version_target: str = "".join(
            c for c in str(type_protocol).replace(package_name, "") if c.isdigit()
        )

        module_expr: str = f".protocols.http{version_target}"

        # An explicit implementation selects a private submodule, e.g. "_h11".
        if implementation:
            module_expr += f"._{implementation.lower()}"

        try:
            http_module = importlib.import_module(
                module_expr, f"{package_name}.contrib.hface"
            )
        except ImportError as e:
            raise NotImplementedError(
                f"{type_protocol} cannot be loaded. Tried to import '{module_expr}'."
            ) from e

        # Collect every concrete protocol class exposed by that module.
        implementations: list[
            tuple[str, type[HTTPOverQUICProtocol | HTTPOverTCPProtocol]]
        ] = inspect.getmembers(
            http_module,
            lambda e: isinstance(e, type)
            and issubclass(e, (HTTPOverQUICProtocol, HTTPOverTCPProtocol)),
        )

        if not implementations:
            raise NotImplementedError(
                f"{type_protocol} cannot be loaded. "
                "No compatible implementation available. "
                "Make sure your implementation inherit either from HTTPOverQUICProtocol or HTTPOverTCPProtocol."
            )

        # getmembers() returns name-sorted pairs; the last match wins when
        # a module exposes several candidates.
        implementation_target: type[HTTPOverQUICProtocol | HTTPOverTCPProtocol] = (
            implementations.pop()[1]
        )

        return implementation_target(**kwargs)

View File

@@ -0,0 +1,358 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import typing
from abc import ABCMeta, abstractmethod
from typing import Any, Sequence
if typing.TYPE_CHECKING:
from typing_extensions import Literal
from .._typing import HeadersType
from ..events import Event
class BaseProtocol(metaclass=ABCMeta):
    """Sans-IO common methods whenever it is TCP, UDP or QUIC."""

    @abstractmethod
    def bytes_received(self, data: bytes) -> None:
        """
        Called when some data is received.
        """
        raise NotImplementedError

    # Sending direction
    @abstractmethod
    def bytes_to_send(self) -> bytes:
        """
        Returns data for sending out of the internal data buffer.
        """
        raise NotImplementedError

    @abstractmethod
    def connection_lost(self) -> None:
        """
        Called when the connection is lost or closed.
        """
        raise NotImplementedError

    # NOTE: deliberately not abstract — implementations without flow control
    # may keep this default, which raises if ever called.
    def should_wait_remote_flow_control(
        self, stream_id: int, amt: int | None = None
    ) -> bool | None:
        """
        Verify if the client should listen network incoming data for
        the flow control update purposes.
        """
        raise NotImplementedError

    def max_frame_size(self) -> int:
        """
        Determine if the remote set a limited size for each data frame.
        """
        raise NotImplementedError
class OverTCPProtocol(BaseProtocol, metaclass=ABCMeta):
    """
    Interface for sans-IO protocols on top TCP.
    """

    @abstractmethod
    def eof_received(self) -> None:
        """
        Called when the other end signals it won't send any more data.
        """
        raise NotImplementedError
class OverUDPProtocol(BaseProtocol, metaclass=ABCMeta):
    """
    Interface for sans-IO protocols on top UDP.
    """
class OverQUICProtocol(OverUDPProtocol):
    """UDP-based protocol exposing QUIC connection and TLS introspection."""

    @property
    @abstractmethod
    def connection_ids(self) -> Sequence[bytes]:
        """
        QUIC connection IDs

        This property can be used to assign UDP packets to QUIC connections.

        :return: a sequence of connection IDs
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def session_ticket(self) -> Any | None:
        """Session ticket usable for session resumption, if any."""
        raise NotImplementedError

    # Signature mirrors ssl.SSLSocket.getpeercert: raw bytes when
    # ``binary_form`` is true, a decoded mapping otherwise.
    @typing.overload
    def getpeercert(self, *, binary_form: Literal[True]) -> bytes: ...
    @typing.overload
    def getpeercert(self, *, binary_form: Literal[False] = ...) -> dict[str, Any]: ...
    @abstractmethod
    def getpeercert(self, *, binary_form: bool = False) -> bytes | dict[str, Any]:
        """Return the peer certificate, raw bytes or decoded mapping."""
        raise NotImplementedError

    @typing.overload
    def getissuercert(self, *, binary_form: Literal[True]) -> bytes | None: ...
    @typing.overload
    def getissuercert(
        self, *, binary_form: Literal[False] = ...
    ) -> dict[str, Any] | None: ...
    @abstractmethod
    def getissuercert(
        self, *, binary_form: bool = False
    ) -> bytes | dict[str, Any] | None:
        """Return the issuer certificate when available, else None."""
        raise NotImplementedError

    @abstractmethod
    def cipher(self) -> str | None:
        """Name of the negotiated TLS cipher, if known."""
        raise NotImplementedError
class HTTPProtocol(metaclass=ABCMeta):
    """
    Sans-IO representation of an HTTP connection
    """

    #: Short name of the backing implementation (e.g. "h11").
    implementation: str

    @staticmethod
    @abstractmethod
    def exceptions() -> tuple[type[BaseException], ...]:
        """Return exception types that should be handled in your application."""
        raise NotImplementedError

    @property
    @abstractmethod
    def multiplexed(self) -> bool:
        """
        Whether this connection supports multiple parallel streams.

        Returns ``True`` for HTTP/2 and HTTP/3 connections.
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def max_stream_count(self) -> int:
        """Determine how many concurrent streams the connection can handle."""
        raise NotImplementedError

    @abstractmethod
    def is_idle(self) -> bool:
        """
        Return True if this connection is BOTH available and not doing anything.
        """
        raise NotImplementedError

    @abstractmethod
    def is_available(self) -> bool:
        """
        Return whether this connection is capable of opening new streams.
        """
        raise NotImplementedError

    @abstractmethod
    def has_expired(self) -> bool:
        """
        Return whether this connection is closed or should be closed.
        """
        raise NotImplementedError

    @abstractmethod
    def get_available_stream_id(self) -> int:
        """
        Return an ID that can be used to create a new stream.

        Use the returned ID with :meth:`.submit_headers` to create the stream.
        This method may or may not return one value until that method is called.

        :return: stream ID
        """
        raise NotImplementedError

    @abstractmethod
    def submit_headers(
        self, stream_id: int, headers: HeadersType, end_stream: bool = False
    ) -> None:
        """
        Submit a frame with HTTP headers.

        If this is a client connection, this method starts an HTTP request.
        If this is a server connection, it starts an HTTP response.

        :param stream_id: stream ID
        :param headers: HTTP headers
        :param end_stream: whether to close the stream for sending
        """
        raise NotImplementedError

    @abstractmethod
    def submit_data(
        self, stream_id: int, data: bytes, end_stream: bool = False
    ) -> None:
        """
        Submit a frame with HTTP data.

        :param stream_id: stream ID
        :param data: payload
        :param end_stream: whether to close the stream for sending
        """
        raise NotImplementedError

    @abstractmethod
    def submit_stream_reset(self, stream_id: int, error_code: int = 0) -> None:
        """
        Immediately terminate a stream.

        Stream reset is used to request cancellation of a stream
        or to indicate that an error condition has occurred.

        Use :attr:`.error_codes` to obtain error codes for common problems.

        :param stream_id: stream ID
        :param error_code: indicates why the stream is being terminated
        """
        raise NotImplementedError

    @abstractmethod
    def submit_close(self, error_code: int = 0) -> None:
        """
        Submit graceful close the connection.

        Use :attr:`.error_codes` to obtain error codes for common problems.

        :param error_code: indicates why the connection is being closed
        """
        raise NotImplementedError

    @abstractmethod
    def next_event(self, stream_id: int | None = None) -> Event | None:
        """
        Consume next HTTP event.

        :return: an event instance
        """
        raise NotImplementedError

    # Concrete convenience wrapper around next_event().
    def events(self, stream_id: int | None = None) -> typing.Iterator[Event]:
        """
        Consume available HTTP events.

        :return: an iterator that unpack "next_event" until exhausted.
        """
        while True:
            ev = self.next_event(stream_id=stream_id)
            if ev is None:
                break
            yield ev

    @abstractmethod
    def has_pending_event(
        self,
        *,
        stream_id: int | None = None,
        excl_event: tuple[type[Event], ...] | None = None,
    ) -> bool:
        """Verify if there is queued event waiting to be consumed."""
        raise NotImplementedError

    @abstractmethod
    def reshelve(self, *events: Event) -> None:
        """Put back events into the deque."""
        raise NotImplementedError

    @abstractmethod
    def ping(self) -> None:
        """Send a PING frame to the remote peer. Thus keeping the connection alive."""
        raise NotImplementedError
class HTTPOverTCPProtocol(HTTPProtocol, OverTCPProtocol, metaclass=ABCMeta):
    """
    :class:`HTTPProtocol` over a TCP connection

    An interface for HTTP/1 and HTTP/2 protocols.

    Extends :class:`.HTTPProtocol`.
    """
class HTTPOverQUICProtocol(HTTPProtocol, OverQUICProtocol, metaclass=ABCMeta):
    """
    :class:`HTTPProtocol` over a QUIC connection

    Abstract base class for HTTP/3 protocols.

    Extends :class:`.HTTPProtocol`.
    """
class HTTP1Protocol(HTTPOverTCPProtocol, metaclass=ABCMeta):
    """
    Sans-IO representation of an HTTP/1 connection

    An interface for HTTP/1 implementations.

    Extends :class:`.HTTPOverTCPProtocol`.
    """

    @property
    def multiplexed(self) -> bool:
        # HTTP/1 handles a single request/response cycle at a time.
        return False

    def should_wait_remote_flow_control(
        self, stream_id: int, amt: int | None = None
    ) -> bool | None:
        # HTTP/1 has no flow-control frames; signal "not applicable" to the
        # caller with the NotImplemented sentinel (not an exception).
        return NotImplemented  # type: ignore[no-any-return]
class HTTP2Protocol(HTTPOverTCPProtocol, metaclass=ABCMeta):
    """
    Sans-IO representation of an HTTP/2 connection

    An abstract base class for HTTP/2 implementations.

    Extends :class:`.HTTPOverTCPProtocol`.
    """

    @property
    def multiplexed(self) -> bool:
        # HTTP/2 carries concurrent streams over one TCP connection.
        return True
class HTTP3Protocol(HTTPOverQUICProtocol, metaclass=ABCMeta):
    """
    Sans-IO representation of an HTTP/3 connection

    An abstract base class for HTTP/3 implementations.

    Extends :class:`.HTTPOverQUICProtocol`
    """

    @property
    def multiplexed(self) -> bool:
        # HTTP/3 carries concurrent streams over one QUIC connection.
        return True

View File

@@ -0,0 +1,21 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from ._h11 import HTTP1ProtocolHyperImpl
__all__ = ("HTTP1ProtocolHyperImpl",)

View File

@@ -0,0 +1,347 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from functools import lru_cache
import h11
from h11._state import _SWITCH_UPGRADE, ConnectionState
from ..._stream_matrix import StreamMatrix
from ..._typing import HeadersType
from ...events import (
ConnectionTerminated,
DataReceived,
EarlyHeadersReceived,
Event,
HeadersReceived,
)
from .._protocols import HTTP1Protocol
@lru_cache(maxsize=64)
def capitalize_header_name(name: bytes) -> bytes:
    """
    Take a header name and capitalize each dash-separated segment.

    Only ``-`` is treated as a separator: underscores are preserved
    verbatim (and the segment is simply capitalized as a whole), so
    ``content_type`` becomes ``Content_type``, not ``Content-Type``.

    >>> capitalize_header_name(b"x-hEllo-wORLD")
    b'X-Hello-World'
    >>> capitalize_header_name(b"server")
    b'Server'
    >>> capitalize_header_name(b"contEnt-TYPE")
    b'Content-Type'
    >>> capitalize_header_name(b"content_type")
    b'Content_type'
    """
    return b"-".join(el.capitalize() for el in name.split(b"-"))
def headers_to_request(headers: HeadersType) -> h11.Event:
    """
    Build an ``h11.Request`` from HTTP/2-style headers.

    Pseudo (colon) headers are folded back into an HTTP/1 request line, and
    a ``Host`` header derived from ``:authority`` is prepended when the
    caller supplied no ``host`` header.
    """
    pseudo: dict[bytes, bytes] = {}
    plain_headers: list[tuple[bytes, bytes]] = []
    seen_host: bytes | None = None

    for name, value in headers:
        if name.startswith(b":"):
            if name not in (b":method", b":scheme", b":authority", b":path"):
                raise ValueError("Unexpected request header: " + name.decode())
            pseudo[name] = value
            continue
        if seen_host is None and name == b"host":
            seen_host = value
        # We found that many projects... actually expect the header name to be sent capitalized... hardcoded
        # within their tests. Bad news, we have to keep doing this nonsense (namely capitalize_header_name)
        plain_headers.append((capitalize_header_name(name), value))

    authority = pseudo.get(b":authority")
    if authority is None:
        raise ValueError("Missing request header: :authority")

    method = pseudo.get(b":method")
    path = pseudo.get(b":path")

    # CONNECT requests are a special case: their target is the authority.
    target = authority if method == b"CONNECT" and path is None else path

    if seen_host is None:
        plain_headers.insert(0, (b"Host", authority))
    elif seen_host != authority:
        raise ValueError("Host header does not match :authority.")

    return h11.Request(
        method=method,  # type: ignore[arg-type]
        headers=plain_headers,
        target=target,  # type: ignore[arg-type]
    )
def headers_from_response(
    response: h11.InformationalResponse | h11.Response,
) -> HeadersType:
    """
    Converts an HTTP/1.0 or HTTP/1.1 response to HTTP/2-like headers.

    Generates the ``:status`` pseudo header from the response status line
    and prepends it to the raw header items.
    """
    status = str(response.status_code).encode("ascii")
    return [(b":status", status), *response.headers.raw_items()]
class RelaxConnectionState(ConnectionState):
    """h11 ConnectionState that tolerates an unsolicited server upgrade."""

    def process_event(  # type: ignore[no-untyped-def]
        self,
        role,
        event_type,
        server_switch_event=None,
    ) -> None:
        # h11 refuses a server protocol switch that the client did not
        # propose. Some legacy servers send "101 Switching Protocols"
        # anyway (the http.client stdlib tolerates this), so we register
        # the missing upgrade proposal ourselves, warn, and let h11 proceed.
        if server_switch_event is not None:
            if server_switch_event not in self.pending_switch_proposals:
                if server_switch_event is _SWITCH_UPGRADE:
                    warnings.warn(
                        f"Received server {server_switch_event} event without a pending proposal. "
                        "This will raise an exception in a future version. It is temporarily relaxed to match the "
                        "legacy http.client standard library.",
                        DeprecationWarning,
                        stacklevel=2,
                    )
                    self.pending_switch_proposals.add(_SWITCH_UPGRADE)
        return super().process_event(role, event_type, server_switch_event)
class HTTP1ProtocolHyperImpl(HTTP1Protocol):
    """Sans-IO HTTP/1.1 client protocol backed by the ``h11`` library."""

    implementation: str = "h11"

    def __init__(self) -> None:
        # h11 state machine acting as the client side of the connection.
        self._connection: h11.Connection = h11.Connection(h11.CLIENT)
        # Swap in the relaxed tracker to tolerate unsolicited 101 upgrades.
        self._connection._cstate = RelaxConnectionState()
        # Outgoing bytes staged until bytes_to_send() drains them.
        self._data_buffer: list[bytes] = []
        # Queued inbound events (single stream in practice for HTTP/1).
        self._events: StreamMatrix = StreamMatrix()
        # Set once the connection is over and must not be reused.
        self._terminated: bool = False
        # NOTE(review): presumably flipped on protocol switch — it is set
        # elsewhere in the class, outside this view; confirm.
        self._switched: bool = False
        # HTTP/1 has no real streams; a synthetic id tracks request cycles.
        self._current_stream_id: int = 1
@staticmethod
def exceptions() -> tuple[type[BaseException], ...]:
    # Every error h11 may raise; callers should treat these as protocol faults.
    return h11.LocalProtocolError, h11.ProtocolError, h11.RemoteProtocolError

def is_available(self) -> bool:
    # A new request may start only when both sides are back to IDLE.
    return self._connection.our_state == self._connection.their_state == h11.IDLE

@property
def max_stream_count(self) -> int:
    # HTTP/1 is not multiplexed: one request/response cycle at a time.
    return 1

def is_idle(self) -> bool:
    # Tracks the peer only: nothing started yet (IDLE) or the exchange is
    # over and the connection must be closed (MUST_CLOSE).
    return self._connection.their_state in {
        h11.IDLE,
        h11.MUST_CLOSE,
    }

def has_expired(self) -> bool:
    # NOTE(review): ``_terminated`` is presumably set when a
    # ConnectionTerminated event is produced (helper not in view).
    return self._terminated

def get_available_stream_id(self) -> int:
    """Return the synthetic stream ID for the next request cycle."""
    if not self.is_available():
        raise RuntimeError(
            "Cannot generate a new stream ID because the connection is not idle. "
            "HTTP/1.1 is not multiplexed and we do not support HTTP pipelining."
        )
    return self._current_stream_id

def submit_close(self, error_code: int = 0) -> None:
    # HTTP/1 has no GOAWAY equivalent; closing is handled at the socket level.
    pass  # no-op
def submit_headers(
    self, stream_id: int, headers: HeadersType, end_stream: bool = False
) -> None:
    # Start the request cycle: translate HTTP/2-style headers into an
    # h11.Request, optionally finishing the message body right away.
    if stream_id != self._current_stream_id:
        raise ValueError("Invalid stream ID.")
    self._h11_submit(headers_to_request(headers))
    if end_stream:
        self._h11_submit(h11.EndOfMessage())

def submit_data(
    self, stream_id: int, data: bytes, end_stream: bool = False
) -> None:
    if stream_id != self._current_stream_id:
        raise ValueError("Invalid stream ID.")
    if self._connection.their_state == h11.SWITCHED_PROTOCOL:
        # After a protocol switch, bytes are tunnelled verbatim and bypass
        # the h11 state machine entirely.
        self._data_buffer.append(data)
        if end_stream:
            self._events.append(self._connection_terminated())
        return
    self._h11_submit(h11.Data(data))
    if end_stream:
        self._h11_submit(h11.EndOfMessage())

def submit_stream_reset(self, stream_id: int, error_code: int = 0) -> None:
    # HTTP/1 cannot submit a stream (it does not have real streams).
    # But if there are no other streams, we can close the connection instead.
    self.connection_lost()
def connection_lost(self) -> None:
    if self._connection.their_state == h11.SWITCHED_PROTOCOL:
        # Tunnelled connections just end; surface the termination event.
        self._events.append(self._connection_terminated())
        return
    # This method is called when the connection is closed without an EOF.
    # But not all connections support EOF, so being here does not
    # necessarily mean that something went wrong.
    #
    # The tricky part is that HTTP/1.0 server can send responses
    # without Content-Length or Transfer-Encoding headers,
    # meaning that a response body is closed with the connection.
    # In such cases, we require a proper EOF to distinguish complete
    # messages from partial messages interrupted by network failure.
    if not self._terminated:
        self._connection.send_failed()
        self._events.append(self._connection_terminated())

def eof_received(self) -> None:
    if self._connection.their_state == h11.SWITCHED_PROTOCOL:
        self._events.append(self._connection_terminated())
        return
    # h11 interprets an empty feed as end-of-file.
    self._h11_data_received(b"")

def bytes_received(self, data: bytes) -> None:
    if not data:
        return  # h11 treats empty data as EOF.
    if self._connection.their_state == h11.SWITCHED_PROTOCOL:
        # Tunnelled bytes after a protocol switch are surfaced directly as
        # DataReceived events, without involving h11.
        self._events.append(DataReceived(self._current_stream_id, data))
        return
    else:
        self._h11_data_received(data)
def bytes_to_send(self) -> bytes:
data = b"".join(self._data_buffer)
self._data_buffer.clear()
self._maybe_start_next_cycle()
return data
    def next_event(self, stream_id: int | None = None) -> Event | None:
        """Pop the oldest pending event, optionally filtered by stream."""
        return self._events.popleft(stream_id=stream_id)
    def has_pending_event(
        self,
        *,
        stream_id: int | None = None,
        excl_event: tuple[type[Event], ...] | None = None,
    ) -> bool:
        """Tell whether at least one queued event matches the given filters."""
        return self._events.has(stream_id=stream_id, excl_event=excl_event)
def _h11_submit(self, h11_event: h11.Event) -> None:
chunks = self._connection.send_with_data_passthrough(h11_event)
if chunks:
self._data_buffer += chunks
    def _h11_data_received(self, data: bytes) -> None:
        """Hand bytes to h11, then translate any resulting h11 events."""
        self._connection.receive_data(data)
        self._fetch_events()
    def _fetch_events(self) -> None:
        """Drain h11's event queue and translate into our internal events.

        Stops as soon as the connection is flagged terminated, h11 needs
        more bytes, or a protocol error occurs.
        """
        a = self._events.append
        while not self._terminated:
            try:
                h11_event = self._connection.next_event()
            except h11.RemoteProtocolError as e:
                a(self._connection_terminated(e.error_status_hint, str(e)))
                break
            ev_type = h11_event.__class__
            if h11_event is h11.NEED_DATA or h11_event is h11.PAUSED:
                # Nothing more to parse right now; if the peer state demands
                # a close, surface a termination event instead of waiting.
                if h11.MUST_CLOSE == self._connection.their_state:
                    a(self._connection_terminated())
                else:
                    break
            elif ev_type is h11.Response:
                a(
                    HeadersReceived(
                        self._current_stream_id,
                        headers_from_response(h11_event),  # type: ignore[arg-type]
                    )
                )
            elif ev_type is h11.InformationalResponse:
                a(
                    EarlyHeadersReceived(
                        stream_id=self._current_stream_id,
                        headers=headers_from_response(h11_event),  # type: ignore[arg-type]
                    )
                )
            elif ev_type is h11.Data:
                # officially h11 typed data as "bytes"
                # but we... found that it store bytearray sometime.
                payload = h11_event.data  # type: ignore[union-attr]
                a(
                    DataReceived(
                        self._current_stream_id,
                        bytes(payload) if payload.__class__ is bytearray else payload,
                    )
                )
            elif ev_type is h11.EndOfMessage:
                # HTTP/2 and HTTP/3 send END_STREAM flag with HEADERS and DATA frames.
                # We emulate similar behavior for HTTP/1.
                # end_stream stays False while a protocol switch may follow.
                if h11_event.headers:  # type: ignore[union-attr]
                    last_event: HeadersReceived | DataReceived = HeadersReceived(
                        self._current_stream_id,
                        h11_event.headers,  # type: ignore[union-attr]
                        self._connection.their_state != h11.MIGHT_SWITCH_PROTOCOL,  # type: ignore[attr-defined]
                    )
                else:
                    last_event = DataReceived(
                        self._current_stream_id,
                        b"",
                        self._connection.their_state != h11.MIGHT_SWITCH_PROTOCOL,  # type: ignore[attr-defined]
                    )
                a(last_event)
                self._maybe_start_next_cycle()
            elif ev_type is h11.ConnectionClosed:
                a(self._connection_terminated())
def _connection_terminated(
self, error_code: int = 0, message: str | None = None
) -> Event:
self._terminated = True
return ConnectionTerminated(error_code, message)
    def _maybe_start_next_cycle(self) -> None:
        """Advance to a fresh request/response cycle when both sides are DONE,
        and surface any bytes h11 buffered past a protocol switch (once)."""
        if h11.DONE == self._connection.our_state == self._connection.their_state:
            self._connection.start_next_cycle()
            self._current_stream_id += 1
        if h11.SWITCHED_PROTOCOL == self._connection.their_state and not self._switched:
            # h11 stops parsing after a switch; whatever it already buffered
            # must be delivered verbatim, exactly once.
            data, closed = self._connection.trailing_data
            if data:
                self._events.append(DataReceived(self._current_stream_id, data))
            self._switched = True
def reshelve(self, *events: Event) -> None:
for ev in reversed(events):
self._events.appendleft(ev)
    def ping(self) -> None:
        """HTTP/1 has no liveness frame; always raises ``NotImplementedError``."""
        raise NotImplementedError("http1 does not support PING")

View File

@@ -0,0 +1,21 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from ._h2 import HTTP2ProtocolHyperImpl
__all__ = ("HTTP2ProtocolHyperImpl",)

View File

@@ -0,0 +1,312 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from secrets import token_bytes
from typing import Iterator
import jh2.config # type: ignore
import jh2.connection # type: ignore
import jh2.errors # type: ignore
import jh2.events # type: ignore
import jh2.exceptions # type: ignore
import jh2.settings # type: ignore
from ..._stream_matrix import StreamMatrix
from ..._typing import HeadersType
from ...events import (
ConnectionTerminated,
DataReceived,
EarlyHeadersReceived,
Event,
GoawayReceived,
HandshakeCompleted,
HeadersReceived,
StreamResetReceived,
)
from .._protocols import HTTP2Protocol
class _PatchedH2Connection(jh2.connection.H2Connection):  # type: ignore[misc]
    """
    This is a performance hotfix class. We internally, already keep
    track of the open stream count.
    """
    def __init__(
        self,
        config: jh2.config.H2Configuration | None = None,
        observable_impl: HTTP2ProtocolHyperImpl | None = None,
    ) -> None:
        """Install custom client settings and remember the owning protocol."""
        super().__init__(config=config)
        # by default CONNECT is disabled
        # we need it to support natively WebSocket over HTTP/2 for example.
        self.local_settings = jh2.settings.Settings(
            client=True,
            initial_values={
                jh2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
                jh2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: self.DEFAULT_MAX_HEADER_LIST_SIZE,
                jh2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL: 1,
            },
        )
        self._observable_impl = observable_impl
    def _open_streams(self, *args, **kwargs) -> int:  # type: ignore[no-untyped-def]
        # Performance shortcut: the owning protocol already tracks the open
        # stream count, so skip jh2's own computation when possible.
        if self._observable_impl is not None:
            return self._observable_impl._open_stream_count
        return super()._open_streams(*args, **kwargs)  # type: ignore[no-any-return]
    def _receive_goaway_frame(self, frame):  # type: ignore[no-untyped-def]
        """
        Receive a GOAWAY frame on the connection.
        We purposely override this method to work around a known bug of jh2.
        """
        events = self.state_machine.process_input(
            jh2.connection.ConnectionInputs.RECV_GOAWAY
        )
        err_code = jh2.errors._error_code_from_int(frame.error_code)
        # GOAWAY allows an
        # endpoint to gracefully stop accepting new streams while still
        # finishing processing of previously established streams.
        # see https://tools.ietf.org/html/rfc7540#section-6.8
        # hyper/h2 does not allow such a thing for now. let's work around this.
        if (
            err_code == 0
            and self._observable_impl is not None
            and self._observable_impl._open_stream_count > 0
        ):
            # Graceful shutdown with streams still in flight: force the state
            # machine back to open so inflight exchanges can complete.
            self.state_machine.state = jh2.connection.ConnectionState.CLIENT_OPEN
        # Clear the outbound data buffer: we cannot send further data now.
        self.clear_outbound_data_buffer()
        # Fire an appropriate ConnectionTerminated event.
        new_event = jh2.events.ConnectionTerminated()
        new_event.error_code = err_code
        new_event.last_stream_id = frame.last_stream_id
        new_event.additional_data = (
            frame.additional_data if frame.additional_data else None
        )
        events.append(new_event)
        return [], events
# jh2 event types that both map onto our HeadersReceived event
# (see HTTP2ProtocolHyperImpl._map_events).
HEADER_OR_TRAILER_TYPE_SET = {
    jh2.events.ResponseReceived,
    jh2.events.TrailersReceived,
}
class HTTP2ProtocolHyperImpl(HTTP2Protocol):
    """HTTP/2 protocol state machine backed by the jh2 package."""
    implementation: str = "h2"
    def __init__(
        self,
        *,
        validate_outbound_headers: bool = False,
        validate_inbound_headers: bool = False,
        normalize_outbound_headers: bool = False,
        normalize_inbound_headers: bool = True,
    ) -> None:
        """Initiate the client connection preface and default limits."""
        self._connection: jh2.connection.H2Connection = _PatchedH2Connection(
            jh2.config.H2Configuration(
                client_side=True,
                validate_outbound_headers=validate_outbound_headers,
                normalize_outbound_headers=normalize_outbound_headers,
                validate_inbound_headers=validate_inbound_headers,
                normalize_inbound_headers=normalize_inbound_headers,
            ),
            observable_impl=self,
        )
        self._open_stream_count: int = 0
        self._connection.initiate_connection()
        # Widen the connection-level receive window up-front (2**24 bytes).
        self._connection.increment_flow_control_window(2**24)
        self._events: StreamMatrix = StreamMatrix()
        self._terminated: bool = False
        self._goaway_to_honor: bool = False
        self._max_stream_count: int = (
            self._connection.remote_settings.max_concurrent_streams
        )
        self._max_frame_size: int = self._connection.remote_settings.max_frame_size
    def max_frame_size(self) -> int:
        """Peer-advertised maximum frame size (refreshed by bytes_received)."""
        return self._max_frame_size
    @staticmethod
    def exceptions() -> tuple[type[BaseException], ...]:
        """jh2 exception types callers may need to catch."""
        return jh2.exceptions.ProtocolError, jh2.exceptions.H2Error
    def is_available(self) -> bool:
        """True when another concurrent stream may be opened."""
        if self._terminated:
            return False
        return self._max_stream_count > self._open_stream_count
    @property
    def max_stream_count(self) -> int:
        # Peer concurrency limit; pinned to the open count once GOAWAY arrives.
        return self._max_stream_count
    def is_idle(self) -> bool:
        """True while alive with zero in-flight streams."""
        return self._terminated is False and self._open_stream_count == 0
    def has_expired(self) -> bool:
        """True when no new request should be sent on this connection."""
        return self._terminated or self._goaway_to_honor
    def get_available_stream_id(self) -> int:
        """Next client-initiated stream identifier from jh2."""
        return self._connection.get_next_available_stream_id()  # type: ignore[no-any-return]
    def submit_close(self, error_code: int = 0) -> None:
        """Queue a GOAWAY frame for connection shutdown."""
        self._connection.close_connection(error_code)
    def submit_headers(
        self, stream_id: int, headers: HeadersType, end_stream: bool = False
    ) -> None:
        """Open a stream with HEADERS and widen its receive window."""
        self._connection.send_headers(stream_id, headers, end_stream)
        self._connection.increment_flow_control_window(2**24, stream_id=stream_id)
        self._open_stream_count += 1
    def submit_data(
        self, stream_id: int, data: bytes, end_stream: bool = False
    ) -> None:
        """Queue a DATA frame on *stream_id*."""
        self._connection.send_data(stream_id, data, end_stream)
    def submit_stream_reset(self, stream_id: int, error_code: int = 0) -> None:
        """Queue a RST_STREAM frame for *stream_id*."""
        self._connection.reset_stream(stream_id, error_code)
    def next_event(self, stream_id: int | None = None) -> Event | None:
        """Pop the oldest pending event, optionally filtered by stream."""
        return self._events.popleft(stream_id=stream_id)
    def has_pending_event(
        self,
        *,
        stream_id: int | None = None,
        excl_event: tuple[type[Event], ...] | None = None,
    ) -> bool:
        """Whether any queued event matches the given filters."""
        return self._events.has(stream_id=stream_id, excl_event=excl_event)
    def _map_events(self, h2_events: list[jh2.events.Event]) -> Iterator[Event]:
        """Translate jh2 events into our transport-agnostic events.

        Also maintains the open-stream counter and eagerly evicts ended
        streams from jh2's internal registry.
        """
        for e in h2_events:
            ev_type = e.__class__
            if ev_type in HEADER_OR_TRAILER_TYPE_SET:
                end_stream = e.stream_ended is not None
                if end_stream:
                    self._open_stream_count -= 1
                    stream = self._connection.streams.pop(e.stream_id)
                    self._connection._closed_streams[e.stream_id] = stream.closed_by
                yield HeadersReceived(e.stream_id, e.headers, end_stream=end_stream)
            elif ev_type is jh2.events.DataReceived:
                end_stream = e.stream_ended is not None
                if end_stream:
                    self._open_stream_count -= 1
                    stream = self._connection.streams.pop(e.stream_id)
                    self._connection._closed_streams[e.stream_id] = stream.closed_by
                # Return flow-control credit to the peer immediately.
                self._connection.acknowledge_received_data(
                    e.flow_controlled_length, e.stream_id
                )
                yield DataReceived(e.stream_id, e.data, end_stream=end_stream)
            elif ev_type is jh2.events.InformationalResponseReceived:
                yield EarlyHeadersReceived(
                    e.stream_id,
                    e.headers,
                )
            elif ev_type is jh2.events.StreamReset:
                self._open_stream_count -= 1
                # event StreamEnded may occur before StreamReset
                if e.stream_id in self._connection.streams:
                    stream = self._connection.streams.pop(e.stream_id)
                    self._connection._closed_streams[e.stream_id] = stream.closed_by
                yield StreamResetReceived(e.stream_id, e.error_code)
            elif ev_type is jh2.events.ConnectionTerminated:
                # ConnectionTerminated from h2 means that GOAWAY was received.
                # A server can send GOAWAY for graceful shutdown, where clients
                # do not open new streams, but inflight requests can be completed.
                #
                # Saying "connection was terminated" can be confusing,
                # so we emit an event called "GoawayReceived".
                if e.error_code == 0:
                    self._goaway_to_honor = True
                    yield GoawayReceived(e.last_stream_id, e.error_code)
                else:
                    self._terminated = True
                    yield ConnectionTerminated(e.error_code, None)
            elif ev_type in {
                jh2.events.SettingsAcknowledged,
                jh2.events.RemoteSettingsChanged,
            }:
                yield HandshakeCompleted(alpn_protocol="h2")
    def connection_lost(self) -> None:
        """Transport vanished without EOF: flag termination."""
        self._connection_terminated()
    def eof_received(self) -> None:
        """Remote sent EOF: flag termination."""
        self._connection_terminated()
    def bytes_received(self, data: bytes) -> None:
        """Feed wire bytes to jh2, then refresh cached peer limits."""
        if not data:
            return
        try:
            h2_events = self._connection.receive_data(data)
        except jh2.exceptions.ProtocolError as e:
            self._connection_terminated(e.error_code, str(e))
        else:
            self._events.extend(self._map_events(h2_events))
            # we want to perpetually mark the connection as "saturated"
            if self._goaway_to_honor:
                self._max_stream_count = self._open_stream_count
            if self._connection.remote_settings.has_update:
                if not self._goaway_to_honor:
                    self._max_stream_count = (
                        self._connection.remote_settings.max_concurrent_streams
                    )
                self._max_frame_size = self._connection.remote_settings.max_frame_size
    def bytes_to_send(self) -> bytes:
        """Drain jh2's outbound byte buffer."""
        return self._connection.data_to_send()  # type: ignore[no-any-return]
    def _connection_terminated(
        self, error_code: int = 0, message: str | None = None
    ) -> None:
        """Record termination exactly once and queue the matching event."""
        if self._terminated:
            return
        error_code = int(error_code)  # Convert h2 IntEnum to an actual int
        self._terminated = True
        self._events.append(ConnectionTerminated(error_code, message))
    def should_wait_remote_flow_control(
        self, stream_id: int, amt: int | None = None
    ) -> bool | None:
        """Whether sending *amt* bytes (or anything) must await WINDOW_UPDATE."""
        flow_remaining_bytes: int = self._connection.local_flow_control_window(
            stream_id
        )
        if amt is None:
            return flow_remaining_bytes == 0
        return amt > flow_remaining_bytes
    def reshelve(self, *events: Event) -> None:
        """Put *events* back at the head of the queue, order preserved."""
        for ev in reversed(events):
            self._events.appendleft(ev)
    def ping(self) -> None:
        """Send an HTTP/2 PING frame with a random opaque payload."""
        self._connection.ping(token_bytes(8))

View File

@@ -0,0 +1,21 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from ._qh3 import HTTP3ProtocolAioQuicImpl
__all__ = ("HTTP3ProtocolAioQuicImpl",)

View File

@@ -0,0 +1,592 @@
# Copyright 2022 Akamai Technologies, Inc
# Largely rewritten in 2023 for urllib3-future
# Copyright 2024 Ahmed Tahri
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import datetime
import ssl
import typing
from collections import deque
from os import environ
from random import randint
from time import time as monotonic
from typing import Any, Iterable, Sequence
if typing.TYPE_CHECKING:
from typing_extensions import Literal
from qh3 import (
CipherSuite,
H3Connection,
H3Error,
ProtocolError,
QuicConfiguration,
QuicConnection,
QuicConnectionError,
QuicFileLogger,
SessionTicket,
h3_events,
quic_events,
)
from qh3.h3.connection import FrameType
from qh3.quic.connection import QuicConnectionState
from ..._configuration import QuicTLSConfig
from ..._stream_matrix import StreamMatrix
from ..._typing import AddressType, HeadersType
from ...events import (
ConnectionTerminated,
DataReceived,
EarlyHeadersReceived,
Event,
GoawayReceived,
)
from ...events import HandshakeCompleted as _HandshakeCompleted
from ...events import HeadersReceived, StreamResetReceived
from .._protocols import HTTP3Protocol
# The only QuicEvent subclasses translated by _map_quic_event; every other
# QUIC-level event type is skipped for speed.
QUIC_RELEVANT_EVENT_TYPES = {
    quic_events.HandshakeCompleted,
    quic_events.ConnectionTerminated,
    quic_events.StreamReset,
}
class HTTP3ProtocolAioQuicImpl(HTTP3Protocol):
implementation: str = "qh3"
    def __init__(
        self,
        *,
        remote_address: AddressType,
        server_name: str,
        tls_config: QuicTLSConfig,
    ) -> None:
        """Build the QUIC configuration and connection for *server_name*.

        Honors the SSLKEYLOGFILE and QUICLOGDIR environment variables for
        TLS secret logging and qlog tracing respectively.
        """
        keylogfile_path: str | None = environ.get("SSLKEYLOGFILE", None)
        qlogdir_path: str | None = environ.get("QUICLOGDIR", None)
        self._configuration: QuicConfiguration = QuicConfiguration(
            is_client=True,
            verify_mode=ssl.CERT_NONE if tls_config.insecure else ssl.CERT_REQUIRED,
            cafile=tls_config.cafile,
            capath=tls_config.capath,
            cadata=tls_config.cadata,
            alpn_protocols=["h3"],
            session_ticket=tls_config.session_ticket,
            server_name=server_name,
            hostname_checks_common_name=tls_config.cert_use_common_name,
            assert_fingerprint=tls_config.cert_fingerprint,
            verify_hostname=tls_config.verify_hostname,
            secrets_log_file=open(keylogfile_path, "w") if keylogfile_path else None,  # type: ignore[arg-type]
            quic_logger=QuicFileLogger(qlogdir_path) if qlogdir_path else None,
            idle_timeout=tls_config.idle_timeout,
            max_data=2**24,
            max_stream_data=2**24,
        )
        if tls_config.ciphers:
            # qh3 names ciphers without the "TLS_" prefix; map the
            # user-provided names onto the CipherSuite enum.
            # NOTE(review): an unknown (but well-formed) cipher name raises
            # KeyError here before the ValueError below can trigger — confirm
            # whether that is intended.
            available_ciphers = {c.name: c for c in CipherSuite}
            chosen_ciphers: list[CipherSuite] = []
            for cipher in tls_config.ciphers:
                if "name" in cipher and isinstance(cipher["name"], str):
                    chosen_ciphers.append(
                        available_ciphers[cipher["name"].replace("TLS_", "")]
                    )
            if len(chosen_ciphers) == 0:
                raise ValueError(
                    f"Unable to find a compatible cipher in '{tls_config.ciphers}' to establish a QUIC connection. "
                    f"QUIC support one of '{['TLS_' + e for e in available_ciphers.keys()]}' only."
                )
            self._configuration.cipher_suites = chosen_ciphers
        if tls_config.certfile:
            self._configuration.load_cert_chain(
                tls_config.certfile,
                tls_config.keyfile,
                tls_config.keypassword,
            )
        self._quic: QuicConnection = QuicConnection(configuration=self._configuration)
        self._connection_ids: set[bytes] = set()
        self._remote_address = remote_address
        self._events: StreamMatrix = StreamMatrix()
        # Outbound datagrams, each at most one MTU, popped one at a time.
        self._packets: deque[bytes] = deque()
        self._http: H3Connection | None = None
        self._terminated: bool = False
        self._data_in_flight: bool = False
        self._open_stream_count: int = 0
        self._total_stream_count: int = 0
        self._goaway_to_honor: bool = False
        self._max_stream_count: int = (
            100  # safe-default, broadly used. (and set by qh3)
        )
        self._max_frame_size: int | None = None
    @staticmethod
    def exceptions() -> tuple[type[BaseException], ...]:
        """Exception types callers may need to catch around qh3 operations."""
        return ProtocolError, H3Error, QuicConnectionError, AssertionError
    @property
    def max_stream_count(self) -> int:
        # Peer concurrency limit; pinned to the open count once GOAWAY arrives.
        return self._max_stream_count
def is_available(self) -> bool:
return (
self._terminated is False
and self._max_stream_count > self._quic.open_outbound_streams
)
def is_idle(self) -> bool:
return self._terminated is False and self._open_stream_count == 0
    def has_expired(self) -> bool:
        """Return True when no new request should be sent on this connection.

        As a side effect, fires the QUIC timer and queues any datagrams it
        produces, keeping retransmission/idle handling honest for pollers.
        """
        if not self._terminated and not self._goaway_to_honor:
            now = monotonic()
            self._quic.handle_timer(now)
            self._packets.extend(
                map(lambda e: e[0], self._quic.datagrams_to_send(now=now))
            )
            if self._quic._state in {
                QuicConnectionState.CLOSING,
                QuicConnectionState.TERMINATED,
            }:
                self._terminated = True
            if (
                hasattr(self._quic, "_close_event")
                and self._quic._close_event is not None
            ):
                self._events.extend(self._map_quic_event(self._quic._close_event))
                self._terminated = True
        return self._terminated or self._goaway_to_honor
    @property
    def session_ticket(self) -> SessionTicket | None:
        """TLS session ticket for 0-RTT resumption, if one was issued."""
        return self._quic.tls.session_ticket if self._quic and self._quic.tls else None
    def get_available_stream_id(self) -> int:
        """Next client-initiated bidirectional stream identifier."""
        return self._quic.get_next_available_stream_id()
    def submit_close(self, error_code: int = 0) -> None:
        """Queue a CONNECTION_CLOSE frame of the appropriate type."""
        # QUIC has two different frame types for closing the connection.
        # From RFC 9000 (QUIC: A UDP-Based Multiplexed and Secure Transport):
        #
        # > An endpoint sends a CONNECTION_CLOSE frame (type=0x1c or 0x1d)
        # > to notify its peer that the connection is being closed.
        # > The CONNECTION_CLOSE frame with a type of 0x1c is used to signal errors
        # > at only the QUIC layer, or the absence of errors (with the NO_ERROR code).
        # > The CONNECTION_CLOSE frame with a type of 0x1d is used
        # > to signal an error with the application that uses QUIC.
        frame_type = 0x1D if error_code else 0x1C
        self._quic.close(error_code=error_code, frame_type=frame_type)
    def submit_headers(
        self, stream_id: int, headers: HeadersType, end_stream: bool = False
    ) -> None:
        """Open an HTTP/3 request stream by sending HEADERS."""
        assert self._http is not None
        self._open_stream_count += 1
        self._total_stream_count += 1
        self._http.send_headers(stream_id, list(headers), end_stream)
    def submit_data(
        self, stream_id: int, data: bytes, end_stream: bool = False
    ) -> None:
        """Queue body bytes; track that more data is expected when not final."""
        assert self._http is not None
        self._http.send_data(stream_id, data, end_stream)
        if end_stream is False:
            self._data_in_flight = True
    def submit_stream_reset(self, stream_id: int, error_code: int = 0) -> None:
        """Queue a RESET_STREAM frame for *stream_id*."""
        self._quic.reset_stream(stream_id, error_code)
    def next_event(self, stream_id: int | None = None) -> Event | None:
        """Pop the oldest pending event, optionally filtered by stream."""
        return self._events.popleft(stream_id=stream_id)
    def has_pending_event(
        self,
        *,
        stream_id: int | None = None,
        excl_event: tuple[type[Event], ...] | None = None,
    ) -> bool:
        """Whether any queued event matches the given filters."""
        return self._events.has(stream_id=stream_id, excl_event=excl_event)
    @property
    def connection_ids(self) -> Sequence[bytes]:
        """Snapshot of QUIC connection IDs associated with this connection."""
        return list(self._connection_ids)
    def connection_lost(self) -> None:
        """Mark the connection dead after the transport vanished."""
        self._terminated = True
        self._events.append(ConnectionTerminated())
    def bytes_received(self, data: bytes) -> None:
        """Feed one inbound UDP datagram to QUIC, then refresh cached limits."""
        self._quic.receive_datagram(data, self._remote_address, now=monotonic())
        self._fetch_events()
        if self._data_in_flight:
            self._data_in_flight = False
        # we want to perpetually mark the connection as "saturated"
        if self._goaway_to_honor:
            self._max_stream_count = self._open_stream_count
        else:
            # This section may confuse beginners
            # See RFC 9000 -> 19.11. MAX_STREAMS Frames
            # footer extract:
            # Note that these frames (and the corresponding transport parameters)
            # do not describe the number of streams that can be opened
            # concurrently. The limit includes streams that have been closed as
            # well as those that are open.
            #
            # so, finding that remote_max_streams_bidi is increasing constantly is normal.
            new_stream_limit = (
                self._quic._remote_max_streams_bidi - self._total_stream_count
            )
            if (
                new_stream_limit
                and new_stream_limit != self._max_stream_count
                and new_stream_limit > 0
            ):
                self._max_stream_count = new_stream_limit
        # Track the per-stream flow-control window the peer advertises.
        if (
            self._quic._remote_max_stream_data_bidi_remote
            and self._quic._remote_max_stream_data_bidi_remote
            != self._max_frame_size
        ):
            self._max_frame_size = self._quic._remote_max_stream_data_bidi_remote
    def bytes_to_send(self) -> bytes:
        """Return the next single outbound UDP datagram (or b"" when none).

        Lazily performs the QUIC connect and creates the H3 layer on first use.
        """
        if not self._packets:
            now = monotonic()
            if self._http is None:
                self._quic.connect(self._remote_address, now=now)
                self._http = H3Connection(self._quic)
            # the QUIC state machine returns datagrams (addr, packet)
            # the client never have to worry about the destination
            # unless server yield a preferred address?
            self._packets.extend(
                map(lambda e: e[0], self._quic.datagrams_to_send(now=now))
            )
            if not self._packets:
                return b""
        # it is absolutely crucial to return one at a time
        # because UDP don't support sending more than
        # MTU (to be more precise, lowest MTU in the network path from A (you) to B (server))
        return self._packets.popleft()
    def _fetch_events(self) -> None:
        """Drain qh3's QUIC events, feed them to H3, and translate both layers."""
        assert self._http is not None
        for quic_event in iter(self._quic.next_event, None):
            self._events.extend(self._map_quic_event(quic_event))
            for h3_event in self._http.handle_event(quic_event):
                self._events.extend(self._map_h3_event(h3_event))
        # A close can be recorded without surfacing a QuicEvent; check it too.
        if hasattr(self._quic, "_close_event") and self._quic._close_event is not None:
            self._events.extend(self._map_quic_event(self._quic._close_event))
    def _map_quic_event(self, quic_event: quic_events.QuicEvent) -> Iterable[Event]:
        """Translate one QUIC-level qh3 event into our internal events."""
        ev_type = quic_event.__class__
        # fastest path execution, most of the time we don't have those
        # 3 event types.
        if ev_type not in QUIC_RELEVANT_EVENT_TYPES:
            return
        if ev_type is quic_events.HandshakeCompleted:
            yield _HandshakeCompleted(quic_event.alpn_protocol)  # type: ignore[attr-defined]
        elif ev_type is quic_events.ConnectionTerminated:
            # A GOAWAY-triggered close is graceful: inflight requests may
            # finish, so surface GoawayReceived instead of a hard termination.
            if quic_event.frame_type == FrameType.GOAWAY.value:  # type: ignore[attr-defined]
                self._goaway_to_honor = True
                stream_list: list[int] = [
                    e for e in self._events._matrix.keys() if e is not None
                ]
                yield GoawayReceived(stream_list[-1], quic_event.error_code)  # type: ignore[attr-defined]
            else:
                self._terminated = True
                yield ConnectionTerminated(
                    quic_event.error_code,  # type: ignore[attr-defined]
                    quic_event.reason_phrase,  # type: ignore[attr-defined]
                )
        elif ev_type is quic_events.StreamReset:
            self._open_stream_count -= 1
            yield StreamResetReceived(quic_event.stream_id, quic_event.error_code)  # type: ignore[attr-defined]
    def _map_h3_event(self, h3_event: h3_events.H3Event) -> Iterable[Event]:
        """Translate one HTTP/3-level qh3 event into our internal events."""
        ev_type = h3_event.__class__
        if ev_type is h3_events.HeadersReceived:
            if h3_event.stream_ended:  # type: ignore[attr-defined]
                self._open_stream_count -= 1
            yield HeadersReceived(
                h3_event.stream_id,  # type: ignore[attr-defined]
                h3_event.headers,  # type: ignore[attr-defined]
                h3_event.stream_ended,  # type: ignore[attr-defined]
            )
        elif ev_type is h3_events.DataReceived:
            if h3_event.stream_ended:  # type: ignore[attr-defined]
                self._open_stream_count -= 1
            yield DataReceived(h3_event.stream_id, h3_event.data, h3_event.stream_ended)  # type: ignore[attr-defined]
        elif ev_type is h3_events.InformationalHeadersReceived:
            yield EarlyHeadersReceived(
                h3_event.stream_id,  # type: ignore[attr-defined]
                h3_event.headers,  # type: ignore[attr-defined]
            )
    def should_wait_remote_flow_control(
        self, stream_id: int, amt: int | None = None
    ) -> bool | None:
        """Whether the caller should pause sending until peer acknowledgement."""
        # NOTE(review): unlike the HTTP/2 implementation this ignores
        # stream_id/amt and only reflects the "data in flight" flag — confirm
        # that coarse answer is intended here.
        return self._data_in_flight
@typing.overload
def getissuercert(self, *, binary_form: Literal[True]) -> bytes | None: ...
@typing.overload
def getissuercert(
self, *, binary_form: Literal[False] = ...
) -> dict[str, Any] | None: ...
def getissuercert(
self, *, binary_form: bool = False
) -> bytes | dict[str, typing.Any] | None:
x509_certificate = self._quic.get_peercert()
if x509_certificate is None:
raise ValueError("TLS handshake has not been done yet")
if not self._quic.get_issuercerts():
return None
x509_certificate = self._quic.get_issuercerts()[0]
if binary_form:
return x509_certificate.public_bytes()
datetime.datetime.fromtimestamp(
x509_certificate.not_valid_before, tz=datetime.timezone.utc
)
issuer_info = {
"version": x509_certificate.version + 1,
"serialNumber": x509_certificate.serial_number.upper(),
"subject": [],
"issuer": [],
"notBefore": datetime.datetime.fromtimestamp(
x509_certificate.not_valid_before, tz=datetime.timezone.utc
).strftime("%b %d %H:%M:%S %Y")
+ " UTC",
"notAfter": datetime.datetime.fromtimestamp(
x509_certificate.not_valid_after, tz=datetime.timezone.utc
).strftime("%b %d %H:%M:%S %Y")
+ " UTC",
}
_short_name_assoc = {
"CN": "commonName",
"L": "localityName",
"ST": "stateOrProvinceName",
"O": "organizationName",
"OU": "organizationalUnitName",
"C": "countryName",
"STREET": "streetAddress",
"DC": "domainComponent",
"E": "email",
}
for raw_oid, rfc4514_attribute_name, value in x509_certificate.subject:
if rfc4514_attribute_name not in _short_name_assoc:
continue
issuer_info["subject"].append( # type: ignore[attr-defined]
(
(
_short_name_assoc[rfc4514_attribute_name],
value.decode(),
),
)
)
for raw_oid, rfc4514_attribute_name, value in x509_certificate.issuer:
if rfc4514_attribute_name not in _short_name_assoc:
continue
issuer_info["issuer"].append( # type: ignore[attr-defined]
(
(
_short_name_assoc[rfc4514_attribute_name],
value.decode(),
),
)
)
return issuer_info
@typing.overload
def getpeercert(self, *, binary_form: Literal[True]) -> bytes: ...
@typing.overload
def getpeercert(self, *, binary_form: Literal[False] = ...) -> dict[str, Any]: ...
def getpeercert(
self, *, binary_form: bool = False
) -> bytes | dict[str, typing.Any]:
x509_certificate = self._quic.get_peercert()
if x509_certificate is None:
raise ValueError("TLS handshake has not been done yet")
if binary_form:
return x509_certificate.public_bytes()
peer_info = {
"version": x509_certificate.version + 1,
"serialNumber": x509_certificate.serial_number.upper(),
"subject": [],
"issuer": [],
"notBefore": datetime.datetime.fromtimestamp(
x509_certificate.not_valid_before, tz=datetime.timezone.utc
).strftime("%b %d %H:%M:%S %Y")
+ " UTC",
"notAfter": datetime.datetime.fromtimestamp(
x509_certificate.not_valid_after, tz=datetime.timezone.utc
).strftime("%b %d %H:%M:%S %Y")
+ " UTC",
"subjectAltName": [],
"OCSP": [],
"caIssuers": [],
"crlDistributionPoints": [],
}
_short_name_assoc = {
"CN": "commonName",
"L": "localityName",
"ST": "stateOrProvinceName",
"O": "organizationName",
"OU": "organizationalUnitName",
"C": "countryName",
"STREET": "streetAddress",
"DC": "domainComponent",
"E": "email",
}
for raw_oid, rfc4514_attribute_name, value in x509_certificate.subject:
if rfc4514_attribute_name not in _short_name_assoc:
continue
peer_info["subject"].append( # type: ignore[attr-defined]
(
(
_short_name_assoc[rfc4514_attribute_name],
value.decode(),
),
)
)
for raw_oid, rfc4514_attribute_name, value in x509_certificate.issuer:
if rfc4514_attribute_name not in _short_name_assoc:
continue
peer_info["issuer"].append( # type: ignore[attr-defined]
(
(
_short_name_assoc[rfc4514_attribute_name],
value.decode(),
),
)
)
for alt_name in x509_certificate.get_subject_alt_names():
decoded_alt_name = alt_name.decode()
in_parenthesis = decoded_alt_name[
decoded_alt_name.index("(") + 1 : decoded_alt_name.index(")")
]
if decoded_alt_name.startswith("DNS"):
peer_info["subjectAltName"].append(("DNS", in_parenthesis)) # type: ignore[attr-defined]
else:
from ....resolver.utils import inet4_ntoa, inet6_ntoa
if len(in_parenthesis) == 11:
ip_address_decoded = inet4_ntoa(
bytes.fromhex(in_parenthesis.replace(":", ""))
)
else:
ip_address_decoded = inet6_ntoa(
bytes.fromhex(in_parenthesis.replace(":", ""))
)
peer_info["subjectAltName"].append(("IP Address", ip_address_decoded)) # type: ignore[attr-defined]
peer_info["OCSP"] = []
for endpoint in x509_certificate.get_ocsp_endpoints():
decoded_endpoint = endpoint.decode()
peer_info["OCSP"].append( # type: ignore[attr-defined]
decoded_endpoint[decoded_endpoint.index("(") + 1 : -1]
)
peer_info["caIssuers"] = []
for endpoint in x509_certificate.get_issuer_endpoints():
decoded_endpoint = endpoint.decode()
peer_info["caIssuers"].append( # type: ignore[attr-defined]
decoded_endpoint[decoded_endpoint.index("(") + 1 : -1]
)
peer_info["crlDistributionPoints"] = []
for endpoint in x509_certificate.get_crl_endpoints():
decoded_endpoint = endpoint.decode()
peer_info["crlDistributionPoints"].append( # type: ignore[attr-defined]
decoded_endpoint[decoded_endpoint.index("(") + 1 : -1]
)
pop_keys = []
for k in peer_info:
if isinstance(peer_info[k], list):
peer_info[k] = tuple(peer_info[k]) # type: ignore[arg-type]
if not peer_info[k]:
pop_keys.append(k)
for k in pop_keys:
peer_info.pop(k)
return peer_info
def cipher(self) -> str | None:
cipher_suite = self._quic.get_cipher()
if cipher_suite is None:
raise ValueError("TLS handshake has not been done yet")
return f"TLS_{cipher_suite.name}"
def reshelve(self, *events: Event) -> None:
for ev in reversed(events):
self._events.appendleft(ev)
    def ping(self) -> None:
        """Send a QUIC PING frame with a random 16-bit identifier."""
        self._quic.send_ping(randint(0, 65535))
def max_frame_size(self) -> int:
if self._max_frame_size is not None:
return self._max_frame_size
raise NotImplementedError