author     cyfraeviolae <cyfraeviolae>  2024-04-03 03:10:44 -0400
committer  cyfraeviolae <cyfraeviolae>  2024-04-03 03:10:44 -0400
commit     6d7ba58f880be618ade07f8ea080fe8c4bf8a896 (patch)
tree       b1c931051ffcebd2bd9d61d98d6233ffa289bbce /venv/lib/python3.11/site-packages/anyio
parent     4f884c9abc32990b4061a1bb6997b4b37e58ea0b (diff)
venv
Diffstat (limited to 'venv/lib/python3.11/site-packages/anyio')
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/__init__.py  76
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/__pycache__/__init__.cpython-311.pyc  bin 0 -> 4298 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/__pycache__/from_thread.cpython-311.pyc  bin 0 -> 23294 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/__pycache__/lowlevel.cpython-311.pyc  bin 0 -> 7661 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/__pycache__/pytest_plugin.cpython-311.pyc  bin 0 -> 9289 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/__pycache__/to_process.cpython-311.pyc  bin 0 -> 13577 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/__pycache__/to_thread.cpython-311.pyc  bin 0 -> 3192 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_backends/__init__.py  0
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/__init__.cpython-311.pyc  bin 0 -> 199 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-311.pyc  bin 0 -> 132800 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_trio.cpython-311.pyc  bin 0 -> 68671 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py  2478
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_backends/_trio.py  1169
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__init__.py  0
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/__init__.cpython-311.pyc  bin 0 -> 195 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_eventloop.cpython-311.pyc  bin 0 -> 6862 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_exceptions.cpython-311.pyc  bin 0 -> 4669 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_fileio.cpython-311.pyc  bin 0 -> 37329 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_resources.cpython-311.pyc  bin 0 -> 1119 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_signals.cpython-311.pyc  bin 0 -> 1344 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_sockets.cpython-311.pyc  bin 0 -> 29147 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_streams.cpython-311.pyc  bin 0 -> 2675 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-311.pyc  bin 0 -> 7118 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_synchronization.cpython-311.pyc  bin 0 -> 32517 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_tasks.cpython-311.pyc  bin 0 -> 7718 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_testing.cpython-311.pyc  bin 0 -> 3662 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_typedattr.cpython-311.pyc  bin 0 -> 4396 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_eventloop.py  163
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py  73
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_fileio.py  645
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_resources.py  18
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_signals.py  25
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_sockets.py  716
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_streams.py  52
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py  140
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py  649
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_tasks.py  158
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_testing.py  74
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py  81
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__init__.py  57
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/__init__.cpython-311.pyc  bin 0 -> 3039 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_eventloop.cpython-311.pyc  bin 0 -> 16291 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_resources.cpython-311.pyc  bin 0 -> 1800 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_sockets.cpython-311.pyc  bin 0 -> 11233 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_streams.cpython-311.pyc  bin 0 -> 9614 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-311.pyc  bin 0 -> 3674 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_tasks.cpython-311.pyc  bin 0 -> 4646 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_testing.cpython-311.pyc  bin 0 -> 3035 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/_eventloop.py  392
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/_resources.py  31
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/_sockets.py  194
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/_streams.py  203
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/_subprocesses.py  79
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/_tasks.py  97
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/abc/_testing.py  66
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/from_thread.py  476
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/lowlevel.py  163
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/py.typed  0
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/pytest_plugin.py  149
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__init__.py  0
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__pycache__/__init__.cpython-311.pyc  bin 0 -> 197 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__pycache__/buffered.cpython-311.pyc  bin 0 -> 6497 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__pycache__/file.cpython-311.pyc  bin 0 -> 8138 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__pycache__/memory.cpython-311.pyc  bin 0 -> 13950 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__pycache__/stapled.cpython-311.pyc  bin 0 -> 8264 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__pycache__/text.cpython-311.pyc  bin 0 -> 9019 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/__pycache__/tls.cpython-311.pyc  bin 0 -> 18117 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/buffered.py  119
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/file.py  148
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/memory.py  283
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/stapled.py  141
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/text.py  147
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/streams/tls.py  338
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/to_process.py  259
-rw-r--r--  venv/lib/python3.11/site-packages/anyio/to_thread.py  69
75 files changed, 9928 insertions, 0 deletions
diff --git a/venv/lib/python3.11/site-packages/anyio/__init__.py b/venv/lib/python3.11/site-packages/anyio/__init__.py
new file mode 100644
index 0000000..7bfe231
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/__init__.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+from typing import Any
+
+from ._core._eventloop import current_time as current_time
+from ._core._eventloop import get_all_backends as get_all_backends
+from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
+from ._core._eventloop import run as run
+from ._core._eventloop import sleep as sleep
+from ._core._eventloop import sleep_forever as sleep_forever
+from ._core._eventloop import sleep_until as sleep_until
+from ._core._exceptions import BrokenResourceError as BrokenResourceError
+from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
+from ._core._exceptions import BusyResourceError as BusyResourceError
+from ._core._exceptions import ClosedResourceError as ClosedResourceError
+from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
+from ._core._exceptions import EndOfStream as EndOfStream
+from ._core._exceptions import IncompleteRead as IncompleteRead
+from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
+from ._core._exceptions import WouldBlock as WouldBlock
+from ._core._fileio import AsyncFile as AsyncFile
+from ._core._fileio import Path as Path
+from ._core._fileio import open_file as open_file
+from ._core._fileio import wrap_file as wrap_file
+from ._core._resources import aclose_forcefully as aclose_forcefully
+from ._core._signals import open_signal_receiver as open_signal_receiver
+from ._core._sockets import connect_tcp as connect_tcp
+from ._core._sockets import connect_unix as connect_unix
+from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
+from ._core._sockets import (
+ create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
+)
+from ._core._sockets import create_tcp_listener as create_tcp_listener
+from ._core._sockets import create_udp_socket as create_udp_socket
+from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
+from ._core._sockets import create_unix_listener as create_unix_listener
+from ._core._sockets import getaddrinfo as getaddrinfo
+from ._core._sockets import getnameinfo as getnameinfo
+from ._core._sockets import wait_socket_readable as wait_socket_readable
+from ._core._sockets import wait_socket_writable as wait_socket_writable
+from ._core._streams import create_memory_object_stream as create_memory_object_stream
+from ._core._subprocesses import open_process as open_process
+from ._core._subprocesses import run_process as run_process
+from ._core._synchronization import CapacityLimiter as CapacityLimiter
+from ._core._synchronization import (
+ CapacityLimiterStatistics as CapacityLimiterStatistics,
+)
+from ._core._synchronization import Condition as Condition
+from ._core._synchronization import ConditionStatistics as ConditionStatistics
+from ._core._synchronization import Event as Event
+from ._core._synchronization import EventStatistics as EventStatistics
+from ._core._synchronization import Lock as Lock
+from ._core._synchronization import LockStatistics as LockStatistics
+from ._core._synchronization import ResourceGuard as ResourceGuard
+from ._core._synchronization import Semaphore as Semaphore
+from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
+from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
+from ._core._tasks import CancelScope as CancelScope
+from ._core._tasks import create_task_group as create_task_group
+from ._core._tasks import current_effective_deadline as current_effective_deadline
+from ._core._tasks import fail_after as fail_after
+from ._core._tasks import move_on_after as move_on_after
+from ._core._testing import TaskInfo as TaskInfo
+from ._core._testing import get_current_task as get_current_task
+from ._core._testing import get_running_tasks as get_running_tasks
+from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
+from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
+from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
+from ._core._typedattr import typed_attribute as typed_attribute
+
+# Re-export imports so they look like they live directly in this package
+key: str
+value: Any
+for key, value in list(locals().items()):
+ if getattr(value, "__module__", "").startswith("anyio."):
+ value.__module__ = __name__
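
Annotation: the loop above rewrites the __module__ attribute of every re-exported object so that names like anyio._core._tasks.CancelScope present themselves as anyio.CancelScope. A minimal usage sketch of this public surface, assuming the vendored anyio is importable; every name used below is imported in the file above:

import anyio

async def main() -> None:
    # fail_after() raises TimeoutError if the block overruns its deadline
    with anyio.fail_after(5):
        async with anyio.create_task_group() as tg:
            tg.start_soon(anyio.sleep, 1)  # both sleeps run concurrently
            tg.start_soon(anyio.sleep, 1)

anyio.run(main)  # selects the asyncio backend by default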
diff --git a/venv/lib/python3.11/site-packages/anyio/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..1b5ce8f
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/__pycache__/__init__.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/__pycache__/from_thread.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/__pycache__/from_thread.cpython-311.pyc
new file mode 100644
index 0000000..9fb43b0
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/__pycache__/from_thread.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/__pycache__/lowlevel.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/__pycache__/lowlevel.cpython-311.pyc
new file mode 100644
index 0000000..7954b70
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/__pycache__/lowlevel.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/__pycache__/pytest_plugin.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/__pycache__/pytest_plugin.cpython-311.pyc
new file mode 100644
index 0000000..d979661
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/__pycache__/pytest_plugin.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/__pycache__/to_process.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/__pycache__/to_process.cpython-311.pyc
new file mode 100644
index 0000000..e02e6bb
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/__pycache__/to_process.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/__pycache__/to_thread.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/__pycache__/to_thread.cpython-311.pyc
new file mode 100644
index 0000000..f50ac62
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/__pycache__/to_thread.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_backends/__init__.py b/venv/lib/python3.11/site-packages/anyio/_backends/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_backends/__init__.py
diff --git a/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..b016e2e
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/__init__.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-311.pyc
new file mode 100644
index 0000000..fd8f88a
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_asyncio.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_trio.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_trio.cpython-311.pyc
new file mode 100644
index 0000000..aa2c0cd
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_backends/__pycache__/_trio.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py b/venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py
new file mode 100644
index 0000000..2699bf8
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_backends/_asyncio.py
@@ -0,0 +1,2478 @@
+from __future__ import annotations
+
+import array
+import asyncio
+import concurrent.futures
+import math
+import socket
+import sys
+import threading
+from asyncio import (
+ AbstractEventLoop,
+ CancelledError,
+ all_tasks,
+ create_task,
+ current_task,
+ get_running_loop,
+ sleep,
+)
+from asyncio.base_events import _run_until_complete_cb # type: ignore[attr-defined]
+from collections import OrderedDict, deque
+from collections.abc import AsyncIterator, Generator, Iterable
+from concurrent.futures import Future
+from contextlib import suppress
+from contextvars import Context, copy_context
+from dataclasses import dataclass
+from functools import partial, wraps
+from inspect import (
+ CORO_RUNNING,
+ CORO_SUSPENDED,
+ getcoroutinestate,
+ iscoroutine,
+)
+from io import IOBase
+from os import PathLike
+from queue import Queue
+from signal import Signals
+from socket import AddressFamily, SocketKind
+from threading import Thread
+from types import TracebackType
+from typing import (
+ IO,
+ Any,
+ AsyncGenerator,
+ Awaitable,
+ Callable,
+ Collection,
+ ContextManager,
+ Coroutine,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ TypeVar,
+ cast,
+)
+from weakref import WeakKeyDictionary
+
+import sniffio
+
+from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
+from .._core._eventloop import claim_worker_thread, threadlocals
+from .._core._exceptions import (
+ BrokenResourceError,
+ BusyResourceError,
+ ClosedResourceError,
+ EndOfStream,
+ WouldBlock,
+)
+from .._core._sockets import convert_ipv6_sockaddr
+from .._core._streams import create_memory_object_stream
+from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
+from .._core._synchronization import Event as BaseEvent
+from .._core._synchronization import ResourceGuard
+from .._core._tasks import CancelScope as BaseCancelScope
+from ..abc import (
+ AsyncBackend,
+ IPSockAddrType,
+ SocketListener,
+ UDPPacketType,
+ UNIXDatagramPacketType,
+)
+from ..lowlevel import RunVar
+from ..streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
+
+if sys.version_info >= (3, 10):
+ from typing import ParamSpec
+else:
+ from typing_extensions import ParamSpec
+
+if sys.version_info >= (3, 11):
+ from asyncio import Runner
+ from typing import TypeVarTuple, Unpack
+else:
+ import contextvars
+ import enum
+ import signal
+ from asyncio import coroutines, events, exceptions, tasks
+
+ from exceptiongroup import BaseExceptionGroup
+ from typing_extensions import TypeVarTuple, Unpack
+
+ class _State(enum.Enum):
+ CREATED = "created"
+ INITIALIZED = "initialized"
+ CLOSED = "closed"
+
+ class Runner:
+ # Copied from CPython 3.11
+ def __init__(
+ self,
+ *,
+ debug: bool | None = None,
+ loop_factory: Callable[[], AbstractEventLoop] | None = None,
+ ):
+ self._state = _State.CREATED
+ self._debug = debug
+ self._loop_factory = loop_factory
+ self._loop: AbstractEventLoop | None = None
+ self._context = None
+ self._interrupt_count = 0
+ self._set_event_loop = False
+
+ def __enter__(self) -> Runner:
+ self._lazy_init()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException],
+ exc_val: BaseException,
+ exc_tb: TracebackType,
+ ) -> None:
+ self.close()
+
+ def close(self) -> None:
+ """Shutdown and close event loop."""
+ if self._state is not _State.INITIALIZED:
+ return
+ try:
+ loop = self._loop
+ _cancel_all_tasks(loop)
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ if hasattr(loop, "shutdown_default_executor"):
+ loop.run_until_complete(loop.shutdown_default_executor())
+ else:
+ loop.run_until_complete(_shutdown_default_executor(loop))
+ finally:
+ if self._set_event_loop:
+ events.set_event_loop(None)
+ loop.close()
+ self._loop = None
+ self._state = _State.CLOSED
+
+ def get_loop(self) -> AbstractEventLoop:
+ """Return embedded event loop."""
+ self._lazy_init()
+ return self._loop
+
+        def run(self, coro: Coroutine[Any, Any, T_Retval], *, context=None) -> T_Retval:
+ """Run a coroutine inside the embedded event loop."""
+ if not coroutines.iscoroutine(coro):
+ raise ValueError(f"a coroutine was expected, got {coro!r}")
+
+ if events._get_running_loop() is not None:
+ # fail fast with short traceback
+ raise RuntimeError(
+ "Runner.run() cannot be called from a running event loop"
+ )
+
+ self._lazy_init()
+
+ if context is None:
+ context = self._context
+ task = context.run(self._loop.create_task, coro)
+
+ if (
+ threading.current_thread() is threading.main_thread()
+ and signal.getsignal(signal.SIGINT) is signal.default_int_handler
+ ):
+ sigint_handler = partial(self._on_sigint, main_task=task)
+ try:
+ signal.signal(signal.SIGINT, sigint_handler)
+ except ValueError:
+ # `signal.signal` may throw if `threading.main_thread` does
+ # not support signals (e.g. embedded interpreter with signals
+ # not registered - see gh-91880)
+ sigint_handler = None
+ else:
+ sigint_handler = None
+
+ self._interrupt_count = 0
+ try:
+ return self._loop.run_until_complete(task)
+ except exceptions.CancelledError:
+ if self._interrupt_count > 0:
+ uncancel = getattr(task, "uncancel", None)
+ if uncancel is not None and uncancel() == 0:
+ raise KeyboardInterrupt()
+ raise # CancelledError
+ finally:
+ if (
+ sigint_handler is not None
+ and signal.getsignal(signal.SIGINT) is sigint_handler
+ ):
+ signal.signal(signal.SIGINT, signal.default_int_handler)
+
+ def _lazy_init(self) -> None:
+ if self._state is _State.CLOSED:
+ raise RuntimeError("Runner is closed")
+ if self._state is _State.INITIALIZED:
+ return
+ if self._loop_factory is None:
+ self._loop = events.new_event_loop()
+ if not self._set_event_loop:
+ # Call set_event_loop only once to avoid calling
+ # attach_loop multiple times on child watchers
+ events.set_event_loop(self._loop)
+ self._set_event_loop = True
+ else:
+ self._loop = self._loop_factory()
+ if self._debug is not None:
+ self._loop.set_debug(self._debug)
+ self._context = contextvars.copy_context()
+ self._state = _State.INITIALIZED
+
+ def _on_sigint(self, signum, frame, main_task: asyncio.Task) -> None:
+ self._interrupt_count += 1
+ if self._interrupt_count == 1 and not main_task.done():
+ main_task.cancel()
+                # wake up the loop if it is blocked by select() with a long timeout
+ self._loop.call_soon_threadsafe(lambda: None)
+ return
+ raise KeyboardInterrupt()
+
+ def _cancel_all_tasks(loop: AbstractEventLoop) -> None:
+ to_cancel = tasks.all_tasks(loop)
+ if not to_cancel:
+ return
+
+ for task in to_cancel:
+ task.cancel()
+
+ loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
+
+ for task in to_cancel:
+ if task.cancelled():
+ continue
+ if task.exception() is not None:
+ loop.call_exception_handler(
+ {
+ "message": "unhandled exception during asyncio.run() shutdown",
+ "exception": task.exception(),
+ "task": task,
+ }
+ )
+
+ async def _shutdown_default_executor(loop: AbstractEventLoop) -> None:
+ """Schedule the shutdown of the default executor."""
+
+ def _do_shutdown(future: asyncio.futures.Future) -> None:
+ try:
+ loop._default_executor.shutdown(wait=True) # type: ignore[attr-defined]
+ loop.call_soon_threadsafe(future.set_result, None)
+ except Exception as ex:
+ loop.call_soon_threadsafe(future.set_exception, ex)
+
+ loop._executor_shutdown_called = True
+ if loop._default_executor is None:
+ return
+ future = loop.create_future()
+ thread = threading.Thread(target=_do_shutdown, args=(future,))
+ thread.start()
+ try:
+ await future
+ finally:
+ thread.join()
+
+
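
Annotation: the Runner fallback above mirrors the CPython 3.11 lifecycle: _lazy_init() creates the loop, run() drives one coroutine to completion, and close() cancels leftover tasks and shuts down async generators and the default executor. A hedged sketch of that lifecycle using only the methods defined above (on 3.11+ the same name refers to the stdlib asyncio.Runner):

async def hello() -> str:
    return "hi"

with Runner(debug=True) as runner:  # __enter__() calls _lazy_init()
    result = runner.run(hello())    # run_until_complete() on the embedded loop
    loop = runner.get_loop()        # the same embedded event loop
# __exit__() calls close(): cancel leftovers, shut down asyncgens and executor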
+T_Retval = TypeVar("T_Retval")
+T_contra = TypeVar("T_contra", contravariant=True)
+PosArgsT = TypeVarTuple("PosArgsT")
+P = ParamSpec("P")
+
+_root_task: RunVar[asyncio.Task | None] = RunVar("_root_task")
+
+
+def find_root_task() -> asyncio.Task:
+ root_task = _root_task.get(None)
+ if root_task is not None and not root_task.done():
+ return root_task
+
+ # Look for a task that has been started via run_until_complete()
+ for task in all_tasks():
+ if task._callbacks and not task.done():
+ callbacks = [cb for cb, context in task._callbacks]
+ for cb in callbacks:
+ if (
+ cb is _run_until_complete_cb
+ or getattr(cb, "__module__", None) == "uvloop.loop"
+ ):
+ _root_task.set(task)
+ return task
+
+ # Look up the topmost task in the AnyIO task tree, if possible
+ task = cast(asyncio.Task, current_task())
+ state = _task_states.get(task)
+ if state:
+ cancel_scope = state.cancel_scope
+ while cancel_scope and cancel_scope._parent_scope is not None:
+ cancel_scope = cancel_scope._parent_scope
+
+ if cancel_scope is not None:
+ return cast(asyncio.Task, cancel_scope._host_task)
+
+ return task
+
+
+def get_callable_name(func: Callable) -> str:
+ module = getattr(func, "__module__", None)
+ qualname = getattr(func, "__qualname__", None)
+ return ".".join([x for x in (module, qualname) if x])
+
+
+#
+# Event loop
+#
+
+_run_vars: WeakKeyDictionary[asyncio.AbstractEventLoop, Any] = WeakKeyDictionary()
+
+
+def _task_started(task: asyncio.Task) -> bool:
+ """Return ``True`` if the task has been started and has not finished."""
+ try:
+ return getcoroutinestate(task.get_coro()) in (CORO_RUNNING, CORO_SUSPENDED)
+ except AttributeError:
+        # task coro is async_generator_asend https://bugs.python.org/issue37771
+ raise Exception(f"Cannot determine if task {task} has started or not") from None
+
+
+#
+# Timeouts and cancellation
+#
+
+
+class CancelScope(BaseCancelScope):
+ def __new__(
+ cls, *, deadline: float = math.inf, shield: bool = False
+ ) -> CancelScope:
+ return object.__new__(cls)
+
+ def __init__(self, deadline: float = math.inf, shield: bool = False):
+ self._deadline = deadline
+ self._shield = shield
+ self._parent_scope: CancelScope | None = None
+ self._child_scopes: set[CancelScope] = set()
+ self._cancel_called = False
+ self._cancelled_caught = False
+ self._active = False
+ self._timeout_handle: asyncio.TimerHandle | None = None
+ self._cancel_handle: asyncio.Handle | None = None
+ self._tasks: set[asyncio.Task] = set()
+ self._host_task: asyncio.Task | None = None
+ self._cancel_calls: int = 0
+ self._cancelling: int | None = None
+
+ def __enter__(self) -> CancelScope:
+ if self._active:
+ raise RuntimeError(
+ "Each CancelScope may only be used for a single 'with' block"
+ )
+
+ self._host_task = host_task = cast(asyncio.Task, current_task())
+ self._tasks.add(host_task)
+ try:
+ task_state = _task_states[host_task]
+ except KeyError:
+ task_state = TaskState(None, self)
+ _task_states[host_task] = task_state
+ else:
+ self._parent_scope = task_state.cancel_scope
+ task_state.cancel_scope = self
+ if self._parent_scope is not None:
+ self._parent_scope._child_scopes.add(self)
+ self._parent_scope._tasks.remove(host_task)
+
+ self._timeout()
+ self._active = True
+ if sys.version_info >= (3, 11):
+ self._cancelling = self._host_task.cancelling()
+
+ # Start cancelling the host task if the scope was cancelled before entering
+ if self._cancel_called:
+ self._deliver_cancellation(self)
+
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ if not self._active:
+ raise RuntimeError("This cancel scope is not active")
+ if current_task() is not self._host_task:
+ raise RuntimeError(
+ "Attempted to exit cancel scope in a different task than it was "
+ "entered in"
+ )
+
+ assert self._host_task is not None
+ host_task_state = _task_states.get(self._host_task)
+ if host_task_state is None or host_task_state.cancel_scope is not self:
+ raise RuntimeError(
+ "Attempted to exit a cancel scope that isn't the current tasks's "
+ "current cancel scope"
+ )
+
+ self._active = False
+ if self._timeout_handle:
+ self._timeout_handle.cancel()
+ self._timeout_handle = None
+
+ self._tasks.remove(self._host_task)
+ if self._parent_scope is not None:
+ self._parent_scope._child_scopes.remove(self)
+ self._parent_scope._tasks.add(self._host_task)
+
+ host_task_state.cancel_scope = self._parent_scope
+
+ # Restart the cancellation effort in the closest directly cancelled parent
+ # scope if this one was shielded
+ self._restart_cancellation_in_parent()
+
+ if self._cancel_called and exc_val is not None:
+ for exc in iterate_exceptions(exc_val):
+ if isinstance(exc, CancelledError):
+ self._cancelled_caught = self._uncancel(exc)
+ if self._cancelled_caught:
+ break
+
+ return self._cancelled_caught
+
+ return None
+
+ def _uncancel(self, cancelled_exc: CancelledError) -> bool:
+ if sys.version_info < (3, 9) or self._host_task is None:
+ self._cancel_calls = 0
+ return True
+
+ # Undo all cancellations done by this scope
+ if self._cancelling is not None:
+ while self._cancel_calls:
+ self._cancel_calls -= 1
+ if self._host_task.uncancel() <= self._cancelling:
+ return True
+
+ self._cancel_calls = 0
+ return f"Cancelled by cancel scope {id(self):x}" in cancelled_exc.args
+
+ def _timeout(self) -> None:
+ if self._deadline != math.inf:
+ loop = get_running_loop()
+ if loop.time() >= self._deadline:
+ self.cancel()
+ else:
+ self._timeout_handle = loop.call_at(self._deadline, self._timeout)
+
+ def _deliver_cancellation(self, origin: CancelScope) -> bool:
+ """
+ Deliver cancellation to directly contained tasks and nested cancel scopes.
+
+ Schedule another run at the end if we still have tasks eligible for
+ cancellation.
+
+ :param origin: the cancel scope that originated the cancellation
+ :return: ``True`` if the delivery needs to be retried on the next cycle
+
+ """
+ should_retry = False
+ current = current_task()
+ for task in self._tasks:
+ if task._must_cancel: # type: ignore[attr-defined]
+ continue
+
+ # The task is eligible for cancellation if it has started
+ should_retry = True
+ if task is not current and (task is self._host_task or _task_started(task)):
+ waiter = task._fut_waiter # type: ignore[attr-defined]
+ if not isinstance(waiter, asyncio.Future) or not waiter.done():
+ self._cancel_calls += 1
+ if sys.version_info >= (3, 9):
+ task.cancel(f"Cancelled by cancel scope {id(origin):x}")
+ else:
+ task.cancel()
+
+ # Deliver cancellation to child scopes that aren't shielded or running their own
+ # cancellation callbacks
+ for scope in self._child_scopes:
+ if not scope._shield and not scope.cancel_called:
+ should_retry = scope._deliver_cancellation(origin) or should_retry
+
+ # Schedule another callback if there are still tasks left
+ if origin is self:
+ if should_retry:
+ self._cancel_handle = get_running_loop().call_soon(
+ self._deliver_cancellation, origin
+ )
+ else:
+ self._cancel_handle = None
+
+ return should_retry
+
+ def _restart_cancellation_in_parent(self) -> None:
+ """
+ Restart the cancellation effort in the closest directly cancelled parent scope.
+
+ """
+ scope = self._parent_scope
+ while scope is not None:
+ if scope._cancel_called:
+ if scope._cancel_handle is None:
+ scope._deliver_cancellation(scope)
+
+ break
+
+ # No point in looking beyond any shielded scope
+ if scope._shield:
+ break
+
+ scope = scope._parent_scope
+
+ def _parent_cancelled(self) -> bool:
+ # Check whether any parent has been cancelled
+ cancel_scope = self._parent_scope
+ while cancel_scope is not None and not cancel_scope._shield:
+ if cancel_scope._cancel_called:
+ return True
+ else:
+ cancel_scope = cancel_scope._parent_scope
+
+ return False
+
+ def cancel(self) -> None:
+ if not self._cancel_called:
+ if self._timeout_handle:
+ self._timeout_handle.cancel()
+ self._timeout_handle = None
+
+ self._cancel_called = True
+ if self._host_task is not None:
+ self._deliver_cancellation(self)
+
+ @property
+ def deadline(self) -> float:
+ return self._deadline
+
+ @deadline.setter
+ def deadline(self, value: float) -> None:
+ self._deadline = float(value)
+ if self._timeout_handle is not None:
+ self._timeout_handle.cancel()
+ self._timeout_handle = None
+
+ if self._active and not self._cancel_called:
+ self._timeout()
+
+ @property
+ def cancel_called(self) -> bool:
+ return self._cancel_called
+
+ @property
+ def cancelled_caught(self) -> bool:
+ return self._cancelled_caught
+
+ @property
+ def shield(self) -> bool:
+ return self._shield
+
+ @shield.setter
+ def shield(self, value: bool) -> None:
+ if self._shield != value:
+ self._shield = value
+ if not value:
+ self._restart_cancellation_in_parent()
+
+
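
Annotation: CancelScope above implements absolute-deadline cancellation: _timeout() schedules cancel() at the deadline via loop.call_at(), and __exit__() uses _uncancel() to decide whether a caught CancelledError belongs to this scope. A small sketch of the resulting semantics through the public anyio.CancelScope re-export; the 0.1 second deadline is purely illustrative:

import anyio

async def demo() -> None:
    with anyio.CancelScope(deadline=anyio.current_time() + 0.1) as scope:
        await anyio.sleep(1)       # cancelled when the deadline fires
    assert scope.cancel_called     # _timeout() invoked cancel()
    assert scope.cancelled_caught  # __exit__() absorbed the CancelledError

anyio.run(demo)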
+#
+# Task states
+#
+
+
+class TaskState:
+ """
+ Encapsulates auxiliary task information that cannot be added to the Task instance
+ itself because there are no guarantees about its implementation.
+ """
+
+ __slots__ = "parent_id", "cancel_scope"
+
+ def __init__(self, parent_id: int | None, cancel_scope: CancelScope | None):
+ self.parent_id = parent_id
+ self.cancel_scope = cancel_scope
+
+
+_task_states = WeakKeyDictionary() # type: WeakKeyDictionary[asyncio.Task, TaskState]
+
+
+#
+# Task groups
+#
+
+
+class _AsyncioTaskStatus(abc.TaskStatus):
+ def __init__(self, future: asyncio.Future, parent_id: int):
+ self._future = future
+ self._parent_id = parent_id
+
+ def started(self, value: T_contra | None = None) -> None:
+ try:
+ self._future.set_result(value)
+ except asyncio.InvalidStateError:
+ raise RuntimeError(
+ "called 'started' twice on the same task status"
+ ) from None
+
+ task = cast(asyncio.Task, current_task())
+ _task_states[task].parent_id = self._parent_id
+
+
+def iterate_exceptions(
+ exception: BaseException,
+) -> Generator[BaseException, None, None]:
+ if isinstance(exception, BaseExceptionGroup):
+ for exc in exception.exceptions:
+ yield from iterate_exceptions(exc)
+ else:
+ yield exception
+
+
+class TaskGroup(abc.TaskGroup):
+ def __init__(self) -> None:
+ self.cancel_scope: CancelScope = CancelScope()
+ self._active = False
+ self._exceptions: list[BaseException] = []
+ self._tasks: set[asyncio.Task] = set()
+
+ async def __aenter__(self) -> TaskGroup:
+ self.cancel_scope.__enter__()
+ self._active = True
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ ignore_exception = self.cancel_scope.__exit__(exc_type, exc_val, exc_tb)
+ if exc_val is not None:
+ self.cancel_scope.cancel()
+ if not isinstance(exc_val, CancelledError):
+ self._exceptions.append(exc_val)
+
+ cancelled_exc_while_waiting_tasks: CancelledError | None = None
+ while self._tasks:
+ try:
+ await asyncio.wait(self._tasks)
+ except CancelledError as exc:
+ # This task was cancelled natively; reraise the CancelledError later
+ # unless this task was already interrupted by another exception
+ self.cancel_scope.cancel()
+ if cancelled_exc_while_waiting_tasks is None:
+ cancelled_exc_while_waiting_tasks = exc
+
+ self._active = False
+ if self._exceptions:
+ raise BaseExceptionGroup(
+ "unhandled errors in a TaskGroup", self._exceptions
+ )
+
+ # Raise the CancelledError received while waiting for child tasks to exit,
+ # unless the context manager itself was previously exited with another
+ # exception, or if any of the child tasks raised an exception other than
+ # CancelledError
+ if cancelled_exc_while_waiting_tasks:
+ if exc_val is None or ignore_exception:
+ raise cancelled_exc_while_waiting_tasks
+
+ return ignore_exception
+
+ def _spawn(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
+ args: tuple[Unpack[PosArgsT]],
+ name: object,
+ task_status_future: asyncio.Future | None = None,
+ ) -> asyncio.Task:
+ def task_done(_task: asyncio.Task) -> None:
+ task_state = _task_states[_task]
+ assert task_state.cancel_scope is not None
+ assert _task in task_state.cancel_scope._tasks
+ task_state.cancel_scope._tasks.remove(_task)
+ self._tasks.remove(task)
+ del _task_states[_task]
+
+ try:
+ exc = _task.exception()
+ except CancelledError as e:
+ while isinstance(e.__context__, CancelledError):
+ e = e.__context__
+
+ exc = e
+
+ if exc is not None:
+ if task_status_future is None or task_status_future.done():
+ if not isinstance(exc, CancelledError):
+ self._exceptions.append(exc)
+
+ if not self.cancel_scope._parent_cancelled():
+ self.cancel_scope.cancel()
+ else:
+ task_status_future.set_exception(exc)
+ elif task_status_future is not None and not task_status_future.done():
+ task_status_future.set_exception(
+ RuntimeError("Child exited without calling task_status.started()")
+ )
+
+ if not self._active:
+ raise RuntimeError(
+ "This task group is not active; no new tasks can be started."
+ )
+
+ kwargs = {}
+ if task_status_future:
+ parent_id = id(current_task())
+ kwargs["task_status"] = _AsyncioTaskStatus(
+ task_status_future, id(self.cancel_scope._host_task)
+ )
+ else:
+ parent_id = id(self.cancel_scope._host_task)
+
+ coro = func(*args, **kwargs)
+ if not iscoroutine(coro):
+ prefix = f"{func.__module__}." if hasattr(func, "__module__") else ""
+ raise TypeError(
+ f"Expected {prefix}{func.__qualname__}() to return a coroutine, but "
+ f"the return value ({coro!r}) is not a coroutine object"
+ )
+
+ name = get_callable_name(func) if name is None else str(name)
+ task = create_task(coro, name=name)
+ task.add_done_callback(task_done)
+
+ # Make the spawned task inherit the task group's cancel scope
+ _task_states[task] = TaskState(
+ parent_id=parent_id, cancel_scope=self.cancel_scope
+ )
+ self.cancel_scope._tasks.add(task)
+ self._tasks.add(task)
+ return task
+
+ def start_soon(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
+ *args: Unpack[PosArgsT],
+ name: object = None,
+ ) -> None:
+ self._spawn(func, args, name)
+
+ async def start(
+ self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
+ ) -> Any:
+ future: asyncio.Future = asyncio.Future()
+ task = self._spawn(func, args, name, future)
+
+ # If the task raises an exception after sending a start value without a switch
+ # point between, the task group is cancelled and this method never proceeds to
+ # process the completed future. That's why we have to have a shielded cancel
+ # scope here.
+ try:
+ return await future
+ except CancelledError:
+ # Cancel the task and wait for it to exit before returning
+ task.cancel()
+ with CancelScope(shield=True), suppress(CancelledError):
+ await task
+
+ raise
+
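
Annotation: start() above parks the caller on a future that the child resolves through _AsyncioTaskStatus.started(), while start_soon() skips that handshake. A brief sketch of the handshake via the public API; serve() and the value 42 are illustrative only:

import anyio
from anyio.abc import TaskStatus

async def serve(*, task_status: TaskStatus = anyio.TASK_STATUS_IGNORED) -> None:
    task_status.started(42)  # resolves the future created in start()
    await anyio.sleep(10)    # keeps running until cancelled

async def main() -> None:
    async with anyio.create_task_group() as tg:
        value = await tg.start(serve)  # waits for started()
        assert value == 42
        tg.cancel_scope.cancel()       # stop the remaining sleep

anyio.run(main)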
+
+#
+# Threads
+#
+
+_Retval_Queue_Type = Tuple[Optional[T_Retval], Optional[BaseException]]
+
+
+class WorkerThread(Thread):
+ MAX_IDLE_TIME = 10 # seconds
+
+ def __init__(
+ self,
+ root_task: asyncio.Task,
+ workers: set[WorkerThread],
+ idle_workers: deque[WorkerThread],
+ ):
+ super().__init__(name="AnyIO worker thread")
+ self.root_task = root_task
+ self.workers = workers
+ self.idle_workers = idle_workers
+ self.loop = root_task._loop
+ self.queue: Queue[
+ tuple[Context, Callable, tuple, asyncio.Future, CancelScope] | None
+ ] = Queue(2)
+ self.idle_since = AsyncIOBackend.current_time()
+ self.stopping = False
+
+ def _report_result(
+ self, future: asyncio.Future, result: Any, exc: BaseException | None
+ ) -> None:
+ self.idle_since = AsyncIOBackend.current_time()
+ if not self.stopping:
+ self.idle_workers.append(self)
+
+ if not future.cancelled():
+ if exc is not None:
+ if isinstance(exc, StopIteration):
+ new_exc = RuntimeError("coroutine raised StopIteration")
+ new_exc.__cause__ = exc
+ exc = new_exc
+
+ future.set_exception(exc)
+ else:
+ future.set_result(result)
+
+ def run(self) -> None:
+ with claim_worker_thread(AsyncIOBackend, self.loop):
+ while True:
+ item = self.queue.get()
+ if item is None:
+ # Shutdown command received
+ return
+
+ context, func, args, future, cancel_scope = item
+ if not future.cancelled():
+ result = None
+ exception: BaseException | None = None
+ threadlocals.current_cancel_scope = cancel_scope
+ try:
+ result = context.run(func, *args)
+ except BaseException as exc:
+ exception = exc
+ finally:
+ del threadlocals.current_cancel_scope
+
+ if not self.loop.is_closed():
+ self.loop.call_soon_threadsafe(
+ self._report_result, future, result, exception
+ )
+
+ self.queue.task_done()
+
+ def stop(self, f: asyncio.Task | None = None) -> None:
+ self.stopping = True
+ self.queue.put_nowait(None)
+ self.workers.discard(self)
+ try:
+ self.idle_workers.remove(self)
+ except ValueError:
+ pass
+
+
+_threadpool_idle_workers: RunVar[deque[WorkerThread]] = RunVar(
+ "_threadpool_idle_workers"
+)
+_threadpool_workers: RunVar[set[WorkerThread]] = RunVar("_threadpool_workers")
+
+
+class BlockingPortal(abc.BlockingPortal):
+ def __new__(cls) -> BlockingPortal:
+ return object.__new__(cls)
+
+ def __init__(self) -> None:
+ super().__init__()
+ self._loop = get_running_loop()
+
+ def _spawn_task_from_thread(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ kwargs: dict[str, Any],
+ name: object,
+ future: Future[T_Retval],
+ ) -> None:
+ AsyncIOBackend.run_sync_from_thread(
+ partial(self._task_group.start_soon, name=name),
+ (self._call_func, func, args, kwargs, future),
+ self._loop,
+ )
+
+
+#
+# Subprocesses
+#
+
+
+@dataclass(eq=False)
+class StreamReaderWrapper(abc.ByteReceiveStream):
+ _stream: asyncio.StreamReader
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ data = await self._stream.read(max_bytes)
+ if data:
+ return data
+ else:
+ raise EndOfStream
+
+ async def aclose(self) -> None:
+ self._stream.feed_eof()
+ await AsyncIOBackend.checkpoint()
+
+
+@dataclass(eq=False)
+class StreamWriterWrapper(abc.ByteSendStream):
+ _stream: asyncio.StreamWriter
+
+ async def send(self, item: bytes) -> None:
+ self._stream.write(item)
+ await self._stream.drain()
+
+ async def aclose(self) -> None:
+ self._stream.close()
+ await AsyncIOBackend.checkpoint()
+
+
+@dataclass(eq=False)
+class Process(abc.Process):
+ _process: asyncio.subprocess.Process
+ _stdin: StreamWriterWrapper | None
+ _stdout: StreamReaderWrapper | None
+ _stderr: StreamReaderWrapper | None
+
+ async def aclose(self) -> None:
+ with CancelScope(shield=True):
+ if self._stdin:
+ await self._stdin.aclose()
+ if self._stdout:
+ await self._stdout.aclose()
+ if self._stderr:
+ await self._stderr.aclose()
+
+ try:
+ await self.wait()
+ except BaseException:
+ self.kill()
+ with CancelScope(shield=True):
+ await self.wait()
+
+ raise
+
+ async def wait(self) -> int:
+ return await self._process.wait()
+
+ def terminate(self) -> None:
+ self._process.terminate()
+
+ def kill(self) -> None:
+ self._process.kill()
+
+ def send_signal(self, signal: int) -> None:
+ self._process.send_signal(signal)
+
+ @property
+ def pid(self) -> int:
+ return self._process.pid
+
+ @property
+ def returncode(self) -> int | None:
+ return self._process.returncode
+
+ @property
+ def stdin(self) -> abc.ByteSendStream | None:
+ return self._stdin
+
+ @property
+ def stdout(self) -> abc.ByteReceiveStream | None:
+ return self._stdout
+
+ @property
+ def stderr(self) -> abc.ByteReceiveStream | None:
+ return self._stderr
+
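
Annotation: the wrappers above adapt asyncio's subprocess streams to anyio's byte-stream ABCs and back the open_process()/run_process() helpers re-exported in __init__.py. A hedged sketch, assuming a POSIX environment where echo and cat exist:

import anyio

async def main() -> None:
    result = await anyio.run_process(["echo", "hello"])  # waits, captures output
    print(result.stdout)  # b'hello\n'

    async with await anyio.open_process(["cat"]) as process:
        await process.stdin.send(b"hi")        # StreamWriterWrapper.send()
        await process.stdin.aclose()
        print(await process.stdout.receive())  # StreamReaderWrapper.receive()

anyio.run(main)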
+
+def _forcibly_shutdown_process_pool_on_exit(
+ workers: set[Process], _task: object
+) -> None:
+ """
+ Forcibly shuts down worker processes belonging to this event loop."""
+ child_watcher: asyncio.AbstractChildWatcher | None = None
+ if sys.version_info < (3, 12):
+ try:
+ child_watcher = asyncio.get_event_loop_policy().get_child_watcher()
+ except NotImplementedError:
+ pass
+
+ # Close as much as possible (w/o async/await) to avoid warnings
+ for process in workers:
+ if process.returncode is None:
+ continue
+
+ process._stdin._stream._transport.close() # type: ignore[union-attr]
+ process._stdout._stream._transport.close() # type: ignore[union-attr]
+ process._stderr._stream._transport.close() # type: ignore[union-attr]
+ process.kill()
+ if child_watcher:
+ child_watcher.remove_child_handler(process.pid)
+
+
+async def _shutdown_process_pool_on_exit(workers: set[abc.Process]) -> None:
+ """
+ Shuts down worker processes belonging to this event loop.
+
+ NOTE: this only works when the event loop was started using asyncio.run() or
+ anyio.run().
+
+ """
+ process: abc.Process
+ try:
+ await sleep(math.inf)
+ except asyncio.CancelledError:
+ for process in workers:
+ if process.returncode is None:
+ process.kill()
+
+ for process in workers:
+ await process.aclose()
+
+
+#
+# Sockets and networking
+#
+
+
+class StreamProtocol(asyncio.Protocol):
+ read_queue: deque[bytes]
+ read_event: asyncio.Event
+ write_event: asyncio.Event
+ exception: Exception | None = None
+
+ def connection_made(self, transport: asyncio.BaseTransport) -> None:
+ self.read_queue = deque()
+ self.read_event = asyncio.Event()
+ self.write_event = asyncio.Event()
+ self.write_event.set()
+ cast(asyncio.Transport, transport).set_write_buffer_limits(0)
+
+ def connection_lost(self, exc: Exception | None) -> None:
+ if exc:
+ self.exception = BrokenResourceError()
+ self.exception.__cause__ = exc
+
+ self.read_event.set()
+ self.write_event.set()
+
+ def data_received(self, data: bytes) -> None:
+ self.read_queue.append(data)
+ self.read_event.set()
+
+ def eof_received(self) -> bool | None:
+ self.read_event.set()
+ return True
+
+ def pause_writing(self) -> None:
+ self.write_event = asyncio.Event()
+
+ def resume_writing(self) -> None:
+ self.write_event.set()
+
+
+class DatagramProtocol(asyncio.DatagramProtocol):
+ read_queue: deque[tuple[bytes, IPSockAddrType]]
+ read_event: asyncio.Event
+ write_event: asyncio.Event
+ exception: Exception | None = None
+
+ def connection_made(self, transport: asyncio.BaseTransport) -> None:
+ self.read_queue = deque(maxlen=100) # arbitrary value
+ self.read_event = asyncio.Event()
+ self.write_event = asyncio.Event()
+ self.write_event.set()
+
+ def connection_lost(self, exc: Exception | None) -> None:
+ self.read_event.set()
+ self.write_event.set()
+
+ def datagram_received(self, data: bytes, addr: IPSockAddrType) -> None:
+ addr = convert_ipv6_sockaddr(addr)
+ self.read_queue.append((data, addr))
+ self.read_event.set()
+
+ def error_received(self, exc: Exception) -> None:
+ self.exception = exc
+
+ def pause_writing(self) -> None:
+ self.write_event.clear()
+
+ def resume_writing(self) -> None:
+ self.write_event.set()
+
+
+class SocketStream(abc.SocketStream):
+ def __init__(self, transport: asyncio.Transport, protocol: StreamProtocol):
+ self._transport = transport
+ self._protocol = protocol
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+ self._closed = False
+
+ @property
+ def _raw_socket(self) -> socket.socket:
+ return self._transport.get_extra_info("socket")
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ with self._receive_guard:
+ await AsyncIOBackend.checkpoint()
+
+ if (
+ not self._protocol.read_event.is_set()
+ and not self._transport.is_closing()
+ ):
+ self._transport.resume_reading()
+ await self._protocol.read_event.wait()
+ self._transport.pause_reading()
+
+ try:
+ chunk = self._protocol.read_queue.popleft()
+ except IndexError:
+ if self._closed:
+ raise ClosedResourceError from None
+ elif self._protocol.exception:
+ raise self._protocol.exception from None
+ else:
+ raise EndOfStream from None
+
+ if len(chunk) > max_bytes:
+ # Split the oversized chunk
+ chunk, leftover = chunk[:max_bytes], chunk[max_bytes:]
+ self._protocol.read_queue.appendleft(leftover)
+
+ # If the read queue is empty, clear the flag so that the next call will
+ # block until data is available
+ if not self._protocol.read_queue:
+ self._protocol.read_event.clear()
+
+ return chunk
+
+ async def send(self, item: bytes) -> None:
+ with self._send_guard:
+ await AsyncIOBackend.checkpoint()
+
+ if self._closed:
+ raise ClosedResourceError
+ elif self._protocol.exception is not None:
+ raise self._protocol.exception
+
+ try:
+ self._transport.write(item)
+ except RuntimeError as exc:
+ if self._transport.is_closing():
+ raise BrokenResourceError from exc
+ else:
+ raise
+
+ await self._protocol.write_event.wait()
+
+ async def send_eof(self) -> None:
+ try:
+ self._transport.write_eof()
+ except OSError:
+ pass
+
+ async def aclose(self) -> None:
+ if not self._transport.is_closing():
+ self._closed = True
+ try:
+ self._transport.write_eof()
+ except OSError:
+ pass
+
+ self._transport.close()
+ await sleep(0)
+ self._transport.abort()
+
+
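
Annotation: SocketStream above is what connect_tcp() returns on this backend: receive() brackets the protocol's read event with resume_reading()/pause_reading(), and send() awaits write_event so pause_writing() backpressure is honored. A small sketch, assuming some server is listening on localhost:8000 (an illustrative address):

import anyio

async def main() -> None:
    async with await anyio.connect_tcp("localhost", 8000) as stream:
        await stream.send(b"ping")     # SocketStream.send()
        print(await stream.receive())  # SocketStream.receive()

anyio.run(main)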
+class _RawSocketMixin:
+ _receive_future: asyncio.Future | None = None
+ _send_future: asyncio.Future | None = None
+ _closing = False
+
+ def __init__(self, raw_socket: socket.socket):
+ self.__raw_socket = raw_socket
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+
+ @property
+ def _raw_socket(self) -> socket.socket:
+ return self.__raw_socket
+
+ def _wait_until_readable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
+ def callback(f: object) -> None:
+ del self._receive_future
+ loop.remove_reader(self.__raw_socket)
+
+ f = self._receive_future = asyncio.Future()
+ loop.add_reader(self.__raw_socket, f.set_result, None)
+ f.add_done_callback(callback)
+ return f
+
+ def _wait_until_writable(self, loop: asyncio.AbstractEventLoop) -> asyncio.Future:
+ def callback(f: object) -> None:
+ del self._send_future
+ loop.remove_writer(self.__raw_socket)
+
+ f = self._send_future = asyncio.Future()
+ loop.add_writer(self.__raw_socket, f.set_result, None)
+ f.add_done_callback(callback)
+ return f
+
+ async def aclose(self) -> None:
+ if not self._closing:
+ self._closing = True
+ if self.__raw_socket.fileno() != -1:
+ self.__raw_socket.close()
+
+ if self._receive_future:
+ self._receive_future.set_result(None)
+ if self._send_future:
+ self._send_future.set_result(None)
+
+
+class UNIXSocketStream(_RawSocketMixin, abc.UNIXSocketStream):
+ async def send_eof(self) -> None:
+ with self._send_guard:
+ self._raw_socket.shutdown(socket.SHUT_WR)
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ loop = get_running_loop()
+ await AsyncIOBackend.checkpoint()
+ with self._receive_guard:
+ while True:
+ try:
+ data = self._raw_socket.recv(max_bytes)
+ except BlockingIOError:
+ await self._wait_until_readable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+ else:
+ if not data:
+ raise EndOfStream
+
+ return data
+
+ async def send(self, item: bytes) -> None:
+ loop = get_running_loop()
+ await AsyncIOBackend.checkpoint()
+ with self._send_guard:
+ view = memoryview(item)
+ while view:
+ try:
+ bytes_sent = self._raw_socket.send(view)
+ except BlockingIOError:
+ await self._wait_until_writable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+ else:
+ view = view[bytes_sent:]
+
+ async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
+ if not isinstance(msglen, int) or msglen < 0:
+ raise ValueError("msglen must be a non-negative integer")
+ if not isinstance(maxfds, int) or maxfds < 1:
+ raise ValueError("maxfds must be a positive integer")
+
+ loop = get_running_loop()
+ fds = array.array("i")
+ await AsyncIOBackend.checkpoint()
+ with self._receive_guard:
+ while True:
+ try:
+ message, ancdata, flags, addr = self._raw_socket.recvmsg(
+ msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
+ )
+ except BlockingIOError:
+ await self._wait_until_readable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+ else:
+ if not message and not ancdata:
+ raise EndOfStream
+
+ break
+
+ for cmsg_level, cmsg_type, cmsg_data in ancdata:
+ if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
+ raise RuntimeError(
+ f"Received unexpected ancillary data; message = {message!r}, "
+ f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
+ )
+
+ fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
+
+ return message, list(fds)
+
+ async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
+ if not message:
+ raise ValueError("message must not be empty")
+ if not fds:
+ raise ValueError("fds must not be empty")
+
+ loop = get_running_loop()
+ filenos: list[int] = []
+ for fd in fds:
+ if isinstance(fd, int):
+ filenos.append(fd)
+ elif isinstance(fd, IOBase):
+ filenos.append(fd.fileno())
+
+ fdarray = array.array("i", filenos)
+ await AsyncIOBackend.checkpoint()
+ with self._send_guard:
+ while True:
+ try:
+ # The ignore can be removed after mypy picks up
+ # https://github.com/python/typeshed/pull/5545
+ self._raw_socket.sendmsg(
+ [message], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fdarray)]
+ )
+ break
+ except BlockingIOError:
+ await self._wait_until_writable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+
+
+class TCPSocketListener(abc.SocketListener):
+ _accept_scope: CancelScope | None = None
+ _closed = False
+
+ def __init__(self, raw_socket: socket.socket):
+ self.__raw_socket = raw_socket
+ self._loop = cast(asyncio.BaseEventLoop, get_running_loop())
+ self._accept_guard = ResourceGuard("accepting connections from")
+
+ @property
+ def _raw_socket(self) -> socket.socket:
+ return self.__raw_socket
+
+ async def accept(self) -> abc.SocketStream:
+ if self._closed:
+ raise ClosedResourceError
+
+ with self._accept_guard:
+ await AsyncIOBackend.checkpoint()
+ with CancelScope() as self._accept_scope:
+ try:
+ client_sock, _addr = await self._loop.sock_accept(self._raw_socket)
+ except asyncio.CancelledError:
+ # Workaround for https://bugs.python.org/issue41317
+ try:
+ self._loop.remove_reader(self._raw_socket)
+ except (ValueError, NotImplementedError):
+ pass
+
+ if self._closed:
+ raise ClosedResourceError from None
+
+ raise
+ finally:
+ self._accept_scope = None
+
+ client_sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ transport, protocol = await self._loop.connect_accepted_socket(
+ StreamProtocol, client_sock
+ )
+ return SocketStream(transport, protocol)
+
+ async def aclose(self) -> None:
+ if self._closed:
+ return
+
+ self._closed = True
+ if self._accept_scope:
+ # Workaround for https://bugs.python.org/issue41317
+ try:
+ self._loop.remove_reader(self._raw_socket)
+ except (ValueError, NotImplementedError):
+ pass
+
+ self._accept_scope.cancel()
+ await sleep(0)
+
+ self._raw_socket.close()
+
+
+class UNIXSocketListener(abc.SocketListener):
+ def __init__(self, raw_socket: socket.socket):
+ self.__raw_socket = raw_socket
+ self._loop = get_running_loop()
+ self._accept_guard = ResourceGuard("accepting connections from")
+ self._closed = False
+
+ async def accept(self) -> abc.SocketStream:
+ await AsyncIOBackend.checkpoint()
+ with self._accept_guard:
+ while True:
+ try:
+ client_sock, _ = self.__raw_socket.accept()
+ client_sock.setblocking(False)
+ return UNIXSocketStream(client_sock)
+ except BlockingIOError:
+ f: asyncio.Future = asyncio.Future()
+ self._loop.add_reader(self.__raw_socket, f.set_result, None)
+ f.add_done_callback(
+ lambda _: self._loop.remove_reader(self.__raw_socket)
+ )
+ await f
+ except OSError as exc:
+ if self._closed:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+
+ async def aclose(self) -> None:
+ self._closed = True
+ self.__raw_socket.close()
+
+ @property
+ def _raw_socket(self) -> socket.socket:
+ return self.__raw_socket
+
+
+class UDPSocket(abc.UDPSocket):
+ def __init__(
+ self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
+ ):
+ self._transport = transport
+ self._protocol = protocol
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+ self._closed = False
+
+ @property
+ def _raw_socket(self) -> socket.socket:
+ return self._transport.get_extra_info("socket")
+
+ async def aclose(self) -> None:
+ if not self._transport.is_closing():
+ self._closed = True
+ self._transport.close()
+
+ async def receive(self) -> tuple[bytes, IPSockAddrType]:
+ with self._receive_guard:
+ await AsyncIOBackend.checkpoint()
+
+ # If the buffer is empty, ask for more data
+ if not self._protocol.read_queue and not self._transport.is_closing():
+ self._protocol.read_event.clear()
+ await self._protocol.read_event.wait()
+
+ try:
+ return self._protocol.read_queue.popleft()
+ except IndexError:
+ if self._closed:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from None
+
+ async def send(self, item: UDPPacketType) -> None:
+ with self._send_guard:
+ await AsyncIOBackend.checkpoint()
+ await self._protocol.write_event.wait()
+ if self._closed:
+ raise ClosedResourceError
+ elif self._transport.is_closing():
+ raise BrokenResourceError
+ else:
+ self._transport.sendto(*item)
+
+
+class ConnectedUDPSocket(abc.ConnectedUDPSocket):
+ def __init__(
+ self, transport: asyncio.DatagramTransport, protocol: DatagramProtocol
+ ):
+ self._transport = transport
+ self._protocol = protocol
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+ self._closed = False
+
+ @property
+ def _raw_socket(self) -> socket.socket:
+ return self._transport.get_extra_info("socket")
+
+ async def aclose(self) -> None:
+ if not self._transport.is_closing():
+ self._closed = True
+ self._transport.close()
+
+ async def receive(self) -> bytes:
+ with self._receive_guard:
+ await AsyncIOBackend.checkpoint()
+
+ # If the buffer is empty, ask for more data
+ if not self._protocol.read_queue and not self._transport.is_closing():
+ self._protocol.read_event.clear()
+ await self._protocol.read_event.wait()
+
+ try:
+ packet = self._protocol.read_queue.popleft()
+ except IndexError:
+ if self._closed:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from None
+
+ return packet[0]
+
+ async def send(self, item: bytes) -> None:
+ with self._send_guard:
+ await AsyncIOBackend.checkpoint()
+ await self._protocol.write_event.wait()
+ if self._closed:
+ raise ClosedResourceError
+ elif self._transport.is_closing():
+ raise BrokenResourceError
+ else:
+ self._transport.sendto(item)
+
+
+class UNIXDatagramSocket(_RawSocketMixin, abc.UNIXDatagramSocket):
+ async def receive(self) -> UNIXDatagramPacketType:
+ loop = get_running_loop()
+ await AsyncIOBackend.checkpoint()
+ with self._receive_guard:
+ while True:
+ try:
+ data = self._raw_socket.recvfrom(65536)
+ except BlockingIOError:
+ await self._wait_until_readable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+ else:
+ return data
+
+ async def send(self, item: UNIXDatagramPacketType) -> None:
+ loop = get_running_loop()
+ await AsyncIOBackend.checkpoint()
+ with self._send_guard:
+ while True:
+ try:
+ self._raw_socket.sendto(*item)
+ except BlockingIOError:
+ await self._wait_until_writable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+ else:
+ return
+
+
+class ConnectedUNIXDatagramSocket(_RawSocketMixin, abc.ConnectedUNIXDatagramSocket):
+ async def receive(self) -> bytes:
+ loop = get_running_loop()
+ await AsyncIOBackend.checkpoint()
+ with self._receive_guard:
+ while True:
+ try:
+ data = self._raw_socket.recv(65536)
+ except BlockingIOError:
+ await self._wait_until_readable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+ else:
+ return data
+
+ async def send(self, item: bytes) -> None:
+ loop = get_running_loop()
+ await AsyncIOBackend.checkpoint()
+ with self._send_guard:
+ while True:
+ try:
+ self._raw_socket.send(item)
+ except BlockingIOError:
+ await self._wait_until_writable(loop)
+ except OSError as exc:
+ if self._closing:
+ raise ClosedResourceError from None
+ else:
+ raise BrokenResourceError from exc
+ else:
+ return
+
+
+_read_events: RunVar[dict[Any, asyncio.Event]] = RunVar("read_events")
+_write_events: RunVar[dict[Any, asyncio.Event]] = RunVar("write_events")
+
+
+#
+# Synchronization
+#
+
+
+class Event(BaseEvent):
+ def __new__(cls) -> Event:
+ return object.__new__(cls)
+
+ def __init__(self) -> None:
+ self._event = asyncio.Event()
+
+ def set(self) -> None:
+ self._event.set()
+
+ def is_set(self) -> bool:
+ return self._event.is_set()
+
+ async def wait(self) -> None:
+ if self.is_set():
+ await AsyncIOBackend.checkpoint()
+ else:
+ await self._event.wait()
+
+ def statistics(self) -> EventStatistics:
+ return EventStatistics(len(self._event._waiters)) # type: ignore[attr-defined]
+
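+# A rough usage sketch for the event above, via anyio's public API (which
+# dispatches to this backend class):
+#
+#     event = anyio.Event()
+#     # in one task:      await event.wait()
+#     # in another task:  event.set()
+#
+# wait() on an already-set event still executes a checkpoint, so the call
+# yields to the event loop either way.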
+
+class CapacityLimiter(BaseCapacityLimiter):
+ _total_tokens: float = 0
+
+ def __new__(cls, total_tokens: float) -> CapacityLimiter:
+ return object.__new__(cls)
+
+ def __init__(self, total_tokens: float):
+ self._borrowers: set[Any] = set()
+ self._wait_queue: OrderedDict[Any, asyncio.Event] = OrderedDict()
+ self.total_tokens = total_tokens
+
+ async def __aenter__(self) -> None:
+ await self.acquire()
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self.release()
+
+ @property
+ def total_tokens(self) -> float:
+ return self._total_tokens
+
+ @total_tokens.setter
+ def total_tokens(self, value: float) -> None:
+ if not isinstance(value, int) and not math.isinf(value):
+ raise TypeError("total_tokens must be an int or math.inf")
+ if value < 1:
+ raise ValueError("total_tokens must be >= 1")
+
+ waiters_to_notify = max(value - self._total_tokens, 0)
+ self._total_tokens = value
+
+ # Notify waiting tasks that they have acquired the limiter
+ while self._wait_queue and waiters_to_notify:
+ event = self._wait_queue.popitem(last=False)[1]
+ event.set()
+ waiters_to_notify -= 1
+
+ @property
+ def borrowed_tokens(self) -> int:
+ return len(self._borrowers)
+
+ @property
+ def available_tokens(self) -> float:
+ return self._total_tokens - len(self._borrowers)
+
+ def acquire_nowait(self) -> None:
+ self.acquire_on_behalf_of_nowait(current_task())
+
+ def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
+ if borrower in self._borrowers:
+ raise RuntimeError(
+ "this borrower is already holding one of this CapacityLimiter's "
+ "tokens"
+ )
+
+ if self._wait_queue or len(self._borrowers) >= self._total_tokens:
+ raise WouldBlock
+
+ self._borrowers.add(borrower)
+
+ async def acquire(self) -> None:
+ return await self.acquire_on_behalf_of(current_task())
+
+ async def acquire_on_behalf_of(self, borrower: object) -> None:
+ await AsyncIOBackend.checkpoint_if_cancelled()
+ try:
+ self.acquire_on_behalf_of_nowait(borrower)
+ except WouldBlock:
+ event = asyncio.Event()
+ self._wait_queue[borrower] = event
+ try:
+ await event.wait()
+ except BaseException:
+ self._wait_queue.pop(borrower, None)
+ raise
+
+ self._borrowers.add(borrower)
+ else:
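+            # Acquired without waiting; still run a checkpoint, but give the
+            # token back if the checkpoint itself raises (e.g. on cancellation)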
+ try:
+ await AsyncIOBackend.cancel_shielded_checkpoint()
+ except BaseException:
+ self.release()
+ raise
+
+ def release(self) -> None:
+ self.release_on_behalf_of(current_task())
+
+ def release_on_behalf_of(self, borrower: object) -> None:
+ try:
+ self._borrowers.remove(borrower)
+ except KeyError:
+ raise RuntimeError(
+ "this borrower isn't holding any of this CapacityLimiter's " "tokens"
+ ) from None
+
+ # Notify the next task in line if this limiter has free capacity now
+ if self._wait_queue and len(self._borrowers) < self._total_tokens:
+ event = self._wait_queue.popitem(last=False)[1]
+ event.set()
+
+ def statistics(self) -> CapacityLimiterStatistics:
+ return CapacityLimiterStatistics(
+ self.borrowed_tokens,
+ self.total_tokens,
+ tuple(self._borrowers),
+ len(self._wait_queue),
+ )
+
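+# A rough usage sketch for the limiter above (public anyio API;
+# do_bounded_work is a placeholder for any coroutine):
+#
+#     limiter = anyio.CapacityLimiter(4)
+#     async with limiter:  # acquire() on entry, release() on exit
+#         await do_bounded_work()
+#
+# Waiters are kept in an OrderedDict, so tokens are handed out in FIFO order
+# as they are released or as total_tokens is raised.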
+
+_default_thread_limiter: RunVar[CapacityLimiter] = RunVar("_default_thread_limiter")
+
+
+#
+# Operating system signals
+#
+
+
+class _SignalReceiver:
+ def __init__(self, signals: tuple[Signals, ...]):
+ self._signals = signals
+ self._loop = get_running_loop()
+ self._signal_queue: deque[Signals] = deque()
+ self._future: asyncio.Future = asyncio.Future()
+ self._handled_signals: set[Signals] = set()
+
+ def _deliver(self, signum: Signals) -> None:
+ self._signal_queue.append(signum)
+ if not self._future.done():
+ self._future.set_result(None)
+
+ def __enter__(self) -> _SignalReceiver:
+ for sig in set(self._signals):
+ self._loop.add_signal_handler(sig, self._deliver, sig)
+ self._handled_signals.add(sig)
+
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ for sig in self._handled_signals:
+ self._loop.remove_signal_handler(sig)
+ return None
+
+ def __aiter__(self) -> _SignalReceiver:
+ return self
+
+ async def __anext__(self) -> Signals:
+ await AsyncIOBackend.checkpoint()
+ if not self._signal_queue:
+ self._future = asyncio.Future()
+ await self._future
+
+ return self._signal_queue.popleft()
+
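+# A rough usage sketch for the receiver above (public anyio API):
+#
+#     with anyio.open_signal_receiver(signal.SIGINT, signal.SIGTERM) as sigs:
+#         async for signum in sigs:
+#             ...
+#
+# Handlers are installed on __enter__ and removed on __exit__; queued signals
+# are drained one per __anext__ call.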
+
+#
+# Testing and debugging
+#
+
+
+def _create_task_info(task: asyncio.Task) -> TaskInfo:
+ task_state = _task_states.get(task)
+ if task_state is None:
+ parent_id = None
+ else:
+ parent_id = task_state.parent_id
+
+ return TaskInfo(id(task), parent_id, task.get_name(), task.get_coro())
+
+
+class TestRunner(abc.TestRunner):
+ _send_stream: MemoryObjectSendStream[tuple[Awaitable[Any], asyncio.Future[Any]]]
+
+ def __init__(
+ self,
+ *,
+ debug: bool | None = None,
+ use_uvloop: bool = False,
+ loop_factory: Callable[[], AbstractEventLoop] | None = None,
+ ) -> None:
+ if use_uvloop and loop_factory is None:
+ import uvloop
+
+ loop_factory = uvloop.new_event_loop
+
+ self._runner = Runner(debug=debug, loop_factory=loop_factory)
+ self._exceptions: list[BaseException] = []
+ self._runner_task: asyncio.Task | None = None
+
+ def __enter__(self) -> TestRunner:
+ self._runner.__enter__()
+ self.get_loop().set_exception_handler(self._exception_handler)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self._runner.__exit__(exc_type, exc_val, exc_tb)
+
+ def get_loop(self) -> AbstractEventLoop:
+ return self._runner.get_loop()
+
+ def _exception_handler(
+ self, loop: asyncio.AbstractEventLoop, context: dict[str, Any]
+ ) -> None:
+ if isinstance(context.get("exception"), Exception):
+ self._exceptions.append(context["exception"])
+ else:
+ loop.default_exception_handler(context)
+
+ def _raise_async_exceptions(self) -> None:
+ # Re-raise any exceptions raised in asynchronous callbacks
+ if self._exceptions:
+ exceptions, self._exceptions = self._exceptions, []
+ if len(exceptions) == 1:
+ raise exceptions[0]
+ elif exceptions:
+ raise BaseExceptionGroup(
+ "Multiple exceptions occurred in asynchronous callbacks", exceptions
+ )
+
+ @staticmethod
+ async def _run_tests_and_fixtures(
+ receive_stream: MemoryObjectReceiveStream[
+ tuple[Awaitable[T_Retval], asyncio.Future[T_Retval]]
+ ],
+ ) -> None:
+ with receive_stream:
+ async for coro, future in receive_stream:
+ try:
+ retval = await coro
+ except BaseException as exc:
+ if not future.cancelled():
+ future.set_exception(exc)
+ else:
+ if not future.cancelled():
+ future.set_result(retval)
+
+ async def _call_in_runner_task(
+ self,
+ func: Callable[P, Awaitable[T_Retval]],
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> T_Retval:
+ if not self._runner_task:
+ self._send_stream, receive_stream = create_memory_object_stream[
+ Tuple[Awaitable[Any], asyncio.Future]
+ ](1)
+ self._runner_task = self.get_loop().create_task(
+ self._run_tests_and_fixtures(receive_stream)
+ )
+
+ coro = func(*args, **kwargs)
+ future: asyncio.Future[T_Retval] = self.get_loop().create_future()
+ self._send_stream.send_nowait((coro, future))
+ return await future
+
+ def run_asyncgen_fixture(
+ self,
+ fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
+ kwargs: dict[str, Any],
+ ) -> Iterable[T_Retval]:
+ asyncgen = fixture_func(**kwargs)
+ fixturevalue: T_Retval = self.get_loop().run_until_complete(
+ self._call_in_runner_task(asyncgen.asend, None)
+ )
+ self._raise_async_exceptions()
+
+ yield fixturevalue
+
+ try:
+ self.get_loop().run_until_complete(
+ self._call_in_runner_task(asyncgen.asend, None)
+ )
+ except StopAsyncIteration:
+ self._raise_async_exceptions()
+ else:
+ self.get_loop().run_until_complete(asyncgen.aclose())
+ raise RuntimeError("Async generator fixture did not stop")
+
+ def run_fixture(
+ self,
+ fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
+ kwargs: dict[str, Any],
+ ) -> T_Retval:
+ retval = self.get_loop().run_until_complete(
+ self._call_in_runner_task(fixture_func, **kwargs)
+ )
+ self._raise_async_exceptions()
+ return retval
+
+ def run_test(
+ self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
+ ) -> None:
+ try:
+ self.get_loop().run_until_complete(
+ self._call_in_runner_task(test_func, **kwargs)
+ )
+ except Exception as exc:
+ self._exceptions.append(exc)
+
+ self._raise_async_exceptions()
+
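+# Design note on the runner above: tests and async fixtures are not awaited
+# with run_until_complete() directly; they are shipped over a memory object
+# stream to a single long-lived task (_run_tests_and_fixtures), so that async
+# generator fixtures are advanced and closed in the same task that started
+# them.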
+
+class AsyncIOBackend(AsyncBackend):
+ @classmethod
+ def run(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ args: tuple[Unpack[PosArgsT]],
+ kwargs: dict[str, Any],
+ options: dict[str, Any],
+ ) -> T_Retval:
+ @wraps(func)
+ async def wrapper() -> T_Retval:
+ task = cast(asyncio.Task, current_task())
+ task.set_name(get_callable_name(func))
+ _task_states[task] = TaskState(None, None)
+
+ try:
+ return await func(*args)
+ finally:
+ del _task_states[task]
+
+ debug = options.get("debug", False)
+ loop_factory = options.get("loop_factory", None)
+ if loop_factory is None and options.get("use_uvloop", False):
+ import uvloop
+
+ loop_factory = uvloop.new_event_loop
+
+ with Runner(debug=debug, loop_factory=loop_factory) as runner:
+ return runner.run(wrapper())
+
+ @classmethod
+ def current_token(cls) -> object:
+ return get_running_loop()
+
+ @classmethod
+ def current_time(cls) -> float:
+ return get_running_loop().time()
+
+ @classmethod
+ def cancelled_exception_class(cls) -> type[BaseException]:
+ return CancelledError
+
+ @classmethod
+ async def checkpoint(cls) -> None:
+ await sleep(0)
+
+ @classmethod
+ async def checkpoint_if_cancelled(cls) -> None:
+ task = current_task()
+ if task is None:
+ return
+
+ try:
+ cancel_scope = _task_states[task].cancel_scope
+ except KeyError:
+ return
+
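+        # Walk up the scope chain: if an unshielded scope has a pending
+        # cancellation, yield so that it can be delivered; stop looking at
+        # the first shielded scope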
+ while cancel_scope:
+ if cancel_scope.cancel_called:
+ await sleep(0)
+ elif cancel_scope.shield:
+ break
+ else:
+ cancel_scope = cancel_scope._parent_scope
+
+ @classmethod
+ async def cancel_shielded_checkpoint(cls) -> None:
+ with CancelScope(shield=True):
+ await sleep(0)
+
+ @classmethod
+ async def sleep(cls, delay: float) -> None:
+ await sleep(delay)
+
+ @classmethod
+ def create_cancel_scope(
+ cls, *, deadline: float = math.inf, shield: bool = False
+ ) -> CancelScope:
+ return CancelScope(deadline=deadline, shield=shield)
+
+ @classmethod
+ def current_effective_deadline(cls) -> float:
+ try:
+ cancel_scope = _task_states[
+ current_task() # type: ignore[index]
+ ].cancel_scope
+ except KeyError:
+ return math.inf
+
+ deadline = math.inf
+ while cancel_scope:
+ deadline = min(deadline, cancel_scope.deadline)
+ if cancel_scope._cancel_called:
+ deadline = -math.inf
+ break
+ elif cancel_scope.shield:
+ break
+ else:
+ cancel_scope = cancel_scope._parent_scope
+
+ return deadline
+
+ @classmethod
+ def create_task_group(cls) -> abc.TaskGroup:
+ return TaskGroup()
+
+ @classmethod
+ def create_event(cls) -> abc.Event:
+ return Event()
+
+ @classmethod
+ def create_capacity_limiter(cls, total_tokens: float) -> abc.CapacityLimiter:
+ return CapacityLimiter(total_tokens)
+
+ @classmethod
+ async def run_sync_in_worker_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ abandon_on_cancel: bool = False,
+ limiter: abc.CapacityLimiter | None = None,
+ ) -> T_Retval:
+ await cls.checkpoint()
+
+ # If this is the first run in this event loop thread, set up the necessary
+ # variables
+ try:
+ idle_workers = _threadpool_idle_workers.get()
+ workers = _threadpool_workers.get()
+ except LookupError:
+ idle_workers = deque()
+ workers = set()
+ _threadpool_idle_workers.set(idle_workers)
+ _threadpool_workers.set(workers)
+
+ async with limiter or cls.current_default_thread_limiter():
+ with CancelScope(shield=not abandon_on_cancel) as scope:
+ future: asyncio.Future = asyncio.Future()
+ root_task = find_root_task()
+ if not idle_workers:
+ worker = WorkerThread(root_task, workers, idle_workers)
+ worker.start()
+ workers.add(worker)
+ root_task.add_done_callback(worker.stop)
+ else:
+ worker = idle_workers.pop()
+
+ # Prune any other workers that have been idle for MAX_IDLE_TIME
+ # seconds or longer
+ now = cls.current_time()
+ while idle_workers:
+ if (
+ now - idle_workers[0].idle_since
+ < WorkerThread.MAX_IDLE_TIME
+ ):
+ break
+
+ expired_worker = idle_workers.popleft()
+ expired_worker.root_task.remove_done_callback(
+ expired_worker.stop
+ )
+ expired_worker.stop()
+
+ context = copy_context()
+ context.run(sniffio.current_async_library_cvar.set, None)
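+            # When this scope is shielded (abandon_on_cancel=False), hand its
+            # parent to the worker thread so check_cancelled() can still
+            # observe an outer cancellation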
+ if abandon_on_cancel or scope._parent_scope is None:
+ worker_scope = scope
+ else:
+ worker_scope = scope._parent_scope
+
+ worker.queue.put_nowait((context, func, args, future, worker_scope))
+ return await future
+
+ @classmethod
+ def check_cancelled(cls) -> None:
+ scope: CancelScope | None = threadlocals.current_cancel_scope
+ while scope is not None:
+ if scope.cancel_called:
+ raise CancelledError(f"Cancelled by cancel scope {id(scope):x}")
+
+ if scope.shield:
+ return
+
+ scope = scope._parent_scope
+
+ @classmethod
+ def run_async_from_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ args: tuple[Unpack[PosArgsT]],
+ token: object,
+ ) -> T_Retval:
+ async def task_wrapper(scope: CancelScope) -> T_Retval:
+ __tracebackhide__ = True
+ task = cast(asyncio.Task, current_task())
+ _task_states[task] = TaskState(None, scope)
+ scope._tasks.add(task)
+ try:
+ return await func(*args)
+ except CancelledError as exc:
+ raise concurrent.futures.CancelledError(str(exc)) from None
+ finally:
+ scope._tasks.discard(task)
+
+ loop = cast(AbstractEventLoop, token)
+ context = copy_context()
+ context.run(sniffio.current_async_library_cvar.set, "asyncio")
+ wrapper = task_wrapper(threadlocals.current_cancel_scope)
+ f: concurrent.futures.Future[T_Retval] = context.run(
+ asyncio.run_coroutine_threadsafe, wrapper, loop
+ )
+ return f.result()
+
+ @classmethod
+ def run_sync_from_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ token: object,
+ ) -> T_Retval:
+ @wraps(func)
+ def wrapper() -> None:
+ try:
+ sniffio.current_async_library_cvar.set("asyncio")
+ f.set_result(func(*args))
+ except BaseException as exc:
+ f.set_exception(exc)
+ if not isinstance(exc, Exception):
+ raise
+
+ f: concurrent.futures.Future[T_Retval] = Future()
+ loop = cast(AbstractEventLoop, token)
+ loop.call_soon_threadsafe(wrapper)
+ return f.result()
+
+ @classmethod
+ def create_blocking_portal(cls) -> abc.BlockingPortal:
+ return BlockingPortal()
+
+ @classmethod
+ async def open_process(
+ cls,
+ command: str | bytes | Sequence[str | bytes],
+ *,
+ shell: bool,
+ stdin: int | IO[Any] | None,
+ stdout: int | IO[Any] | None,
+ stderr: int | IO[Any] | None,
+ cwd: str | bytes | PathLike | None = None,
+ env: Mapping[str, str] | None = None,
+ start_new_session: bool = False,
+ ) -> Process:
+ await cls.checkpoint()
+ if shell:
+ process = await asyncio.create_subprocess_shell(
+ cast("str | bytes", command),
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ cwd=cwd,
+ env=env,
+ start_new_session=start_new_session,
+ )
+ else:
+ process = await asyncio.create_subprocess_exec(
+ *command,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ cwd=cwd,
+ env=env,
+ start_new_session=start_new_session,
+ )
+
+ stdin_stream = StreamWriterWrapper(process.stdin) if process.stdin else None
+ stdout_stream = StreamReaderWrapper(process.stdout) if process.stdout else None
+ stderr_stream = StreamReaderWrapper(process.stderr) if process.stderr else None
+ return Process(process, stdin_stream, stdout_stream, stderr_stream)
+
+ @classmethod
+ def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
+ create_task(
+ _shutdown_process_pool_on_exit(workers),
+ name="AnyIO process pool shutdown task",
+ )
+ find_root_task().add_done_callback(
+ partial(_forcibly_shutdown_process_pool_on_exit, workers)
+ )
+
+ @classmethod
+ async def connect_tcp(
+ cls, host: str, port: int, local_address: IPSockAddrType | None = None
+ ) -> abc.SocketStream:
+ transport, protocol = cast(
+ Tuple[asyncio.Transport, StreamProtocol],
+ await get_running_loop().create_connection(
+ StreamProtocol, host, port, local_addr=local_address
+ ),
+ )
+ transport.pause_reading()
+ return SocketStream(transport, protocol)
+
+ @classmethod
+ async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
+ await cls.checkpoint()
+ loop = get_running_loop()
+ raw_socket = socket.socket(socket.AF_UNIX)
+ raw_socket.setblocking(False)
+ while True:
+ try:
+ raw_socket.connect(path)
+ except BlockingIOError:
+ f: asyncio.Future = asyncio.Future()
+ loop.add_writer(raw_socket, f.set_result, None)
+ f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
+ await f
+ except BaseException:
+ raw_socket.close()
+ raise
+ else:
+ return UNIXSocketStream(raw_socket)
+
+ @classmethod
+ def create_tcp_listener(cls, sock: socket.socket) -> SocketListener:
+ return TCPSocketListener(sock)
+
+ @classmethod
+ def create_unix_listener(cls, sock: socket.socket) -> SocketListener:
+ return UNIXSocketListener(sock)
+
+ @classmethod
+ async def create_udp_socket(
+ cls,
+ family: AddressFamily,
+ local_address: IPSockAddrType | None,
+ remote_address: IPSockAddrType | None,
+ reuse_port: bool,
+ ) -> UDPSocket | ConnectedUDPSocket:
+ transport, protocol = await get_running_loop().create_datagram_endpoint(
+ DatagramProtocol,
+ local_addr=local_address,
+ remote_addr=remote_address,
+ family=family,
+ reuse_port=reuse_port,
+ )
+ if protocol.exception:
+ transport.close()
+ raise protocol.exception
+
+ if not remote_address:
+ return UDPSocket(transport, protocol)
+ else:
+ return ConnectedUDPSocket(transport, protocol)
+
+ @classmethod
+ async def create_unix_datagram_socket( # type: ignore[override]
+ cls, raw_socket: socket.socket, remote_path: str | bytes | None
+ ) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
+ await cls.checkpoint()
+ loop = get_running_loop()
+
+ if remote_path:
+ while True:
+ try:
+ raw_socket.connect(remote_path)
+ except BlockingIOError:
+ f: asyncio.Future = asyncio.Future()
+ loop.add_writer(raw_socket, f.set_result, None)
+ f.add_done_callback(lambda _: loop.remove_writer(raw_socket))
+ await f
+ except BaseException:
+ raw_socket.close()
+ raise
+ else:
+ return ConnectedUNIXDatagramSocket(raw_socket)
+ else:
+ return UNIXDatagramSocket(raw_socket)
+
+ @classmethod
+ async def getaddrinfo(
+ cls,
+ host: bytes | str | None,
+ port: str | int | None,
+ *,
+ family: int | AddressFamily = 0,
+ type: int | SocketKind = 0,
+ proto: int = 0,
+ flags: int = 0,
+ ) -> list[
+ tuple[
+ AddressFamily,
+ SocketKind,
+ int,
+ str,
+ tuple[str, int] | tuple[str, int, int, int],
+ ]
+ ]:
+ return await get_running_loop().getaddrinfo(
+ host, port, family=family, type=type, proto=proto, flags=flags
+ )
+
+ @classmethod
+ async def getnameinfo(
+ cls, sockaddr: IPSockAddrType, flags: int = 0
+ ) -> tuple[str, str]:
+ return await get_running_loop().getnameinfo(sockaddr, flags)
+
+ @classmethod
+ async def wait_socket_readable(cls, sock: socket.socket) -> None:
+ await cls.checkpoint()
+ try:
+ read_events = _read_events.get()
+ except LookupError:
+ read_events = {}
+ _read_events.set(read_events)
+
+ if read_events.get(sock):
+ raise BusyResourceError("reading from") from None
+
+ loop = get_running_loop()
+ event = read_events[sock] = asyncio.Event()
+ loop.add_reader(sock, event.set)
+ try:
+ await event.wait()
+ finally:
+ if read_events.pop(sock, None) is not None:
+ loop.remove_reader(sock)
+ readable = True
+ else:
+ readable = False
+
+ if not readable:
+ raise ClosedResourceError
+
+ @classmethod
+ async def wait_socket_writable(cls, sock: socket.socket) -> None:
+ await cls.checkpoint()
+ try:
+ write_events = _write_events.get()
+ except LookupError:
+ write_events = {}
+ _write_events.set(write_events)
+
+ if write_events.get(sock):
+ raise BusyResourceError("writing to") from None
+
+ loop = get_running_loop()
+ event = write_events[sock] = asyncio.Event()
+ loop.add_writer(sock.fileno(), event.set)
+ try:
+ await event.wait()
+ finally:
+ if write_events.pop(sock, None) is not None:
+ loop.remove_writer(sock)
+ writable = True
+ else:
+ writable = False
+
+ if not writable:
+ raise ClosedResourceError
+
+ @classmethod
+ def current_default_thread_limiter(cls) -> CapacityLimiter:
+ try:
+ return _default_thread_limiter.get()
+ except LookupError:
+ limiter = CapacityLimiter(40)
+ _default_thread_limiter.set(limiter)
+ return limiter
+
+ @classmethod
+ def open_signal_receiver(
+ cls, *signals: Signals
+ ) -> ContextManager[AsyncIterator[Signals]]:
+ return _SignalReceiver(signals)
+
+ @classmethod
+ def get_current_task(cls) -> TaskInfo:
+ return _create_task_info(current_task()) # type: ignore[arg-type]
+
+ @classmethod
+ def get_running_tasks(cls) -> list[TaskInfo]:
+ return [_create_task_info(task) for task in all_tasks() if not task.done()]
+
+ @classmethod
+ async def wait_all_tasks_blocked(cls) -> None:
+ await cls.checkpoint()
+ this_task = current_task()
+ while True:
+ for task in all_tasks():
+ if task is this_task:
+ continue
+
+ waiter = task._fut_waiter # type: ignore[attr-defined]
+ if waiter is None or waiter.done():
+ await sleep(0.1)
+ break
+ else:
+ return
+
+ @classmethod
+ def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
+ return TestRunner(**options)
+
+
+backend_class = AsyncIOBackend
diff --git a/venv/lib/python3.11/site-packages/anyio/_backends/_trio.py b/venv/lib/python3.11/site-packages/anyio/_backends/_trio.py
new file mode 100644
index 0000000..1a47192
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_backends/_trio.py
@@ -0,0 +1,1169 @@
+from __future__ import annotations
+
+import array
+import math
+import socket
+import sys
+from collections.abc import AsyncIterator, Iterable
+from concurrent.futures import Future
+from dataclasses import dataclass
+from functools import partial
+from io import IOBase
+from os import PathLike
+from signal import Signals
+from socket import AddressFamily, SocketKind
+from types import TracebackType
+from typing import (
+ IO,
+ Any,
+ AsyncGenerator,
+ Awaitable,
+ Callable,
+ Collection,
+ ContextManager,
+ Coroutine,
+ Generic,
+ Mapping,
+ NoReturn,
+ Sequence,
+ TypeVar,
+ cast,
+ overload,
+)
+
+import trio.from_thread
+import trio.lowlevel
+from outcome import Error, Outcome, Value
+from trio.lowlevel import (
+ current_root_task,
+ current_task,
+ wait_readable,
+ wait_writable,
+)
+from trio.socket import SocketType as TrioSocketType
+from trio.to_thread import run_sync
+
+from .. import CapacityLimiterStatistics, EventStatistics, TaskInfo, abc
+from .._core._eventloop import claim_worker_thread
+from .._core._exceptions import (
+ BrokenResourceError,
+ BusyResourceError,
+ ClosedResourceError,
+ EndOfStream,
+)
+from .._core._sockets import convert_ipv6_sockaddr
+from .._core._streams import create_memory_object_stream
+from .._core._synchronization import CapacityLimiter as BaseCapacityLimiter
+from .._core._synchronization import Event as BaseEvent
+from .._core._synchronization import ResourceGuard
+from .._core._tasks import CancelScope as BaseCancelScope
+from ..abc import IPSockAddrType, UDPPacketType, UNIXDatagramPacketType
+from ..abc._eventloop import AsyncBackend
+from ..streams.memory import MemoryObjectSendStream
+
+if sys.version_info >= (3, 10):
+ from typing import ParamSpec
+else:
+ from typing_extensions import ParamSpec
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from exceptiongroup import BaseExceptionGroup
+ from typing_extensions import TypeVarTuple, Unpack
+
+T = TypeVar("T")
+T_Retval = TypeVar("T_Retval")
+T_SockAddr = TypeVar("T_SockAddr", str, IPSockAddrType)
+PosArgsT = TypeVarTuple("PosArgsT")
+P = ParamSpec("P")
+
+
+#
+# Event loop
+#
+
+RunVar = trio.lowlevel.RunVar
+
+
+#
+# Timeouts and cancellation
+#
+
+
+class CancelScope(BaseCancelScope):
+ def __new__(
+ cls, original: trio.CancelScope | None = None, **kwargs: object
+ ) -> CancelScope:
+ return object.__new__(cls)
+
+ def __init__(self, original: trio.CancelScope | None = None, **kwargs: Any) -> None:
+ self.__original = original or trio.CancelScope(**kwargs)
+
+ def __enter__(self) -> CancelScope:
+ self.__original.__enter__()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ # https://github.com/python-trio/trio-typing/pull/79
+ return self.__original.__exit__(exc_type, exc_val, exc_tb)
+
+ def cancel(self) -> None:
+ self.__original.cancel()
+
+ @property
+ def deadline(self) -> float:
+ return self.__original.deadline
+
+ @deadline.setter
+ def deadline(self, value: float) -> None:
+ self.__original.deadline = value
+
+ @property
+ def cancel_called(self) -> bool:
+ return self.__original.cancel_called
+
+ @property
+ def cancelled_caught(self) -> bool:
+ return self.__original.cancelled_caught
+
+ @property
+ def shield(self) -> bool:
+ return self.__original.shield
+
+ @shield.setter
+ def shield(self, value: bool) -> None:
+ self.__original.shield = value
+
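+# The wrapper above simply delegates to a native trio.CancelScope. A rough
+# sketch of how it surfaces through anyio's public API:
+#
+#     with anyio.move_on_after(5) as scope:  # deadline-based cancel scope
+#         await anyio.sleep(10)
+#     assert scope.cancelled_caught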
+
+#
+# Task groups
+#
+
+
+class TaskGroup(abc.TaskGroup):
+ def __init__(self) -> None:
+ self._active = False
+ self._nursery_manager = trio.open_nursery(strict_exception_groups=True)
+ self.cancel_scope = None # type: ignore[assignment]
+
+ async def __aenter__(self) -> TaskGroup:
+ self._active = True
+ self._nursery = await self._nursery_manager.__aenter__()
+ self.cancel_scope = CancelScope(self._nursery.cancel_scope)
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ try:
+ return await self._nursery_manager.__aexit__(exc_type, exc_val, exc_tb)
+ except BaseExceptionGroup as exc:
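+            # If the group consists solely of Cancelled exceptions, collapse
+            # it into a single Cancelled so cancellation propagates the way
+            # trio expects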
+ _, rest = exc.split(trio.Cancelled)
+ if not rest:
+ cancelled_exc = trio.Cancelled._create()
+ raise cancelled_exc from exc
+
+ raise
+ finally:
+ self._active = False
+
+ def start_soon(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
+ *args: Unpack[PosArgsT],
+ name: object = None,
+ ) -> None:
+ if not self._active:
+ raise RuntimeError(
+ "This task group is not active; no new tasks can be started."
+ )
+
+ self._nursery.start_soon(func, *args, name=name)
+
+ async def start(
+ self, func: Callable[..., Awaitable[Any]], *args: object, name: object = None
+ ) -> Any:
+ if not self._active:
+ raise RuntimeError(
+ "This task group is not active; no new tasks can be started."
+ )
+
+ return await self._nursery.start(func, *args, name=name)
+
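+# A rough usage sketch for the task group above (public anyio API; worker,
+# serve and listener are placeholders for the user's own code):
+#
+#     async with anyio.create_task_group() as tg:
+#         tg.start_soon(worker)            # fire-and-forget child task
+#         await tg.start(serve, listener)  # waits for task_status.started()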
+
+#
+# Threads
+#
+
+
+class BlockingPortal(abc.BlockingPortal):
+ def __new__(cls) -> BlockingPortal:
+ return object.__new__(cls)
+
+ def __init__(self) -> None:
+ super().__init__()
+ self._token = trio.lowlevel.current_trio_token()
+
+ def _spawn_task_from_thread(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ kwargs: dict[str, Any],
+ name: object,
+ future: Future[T_Retval],
+ ) -> None:
+ trio.from_thread.run_sync(
+ partial(self._task_group.start_soon, name=name),
+ self._call_func,
+ func,
+ args,
+ kwargs,
+ future,
+ trio_token=self._token,
+ )
+
+
+#
+# Subprocesses
+#
+
+
+@dataclass(eq=False)
+class ReceiveStreamWrapper(abc.ByteReceiveStream):
+ _stream: trio.abc.ReceiveStream
+
+ async def receive(self, max_bytes: int | None = None) -> bytes:
+ try:
+ data = await self._stream.receive_some(max_bytes)
+ except trio.ClosedResourceError as exc:
+ raise ClosedResourceError from exc.__cause__
+ except trio.BrokenResourceError as exc:
+ raise BrokenResourceError from exc.__cause__
+
+ if data:
+ return data
+ else:
+ raise EndOfStream
+
+ async def aclose(self) -> None:
+ await self._stream.aclose()
+
+
+@dataclass(eq=False)
+class SendStreamWrapper(abc.ByteSendStream):
+ _stream: trio.abc.SendStream
+
+ async def send(self, item: bytes) -> None:
+ try:
+ await self._stream.send_all(item)
+ except trio.ClosedResourceError as exc:
+ raise ClosedResourceError from exc.__cause__
+ except trio.BrokenResourceError as exc:
+ raise BrokenResourceError from exc.__cause__
+
+ async def aclose(self) -> None:
+ await self._stream.aclose()
+
+
+@dataclass(eq=False)
+class Process(abc.Process):
+ _process: trio.Process
+ _stdin: abc.ByteSendStream | None
+ _stdout: abc.ByteReceiveStream | None
+ _stderr: abc.ByteReceiveStream | None
+
+ async def aclose(self) -> None:
+ with CancelScope(shield=True):
+ if self._stdin:
+ await self._stdin.aclose()
+ if self._stdout:
+ await self._stdout.aclose()
+ if self._stderr:
+ await self._stderr.aclose()
+
+ try:
+ await self.wait()
+ except BaseException:
+ self.kill()
+ with CancelScope(shield=True):
+ await self.wait()
+ raise
+
+ async def wait(self) -> int:
+ return await self._process.wait()
+
+ def terminate(self) -> None:
+ self._process.terminate()
+
+ def kill(self) -> None:
+ self._process.kill()
+
+ def send_signal(self, signal: Signals) -> None:
+ self._process.send_signal(signal)
+
+ @property
+ def pid(self) -> int:
+ return self._process.pid
+
+ @property
+ def returncode(self) -> int | None:
+ return self._process.returncode
+
+ @property
+ def stdin(self) -> abc.ByteSendStream | None:
+ return self._stdin
+
+ @property
+ def stdout(self) -> abc.ByteReceiveStream | None:
+ return self._stdout
+
+ @property
+ def stderr(self) -> abc.ByteReceiveStream | None:
+ return self._stderr
+
+
+class _ProcessPoolShutdownInstrument(trio.abc.Instrument):
+ def after_run(self) -> None:
+ super().after_run()
+
+
+current_default_worker_process_limiter: trio.lowlevel.RunVar = RunVar(
+ "current_default_worker_process_limiter"
+)
+
+
+async def _shutdown_process_pool(workers: set[abc.Process]) -> None:
+ try:
+ await trio.sleep(math.inf)
+ except trio.Cancelled:
+ for process in workers:
+ if process.returncode is None:
+ process.kill()
+
+ with CancelScope(shield=True):
+ for process in workers:
+ await process.aclose()
+
+
+#
+# Sockets and networking
+#
+
+
+class _TrioSocketMixin(Generic[T_SockAddr]):
+ def __init__(self, trio_socket: TrioSocketType) -> None:
+ self._trio_socket = trio_socket
+ self._closed = False
+
+ def _check_closed(self) -> None:
+ if self._closed:
+ raise ClosedResourceError
+ if self._trio_socket.fileno() < 0:
+ raise BrokenResourceError
+
+ @property
+ def _raw_socket(self) -> socket.socket:
+ return self._trio_socket._sock # type: ignore[attr-defined]
+
+ async def aclose(self) -> None:
+ if self._trio_socket.fileno() >= 0:
+ self._closed = True
+ self._trio_socket.close()
+
+ def _convert_socket_error(self, exc: BaseException) -> NoReturn:
+ if isinstance(exc, trio.ClosedResourceError):
+ raise ClosedResourceError from exc
+ elif self._trio_socket.fileno() < 0 and self._closed:
+ raise ClosedResourceError from None
+ elif isinstance(exc, OSError):
+ raise BrokenResourceError from exc
+ else:
+ raise exc
+
+
+class SocketStream(_TrioSocketMixin, abc.SocketStream):
+ def __init__(self, trio_socket: TrioSocketType) -> None:
+ super().__init__(trio_socket)
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ with self._receive_guard:
+ try:
+ data = await self._trio_socket.recv(max_bytes)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ if data:
+ return data
+ else:
+ raise EndOfStream
+
+ async def send(self, item: bytes) -> None:
+ with self._send_guard:
+ view = memoryview(item)
+ while view:
+ try:
+ bytes_sent = await self._trio_socket.send(view)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ view = view[bytes_sent:]
+
+ async def send_eof(self) -> None:
+ self._trio_socket.shutdown(socket.SHUT_WR)
+
+
+class UNIXSocketStream(SocketStream, abc.UNIXSocketStream):
+ async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
+ if not isinstance(msglen, int) or msglen < 0:
+ raise ValueError("msglen must be a non-negative integer")
+ if not isinstance(maxfds, int) or maxfds < 1:
+ raise ValueError("maxfds must be a positive integer")
+
+ fds = array.array("i")
+ await trio.lowlevel.checkpoint()
+ with self._receive_guard:
+ while True:
+ try:
+ message, ancdata, flags, addr = await self._trio_socket.recvmsg(
+ msglen, socket.CMSG_LEN(maxfds * fds.itemsize)
+ )
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+ else:
+ if not message and not ancdata:
+ raise EndOfStream
+
+ break
+
+ for cmsg_level, cmsg_type, cmsg_data in ancdata:
+ if cmsg_level != socket.SOL_SOCKET or cmsg_type != socket.SCM_RIGHTS:
+ raise RuntimeError(
+ f"Received unexpected ancillary data; message = {message!r}, "
+ f"cmsg_level = {cmsg_level}, cmsg_type = {cmsg_type}"
+ )
+
+ fds.frombytes(cmsg_data[: len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
+
+ return message, list(fds)
+
+ async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
+ if not message:
+ raise ValueError("message must not be empty")
+ if not fds:
+ raise ValueError("fds must not be empty")
+
+ filenos: list[int] = []
+ for fd in fds:
+ if isinstance(fd, int):
+ filenos.append(fd)
+ elif isinstance(fd, IOBase):
+ filenos.append(fd.fileno())
+
+ fdarray = array.array("i", filenos)
+ await trio.lowlevel.checkpoint()
+ with self._send_guard:
+ while True:
+ try:
+ await self._trio_socket.sendmsg(
+ [message],
+ [
+ (
+ socket.SOL_SOCKET,
+ socket.SCM_RIGHTS,
+ fdarray,
+ )
+ ],
+ )
+ break
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
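+# A rough sketch of file descriptor passing with the stream above (public
+# anyio API; the socket path and file are placeholders):
+#
+#     stream = await anyio.connect_unix("/tmp/app.sock")
+#     # sender side
+#     with open("data.bin", "rb") as f:
+#         await stream.send_fds(b"one file attached", [f])
+#     # receiver side
+#     msg, fds = await stream.receive_fds(msglen=4096, maxfds=1)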
+
+class TCPSocketListener(_TrioSocketMixin, abc.SocketListener):
+ def __init__(self, raw_socket: socket.socket):
+ super().__init__(trio.socket.from_stdlib_socket(raw_socket))
+ self._accept_guard = ResourceGuard("accepting connections from")
+
+ async def accept(self) -> SocketStream:
+ with self._accept_guard:
+ try:
+ trio_socket, _addr = await self._trio_socket.accept()
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ return SocketStream(trio_socket)
+
+
+class UNIXSocketListener(_TrioSocketMixin, abc.SocketListener):
+ def __init__(self, raw_socket: socket.socket):
+ super().__init__(trio.socket.from_stdlib_socket(raw_socket))
+ self._accept_guard = ResourceGuard("accepting connections from")
+
+ async def accept(self) -> UNIXSocketStream:
+ with self._accept_guard:
+ try:
+ trio_socket, _addr = await self._trio_socket.accept()
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ return UNIXSocketStream(trio_socket)
+
+
+class UDPSocket(_TrioSocketMixin[IPSockAddrType], abc.UDPSocket):
+ def __init__(self, trio_socket: TrioSocketType) -> None:
+ super().__init__(trio_socket)
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+
+ async def receive(self) -> tuple[bytes, IPSockAddrType]:
+ with self._receive_guard:
+ try:
+ data, addr = await self._trio_socket.recvfrom(65536)
+ return data, convert_ipv6_sockaddr(addr)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ async def send(self, item: UDPPacketType) -> None:
+ with self._send_guard:
+ try:
+ await self._trio_socket.sendto(*item)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+
+class ConnectedUDPSocket(_TrioSocketMixin[IPSockAddrType], abc.ConnectedUDPSocket):
+ def __init__(self, trio_socket: TrioSocketType) -> None:
+ super().__init__(trio_socket)
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+
+ async def receive(self) -> bytes:
+ with self._receive_guard:
+ try:
+ return await self._trio_socket.recv(65536)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ async def send(self, item: bytes) -> None:
+ with self._send_guard:
+ try:
+ await self._trio_socket.send(item)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+
+class UNIXDatagramSocket(_TrioSocketMixin[str], abc.UNIXDatagramSocket):
+ def __init__(self, trio_socket: TrioSocketType) -> None:
+ super().__init__(trio_socket)
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+
+ async def receive(self) -> UNIXDatagramPacketType:
+ with self._receive_guard:
+ try:
+ data, addr = await self._trio_socket.recvfrom(65536)
+ return data, addr
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ async def send(self, item: UNIXDatagramPacketType) -> None:
+ with self._send_guard:
+ try:
+ await self._trio_socket.sendto(*item)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+
+class ConnectedUNIXDatagramSocket(
+ _TrioSocketMixin[str], abc.ConnectedUNIXDatagramSocket
+):
+ def __init__(self, trio_socket: TrioSocketType) -> None:
+ super().__init__(trio_socket)
+ self._receive_guard = ResourceGuard("reading from")
+ self._send_guard = ResourceGuard("writing to")
+
+ async def receive(self) -> bytes:
+ with self._receive_guard:
+ try:
+ return await self._trio_socket.recv(65536)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+ async def send(self, item: bytes) -> None:
+ with self._send_guard:
+ try:
+ await self._trio_socket.send(item)
+ except BaseException as exc:
+ self._convert_socket_error(exc)
+
+
+#
+# Synchronization
+#
+
+
+class Event(BaseEvent):
+ def __new__(cls) -> Event:
+ return object.__new__(cls)
+
+ def __init__(self) -> None:
+ self.__original = trio.Event()
+
+ def is_set(self) -> bool:
+ return self.__original.is_set()
+
+ async def wait(self) -> None:
+ return await self.__original.wait()
+
+ def statistics(self) -> EventStatistics:
+ orig_statistics = self.__original.statistics()
+ return EventStatistics(tasks_waiting=orig_statistics.tasks_waiting)
+
+ def set(self) -> None:
+ self.__original.set()
+
+
+class CapacityLimiter(BaseCapacityLimiter):
+ def __new__(
+ cls,
+ total_tokens: float | None = None,
+ *,
+ original: trio.CapacityLimiter | None = None,
+ ) -> CapacityLimiter:
+ return object.__new__(cls)
+
+ def __init__(
+ self,
+ total_tokens: float | None = None,
+ *,
+ original: trio.CapacityLimiter | None = None,
+ ) -> None:
+ if original is not None:
+ self.__original = original
+ else:
+ assert total_tokens is not None
+ self.__original = trio.CapacityLimiter(total_tokens)
+
+ async def __aenter__(self) -> None:
+ return await self.__original.__aenter__()
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ await self.__original.__aexit__(exc_type, exc_val, exc_tb)
+
+ @property
+ def total_tokens(self) -> float:
+ return self.__original.total_tokens
+
+ @total_tokens.setter
+ def total_tokens(self, value: float) -> None:
+ self.__original.total_tokens = value
+
+ @property
+ def borrowed_tokens(self) -> int:
+ return self.__original.borrowed_tokens
+
+ @property
+ def available_tokens(self) -> float:
+ return self.__original.available_tokens
+
+ def acquire_nowait(self) -> None:
+ self.__original.acquire_nowait()
+
+ def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
+ self.__original.acquire_on_behalf_of_nowait(borrower)
+
+ async def acquire(self) -> None:
+ await self.__original.acquire()
+
+ async def acquire_on_behalf_of(self, borrower: object) -> None:
+ await self.__original.acquire_on_behalf_of(borrower)
+
+ def release(self) -> None:
+ return self.__original.release()
+
+ def release_on_behalf_of(self, borrower: object) -> None:
+ return self.__original.release_on_behalf_of(borrower)
+
+ def statistics(self) -> CapacityLimiterStatistics:
+ orig = self.__original.statistics()
+ return CapacityLimiterStatistics(
+ borrowed_tokens=orig.borrowed_tokens,
+ total_tokens=orig.total_tokens,
+ borrowers=tuple(orig.borrowers),
+ tasks_waiting=orig.tasks_waiting,
+ )
+
+
+_capacity_limiter_wrapper: trio.lowlevel.RunVar = RunVar("_capacity_limiter_wrapper")
+
+
+#
+# Signal handling
+#
+
+
+class _SignalReceiver:
+ _iterator: AsyncIterator[int]
+
+ def __init__(self, signals: tuple[Signals, ...]):
+ self._signals = signals
+
+ def __enter__(self) -> _SignalReceiver:
+ self._cm = trio.open_signal_receiver(*self._signals)
+ self._iterator = self._cm.__enter__()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ return self._cm.__exit__(exc_type, exc_val, exc_tb)
+
+ def __aiter__(self) -> _SignalReceiver:
+ return self
+
+ async def __anext__(self) -> Signals:
+ signum = await self._iterator.__anext__()
+ return Signals(signum)
+
+
+#
+# Testing and debugging
+#
+
+
+class TestRunner(abc.TestRunner):
+ def __init__(self, **options: Any) -> None:
+ from queue import Queue
+
+ self._call_queue: Queue[Callable[[], object]] = Queue()
+ self._send_stream: MemoryObjectSendStream | None = None
+ self._options = options
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+ ) -> None:
+ if self._send_stream:
+ self._send_stream.close()
+ while self._send_stream is not None:
+ self._call_queue.get()()
+
+ async def _run_tests_and_fixtures(self) -> None:
+ self._send_stream, receive_stream = create_memory_object_stream(1)
+ with receive_stream:
+ async for coro, outcome_holder in receive_stream:
+ try:
+ retval = await coro
+ except BaseException as exc:
+ outcome_holder.append(Error(exc))
+ else:
+ outcome_holder.append(Value(retval))
+
+ def _main_task_finished(self, outcome: object) -> None:
+ self._send_stream = None
+
+ def _call_in_runner_task(
+ self,
+ func: Callable[P, Awaitable[T_Retval]],
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> T_Retval:
+ if self._send_stream is None:
+ trio.lowlevel.start_guest_run(
+ self._run_tests_and_fixtures,
+ run_sync_soon_threadsafe=self._call_queue.put,
+ done_callback=self._main_task_finished,
+ **self._options,
+ )
+ while self._send_stream is None:
+ self._call_queue.get()()
+
+ outcome_holder: list[Outcome] = []
+ self._send_stream.send_nowait((func(*args, **kwargs), outcome_holder))
+ while not outcome_holder:
+ self._call_queue.get()()
+
+ return outcome_holder[0].unwrap()
+
+ def run_asyncgen_fixture(
+ self,
+ fixture_func: Callable[..., AsyncGenerator[T_Retval, Any]],
+ kwargs: dict[str, Any],
+ ) -> Iterable[T_Retval]:
+ asyncgen = fixture_func(**kwargs)
+ fixturevalue: T_Retval = self._call_in_runner_task(asyncgen.asend, None)
+
+ yield fixturevalue
+
+ try:
+ self._call_in_runner_task(asyncgen.asend, None)
+ except StopAsyncIteration:
+ pass
+ else:
+ self._call_in_runner_task(asyncgen.aclose)
+ raise RuntimeError("Async generator fixture did not stop")
+
+ def run_fixture(
+ self,
+ fixture_func: Callable[..., Coroutine[Any, Any, T_Retval]],
+ kwargs: dict[str, Any],
+ ) -> T_Retval:
+ return self._call_in_runner_task(fixture_func, **kwargs)
+
+ def run_test(
+ self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
+ ) -> None:
+ self._call_in_runner_task(test_func, **kwargs)
+
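+# Design note on the runner above: trio is started in "guest mode" via
+# start_guest_run(), with the host loop reduced to draining _call_queue.
+# Each test or fixture coroutine is sent through a memory object stream to
+# _run_tests_and_fixtures(), and its Outcome is unwrapped once the queue
+# pump delivers it.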
+
+class TrioBackend(AsyncBackend):
+ @classmethod
+ def run(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ args: tuple[Unpack[PosArgsT]],
+ kwargs: dict[str, Any],
+ options: dict[str, Any],
+ ) -> T_Retval:
+ return trio.run(func, *args)
+
+ @classmethod
+ def current_token(cls) -> object:
+ return trio.lowlevel.current_trio_token()
+
+ @classmethod
+ def current_time(cls) -> float:
+ return trio.current_time()
+
+ @classmethod
+ def cancelled_exception_class(cls) -> type[BaseException]:
+ return trio.Cancelled
+
+ @classmethod
+ async def checkpoint(cls) -> None:
+ await trio.lowlevel.checkpoint()
+
+ @classmethod
+ async def checkpoint_if_cancelled(cls) -> None:
+ await trio.lowlevel.checkpoint_if_cancelled()
+
+ @classmethod
+ async def cancel_shielded_checkpoint(cls) -> None:
+ await trio.lowlevel.cancel_shielded_checkpoint()
+
+ @classmethod
+ async def sleep(cls, delay: float) -> None:
+ await trio.sleep(delay)
+
+ @classmethod
+ def create_cancel_scope(
+ cls, *, deadline: float = math.inf, shield: bool = False
+ ) -> abc.CancelScope:
+ return CancelScope(deadline=deadline, shield=shield)
+
+ @classmethod
+ def current_effective_deadline(cls) -> float:
+ return trio.current_effective_deadline()
+
+ @classmethod
+ def create_task_group(cls) -> abc.TaskGroup:
+ return TaskGroup()
+
+ @classmethod
+ def create_event(cls) -> abc.Event:
+ return Event()
+
+ @classmethod
+ def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
+ return CapacityLimiter(total_tokens)
+
+ @classmethod
+ async def run_sync_in_worker_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ abandon_on_cancel: bool = False,
+ limiter: abc.CapacityLimiter | None = None,
+ ) -> T_Retval:
+ def wrapper() -> T_Retval:
+ with claim_worker_thread(TrioBackend, token):
+ return func(*args)
+
+ token = TrioBackend.current_token()
+ return await run_sync(
+ wrapper,
+ abandon_on_cancel=abandon_on_cancel,
+ limiter=cast(trio.CapacityLimiter, limiter),
+ )
+
+ @classmethod
+ def check_cancelled(cls) -> None:
+ trio.from_thread.check_cancelled()
+
+ @classmethod
+ def run_async_from_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ args: tuple[Unpack[PosArgsT]],
+ token: object,
+ ) -> T_Retval:
+ return trio.from_thread.run(func, *args)
+
+ @classmethod
+ def run_sync_from_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ token: object,
+ ) -> T_Retval:
+ return trio.from_thread.run_sync(func, *args)
+
+ @classmethod
+ def create_blocking_portal(cls) -> abc.BlockingPortal:
+ return BlockingPortal()
+
+ @classmethod
+ async def open_process(
+ cls,
+ command: str | bytes | Sequence[str | bytes],
+ *,
+ shell: bool,
+ stdin: int | IO[Any] | None,
+ stdout: int | IO[Any] | None,
+ stderr: int | IO[Any] | None,
+ cwd: str | bytes | PathLike | None = None,
+ env: Mapping[str, str] | None = None,
+ start_new_session: bool = False,
+ ) -> Process:
+ process = await trio.lowlevel.open_process( # type: ignore[misc]
+ command, # type: ignore[arg-type]
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ shell=shell,
+ cwd=cwd,
+ env=env,
+ start_new_session=start_new_session,
+ )
+ stdin_stream = SendStreamWrapper(process.stdin) if process.stdin else None
+ stdout_stream = ReceiveStreamWrapper(process.stdout) if process.stdout else None
+ stderr_stream = ReceiveStreamWrapper(process.stderr) if process.stderr else None
+ return Process(process, stdin_stream, stdout_stream, stderr_stream)
+
+ @classmethod
+ def setup_process_pool_exit_at_shutdown(cls, workers: set[abc.Process]) -> None:
+ trio.lowlevel.spawn_system_task(_shutdown_process_pool, workers)
+
+ @classmethod
+ async def connect_tcp(
+ cls, host: str, port: int, local_address: IPSockAddrType | None = None
+ ) -> SocketStream:
+ family = socket.AF_INET6 if ":" in host else socket.AF_INET
+ trio_socket = trio.socket.socket(family)
+ trio_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ if local_address:
+ await trio_socket.bind(local_address)
+
+ try:
+ await trio_socket.connect((host, port))
+ except BaseException:
+ trio_socket.close()
+ raise
+
+ return SocketStream(trio_socket)
+
+ @classmethod
+ async def connect_unix(cls, path: str | bytes) -> abc.UNIXSocketStream:
+ trio_socket = trio.socket.socket(socket.AF_UNIX)
+ try:
+ await trio_socket.connect(path)
+ except BaseException:
+ trio_socket.close()
+ raise
+
+ return UNIXSocketStream(trio_socket)
+
+ @classmethod
+ def create_tcp_listener(cls, sock: socket.socket) -> abc.SocketListener:
+ return TCPSocketListener(sock)
+
+ @classmethod
+ def create_unix_listener(cls, sock: socket.socket) -> abc.SocketListener:
+ return UNIXSocketListener(sock)
+
+ @classmethod
+ async def create_udp_socket(
+ cls,
+ family: socket.AddressFamily,
+ local_address: IPSockAddrType | None,
+ remote_address: IPSockAddrType | None,
+ reuse_port: bool,
+ ) -> UDPSocket | ConnectedUDPSocket:
+ trio_socket = trio.socket.socket(family=family, type=socket.SOCK_DGRAM)
+
+ if reuse_port:
+ trio_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+
+ if local_address:
+ await trio_socket.bind(local_address)
+
+ if remote_address:
+ await trio_socket.connect(remote_address)
+ return ConnectedUDPSocket(trio_socket)
+ else:
+ return UDPSocket(trio_socket)
+
+ @classmethod
+ @overload
+ async def create_unix_datagram_socket(
+ cls, raw_socket: socket.socket, remote_path: None
+ ) -> abc.UNIXDatagramSocket:
+ ...
+
+ @classmethod
+ @overload
+ async def create_unix_datagram_socket(
+ cls, raw_socket: socket.socket, remote_path: str | bytes
+ ) -> abc.ConnectedUNIXDatagramSocket:
+ ...
+
+ @classmethod
+ async def create_unix_datagram_socket(
+ cls, raw_socket: socket.socket, remote_path: str | bytes | None
+ ) -> abc.UNIXDatagramSocket | abc.ConnectedUNIXDatagramSocket:
+ trio_socket = trio.socket.from_stdlib_socket(raw_socket)
+
+ if remote_path:
+ await trio_socket.connect(remote_path)
+ return ConnectedUNIXDatagramSocket(trio_socket)
+ else:
+ return UNIXDatagramSocket(trio_socket)
+
+ @classmethod
+ async def getaddrinfo(
+ cls,
+ host: bytes | str | None,
+ port: str | int | None,
+ *,
+ family: int | AddressFamily = 0,
+ type: int | SocketKind = 0,
+ proto: int = 0,
+ flags: int = 0,
+ ) -> list[
+ tuple[
+ AddressFamily,
+ SocketKind,
+ int,
+ str,
+ tuple[str, int] | tuple[str, int, int, int],
+ ]
+ ]:
+ return await trio.socket.getaddrinfo(host, port, family, type, proto, flags)
+
+ @classmethod
+ async def getnameinfo(
+ cls, sockaddr: IPSockAddrType, flags: int = 0
+ ) -> tuple[str, str]:
+ return await trio.socket.getnameinfo(sockaddr, flags)
+
+ @classmethod
+ async def wait_socket_readable(cls, sock: socket.socket) -> None:
+ try:
+ await wait_readable(sock)
+ except trio.ClosedResourceError as exc:
+ raise ClosedResourceError().with_traceback(exc.__traceback__) from None
+ except trio.BusyResourceError:
+ raise BusyResourceError("reading from") from None
+
+ @classmethod
+ async def wait_socket_writable(cls, sock: socket.socket) -> None:
+ try:
+ await wait_writable(sock)
+ except trio.ClosedResourceError as exc:
+ raise ClosedResourceError().with_traceback(exc.__traceback__) from None
+ except trio.BusyResourceError:
+ raise BusyResourceError("writing to") from None
+
+ @classmethod
+ def current_default_thread_limiter(cls) -> CapacityLimiter:
+ try:
+ return _capacity_limiter_wrapper.get()
+ except LookupError:
+ limiter = CapacityLimiter(
+ original=trio.to_thread.current_default_thread_limiter()
+ )
+ _capacity_limiter_wrapper.set(limiter)
+ return limiter
+
+ @classmethod
+ def open_signal_receiver(
+ cls, *signals: Signals
+ ) -> ContextManager[AsyncIterator[Signals]]:
+ return _SignalReceiver(signals)
+
+ @classmethod
+ def get_current_task(cls) -> TaskInfo:
+ task = current_task()
+
+ parent_id = None
+ if task.parent_nursery and task.parent_nursery.parent_task:
+ parent_id = id(task.parent_nursery.parent_task)
+
+ return TaskInfo(id(task), parent_id, task.name, task.coro)
+
+ @classmethod
+ def get_running_tasks(cls) -> list[TaskInfo]:
+ root_task = current_root_task()
+ assert root_task
+ task_infos = [TaskInfo(id(root_task), None, root_task.name, root_task.coro)]
+ nurseries = root_task.child_nurseries
+ while nurseries:
+ new_nurseries: list[trio.Nursery] = []
+ for nursery in nurseries:
+ for task in nursery.child_tasks:
+ task_infos.append(
+ TaskInfo(
+ id(task), id(nursery.parent_task), task.name, task.coro
+ )
+ )
+ new_nurseries.extend(task.child_nurseries)
+
+ nurseries = new_nurseries
+
+ return task_infos
+
+ @classmethod
+ async def wait_all_tasks_blocked(cls) -> None:
+ from trio.testing import wait_all_tasks_blocked
+
+ await wait_all_tasks_blocked()
+
+ @classmethod
+ def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
+ return TestRunner(**options)
+
+
+backend_class = TrioBackend
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__init__.py b/venv/lib/python3.11/site-packages/anyio/_core/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__init__.py
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..174d85c
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/__init__.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_eventloop.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_eventloop.cpython-311.pyc
new file mode 100644
index 0000000..3793487
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_eventloop.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_exceptions.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_exceptions.cpython-311.pyc
new file mode 100644
index 0000000..4ed4ed8
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_exceptions.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_fileio.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_fileio.cpython-311.pyc
new file mode 100644
index 0000000..b403ec5
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_fileio.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_resources.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_resources.cpython-311.pyc
new file mode 100644
index 0000000..f5170f4
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_resources.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_signals.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_signals.cpython-311.pyc
new file mode 100644
index 0000000..9c51c0b
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_signals.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_sockets.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_sockets.cpython-311.pyc
new file mode 100644
index 0000000..4de85c1
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_sockets.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_streams.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_streams.cpython-311.pyc
new file mode 100644
index 0000000..53334c7
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_streams.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-311.pyc
new file mode 100644
index 0000000..6fa1ea5
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_subprocesses.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_synchronization.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_synchronization.cpython-311.pyc
new file mode 100644
index 0000000..345fe04
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_synchronization.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_tasks.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_tasks.cpython-311.pyc
new file mode 100644
index 0000000..0a60553
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_tasks.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_testing.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_testing.cpython-311.pyc
new file mode 100644
index 0000000..f53a591
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_testing.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_typedattr.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_typedattr.cpython-311.pyc
new file mode 100644
index 0000000..7ad8529
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/__pycache__/_typedattr.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_eventloop.py b/venv/lib/python3.11/site-packages/anyio/_core/_eventloop.py
new file mode 100644
index 0000000..a9c6e82
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_eventloop.py
@@ -0,0 +1,163 @@
+from __future__ import annotations
+
+import math
+import sys
+import threading
+from collections.abc import Awaitable, Callable, Generator
+from contextlib import contextmanager
+from importlib import import_module
+from typing import TYPE_CHECKING, Any, TypeVar
+
+import sniffio
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from typing_extensions import TypeVarTuple, Unpack
+
+if TYPE_CHECKING:
+ from ..abc import AsyncBackend
+
+# This must be updated when new backends are introduced
+BACKENDS = "asyncio", "trio"
+
+T_Retval = TypeVar("T_Retval")
+PosArgsT = TypeVarTuple("PosArgsT")
+
+threadlocals = threading.local()
+
+
+def run(
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ *args: Unpack[PosArgsT],
+ backend: str = "asyncio",
+ backend_options: dict[str, Any] | None = None,
+) -> T_Retval:
+ """
+ Run the given coroutine function in an asynchronous event loop.
+
+    The current thread must not already be running an event loop.
+
+ :param func: a coroutine function
+ :param args: positional arguments to ``func``
+ :param backend: name of the asynchronous event loop implementation – currently
+ either ``asyncio`` or ``trio``
+ :param backend_options: keyword arguments to call the backend ``run()``
+ implementation with (documented :ref:`here <backend options>`)
+ :return: the return value of the coroutine function
+ :raises RuntimeError: if an asynchronous event loop is already running in this
+ thread
+ :raises LookupError: if the named backend is not found
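+
+    A minimal usage sketch (the coroutine function and its arguments are
+    illustrative)::
+
+        async def main(name: str) -> str:
+            return f"hello, {name}"
+
+        print(run(main, "world"))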
+
+ """
+ try:
+ asynclib_name = sniffio.current_async_library()
+ except sniffio.AsyncLibraryNotFoundError:
+ pass
+ else:
+ raise RuntimeError(f"Already running {asynclib_name} in this thread")
+
+ try:
+ async_backend = get_async_backend(backend)
+ except ImportError as exc:
+ raise LookupError(f"No such backend: {backend}") from exc
+
+ token = None
+ if sniffio.current_async_library_cvar.get(None) is None:
+ # Since we're in control of the event loop, we can cache the name of the async
+ # library
+ token = sniffio.current_async_library_cvar.set(backend)
+
+ try:
+ backend_options = backend_options or {}
+ return async_backend.run(func, args, {}, backend_options)
+ finally:
+ if token:
+ sniffio.current_async_library_cvar.reset(token)
+
+
+async def sleep(delay: float) -> None:
+ """
+ Pause the current task for the specified duration.
+
+ :param delay: the duration, in seconds
+
+ """
+ return await get_async_backend().sleep(delay)
+
+
+async def sleep_forever() -> None:
+ """
+ Pause the current task until it's cancelled.
+
+ This is a shortcut for ``sleep(math.inf)``.
+
+ .. versionadded:: 3.1
+
+ """
+ await sleep(math.inf)
+
+
+async def sleep_until(deadline: float) -> None:
+ """
+ Pause the current task until the given time.
+
+ :param deadline: the absolute time to wake up at (according to the internal
+ monotonic clock of the event loop)
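+
+    For example, to wake up roughly five seconds from now (sketch)::
+
+        await sleep_until(current_time() + 5)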
+
+ .. versionadded:: 3.1
+
+ """
+ now = current_time()
+ await sleep(max(deadline - now, 0))
+
+
+def current_time() -> float:
+ """
+ Return the current value of the event loop's internal clock.
+
+ :return: the clock value (seconds)
+
+ """
+ return get_async_backend().current_time()
+
+
+def get_all_backends() -> tuple[str, ...]:
+ """Return a tuple of the names of all built-in backends."""
+ return BACKENDS
+
+
+def get_cancelled_exc_class() -> type[BaseException]:
+ """Return the current async library's cancellation exception class."""
+ return get_async_backend().cancelled_exception_class()
+
+
+#
+# Private API
+#
+
+
+@contextmanager
+def claim_worker_thread(
+ backend_class: type[AsyncBackend], token: object
+) -> Generator[Any, None, None]:
+ threadlocals.current_async_backend = backend_class
+ threadlocals.current_token = token
+ try:
+ yield
+ finally:
+ del threadlocals.current_async_backend
+ del threadlocals.current_token
+
+
+def get_async_backend(asynclib_name: str | None = None) -> AsyncBackend:
+ if asynclib_name is None:
+ asynclib_name = sniffio.current_async_library()
+
+ modulename = "anyio._backends._" + asynclib_name
+ try:
+ module = sys.modules[modulename]
+ except KeyError:
+ module = import_module(modulename)
+
+ return getattr(module, "backend_class")
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py b/venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py
new file mode 100644
index 0000000..571c3b8
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_exceptions.py
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+
+class BrokenResourceError(Exception):
+ """
+ Raised when trying to use a resource that has been rendered unusable due to external
+ causes (e.g. a send stream whose peer has disconnected).
+ """
+
+
+class BrokenWorkerProcess(Exception):
+ """
+    Raised by :func:`anyio.to_process.run_sync` if the worker process terminates
+    abruptly or otherwise misbehaves.
+ """
+
+
+class BusyResourceError(Exception):
+ """
+ Raised when two tasks are trying to read from or write to the same resource
+ concurrently.
+ """
+
+ def __init__(self, action: str):
+ super().__init__(f"Another task is already {action} this resource")
+
+
+class ClosedResourceError(Exception):
+ """Raised when trying to use a resource that has been closed."""
+
+
+class DelimiterNotFound(Exception):
+ """
+ Raised during
+ :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
+ maximum number of bytes has been read without the delimiter being found.
+ """
+
+ def __init__(self, max_bytes: int) -> None:
+ super().__init__(
+ f"The delimiter was not found among the first {max_bytes} bytes"
+ )
+
+
+class EndOfStream(Exception):
+ """
+ Raised when trying to read from a stream that has been closed from the other end.
+ """
+
+
+class IncompleteRead(Exception):
+ """
+ Raised during
+ :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
+ :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
+ connection is closed before the requested amount of bytes has been read.
+ """
+
+ def __init__(self) -> None:
+ super().__init__(
+ "The stream was closed before the read operation could be completed"
+ )
+
+
+class TypedAttributeLookupError(LookupError):
+ """
+ Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute
+ is not found and no default value has been given.
+ """
+
+
+class WouldBlock(Exception):
+ """Raised by ``X_nowait`` functions if ``X()`` would block."""
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_fileio.py b/venv/lib/python3.11/site-packages/anyio/_core/_fileio.py
new file mode 100644
index 0000000..d054be6
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_fileio.py
@@ -0,0 +1,645 @@
+from __future__ import annotations
+
+import os
+import pathlib
+import sys
+from collections.abc import Callable, Iterable, Iterator, Sequence
+from dataclasses import dataclass
+from functools import partial
+from os import PathLike
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ AnyStr,
+ AsyncIterator,
+ Final,
+ Generic,
+ overload,
+)
+
+from .. import to_thread
+from ..abc import AsyncResource
+
+if TYPE_CHECKING:
+ from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
+else:
+ ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object
+
+
+class AsyncFile(AsyncResource, Generic[AnyStr]):
+ """
+ An asynchronous file object.
+
+ This class wraps a standard file object and provides async friendly versions of the
+ following blocking methods (where available on the original file object):
+
+ * read
+ * read1
+ * readline
+ * readlines
+ * readinto
+ * readinto1
+ * write
+ * writelines
+ * truncate
+ * seek
+ * tell
+ * flush
+
+ All other methods are directly passed through.
+
+ This class supports the asynchronous context manager protocol which closes the
+ underlying file at the end of the context block.
+
+ This class also supports asynchronous iteration::
+
+ async with await open_file(...) as f:
+ async for line in f:
+ print(line)
+ """
+
+ def __init__(self, fp: IO[AnyStr]) -> None:
+ self._fp: Any = fp
+
+ def __getattr__(self, name: str) -> object:
+ return getattr(self._fp, name)
+
+ @property
+ def wrapped(self) -> IO[AnyStr]:
+ """The wrapped file object."""
+ return self._fp
+
+ async def __aiter__(self) -> AsyncIterator[AnyStr]:
+ while True:
+ line = await self.readline()
+ if line:
+ yield line
+ else:
+ break
+
+ async def aclose(self) -> None:
+ return await to_thread.run_sync(self._fp.close)
+
+ async def read(self, size: int = -1) -> AnyStr:
+ return await to_thread.run_sync(self._fp.read, size)
+
+ async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
+ return await to_thread.run_sync(self._fp.read1, size)
+
+ async def readline(self) -> AnyStr:
+ return await to_thread.run_sync(self._fp.readline)
+
+ async def readlines(self) -> list[AnyStr]:
+ return await to_thread.run_sync(self._fp.readlines)
+
+    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
+        return await to_thread.run_sync(self._fp.readinto, b)
+
+    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
+        return await to_thread.run_sync(self._fp.readinto1, b)
+
+ @overload
+ async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int:
+ ...
+
+ @overload
+ async def write(self: AsyncFile[str], b: str) -> int:
+ ...
+
+ async def write(self, b: ReadableBuffer | str) -> int:
+ return await to_thread.run_sync(self._fp.write, b)
+
+ @overload
+ async def writelines(
+ self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
+ ) -> None:
+ ...
+
+ @overload
+ async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None:
+ ...
+
+ async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
+ return await to_thread.run_sync(self._fp.writelines, lines)
+
+ async def truncate(self, size: int | None = None) -> int:
+ return await to_thread.run_sync(self._fp.truncate, size)
+
+ async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
+ return await to_thread.run_sync(self._fp.seek, offset, whence)
+
+ async def tell(self) -> int:
+ return await to_thread.run_sync(self._fp.tell)
+
+ async def flush(self) -> None:
+ return await to_thread.run_sync(self._fp.flush)
+
+
+@overload
+async def open_file(
+ file: str | PathLike[str] | int,
+ mode: OpenBinaryMode,
+ buffering: int = ...,
+ encoding: str | None = ...,
+ errors: str | None = ...,
+ newline: str | None = ...,
+ closefd: bool = ...,
+ opener: Callable[[str, int], int] | None = ...,
+) -> AsyncFile[bytes]:
+ ...
+
+
+@overload
+async def open_file(
+ file: str | PathLike[str] | int,
+ mode: OpenTextMode = ...,
+ buffering: int = ...,
+ encoding: str | None = ...,
+ errors: str | None = ...,
+ newline: str | None = ...,
+ closefd: bool = ...,
+ opener: Callable[[str, int], int] | None = ...,
+) -> AsyncFile[str]:
+ ...
+
+
+async def open_file(
+ file: str | PathLike[str] | int,
+ mode: str = "r",
+ buffering: int = -1,
+ encoding: str | None = None,
+ errors: str | None = None,
+ newline: str | None = None,
+ closefd: bool = True,
+ opener: Callable[[str, int], int] | None = None,
+) -> AsyncFile[Any]:
+ """
+ Open a file asynchronously.
+
+ The arguments are exactly the same as for the builtin :func:`open`.
+
+ :return: an asynchronous file object
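+
+    For example (a sketch; the path is illustrative)::
+
+        async with await open_file("/tmp/greeting.txt", "w") as f:
+            await f.write("hello")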
+
+ """
+ fp = await to_thread.run_sync(
+ open, file, mode, buffering, encoding, errors, newline, closefd, opener
+ )
+ return AsyncFile(fp)
+
+
+def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
+ """
+ Wrap an existing file as an asynchronous file.
+
+ :param file: an existing file-like object
+ :return: an asynchronous file object
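+
+    For example (sketch)::
+
+        import io
+
+        async_file = wrap_file(io.StringIO("line1\nline2\n"))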
+
+ """
+ return AsyncFile(file)
+
+
+@dataclass(eq=False)
+class _PathIterator(AsyncIterator["Path"]):
+ iterator: Iterator[PathLike[str]]
+
+ async def __anext__(self) -> Path:
+ nextval = await to_thread.run_sync(
+ next, self.iterator, None, abandon_on_cancel=True
+ )
+ if nextval is None:
+ raise StopAsyncIteration from None
+
+ return Path(nextval)
+
+
+class Path:
+ """
+ An asynchronous version of :class:`pathlib.Path`.
+
+ This class cannot be substituted for :class:`pathlib.Path` or
+ :class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
+ interface.
+
+    It implements the Python 3.10 version of the :class:`pathlib.Path` interface,
+    except for the deprecated :meth:`~pathlib.Path.link_to` method.
+
+ Any methods that do disk I/O need to be awaited on. These methods are:
+
+ * :meth:`~pathlib.Path.absolute`
+ * :meth:`~pathlib.Path.chmod`
+ * :meth:`~pathlib.Path.cwd`
+ * :meth:`~pathlib.Path.exists`
+ * :meth:`~pathlib.Path.expanduser`
+ * :meth:`~pathlib.Path.group`
+ * :meth:`~pathlib.Path.hardlink_to`
+ * :meth:`~pathlib.Path.home`
+ * :meth:`~pathlib.Path.is_block_device`
+ * :meth:`~pathlib.Path.is_char_device`
+ * :meth:`~pathlib.Path.is_dir`
+ * :meth:`~pathlib.Path.is_fifo`
+ * :meth:`~pathlib.Path.is_file`
+ * :meth:`~pathlib.Path.is_mount`
+ * :meth:`~pathlib.Path.lchmod`
+ * :meth:`~pathlib.Path.lstat`
+ * :meth:`~pathlib.Path.mkdir`
+ * :meth:`~pathlib.Path.open`
+ * :meth:`~pathlib.Path.owner`
+ * :meth:`~pathlib.Path.read_bytes`
+ * :meth:`~pathlib.Path.read_text`
+ * :meth:`~pathlib.Path.readlink`
+ * :meth:`~pathlib.Path.rename`
+ * :meth:`~pathlib.Path.replace`
+ * :meth:`~pathlib.Path.rmdir`
+ * :meth:`~pathlib.Path.samefile`
+ * :meth:`~pathlib.Path.stat`
+ * :meth:`~pathlib.Path.touch`
+ * :meth:`~pathlib.Path.unlink`
+ * :meth:`~pathlib.Path.write_bytes`
+ * :meth:`~pathlib.Path.write_text`
+
+    Additionally, the following methods return an async iterator yielding
+    :class:`~.Path` objects, as shown in the example below:
+
+ * :meth:`~pathlib.Path.glob`
+ * :meth:`~pathlib.Path.iterdir`
+ * :meth:`~pathlib.Path.rglob`
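+
+    For example (a sketch; the directory and pattern are illustrative)::
+
+        async for path in Path("/etc").glob("*.conf"):
+            print(path)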
+ """
+
+ __slots__ = "_path", "__weakref__"
+
+ __weakref__: Any
+
+ def __init__(self, *args: str | PathLike[str]) -> None:
+ self._path: Final[pathlib.Path] = pathlib.Path(*args)
+
+ def __fspath__(self) -> str:
+ return self._path.__fspath__()
+
+ def __str__(self) -> str:
+ return self._path.__str__()
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}({self.as_posix()!r})"
+
+ def __bytes__(self) -> bytes:
+ return self._path.__bytes__()
+
+ def __hash__(self) -> int:
+ return self._path.__hash__()
+
+ def __eq__(self, other: object) -> bool:
+ target = other._path if isinstance(other, Path) else other
+ return self._path.__eq__(target)
+
+ def __lt__(self, other: pathlib.PurePath | Path) -> bool:
+ target = other._path if isinstance(other, Path) else other
+ return self._path.__lt__(target)
+
+ def __le__(self, other: pathlib.PurePath | Path) -> bool:
+ target = other._path if isinstance(other, Path) else other
+ return self._path.__le__(target)
+
+ def __gt__(self, other: pathlib.PurePath | Path) -> bool:
+ target = other._path if isinstance(other, Path) else other
+ return self._path.__gt__(target)
+
+ def __ge__(self, other: pathlib.PurePath | Path) -> bool:
+ target = other._path if isinstance(other, Path) else other
+ return self._path.__ge__(target)
+
+ def __truediv__(self, other: str | PathLike[str]) -> Path:
+ return Path(self._path / other)
+
+ def __rtruediv__(self, other: str | PathLike[str]) -> Path:
+ return Path(other) / self
+
+ @property
+ def parts(self) -> tuple[str, ...]:
+ return self._path.parts
+
+ @property
+ def drive(self) -> str:
+ return self._path.drive
+
+ @property
+ def root(self) -> str:
+ return self._path.root
+
+ @property
+ def anchor(self) -> str:
+ return self._path.anchor
+
+ @property
+ def parents(self) -> Sequence[Path]:
+ return tuple(Path(p) for p in self._path.parents)
+
+ @property
+ def parent(self) -> Path:
+ return Path(self._path.parent)
+
+ @property
+ def name(self) -> str:
+ return self._path.name
+
+ @property
+ def suffix(self) -> str:
+ return self._path.suffix
+
+ @property
+ def suffixes(self) -> list[str]:
+ return self._path.suffixes
+
+ @property
+ def stem(self) -> str:
+ return self._path.stem
+
+ async def absolute(self) -> Path:
+ path = await to_thread.run_sync(self._path.absolute)
+ return Path(path)
+
+ def as_posix(self) -> str:
+ return self._path.as_posix()
+
+ def as_uri(self) -> str:
+ return self._path.as_uri()
+
+ def match(self, path_pattern: str) -> bool:
+ return self._path.match(path_pattern)
+
+ def is_relative_to(self, other: str | PathLike[str]) -> bool:
+ try:
+ self.relative_to(other)
+ return True
+ except ValueError:
+ return False
+
+ async def is_junction(self) -> bool:
+ return await to_thread.run_sync(self._path.is_junction)
+
+ async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
+ func = partial(os.chmod, follow_symlinks=follow_symlinks)
+ return await to_thread.run_sync(func, self._path, mode)
+
+ @classmethod
+ async def cwd(cls) -> Path:
+ path = await to_thread.run_sync(pathlib.Path.cwd)
+ return cls(path)
+
+ async def exists(self) -> bool:
+ return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)
+
+ async def expanduser(self) -> Path:
+ return Path(
+ await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
+ )
+
+ def glob(self, pattern: str) -> AsyncIterator[Path]:
+ gen = self._path.glob(pattern)
+ return _PathIterator(gen)
+
+ async def group(self) -> str:
+ return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)
+
+ async def hardlink_to(
+ self, target: str | bytes | PathLike[str] | PathLike[bytes]
+ ) -> None:
+ if isinstance(target, Path):
+ target = target._path
+
+ await to_thread.run_sync(os.link, target, self)
+
+ @classmethod
+ async def home(cls) -> Path:
+ home_path = await to_thread.run_sync(pathlib.Path.home)
+ return cls(home_path)
+
+ def is_absolute(self) -> bool:
+ return self._path.is_absolute()
+
+ async def is_block_device(self) -> bool:
+ return await to_thread.run_sync(
+ self._path.is_block_device, abandon_on_cancel=True
+ )
+
+ async def is_char_device(self) -> bool:
+ return await to_thread.run_sync(
+ self._path.is_char_device, abandon_on_cancel=True
+ )
+
+ async def is_dir(self) -> bool:
+ return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)
+
+ async def is_fifo(self) -> bool:
+ return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)
+
+ async def is_file(self) -> bool:
+ return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)
+
+ async def is_mount(self) -> bool:
+ return await to_thread.run_sync(
+ os.path.ismount, self._path, abandon_on_cancel=True
+ )
+
+ def is_reserved(self) -> bool:
+ return self._path.is_reserved()
+
+ async def is_socket(self) -> bool:
+ return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)
+
+ async def is_symlink(self) -> bool:
+ return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)
+
+ def iterdir(self) -> AsyncIterator[Path]:
+ gen = self._path.iterdir()
+ return _PathIterator(gen)
+
+ def joinpath(self, *args: str | PathLike[str]) -> Path:
+ return Path(self._path.joinpath(*args))
+
+ async def lchmod(self, mode: int) -> None:
+ await to_thread.run_sync(self._path.lchmod, mode)
+
+ async def lstat(self) -> os.stat_result:
+ return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)
+
+ async def mkdir(
+ self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
+ ) -> None:
+ await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)
+
+ @overload
+ async def open(
+ self,
+ mode: OpenBinaryMode,
+ buffering: int = ...,
+ encoding: str | None = ...,
+ errors: str | None = ...,
+ newline: str | None = ...,
+ ) -> AsyncFile[bytes]:
+ ...
+
+ @overload
+ async def open(
+ self,
+ mode: OpenTextMode = ...,
+ buffering: int = ...,
+ encoding: str | None = ...,
+ errors: str | None = ...,
+ newline: str | None = ...,
+ ) -> AsyncFile[str]:
+ ...
+
+ async def open(
+ self,
+ mode: str = "r",
+ buffering: int = -1,
+ encoding: str | None = None,
+ errors: str | None = None,
+ newline: str | None = None,
+ ) -> AsyncFile[Any]:
+ fp = await to_thread.run_sync(
+ self._path.open, mode, buffering, encoding, errors, newline
+ )
+ return AsyncFile(fp)
+
+ async def owner(self) -> str:
+ return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)
+
+ async def read_bytes(self) -> bytes:
+ return await to_thread.run_sync(self._path.read_bytes)
+
+ async def read_text(
+ self, encoding: str | None = None, errors: str | None = None
+ ) -> str:
+ return await to_thread.run_sync(self._path.read_text, encoding, errors)
+
+ if sys.version_info >= (3, 12):
+
+ def relative_to(
+ self, *other: str | PathLike[str], walk_up: bool = False
+ ) -> Path:
+ return Path(self._path.relative_to(*other, walk_up=walk_up))
+
+ else:
+
+ def relative_to(self, *other: str | PathLike[str]) -> Path:
+ return Path(self._path.relative_to(*other))
+
+ async def readlink(self) -> Path:
+ target = await to_thread.run_sync(os.readlink, self._path)
+ return Path(target)
+
+ async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
+ if isinstance(target, Path):
+ target = target._path
+
+ await to_thread.run_sync(self._path.rename, target)
+ return Path(target)
+
+ async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
+ if isinstance(target, Path):
+ target = target._path
+
+ await to_thread.run_sync(self._path.replace, target)
+ return Path(target)
+
+ async def resolve(self, strict: bool = False) -> Path:
+ func = partial(self._path.resolve, strict=strict)
+ return Path(await to_thread.run_sync(func, abandon_on_cancel=True))
+
+ def rglob(self, pattern: str) -> AsyncIterator[Path]:
+ gen = self._path.rglob(pattern)
+ return _PathIterator(gen)
+
+ async def rmdir(self) -> None:
+ await to_thread.run_sync(self._path.rmdir)
+
+ async def samefile(self, other_path: str | PathLike[str]) -> bool:
+ if isinstance(other_path, Path):
+ other_path = other_path._path
+
+ return await to_thread.run_sync(
+ self._path.samefile, other_path, abandon_on_cancel=True
+ )
+
+ async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
+ func = partial(os.stat, follow_symlinks=follow_symlinks)
+ return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)
+
+ async def symlink_to(
+ self,
+ target: str | bytes | PathLike[str] | PathLike[bytes],
+ target_is_directory: bool = False,
+ ) -> None:
+ if isinstance(target, Path):
+ target = target._path
+
+ await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)
+
+ async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
+ await to_thread.run_sync(self._path.touch, mode, exist_ok)
+
+ async def unlink(self, missing_ok: bool = False) -> None:
+ try:
+ await to_thread.run_sync(self._path.unlink)
+ except FileNotFoundError:
+ if not missing_ok:
+ raise
+
+ if sys.version_info >= (3, 12):
+
+ async def walk(
+ self,
+ top_down: bool = True,
+ on_error: Callable[[OSError], object] | None = None,
+ follow_symlinks: bool = False,
+ ) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
+ def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
+ try:
+ return next(gen)
+ except StopIteration:
+ return None
+
+ gen = self._path.walk(top_down, on_error, follow_symlinks)
+ while True:
+ value = await to_thread.run_sync(get_next_value)
+ if value is None:
+ return
+
+ root, dirs, paths = value
+ yield Path(root), dirs, paths
+
+ def with_name(self, name: str) -> Path:
+ return Path(self._path.with_name(name))
+
+ def with_stem(self, stem: str) -> Path:
+ return Path(self._path.with_name(stem + self._path.suffix))
+
+ def with_suffix(self, suffix: str) -> Path:
+ return Path(self._path.with_suffix(suffix))
+
+ def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
+ return Path(*pathsegments)
+
+ async def write_bytes(self, data: bytes) -> int:
+ return await to_thread.run_sync(self._path.write_bytes, data)
+
+ async def write_text(
+ self,
+ data: str,
+ encoding: str | None = None,
+ errors: str | None = None,
+ newline: str | None = None,
+ ) -> int:
+ # Path.write_text() does not support the "newline" parameter before Python 3.10
+ def sync_write_text() -> int:
+ with self._path.open(
+ "w", encoding=encoding, errors=errors, newline=newline
+ ) as fp:
+ return fp.write(data)
+
+ return await to_thread.run_sync(sync_write_text)
+
+
+PathLike.register(Path)
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_resources.py b/venv/lib/python3.11/site-packages/anyio/_core/_resources.py
new file mode 100644
index 0000000..b9a5344
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_resources.py
@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from ..abc import AsyncResource
+from ._tasks import CancelScope
+
+
+async def aclose_forcefully(resource: AsyncResource) -> None:
+ """
+ Close an asynchronous resource in a cancelled scope.
+
+ Doing this closes the resource without waiting on anything.
+
+ :param resource: the resource to close
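+
+    For example, to discard a stream without waiting for a clean shutdown
+    (sketch; ``stream`` is assumed to be an open ``AsyncResource``)::
+
+        await aclose_forcefully(stream)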
+
+ """
+ with CancelScope() as scope:
+ scope.cancel()
+ await resource.aclose()
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_signals.py b/venv/lib/python3.11/site-packages/anyio/_core/_signals.py
new file mode 100644
index 0000000..115c749
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_signals.py
@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from collections.abc import AsyncIterator
+from signal import Signals
+from typing import ContextManager
+
+from ._eventloop import get_async_backend
+
+
+def open_signal_receiver(*signals: Signals) -> ContextManager[AsyncIterator[Signals]]:
+ """
+ Start receiving operating system signals.
+
+ :param signals: signals to receive (e.g. ``signal.SIGINT``)
+ :return: an asynchronous context manager for an asynchronous iterator which yields
+ signal numbers
+
+    .. warning:: Windows does not support signals natively, so it is best to avoid
+ relying on this in cross-platform applications.
+
+ .. warning:: On asyncio, this permanently replaces any previous signal handler for
+ the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.
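+
+    For example (sketch, POSIX only)::
+
+        import signal
+
+        with open_signal_receiver(signal.SIGTERM) as signals:
+            async for signum in signals:
+                print("got signal", signum)
+                break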
+
+ """
+ return get_async_backend().open_signal_receiver(*signals)
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_sockets.py b/venv/lib/python3.11/site-packages/anyio/_core/_sockets.py
new file mode 100644
index 0000000..0f0a314
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_sockets.py
@@ -0,0 +1,716 @@
+from __future__ import annotations
+
+import errno
+import os
+import socket
+import ssl
+import stat
+import sys
+from collections.abc import Awaitable
+from ipaddress import IPv6Address, ip_address
+from os import PathLike, chmod
+from socket import AddressFamily, SocketKind
+from typing import Any, Literal, cast, overload
+
+from .. import to_thread
+from ..abc import (
+ ConnectedUDPSocket,
+ ConnectedUNIXDatagramSocket,
+ IPAddressType,
+ IPSockAddrType,
+ SocketListener,
+ SocketStream,
+ UDPSocket,
+ UNIXDatagramSocket,
+ UNIXSocketStream,
+)
+from ..streams.stapled import MultiListener
+from ..streams.tls import TLSStream
+from ._eventloop import get_async_backend
+from ._resources import aclose_forcefully
+from ._synchronization import Event
+from ._tasks import create_task_group, move_on_after
+
+if sys.version_info < (3, 11):
+ from exceptiongroup import ExceptionGroup
+
+IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41) # https://bugs.python.org/issue29515
+
+AnyIPAddressFamily = Literal[
+ AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
+]
+IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]
+
+
+# tls_hostname given
+@overload
+async def connect_tcp(
+ remote_host: IPAddressType,
+ remote_port: int,
+ *,
+ local_host: IPAddressType | None = ...,
+ ssl_context: ssl.SSLContext | None = ...,
+ tls_standard_compatible: bool = ...,
+ tls_hostname: str,
+ happy_eyeballs_delay: float = ...,
+) -> TLSStream:
+ ...
+
+
+# ssl_context given
+@overload
+async def connect_tcp(
+ remote_host: IPAddressType,
+ remote_port: int,
+ *,
+ local_host: IPAddressType | None = ...,
+ ssl_context: ssl.SSLContext,
+ tls_standard_compatible: bool = ...,
+ tls_hostname: str | None = ...,
+ happy_eyeballs_delay: float = ...,
+) -> TLSStream:
+ ...
+
+
+# tls=True
+@overload
+async def connect_tcp(
+ remote_host: IPAddressType,
+ remote_port: int,
+ *,
+ local_host: IPAddressType | None = ...,
+ tls: Literal[True],
+ ssl_context: ssl.SSLContext | None = ...,
+ tls_standard_compatible: bool = ...,
+ tls_hostname: str | None = ...,
+ happy_eyeballs_delay: float = ...,
+) -> TLSStream:
+ ...
+
+
+# tls=False
+@overload
+async def connect_tcp(
+ remote_host: IPAddressType,
+ remote_port: int,
+ *,
+ local_host: IPAddressType | None = ...,
+ tls: Literal[False],
+ ssl_context: ssl.SSLContext | None = ...,
+ tls_standard_compatible: bool = ...,
+ tls_hostname: str | None = ...,
+ happy_eyeballs_delay: float = ...,
+) -> SocketStream:
+ ...
+
+
+# No TLS arguments
+@overload
+async def connect_tcp(
+ remote_host: IPAddressType,
+ remote_port: int,
+ *,
+ local_host: IPAddressType | None = ...,
+ happy_eyeballs_delay: float = ...,
+) -> SocketStream:
+ ...
+
+
+async def connect_tcp(
+ remote_host: IPAddressType,
+ remote_port: int,
+ *,
+ local_host: IPAddressType | None = None,
+ tls: bool = False,
+ ssl_context: ssl.SSLContext | None = None,
+ tls_standard_compatible: bool = True,
+ tls_hostname: str | None = None,
+ happy_eyeballs_delay: float = 0.25,
+) -> SocketStream | TLSStream:
+ """
+ Connect to a host using the TCP protocol.
+
+ This function implements the stateless version of the Happy Eyeballs algorithm (RFC
+ 6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
+    each one is tried until one connection attempt succeeds. If the first attempt does
+    not connect within 250 milliseconds, a second attempt is started using the next
+    address in the list, and so on. On IPv6-enabled systems, an IPv6 address (if
+ available) is tried first.
+
+ When the connection has been established, a TLS handshake will be done if either
+ ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.
+
+ :param remote_host: the IP address or host name to connect to
+ :param remote_port: port on the target host to connect to
+ :param local_host: the interface address or name to bind the socket to before
+ connecting
+ :param tls: ``True`` to do a TLS handshake with the connected stream and return a
+ :class:`~anyio.streams.tls.TLSStream` instead
+ :param ssl_context: the SSL context object to use (if omitted, a default context is
+ created)
+ :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
+ before closing the stream and requires that the server does this as well.
+ Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
+ Some protocols, such as HTTP, require this option to be ``False``.
+ See :meth:`~ssl.SSLContext.wrap_socket` for details.
+ :param tls_hostname: host name to check the server certificate against (defaults to
+ the value of ``remote_host``)
+ :param happy_eyeballs_delay: delay (in seconds) before starting the next connection
+ attempt
+ :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
+ :raises OSError: if the connection attempt fails
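+
+    For example (a sketch; the host, port and payload are illustrative)::
+
+        stream = await connect_tcp("example.org", 80)
+        async with stream:
+            await stream.send(b"ping")
+            response = await stream.receive()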
+
+ """
+ # Placed here due to https://github.com/python/mypy/issues/7057
+ connected_stream: SocketStream | None = None
+
+ async def try_connect(remote_host: str, event: Event) -> None:
+ nonlocal connected_stream
+ try:
+ stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
+ except OSError as exc:
+ oserrors.append(exc)
+ return
+ else:
+ if connected_stream is None:
+ connected_stream = stream
+ tg.cancel_scope.cancel()
+ else:
+ await stream.aclose()
+ finally:
+ event.set()
+
+ asynclib = get_async_backend()
+ local_address: IPSockAddrType | None = None
+ family = socket.AF_UNSPEC
+ if local_host:
+ gai_res = await getaddrinfo(str(local_host), None)
+ family, *_, local_address = gai_res[0]
+
+ target_host = str(remote_host)
+ try:
+ addr_obj = ip_address(remote_host)
+ except ValueError:
+ # getaddrinfo() will raise an exception if name resolution fails
+ gai_res = await getaddrinfo(
+ target_host, remote_port, family=family, type=socket.SOCK_STREAM
+ )
+
+ # Organize the list so that the first address is an IPv6 address (if available)
+        # and the second one is an IPv4 address. The rest can be in whatever order.
+ v6_found = v4_found = False
+ target_addrs: list[tuple[socket.AddressFamily, str]] = []
+ for af, *rest, sa in gai_res:
+ if af == socket.AF_INET6 and not v6_found:
+ v6_found = True
+ target_addrs.insert(0, (af, sa[0]))
+ elif af == socket.AF_INET and not v4_found and v6_found:
+ v4_found = True
+ target_addrs.insert(1, (af, sa[0]))
+ else:
+ target_addrs.append((af, sa[0]))
+ else:
+ if isinstance(addr_obj, IPv6Address):
+ target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
+ else:
+ target_addrs = [(socket.AF_INET, addr_obj.compressed)]
+
+ oserrors: list[OSError] = []
+ async with create_task_group() as tg:
+ for i, (af, addr) in enumerate(target_addrs):
+ event = Event()
+ tg.start_soon(try_connect, addr, event)
+ with move_on_after(happy_eyeballs_delay):
+ await event.wait()
+
+ if connected_stream is None:
+ cause = (
+ oserrors[0]
+ if len(oserrors) == 1
+ else ExceptionGroup("multiple connection attempts failed", oserrors)
+ )
+ raise OSError("All connection attempts failed") from cause
+
+ if tls or tls_hostname or ssl_context:
+ try:
+ return await TLSStream.wrap(
+ connected_stream,
+ server_side=False,
+ hostname=tls_hostname or str(remote_host),
+ ssl_context=ssl_context,
+ standard_compatible=tls_standard_compatible,
+ )
+ except BaseException:
+ await aclose_forcefully(connected_stream)
+ raise
+
+ return connected_stream
+
+
+async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream:
+ """
+ Connect to the given UNIX socket.
+
+ Not available on Windows.
+
+ :param path: path to the socket
+ :return: a socket stream object
+
+ """
+ path = os.fspath(path)
+ return await get_async_backend().connect_unix(path)
+
+
+async def create_tcp_listener(
+ *,
+ local_host: IPAddressType | None = None,
+ local_port: int = 0,
+ family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
+ backlog: int = 65536,
+ reuse_port: bool = False,
+) -> MultiListener[SocketStream]:
+ """
+ Create a TCP socket listener.
+
+ :param local_port: port number to listen on
+ :param local_host: IP address of the interface to listen on. If omitted, listen on
+ all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
+ family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
+ :param family: address family (used if ``local_host`` was omitted)
+ :param backlog: maximum number of queued incoming connections (up to a maximum of
+ 2**16, or 65536)
+ :param reuse_port: ``True`` to allow multiple sockets to bind to the same
+ address/port (not supported on Windows)
+    :return: a multi-listener wrapping the created listener objects
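+
+    Usage sketch (the port and handler are illustrative)::
+
+        async def handle(stream):
+            async with stream:
+                await stream.send(b"hello")
+
+        listener = await create_tcp_listener(local_port=8000)
+        await listener.serve(handle)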
+
+ """
+ asynclib = get_async_backend()
+ backlog = min(backlog, 65536)
+ local_host = str(local_host) if local_host is not None else None
+ gai_res = await getaddrinfo(
+ local_host,
+ local_port,
+ family=family,
+ type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
+ flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
+ )
+ listeners: list[SocketListener] = []
+ try:
+ # The set() is here to work around a glibc bug:
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
+ sockaddr: tuple[str, int] | tuple[str, int, int, int]
+ for fam, kind, *_, sockaddr in sorted(set(gai_res)):
+ # Workaround for an uvloop bug where we don't get the correct scope ID for
+ # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
+ # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
+ if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
+ continue
+
+ raw_socket = socket.socket(fam)
+ raw_socket.setblocking(False)
+
+ # For Windows, enable exclusive address use. For others, enable address
+ # reuse.
+ if sys.platform == "win32":
+ raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
+ else:
+ raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ if reuse_port:
+ raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+
+ # If only IPv6 was requested, disable dual stack operation
+ if fam == socket.AF_INET6:
+ raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
+
+ # Workaround for #554
+ if "%" in sockaddr[0]:
+ addr, scope_id = sockaddr[0].split("%", 1)
+ sockaddr = (addr, sockaddr[1], 0, int(scope_id))
+
+ raw_socket.bind(sockaddr)
+ raw_socket.listen(backlog)
+ listener = asynclib.create_tcp_listener(raw_socket)
+ listeners.append(listener)
+ except BaseException:
+ for listener in listeners:
+ await listener.aclose()
+
+ raise
+
+ return MultiListener(listeners)
+
+
+async def create_unix_listener(
+ path: str | bytes | PathLike[Any],
+ *,
+ mode: int | None = None,
+ backlog: int = 65536,
+) -> SocketListener:
+ """
+ Create a UNIX socket listener.
+
+ Not available on Windows.
+
+ :param path: path of the socket
+ :param mode: permissions to set on the socket
+ :param backlog: maximum number of queued incoming connections (up to a maximum of
+ 2**16, or 65536)
+ :return: a listener object
+
+ .. versionchanged:: 3.0
+ If a socket already exists on the file system in the given path, it will be
+ removed first.
+
+ """
+ backlog = min(backlog, 65536)
+ raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
+ try:
+ raw_socket.listen(backlog)
+ return get_async_backend().create_unix_listener(raw_socket)
+ except BaseException:
+ raw_socket.close()
+ raise
+
+
+async def create_udp_socket(
+ family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
+ *,
+ local_host: IPAddressType | None = None,
+ local_port: int = 0,
+ reuse_port: bool = False,
+) -> UDPSocket:
+ """
+ Create a UDP socket.
+
+    If ``local_port`` has been given, the socket will be bound to this port on the
+    local machine, making this socket suitable for providing UDP-based services.
+
+ :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
+ determined from ``local_host`` if omitted
+ :param local_host: IP address or host name of the local interface to bind to
+ :param local_port: local port to bind to
+ :param reuse_port: ``True`` to allow multiple sockets to bind to the same
+ address/port (not supported on Windows)
+ :return: a UDP socket
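+
+    For example (a sketch; the address and port are illustrative)::
+
+        udp = await create_udp_socket(local_host="127.0.0.1", local_port=9999)
+        async with udp:
+            data, addr = await udp.receive()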
+
+ """
+ if family is AddressFamily.AF_UNSPEC and not local_host:
+ raise ValueError('Either "family" or "local_host" must be given')
+
+ if local_host:
+ gai_res = await getaddrinfo(
+ str(local_host),
+ local_port,
+ family=family,
+ type=socket.SOCK_DGRAM,
+ flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
+ )
+ family = cast(AnyIPAddressFamily, gai_res[0][0])
+ local_address = gai_res[0][-1]
+ elif family is AddressFamily.AF_INET6:
+ local_address = ("::", 0)
+ else:
+ local_address = ("0.0.0.0", 0)
+
+ sock = await get_async_backend().create_udp_socket(
+ family, local_address, None, reuse_port
+ )
+ return cast(UDPSocket, sock)
+
+
+async def create_connected_udp_socket(
+ remote_host: IPAddressType,
+ remote_port: int,
+ *,
+ family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
+ local_host: IPAddressType | None = None,
+ local_port: int = 0,
+ reuse_port: bool = False,
+) -> ConnectedUDPSocket:
+ """
+ Create a connected UDP socket.
+
+    Connected UDP sockets can only communicate with the specified remote host/port, and
+    any packets sent from other sources are dropped.
+
+ :param remote_host: remote host to set as the default target
+ :param remote_port: port on the remote host to set as the default target
+ :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
+ determined from ``local_host`` or ``remote_host`` if omitted
+ :param local_host: IP address or host name of the local interface to bind to
+ :param local_port: local port to bind to
+ :param reuse_port: ``True`` to allow multiple sockets to bind to the same
+ address/port (not supported on Windows)
+ :return: a connected UDP socket
+
+ """
+ local_address = None
+ if local_host:
+ gai_res = await getaddrinfo(
+ str(local_host),
+ local_port,
+ family=family,
+ type=socket.SOCK_DGRAM,
+ flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
+ )
+ family = cast(AnyIPAddressFamily, gai_res[0][0])
+ local_address = gai_res[0][-1]
+
+ gai_res = await getaddrinfo(
+ str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
+ )
+ family = cast(AnyIPAddressFamily, gai_res[0][0])
+ remote_address = gai_res[0][-1]
+
+ sock = await get_async_backend().create_udp_socket(
+ family, local_address, remote_address, reuse_port
+ )
+ return cast(ConnectedUDPSocket, sock)
+
+
+async def create_unix_datagram_socket(
+ *,
+ local_path: None | str | bytes | PathLike[Any] = None,
+ local_mode: int | None = None,
+) -> UNIXDatagramSocket:
+ """
+ Create a UNIX datagram socket.
+
+ Not available on Windows.
+
+ If ``local_path`` has been given, the socket will be bound to this path, making this
+ socket suitable for receiving datagrams from other processes. Other processes can
+ send datagrams to this socket only if ``local_path`` is set.
+
+ If a socket already exists on the file system in the ``local_path``, it will be
+ removed first.
+
+    :param local_path: the path to bind to
+ :param local_mode: permissions to set on the local socket
+ :return: a UNIX datagram socket
+
+ """
+ raw_socket = await setup_unix_local_socket(
+ local_path, local_mode, socket.SOCK_DGRAM
+ )
+ return await get_async_backend().create_unix_datagram_socket(raw_socket, None)
+
+
+async def create_connected_unix_datagram_socket(
+ remote_path: str | bytes | PathLike[Any],
+ *,
+ local_path: None | str | bytes | PathLike[Any] = None,
+ local_mode: int | None = None,
+) -> ConnectedUNIXDatagramSocket:
+ """
+ Create a connected UNIX datagram socket.
+
+ Connected datagram sockets can only communicate with the specified remote path.
+
+ If ``local_path`` has been given, the socket will be bound to this path, making
+ this socket suitable for receiving datagrams from other processes. Other processes
+ can send datagrams to this socket only if ``local_path`` is set.
+
+ If a socket already exists on the file system in the ``local_path``, it will be
+ removed first.
+
+ :param remote_path: the path to set as the default target
+    :param local_path: the path to bind to
+ :param local_mode: permissions to set on the local socket
+ :return: a connected UNIX datagram socket
+
+ """
+ remote_path = os.fspath(remote_path)
+ raw_socket = await setup_unix_local_socket(
+ local_path, local_mode, socket.SOCK_DGRAM
+ )
+ return await get_async_backend().create_unix_datagram_socket(
+ raw_socket, remote_path
+ )
+
+
+async def getaddrinfo(
+ host: bytes | str | None,
+ port: str | int | None,
+ *,
+ family: int | AddressFamily = 0,
+ type: int | SocketKind = 0,
+ proto: int = 0,
+ flags: int = 0,
+) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
+ """
+ Look up a numeric IP address given a host name.
+
+ Internationalized domain names are translated according to the (non-transitional)
+ IDNA 2008 standard.
+
+ .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
+ (host, port), unlike what :func:`socket.getaddrinfo` does.
+
+ :param host: host name
+ :param port: port number
+    :param family: socket family (``AF_INET``, ...)
+ :param type: socket type (``SOCK_STREAM``, ...)
+ :param proto: protocol number
+ :param flags: flags to pass to upstream ``getaddrinfo()``
+ :return: list of tuples containing (family, type, proto, canonname, sockaddr)
+
+ .. seealso:: :func:`socket.getaddrinfo`
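+
+    For example (sketch)::
+
+        res = await getaddrinfo("localhost", 80, type=socket.SOCK_STREAM)
+        family, type_, proto, canonname, sockaddr = res[0]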
+
+ """
+ # Handle unicode hostnames
+ if isinstance(host, str):
+ try:
+ encoded_host: bytes | None = host.encode("ascii")
+ except UnicodeEncodeError:
+ import idna
+
+ encoded_host = idna.encode(host, uts46=True)
+ else:
+ encoded_host = host
+
+ gai_res = await get_async_backend().getaddrinfo(
+ encoded_host, port, family=family, type=type, proto=proto, flags=flags
+ )
+ return [
+ (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
+ for family, type, proto, canonname, sockaddr in gai_res
+ ]
+
+
+def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
+ """
+ Look up the host name of an IP address.
+
+ :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
+ :param flags: flags to pass to upstream ``getnameinfo()``
+ :return: a tuple of (host name, service name)
+
+ .. seealso:: :func:`socket.getnameinfo`
+
+ """
+ return get_async_backend().getnameinfo(sockaddr, flags)
+
+
+def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
+ """
+ Wait until the given socket has data to be read.
+
+ This does **NOT** work on Windows when using the asyncio backend with a proactor
+ event loop (default on py3.8+).
+
+ .. warning:: Only use this on raw sockets that have not been wrapped by any higher
+ level constructs like socket streams!
+
+ :param sock: a socket object
+ :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
+ socket to become readable
+ :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
+ to become readable
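+
+    For example (sketch; ``sock`` is assumed to be a connected, non-blocking
+    socket)::
+
+        await wait_socket_readable(sock)
+        data = sock.recv(4096)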
+
+ """
+ return get_async_backend().wait_socket_readable(sock)
+
+
+def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
+ """
+ Wait until the given socket can be written to.
+
+ This does **NOT** work on Windows when using the asyncio backend with a proactor
+ event loop (default on py3.8+).
+
+ .. warning:: Only use this on raw sockets that have not been wrapped by any higher
+ level constructs like socket streams!
+
+ :param sock: a socket object
+ :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
+ socket to become writable
+ :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
+ to become writable
+
+ """
+ return get_async_backend().wait_socket_writable(sock)
+
+
+#
+# Private API
+#
+
+
+def convert_ipv6_sockaddr(
+ sockaddr: tuple[str, int, int, int] | tuple[str, int],
+) -> tuple[str, int]:
+ """
+ Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.
+
+ If the scope ID is nonzero, it is added to the address, separated with ``%``.
+    Otherwise, the flow ID and scope ID are simply cut off from the tuple.
+ Any other kinds of socket addresses are returned as-is.
+
+ :param sockaddr: the result of :meth:`~socket.socket.getsockname`
+ :return: the converted socket address
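+
+    For example (sketch)::
+
+        convert_ipv6_sockaddr(("fe80::1", 80, 0, 3))  # -> ("fe80::1%3", 80)
+        convert_ipv6_sockaddr(("::1", 80, 0, 0))      # -> ("::1", 80)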
+
+ """
+ # This is more complicated than it should be because of MyPy
+ if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
+ host, port, flowinfo, scope_id = sockaddr
+ if scope_id:
+ # PyPy (as of v7.3.11) leaves the interface name in the result, so
+ # we discard it and only get the scope ID from the end
+ # (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
+ host = host.split("%")[0]
+
+ # Add scope_id to the address
+ return f"{host}%{scope_id}", port
+ else:
+ return host, port
+ else:
+ return sockaddr
+
+
+async def setup_unix_local_socket(
+ path: None | str | bytes | PathLike[Any],
+ mode: int | None,
+ socktype: int,
+) -> socket.socket:
+ """
+ Create a UNIX local socket object, deleting the socket at the given path if it
+ exists.
+
+ Not available on Windows.
+
+ :param path: path of the socket
+ :param mode: permissions to set on the socket
+ :param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM
+
+ """
+ path_str: str | bytes | None
+ if path is not None:
+ path_str = os.fspath(path)
+
+ # Copied from pathlib...
+ try:
+ stat_result = os.stat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EBADF, errno.ELOOP):
+ raise
+ else:
+ if stat.S_ISSOCK(stat_result.st_mode):
+ os.unlink(path)
+ else:
+ path_str = None
+
+ raw_socket = socket.socket(socket.AF_UNIX, socktype)
+ raw_socket.setblocking(False)
+
+ if path_str is not None:
+ try:
+ await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True)
+ if mode is not None:
+ await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True)
+ except BaseException:
+ raw_socket.close()
+ raise
+
+ return raw_socket
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_streams.py b/venv/lib/python3.11/site-packages/anyio/_core/_streams.py
new file mode 100644
index 0000000..aa6b0c2
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_streams.py
@@ -0,0 +1,52 @@
+from __future__ import annotations
+
+import math
+from typing import Tuple, TypeVar
+from warnings import warn
+
+from ..streams.memory import (
+ MemoryObjectReceiveStream,
+ MemoryObjectSendStream,
+ MemoryObjectStreamState,
+)
+
+T_Item = TypeVar("T_Item")
+
+
+class create_memory_object_stream(
+ Tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
+):
+ """
+ Create a memory object stream.
+
+ The stream's item type can be annotated like
+ :func:`create_memory_object_stream[T_Item]`.
+
+ :param max_buffer_size: number of items held in the buffer until ``send()`` starts
+ blocking
+ :param item_type: old way of marking the streams with the right generic type for
+ static typing (does nothing on AnyIO 4)
+
+ .. deprecated:: 4.0
+ Use ``create_memory_object_stream[YourItemType](...)`` instead.
+ :return: a tuple of (send stream, receive stream)
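+
+    For example (sketch)::
+
+        send_stream, receive_stream = create_memory_object_stream[int](10)
+        await send_stream.send(1)
+        assert await receive_stream.receive() == 1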
+
+ """
+
+ def __new__( # type: ignore[misc]
+ cls, max_buffer_size: float = 0, item_type: object = None
+ ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
+ if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
+ raise ValueError("max_buffer_size must be either an integer or math.inf")
+ if max_buffer_size < 0:
+ raise ValueError("max_buffer_size cannot be negative")
+ if item_type is not None:
+ warn(
+ "The item_type argument has been deprecated in AnyIO 4.0. "
+ "Use create_memory_object_stream[YourItemType](...) instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ state = MemoryObjectStreamState[T_Item](max_buffer_size)
+ return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state))
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py b/venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py
new file mode 100644
index 0000000..5d5d7b7
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_subprocesses.py
@@ -0,0 +1,140 @@
+from __future__ import annotations
+
+from collections.abc import AsyncIterable, Mapping, Sequence
+from io import BytesIO
+from os import PathLike
+from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
+from typing import IO, Any, cast
+
+from ..abc import Process
+from ._eventloop import get_async_backend
+from ._tasks import create_task_group
+
+
+async def run_process(
+ command: str | bytes | Sequence[str | bytes],
+ *,
+ input: bytes | None = None,
+ stdout: int | IO[Any] | None = PIPE,
+ stderr: int | IO[Any] | None = PIPE,
+ check: bool = True,
+ cwd: str | bytes | PathLike[str] | None = None,
+ env: Mapping[str, str] | None = None,
+ start_new_session: bool = False,
+) -> CompletedProcess[bytes]:
+ """
+ Run an external command in a subprocess and wait until it completes.
+
+ .. seealso:: :func:`subprocess.run`
+
+ :param command: either a string to pass to the shell, or an iterable of strings
+ containing the executable name or path and its arguments
+ :param input: bytes passed to the standard input of the subprocess
+    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        a file-like object, or ``None``
+    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+        :data:`subprocess.STDOUT`, a file-like object, or ``None``
+ :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
+ process terminates with a return code other than 0
+ :param cwd: If not ``None``, change the working directory to this before running the
+ command
+ :param env: if not ``None``, this mapping replaces the inherited environment
+ variables from the parent process
+    :param start_new_session: if ``True``, the setsid() system call will be made in the
+        child process prior to the execution of the subprocess. (POSIX only)
+ :return: an object representing the completed process
+ :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
+ exits with a nonzero return code
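+
+    For example (a sketch; assumes an ``echo`` executable on PATH)::
+
+        result = await run_process(["echo", "hello"])
+        print(result.stdout.decode())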
+
+ """
+
+ async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
+ buffer = BytesIO()
+ async for chunk in stream:
+ buffer.write(chunk)
+
+ stream_contents[index] = buffer.getvalue()
+
+ async with await open_process(
+ command,
+ stdin=PIPE if input else DEVNULL,
+ stdout=stdout,
+ stderr=stderr,
+ cwd=cwd,
+ env=env,
+ start_new_session=start_new_session,
+ ) as process:
+ stream_contents: list[bytes | None] = [None, None]
+ async with create_task_group() as tg:
+ if process.stdout:
+ tg.start_soon(drain_stream, process.stdout, 0)
+
+ if process.stderr:
+ tg.start_soon(drain_stream, process.stderr, 1)
+
+ if process.stdin and input:
+ await process.stdin.send(input)
+ await process.stdin.aclose()
+
+ await process.wait()
+
+ output, errors = stream_contents
+ if check and process.returncode != 0:
+ raise CalledProcessError(cast(int, process.returncode), command, output, errors)
+
+ return CompletedProcess(command, cast(int, process.returncode), output, errors)
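
A hedged sketch of calling run_process (editorial, not in the diff; it assumes a POSIX echo binary is on the PATH):

import anyio
from anyio import run_process

async def main() -> None:
    # check=True (the default) raises CalledProcessError on a nonzero exit code
    result = await run_process(["echo", "hello"])
    print(result.returncode, result.stdout.decode())

anyio.run(main)
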
+
+
+async def open_process(
+ command: str | bytes | Sequence[str | bytes],
+ *,
+ stdin: int | IO[Any] | None = PIPE,
+ stdout: int | IO[Any] | None = PIPE,
+ stderr: int | IO[Any] | None = PIPE,
+ cwd: str | bytes | PathLike[str] | None = None,
+ env: Mapping[str, str] | None = None,
+ start_new_session: bool = False,
+) -> Process:
+ """
+ Start an external command in a subprocess.
+
+ .. seealso:: :class:`subprocess.Popen`
+
+ :param command: either a string to pass to the shell, or an iterable of strings
+ containing the executable name or path and its arguments
+ :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
+ file-like object, or ``None``
+ :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+ a file-like object, or ``None``
+ :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
+ :data:`subprocess.STDOUT`, a file-like object, or ``None``
+ :param cwd: if not ``None``, the working directory is changed to this before
+ executing the command
+ :param env: if not ``None``, this mapping defines the environment variables for
+ the new process
+ :param start_new_session: if ``True``, the ``setsid()`` system call will be made in
+ the child process prior to the execution of the subprocess (POSIX only)
+ :return: an asynchronous process object
+
+ """
+ if isinstance(command, (str, bytes)):
+ return await get_async_backend().open_process(
+ command,
+ shell=True,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ cwd=cwd,
+ env=env,
+ start_new_session=start_new_session,
+ )
+ else:
+ return await get_async_backend().open_process(
+ command,
+ shell=False,
+ stdin=stdin,
+ stdout=stdout,
+ stderr=stderr,
+ cwd=cwd,
+ env=env,
+ start_new_session=start_new_session,
+ )
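
Unlike run_process, open_process hands back a live Process whose pipes can be driven incrementally. A sketch (editorial; assumes a POSIX cat binary; TextReceiveStream is anyio's public decoding wrapper for byte streams):

import anyio
from anyio import open_process
from anyio.streams.text import TextReceiveStream

async def main() -> None:
    # "cat" echoes its stdin back on stdout
    async with await open_process(["cat"]) as process:
        await process.stdin.send(b"hello\n")
        await process.stdin.aclose()  # EOF lets "cat" exit
        async for text in TextReceiveStream(process.stdout):
            print(text, end="")
        await process.wait()

anyio.run(main)
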
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py b/venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py
new file mode 100644
index 0000000..b274a31
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_synchronization.py
@@ -0,0 +1,649 @@
+from __future__ import annotations
+
+import math
+from collections import deque
+from dataclasses import dataclass
+from types import TracebackType
+
+from sniffio import AsyncLibraryNotFoundError
+
+from ..lowlevel import cancel_shielded_checkpoint, checkpoint, checkpoint_if_cancelled
+from ._eventloop import get_async_backend
+from ._exceptions import BusyResourceError, WouldBlock
+from ._tasks import CancelScope
+from ._testing import TaskInfo, get_current_task
+
+
+@dataclass(frozen=True)
+class EventStatistics:
+ """
+ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
+ """
+
+ tasks_waiting: int
+
+
+@dataclass(frozen=True)
+class CapacityLimiterStatistics:
+ """
+ :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
+ :ivar float total_tokens: total number of available tokens
+ :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
+ this limiter
+ :ivar int tasks_waiting: number of tasks waiting on
+ :meth:`~.CapacityLimiter.acquire` or
+ :meth:`~.CapacityLimiter.acquire_on_behalf_of`
+ """
+
+ borrowed_tokens: int
+ total_tokens: float
+ borrowers: tuple[object, ...]
+ tasks_waiting: int
+
+
+@dataclass(frozen=True)
+class LockStatistics:
+ """
+ :ivar bool locked: flag indicating if this lock is locked or not
+ :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the
+ lock is not held by any task)
+ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
+ """
+
+ locked: bool
+ owner: TaskInfo | None
+ tasks_waiting: int
+
+
+@dataclass(frozen=True)
+class ConditionStatistics:
+ """
+ :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
+ :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying
+ :class:`~.Lock`
+ """
+
+ tasks_waiting: int
+ lock_statistics: LockStatistics
+
+
+@dataclass(frozen=True)
+class SemaphoreStatistics:
+ """
+ :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`
+
+ """
+
+ tasks_waiting: int
+
+
+class Event:
+ def __new__(cls) -> Event:
+ try:
+ return get_async_backend().create_event()
+ except AsyncLibraryNotFoundError:
+ return EventAdapter()
+
+ def set(self) -> None:
+ """Set the flag, notifying all listeners."""
+ raise NotImplementedError
+
+ def is_set(self) -> bool:
+ """Return ``True`` if the flag is set, ``False`` if not."""
+ raise NotImplementedError
+
+ async def wait(self) -> None:
+ """
+ Wait until the flag has been set.
+
+ If the flag has already been set when this method is called, it returns
+ immediately.
+
+ """
+ raise NotImplementedError
+
+ def statistics(self) -> EventStatistics:
+ """Return statistics about the current state of this event."""
+ raise NotImplementedError
+
+
+class EventAdapter(Event):
+ _internal_event: Event | None = None
+
+ def __new__(cls) -> EventAdapter:
+ return object.__new__(cls)
+
+ @property
+ def _event(self) -> Event:
+ if self._internal_event is None:
+ self._internal_event = get_async_backend().create_event()
+
+ return self._internal_event
+
+ def set(self) -> None:
+ self._event.set()
+
+ def is_set(self) -> bool:
+ return self._internal_event is not None and self._internal_event.is_set()
+
+ async def wait(self) -> None:
+ await self._event.wait()
+
+ def statistics(self) -> EventStatistics:
+ if self._internal_event is None:
+ return EventStatistics(tasks_waiting=0)
+
+ return self._internal_event.statistics()
+
+
+class Lock:
+ _owner_task: TaskInfo | None = None
+
+ def __init__(self) -> None:
+ self._waiters: deque[tuple[TaskInfo, Event]] = deque()
+
+ async def __aenter__(self) -> None:
+ await self.acquire()
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self.release()
+
+ async def acquire(self) -> None:
+ """Acquire the lock."""
+ await checkpoint_if_cancelled()
+ try:
+ self.acquire_nowait()
+ except WouldBlock:
+ task = get_current_task()
+ event = Event()
+ token = task, event
+ self._waiters.append(token)
+ try:
+ await event.wait()
+ except BaseException:
+ if not event.is_set():
+ self._waiters.remove(token)
+ elif self._owner_task == task:
+ self.release()
+
+ raise
+
+ assert self._owner_task == task
+ else:
+ try:
+ await cancel_shielded_checkpoint()
+ except BaseException:
+ self.release()
+ raise
+
+ def acquire_nowait(self) -> None:
+ """
+ Acquire the lock, without blocking.
+
+ :raises ~anyio.WouldBlock: if the operation would block
+
+ """
+ task = get_current_task()
+ if self._owner_task == task:
+ raise RuntimeError("Attempted to acquire an already held Lock")
+
+ if self._owner_task is not None:
+ raise WouldBlock
+
+ self._owner_task = task
+
+ def release(self) -> None:
+ """Release the lock."""
+ if self._owner_task != get_current_task():
+ raise RuntimeError("The current task is not holding this lock")
+
+ if self._waiters:
+ self._owner_task, event = self._waiters.popleft()
+ event.set()
+ else:
+ del self._owner_task
+
+ def locked(self) -> bool:
+ """Return True if the lock is currently held."""
+ return self._owner_task is not None
+
+ def statistics(self) -> LockStatistics:
+ """
+ Return statistics about the current state of this lock.
+
+ .. versionadded:: 3.0
+ """
+ return LockStatistics(self.locked(), self._owner_task, len(self._waiters))
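
A short usage sketch for Lock (editorial addition; the async context manager form wraps the acquire()/release() pair shown above):

import anyio

async def main() -> None:
    lock = anyio.Lock()

    async def worker(n: int) -> None:
        async with lock:  # acquire() on entry, release() on exit
            print(f"worker {n} holds the lock")
            await anyio.sleep(0.1)

    async with anyio.create_task_group() as tg:
        for n in range(3):
            tg.start_soon(worker, n)

anyio.run(main)
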
+
+
+class Condition:
+ _owner_task: TaskInfo | None = None
+
+ def __init__(self, lock: Lock | None = None):
+ self._lock = lock or Lock()
+ self._waiters: deque[Event] = deque()
+
+ async def __aenter__(self) -> None:
+ await self.acquire()
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self.release()
+
+ def _check_acquired(self) -> None:
+ if self._owner_task != get_current_task():
+ raise RuntimeError("The current task is not holding the underlying lock")
+
+ async def acquire(self) -> None:
+ """Acquire the underlying lock."""
+ await self._lock.acquire()
+ self._owner_task = get_current_task()
+
+ def acquire_nowait(self) -> None:
+ """
+ Acquire the underlying lock, without blocking.
+
+ :raises ~anyio.WouldBlock: if the operation would block
+
+ """
+ self._lock.acquire_nowait()
+ self._owner_task = get_current_task()
+
+ def release(self) -> None:
+ """Release the underlying lock."""
+ self._lock.release()
+
+ def locked(self) -> bool:
+ """Return True if the lock is set."""
+ return self._lock.locked()
+
+ def notify(self, n: int = 1) -> None:
+ """Notify exactly n listeners."""
+ self._check_acquired()
+ for _ in range(n):
+ try:
+ event = self._waiters.popleft()
+ except IndexError:
+ break
+
+ event.set()
+
+ def notify_all(self) -> None:
+ """Notify all the listeners."""
+ self._check_acquired()
+ for event in self._waiters:
+ event.set()
+
+ self._waiters.clear()
+
+ async def wait(self) -> None:
+ """Wait for a notification."""
+ await checkpoint()
+ event = Event()
+ self._waiters.append(event)
+ self.release()
+ try:
+ await event.wait()
+ except BaseException:
+ if not event.is_set():
+ self._waiters.remove(event)
+
+ raise
+ finally:
+ with CancelScope(shield=True):
+ await self.acquire()
+
+ def statistics(self) -> ConditionStatistics:
+ """
+ Return statistics about the current state of this condition.
+
+ .. versionadded:: 3.0
+ """
+ return ConditionStatistics(len(self._waiters), self._lock.statistics())
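
A producer/consumer sketch for Condition (editorial; wait_all_tasks_blocked is used here only to make the ordering deterministic):

import anyio

async def main() -> None:
    condition = anyio.Condition()
    items: list[str] = []

    async def consumer() -> None:
        async with condition:
            while not items:
                await condition.wait()  # releases the lock while waiting
            print("got", items.pop())

    async with anyio.create_task_group() as tg:
        tg.start_soon(consumer)
        await anyio.wait_all_tasks_blocked()  # let the consumer reach wait()
        async with condition:
            items.append("work")
            condition.notify()  # wake one waiter

anyio.run(main)
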
+
+
+class Semaphore:
+ def __init__(self, initial_value: int, *, max_value: int | None = None):
+ if not isinstance(initial_value, int):
+ raise TypeError("initial_value must be an integer")
+ if initial_value < 0:
+ raise ValueError("initial_value must be >= 0")
+ if max_value is not None:
+ if not isinstance(max_value, int):
+ raise TypeError("max_value must be an integer or None")
+ if max_value < initial_value:
+ raise ValueError(
+ "max_value must be equal to or higher than initial_value"
+ )
+
+ self._value = initial_value
+ self._max_value = max_value
+ self._waiters: deque[Event] = deque()
+
+ async def __aenter__(self) -> Semaphore:
+ await self.acquire()
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self.release()
+
+ async def acquire(self) -> None:
+ """Decrement the semaphore value, blocking if necessary."""
+ await checkpoint_if_cancelled()
+ try:
+ self.acquire_nowait()
+ except WouldBlock:
+ event = Event()
+ self._waiters.append(event)
+ try:
+ await event.wait()
+ except BaseException:
+ if not event.is_set():
+ self._waiters.remove(event)
+ else:
+ self.release()
+
+ raise
+ else:
+ try:
+ await cancel_shielded_checkpoint()
+ except BaseException:
+ self.release()
+ raise
+
+ def acquire_nowait(self) -> None:
+ """
+ Decrement the semaphore value, without blocking.
+
+ :raises ~anyio.WouldBlock: if the operation would block
+
+ """
+ if self._value == 0:
+ raise WouldBlock
+
+ self._value -= 1
+
+ def release(self) -> None:
+ """Increment the semaphore value."""
+ if self._max_value is not None and self._value == self._max_value:
+ raise ValueError("semaphore released too many times")
+
+ if self._waiters:
+ self._waiters.popleft().set()
+ else:
+ self._value += 1
+
+ @property
+ def value(self) -> int:
+ """The current value of the semaphore."""
+ return self._value
+
+ @property
+ def max_value(self) -> int | None:
+ """The maximum value of the semaphore."""
+ return self._max_value
+
+ def statistics(self) -> SemaphoreStatistics:
+ """
+ Return statistics about the current state of this semaphore.
+
+ .. versionadded:: 3.0
+ """
+ return SemaphoreStatistics(len(self._waiters))
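
A usage sketch for Semaphore (editorial addition): with an initial value of 2, at most two workers run the guarded block at once while the others wait in the deque shown above.

import anyio

async def main() -> None:
    semaphore = anyio.Semaphore(2)  # at most two concurrent holders

    async def worker(n: int) -> None:
        async with semaphore:
            print(f"worker {n} acquired; value is now {semaphore.value}")
            await anyio.sleep(0.1)

    async with anyio.create_task_group() as tg:
        for n in range(4):
            tg.start_soon(worker, n)

anyio.run(main)
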
+
+
+class CapacityLimiter:
+ def __new__(cls, total_tokens: float) -> CapacityLimiter:
+ try:
+ return get_async_backend().create_capacity_limiter(total_tokens)
+ except AsyncLibraryNotFoundError:
+ return CapacityLimiterAdapter(total_tokens)
+
+ async def __aenter__(self) -> None:
+ raise NotImplementedError
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ raise NotImplementedError
+
+ @property
+ def total_tokens(self) -> float:
+ """
+ The total number of tokens available for borrowing.
+
+ This is a read-write property. If the total number of tokens is increased, the
+ proportionate number of tasks waiting on this limiter will be granted their
+ tokens.
+
+ .. versionchanged:: 3.0
+ The property is now writable.
+
+ """
+ raise NotImplementedError
+
+ @total_tokens.setter
+ def total_tokens(self, value: float) -> None:
+ raise NotImplementedError
+
+ @property
+ def borrowed_tokens(self) -> int:
+ """The number of tokens that have currently been borrowed."""
+ raise NotImplementedError
+
+ @property
+ def available_tokens(self) -> float:
+ """The number of tokens currently available to be borrowed"""
+ raise NotImplementedError
+
+ def acquire_nowait(self) -> None:
+ """
+ Acquire a token for the current task without waiting for one to become
+ available.
+
+ :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
+
+ """
+ raise NotImplementedError
+
+ def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
+ """
+ Acquire a token without waiting for one to become available.
+
+ :param borrower: the entity borrowing a token
+ :raises ~anyio.WouldBlock: if there are no tokens available for borrowing
+
+ """
+ raise NotImplementedError
+
+ async def acquire(self) -> None:
+ """
+ Acquire a token for the current task, waiting if necessary for one to become
+ available.
+
+ """
+ raise NotImplementedError
+
+ async def acquire_on_behalf_of(self, borrower: object) -> None:
+ """
+ Acquire a token, waiting if necessary for one to become available.
+
+ :param borrower: the entity borrowing a token
+
+ """
+ raise NotImplementedError
+
+ def release(self) -> None:
+ """
+ Release the token held by the current task.
+
+ :raises RuntimeError: if the current task has not borrowed a token from this
+ limiter.
+
+ """
+ raise NotImplementedError
+
+ def release_on_behalf_of(self, borrower: object) -> None:
+ """
+ Release the token held by the given borrower.
+
+ :raises RuntimeError: if the borrower has not borrowed a token from this
+ limiter.
+
+ """
+ raise NotImplementedError
+
+ def statistics(self) -> CapacityLimiterStatistics:
+ """
+ Return statistics about the current state of this limiter.
+
+ .. versionadded:: 3.0
+
+ """
+ raise NotImplementedError
+
+
+class CapacityLimiterAdapter(CapacityLimiter):
+ _internal_limiter: CapacityLimiter | None = None
+
+ def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:
+ return object.__new__(cls)
+
+ def __init__(self, total_tokens: float) -> None:
+ self.total_tokens = total_tokens
+
+ @property
+ def _limiter(self) -> CapacityLimiter:
+ if self._internal_limiter is None:
+ self._internal_limiter = get_async_backend().create_capacity_limiter(
+ self._total_tokens
+ )
+
+ return self._internal_limiter
+
+ async def __aenter__(self) -> None:
+ await self._limiter.__aenter__()
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)
+
+ @property
+ def total_tokens(self) -> float:
+ if self._internal_limiter is None:
+ return self._total_tokens
+
+ return self._internal_limiter.total_tokens
+
+ @total_tokens.setter
+ def total_tokens(self, value: float) -> None:
+ if not isinstance(value, int) and value is not math.inf:
+ raise TypeError("total_tokens must be an int or math.inf")
+ elif value < 1:
+ raise ValueError("total_tokens must be >= 1")
+
+ if self._internal_limiter is None:
+ self._total_tokens = value
+ return
+
+ self._limiter.total_tokens = value
+
+ @property
+ def borrowed_tokens(self) -> int:
+ if self._internal_limiter is None:
+ return 0
+
+ return self._internal_limiter.borrowed_tokens
+
+ @property
+ def available_tokens(self) -> float:
+ if self._internal_limiter is None:
+ return self._total_tokens
+
+ return self._internal_limiter.available_tokens
+
+ def acquire_nowait(self) -> None:
+ self._limiter.acquire_nowait()
+
+ def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
+ self._limiter.acquire_on_behalf_of_nowait(borrower)
+
+ async def acquire(self) -> None:
+ await self._limiter.acquire()
+
+ async def acquire_on_behalf_of(self, borrower: object) -> None:
+ await self._limiter.acquire_on_behalf_of(borrower)
+
+ def release(self) -> None:
+ self._limiter.release()
+
+ def release_on_behalf_of(self, borrower: object) -> None:
+ self._limiter.release_on_behalf_of(borrower)
+
+ def statistics(self) -> CapacityLimiterStatistics:
+ if self._internal_limiter is None:
+ return CapacityLimiterStatistics(
+ borrowed_tokens=0,
+ total_tokens=self.total_tokens,
+ borrowers=(),
+ tasks_waiting=0,
+ )
+
+ return self._internal_limiter.statistics()
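
A usage sketch for CapacityLimiter (editorial; as the setter above enforces, total_tokens must be an int >= 1 or math.inf):

import anyio

async def main() -> None:
    limiter = anyio.CapacityLimiter(2)

    async def worker(n: int) -> None:
        async with limiter:
            print(f"worker {n}: {limiter.borrowed_tokens} of {limiter.total_tokens} borrowed")
            await anyio.sleep(0.1)

    async with anyio.create_task_group() as tg:
        for n in range(4):
            tg.start_soon(worker, n)

anyio.run(main)
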
+
+
+class ResourceGuard:
+ """
+ A context manager for ensuring that a resource is only used by a single task at a
+ time.
+
+ Entering this context manager while the previous has not exited it yet will trigger
+ :exc:`BusyResourceError`.
+
+ :param action: the action to guard against (visible in the :exc:`BusyResourceError`
+ when triggered, e.g. "Another task is already {action} this resource")
+
+ .. versionadded:: 4.1
+ """
+
+ __slots__ = "action", "_guarded"
+
+ def __init__(self, action: str = "using"):
+ self.action: str = action
+ self._guarded = False
+
+ def __enter__(self) -> None:
+ if self._guarded:
+ raise BusyResourceError(self.action)
+
+ self._guarded = True
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ self._guarded = False
+ return None
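
A minimal demonstration of ResourceGuard (editorial; ResourceGuard and BusyResourceError are exported from the anyio top-level package as of AnyIO 4.1):

from anyio import BusyResourceError, ResourceGuard

guard = ResourceGuard("reading from")
with guard:
    try:
        with guard:  # a second entry while the first is still active
            pass
    except BusyResourceError as exc:
        print(exc)  # "Another task is already reading from this resource"
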
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_tasks.py b/venv/lib/python3.11/site-packages/anyio/_core/_tasks.py
new file mode 100644
index 0000000..2f21ea2
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_tasks.py
@@ -0,0 +1,158 @@
+from __future__ import annotations
+
+import math
+from collections.abc import Generator
+from contextlib import contextmanager
+from types import TracebackType
+
+from ..abc._tasks import TaskGroup, TaskStatus
+from ._eventloop import get_async_backend
+
+
+class _IgnoredTaskStatus(TaskStatus[object]):
+ def started(self, value: object = None) -> None:
+ pass
+
+
+TASK_STATUS_IGNORED = _IgnoredTaskStatus()
+
+
+class CancelScope:
+ """
+ Wraps a unit of work that can be made separately cancellable.
+
+ :param deadline: The time (clock value) when this scope is cancelled automatically
+ :param shield: ``True`` to shield the cancel scope from external cancellation
+ """
+
+ def __new__(
+ cls, *, deadline: float = math.inf, shield: bool = False
+ ) -> CancelScope:
+ return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)
+
+ def cancel(self) -> None:
+ """Cancel this scope immediately."""
+ raise NotImplementedError
+
+ @property
+ def deadline(self) -> float:
+ """
+ The time (clock value) when this scope is cancelled automatically.
+
+ Will be ``float('inf')`` if no timeout has been set.
+
+ """
+ raise NotImplementedError
+
+ @deadline.setter
+ def deadline(self, value: float) -> None:
+ raise NotImplementedError
+
+ @property
+ def cancel_called(self) -> bool:
+ """``True`` if :meth:`cancel` has been called."""
+ raise NotImplementedError
+
+ @property
+ def cancelled_caught(self) -> bool:
+ """
+ ``True`` if this scope suppressed a cancellation exception that it itself raised.
+
+ This is typically used to check if any work was interrupted, or to see if the
+ scope was cancelled due to its deadline being reached. The value will, however,
+ only be ``True`` if the cancellation was triggered by the scope itself (and not
+ an outer scope).
+
+ """
+ raise NotImplementedError
+
+ @property
+ def shield(self) -> bool:
+ """
+ ``True`` if this scope is shielded from external cancellation.
+
+ While a scope is shielded, it will not receive cancellations from outside.
+
+ """
+ raise NotImplementedError
+
+ @shield.setter
+ def shield(self, value: bool) -> None:
+ raise NotImplementedError
+
+ def __enter__(self) -> CancelScope:
+ raise NotImplementedError
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ raise NotImplementedError
+
+
+@contextmanager
+def fail_after(
+ delay: float | None, shield: bool = False
+) -> Generator[CancelScope, None, None]:
+ """
+ Create a context manager which raises a :exc:`TimeoutError` if its enclosed block
+ does not finish in time.
+
+ :param delay: maximum allowed time (in seconds) before raising the exception, or
+ ``None`` to disable the timeout
+ :param shield: ``True`` to shield the cancel scope from external cancellation
+ :return: a context manager that yields a cancel scope
+ :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]
+
+ """
+ current_time = get_async_backend().current_time
+ deadline = (current_time() + delay) if delay is not None else math.inf
+ with get_async_backend().create_cancel_scope(
+ deadline=deadline, shield=shield
+ ) as cancel_scope:
+ yield cancel_scope
+
+ if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:
+ raise TimeoutError
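
A usage sketch for fail_after (editorial addition): the sleep is cancelled at the deadline and the scope's exit re-raises the cancellation as TimeoutError, per the check above.

import anyio

async def main() -> None:
    try:
        with anyio.fail_after(1):
            await anyio.sleep(10)  # cancelled when the deadline arrives
    except TimeoutError:
        print("timed out")

anyio.run(main)
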
+
+
+def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
+ """
+ Create a cancel scope with a deadline that expires after the given delay.
+
+ :param delay: maximum allowed time (in seconds) before exiting the context block, or
+ ``None`` to disable the timeout
+ :param shield: ``True`` to shield the cancel scope from external cancellation
+ :return: a cancel scope
+
+ """
+ deadline = (
+ (get_async_backend().current_time() + delay) if delay is not None else math.inf
+ )
+ return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
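
In contrast to fail_after, move_on_after swallows the timeout; the caller inspects cancelled_caught afterwards. A sketch (editorial):

import anyio

async def main() -> None:
    with anyio.move_on_after(1) as scope:
        await anyio.sleep(10)

    if scope.cancelled_caught:
        print("the deadline expired")

anyio.run(main)
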
+
+
+def current_effective_deadline() -> float:
+ """
+ Return the nearest deadline among all the cancel scopes effective for the current
+ task.
+
+ :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
+ there is no deadline in effect, or ``float('-inf')`` if the current scope has
+ been cancelled)
+ :rtype: float
+
+ """
+ return get_async_backend().current_effective_deadline()
+
+
+def create_task_group() -> TaskGroup:
+ """
+ Create a task group.
+
+ :return: a task group
+
+ """
+ return get_async_backend().create_task_group()
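
A sketch combining create_task_group() with TaskGroup.start() (editorial; TASK_STATUS_IGNORED and sleep_forever are part of the public anyio API):

import anyio
from anyio.abc import TaskStatus

async def service(*, task_status: TaskStatus[str] = anyio.TASK_STATUS_IGNORED) -> None:
    task_status.started("ready")  # hands a value back to tg.start()
    await anyio.sleep_forever()

async def main() -> None:
    async with anyio.create_task_group() as tg:
        value = await tg.start(service)
        print(value)  # "ready"
        tg.cancel_scope.cancel()

anyio.run(main)
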
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_testing.py b/venv/lib/python3.11/site-packages/anyio/_core/_testing.py
new file mode 100644
index 0000000..1dae3b1
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_testing.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+from collections.abc import Awaitable, Generator
+from typing import Any
+
+from ._eventloop import get_async_backend
+
+
+class TaskInfo:
+ """
+ Represents an asynchronous task.
+
+ :ivar int id: the unique identifier of the task
+ :ivar parent_id: the identifier of the parent task, if any
+ :vartype parent_id: Optional[int]
+ :ivar str name: the description of the task (if any)
+ :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
+ """
+
+ __slots__ = "_name", "id", "parent_id", "name", "coro"
+
+ def __init__(
+ self,
+ id: int,
+ parent_id: int | None,
+ name: str | None,
+ coro: Generator[Any, Any, Any] | Awaitable[Any],
+ ):
+ func = get_current_task
+ self._name = f"{func.__module__}.{func.__qualname__}"
+ self.id: int = id
+ self.parent_id: int | None = parent_id
+ self.name: str | None = name
+ self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, TaskInfo):
+ return self.id == other.id
+
+ return NotImplemented
+
+ def __hash__(self) -> int:
+ return hash(self.id)
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"
+
+ def _unwrap(self) -> TaskInfo:
+ return self
+
+
+def get_current_task() -> TaskInfo:
+ """
+ Return the current task.
+
+ :return: a representation of the current task
+
+ """
+ return get_async_backend().get_current_task()
+
+
+def get_running_tasks() -> list[TaskInfo]:
+ """
+ Return a list of running tasks in the current event loop.
+
+ :return: a list of task info objects
+
+ """
+ return get_async_backend().get_running_tasks()
+
+
+async def wait_all_tasks_blocked() -> None:
+ """Wait until all other tasks are waiting for something."""
+ await get_async_backend().wait_all_tasks_blocked()
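
A sketch of the introspection helpers above (editorial; wait_all_tasks_blocked makes the child task's state deterministic before inspecting it):

import anyio
from anyio import get_running_tasks, wait_all_tasks_blocked

async def main() -> None:
    async with anyio.create_task_group() as tg:
        tg.start_soon(anyio.sleep, 1)
        await wait_all_tasks_blocked()  # the child is now parked in sleep()
        for task in get_running_tasks():
            print(task)  # e.g. TaskInfo(id=..., name=...)
        tg.cancel_scope.cancel()

anyio.run(main)
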
diff --git a/venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py b/venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py
new file mode 100644
index 0000000..74c6b8f
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/_core/_typedattr.py
@@ -0,0 +1,81 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Mapping
+from typing import Any, TypeVar, final, overload
+
+from ._exceptions import TypedAttributeLookupError
+
+T_Attr = TypeVar("T_Attr")
+T_Default = TypeVar("T_Default")
+undefined = object()
+
+
+def typed_attribute() -> Any:
+ """Return a unique object, used to mark typed attributes."""
+ return object()
+
+
+class TypedAttributeSet:
+ """
+ Superclass for typed attribute collections.
+
+ Checks that every public attribute of every subclass has a type annotation.
+ """
+
+ def __init_subclass__(cls) -> None:
+ annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
+ for attrname in dir(cls):
+ if not attrname.startswith("_") and attrname not in annotations:
+ raise TypeError(
+ f"Attribute {attrname!r} is missing its type annotation"
+ )
+
+ super().__init_subclass__()
+
+
+class TypedAttributeProvider:
+ """Base class for classes that wish to provide typed extra attributes."""
+
+ @property
+ def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
+ """
+ A mapping of the extra attributes to callables that return the corresponding
+ values.
+
+ If the provider wraps another provider, the attributes from that wrapper should
+ also be included in the returned mapping (but the wrapper may override the
+ callables from the wrapped instance).
+
+ """
+ return {}
+
+ @overload
+ def extra(self, attribute: T_Attr) -> T_Attr:
+ ...
+
+ @overload
+ def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default:
+ ...
+
+ @final
+ def extra(self, attribute: Any, default: object = undefined) -> object:
+ """
+ extra(attribute, default=undefined)
+
+ Return the value of the given typed extra attribute.
+
+ :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
+ look for
+ :param default: the value that should be returned if no value is found for the
+ attribute
+ :raises ~anyio.TypedAttributeLookupError: if the search failed and no default
+ value was given
+
+ """
+ try:
+ return self.extra_attributes[attribute]()
+ except KeyError:
+ if default is undefined:
+ raise TypedAttributeLookupError("Attribute not found") from None
+ else:
+ return default
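
A sketch of defining and querying typed attributes (editorial; the ColorAttribute/Paint names are hypothetical, and all three helpers are exported from the anyio top-level package):

from anyio import TypedAttributeProvider, TypedAttributeSet, typed_attribute

class ColorAttribute(TypedAttributeSet):
    #: a hypothetical attribute for demonstration
    color: str = typed_attribute()

class Paint(TypedAttributeProvider):
    @property
    def extra_attributes(self):
        # map each attribute marker to a zero-argument callable
        return {ColorAttribute.color: lambda: "red"}

paint = Paint()
print(paint.extra(ColorAttribute.color))           # "red"
print(paint.extra(ColorAttribute.color, "blue"))   # default is used only on a miss
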
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__init__.py b/venv/lib/python3.11/site-packages/anyio/abc/__init__.py
new file mode 100644
index 0000000..1ca0fcf
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__init__.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from typing import Any
+
+from ._eventloop import AsyncBackend as AsyncBackend
+from ._resources import AsyncResource as AsyncResource
+from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket
+from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket
+from ._sockets import IPAddressType as IPAddressType
+from ._sockets import IPSockAddrType as IPSockAddrType
+from ._sockets import SocketAttribute as SocketAttribute
+from ._sockets import SocketListener as SocketListener
+from ._sockets import SocketStream as SocketStream
+from ._sockets import UDPPacketType as UDPPacketType
+from ._sockets import UDPSocket as UDPSocket
+from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType
+from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket
+from ._sockets import UNIXSocketStream as UNIXSocketStream
+from ._streams import AnyByteReceiveStream as AnyByteReceiveStream
+from ._streams import AnyByteSendStream as AnyByteSendStream
+from ._streams import AnyByteStream as AnyByteStream
+from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream
+from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream
+from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream
+from ._streams import ByteReceiveStream as ByteReceiveStream
+from ._streams import ByteSendStream as ByteSendStream
+from ._streams import ByteStream as ByteStream
+from ._streams import Listener as Listener
+from ._streams import ObjectReceiveStream as ObjectReceiveStream
+from ._streams import ObjectSendStream as ObjectSendStream
+from ._streams import ObjectStream as ObjectStream
+from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream
+from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream
+from ._streams import UnreliableObjectStream as UnreliableObjectStream
+from ._subprocesses import Process as Process
+from ._tasks import TaskGroup as TaskGroup
+from ._tasks import TaskStatus as TaskStatus
+from ._testing import TestRunner as TestRunner
+
+# Re-exported here, for backwards compatibility
+# isort: off
+from .._core._synchronization import (
+ CapacityLimiter as CapacityLimiter,
+ Condition as Condition,
+ Event as Event,
+ Lock as Lock,
+ Semaphore as Semaphore,
+)
+from .._core._tasks import CancelScope as CancelScope
+from ..from_thread import BlockingPortal as BlockingPortal
+
+# Re-export imports so they look like they live directly in this package
+key: str
+value: Any
+for key, value in list(locals().items()):
+ if getattr(value, "__module__", "").startswith("anyio.abc."):
+ value.__module__ = __name__
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..6a8f56a
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/__init__.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_eventloop.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_eventloop.cpython-311.pyc
new file mode 100644
index 0000000..8b965b3
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_eventloop.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_resources.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_resources.cpython-311.pyc
new file mode 100644
index 0000000..36d836a
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_resources.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_sockets.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_sockets.cpython-311.pyc
new file mode 100644
index 0000000..2df9fcd
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_sockets.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_streams.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_streams.cpython-311.pyc
new file mode 100644
index 0000000..62e4f72
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_streams.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-311.pyc
new file mode 100644
index 0000000..9514d3d
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_subprocesses.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_tasks.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_tasks.cpython-311.pyc
new file mode 100644
index 0000000..3404806
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_tasks.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_testing.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_testing.cpython-311.pyc
new file mode 100644
index 0000000..73953de
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/__pycache__/_testing.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/_eventloop.py b/venv/lib/python3.11/site-packages/anyio/abc/_eventloop.py
new file mode 100644
index 0000000..4470d83
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/_eventloop.py
@@ -0,0 +1,392 @@
+from __future__ import annotations
+
+import math
+import sys
+from abc import ABCMeta, abstractmethod
+from collections.abc import AsyncIterator, Awaitable, Mapping
+from os import PathLike
+from signal import Signals
+from socket import AddressFamily, SocketKind, socket
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ ContextManager,
+ Sequence,
+ TypeVar,
+ overload,
+)
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from typing_extensions import TypeVarTuple, Unpack
+
+if TYPE_CHECKING:
+ from typing import Literal
+
+ from .._core._synchronization import CapacityLimiter, Event
+ from .._core._tasks import CancelScope
+ from .._core._testing import TaskInfo
+ from ..from_thread import BlockingPortal
+ from ._sockets import (
+ ConnectedUDPSocket,
+ ConnectedUNIXDatagramSocket,
+ IPSockAddrType,
+ SocketListener,
+ SocketStream,
+ UDPSocket,
+ UNIXDatagramSocket,
+ UNIXSocketStream,
+ )
+ from ._subprocesses import Process
+ from ._tasks import TaskGroup
+ from ._testing import TestRunner
+
+T_Retval = TypeVar("T_Retval")
+PosArgsT = TypeVarTuple("PosArgsT")
+
+
+class AsyncBackend(metaclass=ABCMeta):
+ @classmethod
+ @abstractmethod
+ def run(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ args: tuple[Unpack[PosArgsT]],
+ kwargs: dict[str, Any],
+ options: dict[str, Any],
+ ) -> T_Retval:
+ """
+ Run the given coroutine function in an asynchronous event loop.
+
+ The current thread must not be already running an event loop.
+
+ :param func: a coroutine function
+ :param args: positional arguments to ``func``
+ :param kwargs: keyword arguments to ``func``
+ :param options: keyword arguments to call the backend ``run()`` implementation
+ with
+ :return: the return value of the coroutine function
+ """
+
+ @classmethod
+ @abstractmethod
+ def current_token(cls) -> object:
+ """
+
+ :return:
+ """
+
+ @classmethod
+ @abstractmethod
+ def current_time(cls) -> float:
+ """
+ Return the current value of the event loop's internal clock.
+
+ :return: the clock value (seconds)
+ """
+
+ @classmethod
+ @abstractmethod
+ def cancelled_exception_class(cls) -> type[BaseException]:
+ """Return the exception class that is raised in a task if it's cancelled."""
+
+ @classmethod
+ @abstractmethod
+ async def checkpoint(cls) -> None:
+ """
+ Check if the task has been cancelled, and allow rescheduling of other tasks.
+
+ This is effectively the same as running :meth:`checkpoint_if_cancelled` and then
+ :meth:`cancel_shielded_checkpoint`.
+ """
+
+ @classmethod
+ async def checkpoint_if_cancelled(cls) -> None:
+ """
+ Check if the current task has been cancelled.
+
+ This will check if the task has been cancelled, but will not allow other tasks
+ to be scheduled if not.
+
+ """
+ if cls.current_effective_deadline() == -math.inf:
+ await cls.checkpoint()
+
+ @classmethod
+ async def cancel_shielded_checkpoint(cls) -> None:
+ """
+ Allow the rescheduling of other tasks.
+
+ This will give other tasks the opportunity to run, but without checking if the
+ current task group has been cancelled, unlike with :meth:`checkpoint`.
+
+ """
+ with cls.create_cancel_scope(shield=True):
+ await cls.sleep(0)
+
+ @classmethod
+ @abstractmethod
+ async def sleep(cls, delay: float) -> None:
+ """
+ Pause the current task for the specified duration.
+
+ :param delay: the duration, in seconds
+ """
+
+ @classmethod
+ @abstractmethod
+ def create_cancel_scope(
+ cls, *, deadline: float = math.inf, shield: bool = False
+ ) -> CancelScope:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def current_effective_deadline(cls) -> float:
+ """
+ Return the nearest deadline among all the cancel scopes effective for the
+ current task.
+
+ :return:
+ - a clock value from the event loop's internal clock
+ - ``inf`` if there is no deadline in effect
+ - ``-inf`` if the current scope has been cancelled
+ :rtype: float
+ """
+
+ @classmethod
+ @abstractmethod
+ def create_task_group(cls) -> TaskGroup:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def create_event(cls) -> Event:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def run_sync_in_worker_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ abandon_on_cancel: bool = False,
+ limiter: CapacityLimiter | None = None,
+ ) -> T_Retval:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def check_cancelled(cls) -> None:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def run_async_from_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ args: tuple[Unpack[PosArgsT]],
+ token: object,
+ ) -> T_Retval:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def run_sync_from_thread(
+ cls,
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ token: object,
+ ) -> T_Retval:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def create_blocking_portal(cls) -> BlockingPortal:
+ pass
+
+ @classmethod
+ @overload
+ async def open_process(
+ cls,
+ command: str | bytes,
+ *,
+ shell: Literal[True],
+ stdin: int | IO[Any] | None,
+ stdout: int | IO[Any] | None,
+ stderr: int | IO[Any] | None,
+ cwd: str | bytes | PathLike[str] | None = None,
+ env: Mapping[str, str] | None = None,
+ start_new_session: bool = False,
+ ) -> Process:
+ pass
+
+ @classmethod
+ @overload
+ async def open_process(
+ cls,
+ command: Sequence[str | bytes],
+ *,
+ shell: Literal[False],
+ stdin: int | IO[Any] | None,
+ stdout: int | IO[Any] | None,
+ stderr: int | IO[Any] | None,
+ cwd: str | bytes | PathLike[str] | None = None,
+ env: Mapping[str, str] | None = None,
+ start_new_session: bool = False,
+ ) -> Process:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def open_process(
+ cls,
+ command: str | bytes | Sequence[str | bytes],
+ *,
+ shell: bool,
+ stdin: int | IO[Any] | None,
+ stdout: int | IO[Any] | None,
+ stderr: int | IO[Any] | None,
+ cwd: str | bytes | PathLike[str] | None = None,
+ env: Mapping[str, str] | None = None,
+ start_new_session: bool = False,
+ ) -> Process:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def connect_tcp(
+ cls, host: str, port: int, local_address: IPSockAddrType | None = None
+ ) -> SocketStream:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def create_tcp_listener(cls, sock: socket) -> SocketListener:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def create_unix_listener(cls, sock: socket) -> SocketListener:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def create_udp_socket(
+ cls,
+ family: AddressFamily,
+ local_address: IPSockAddrType | None,
+ remote_address: IPSockAddrType | None,
+ reuse_port: bool,
+ ) -> UDPSocket | ConnectedUDPSocket:
+ pass
+
+ @classmethod
+ @overload
+ async def create_unix_datagram_socket(
+ cls, raw_socket: socket, remote_path: None
+ ) -> UNIXDatagramSocket:
+ ...
+
+ @classmethod
+ @overload
+ async def create_unix_datagram_socket(
+ cls, raw_socket: socket, remote_path: str | bytes
+ ) -> ConnectedUNIXDatagramSocket:
+ ...
+
+ @classmethod
+ @abstractmethod
+ async def create_unix_datagram_socket(
+ cls, raw_socket: socket, remote_path: str | bytes | None
+ ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def getaddrinfo(
+ cls,
+ host: bytes | str | None,
+ port: str | int | None,
+ *,
+ family: int | AddressFamily = 0,
+ type: int | SocketKind = 0,
+ proto: int = 0,
+ flags: int = 0,
+ ) -> list[
+ tuple[
+ AddressFamily,
+ SocketKind,
+ int,
+ str,
+ tuple[str, int] | tuple[str, int, int, int],
+ ]
+ ]:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def getnameinfo(
+ cls, sockaddr: IPSockAddrType, flags: int = 0
+ ) -> tuple[str, str]:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def wait_socket_readable(cls, sock: socket) -> None:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def wait_socket_writable(cls, sock: socket) -> None:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def current_default_thread_limiter(cls) -> CapacityLimiter:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def open_signal_receiver(
+ cls, *signals: Signals
+ ) -> ContextManager[AsyncIterator[Signals]]:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def get_current_task(cls) -> TaskInfo:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def get_running_tasks(cls) -> list[TaskInfo]:
+ pass
+
+ @classmethod
+ @abstractmethod
+ async def wait_all_tasks_blocked(cls) -> None:
+ pass
+
+ @classmethod
+ @abstractmethod
+ def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
+ pass
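
The checkpoint semantics defined above surface publicly in anyio.lowlevel. A sketch of cooperative yielding in a tight loop (editorial addition):

import anyio
from anyio.lowlevel import checkpoint

async def crunch() -> None:
    for _ in range(1000):
        # yield to other tasks and honor any pending cancellation
        await checkpoint()

anyio.run(crunch)
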
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/_resources.py b/venv/lib/python3.11/site-packages/anyio/abc/_resources.py
new file mode 100644
index 0000000..9693835
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/_resources.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from abc import ABCMeta, abstractmethod
+from types import TracebackType
+from typing import TypeVar
+
+T = TypeVar("T")
+
+
+class AsyncResource(metaclass=ABCMeta):
+ """
+ Abstract base class for all closeable asynchronous resources.
+
+ Works as an asynchronous context manager which returns the instance itself on enter,
+ and calls :meth:`aclose` on exit.
+ """
+
+ async def __aenter__(self: T) -> T:
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ await self.aclose()
+
+ @abstractmethod
+ async def aclose(self) -> None:
+ """Close the resource."""
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/_sockets.py b/venv/lib/python3.11/site-packages/anyio/abc/_sockets.py
new file mode 100644
index 0000000..b321225
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/_sockets.py
@@ -0,0 +1,194 @@
+from __future__ import annotations
+
+import socket
+from abc import abstractmethod
+from collections.abc import Callable, Collection, Mapping
+from contextlib import AsyncExitStack
+from io import IOBase
+from ipaddress import IPv4Address, IPv6Address
+from socket import AddressFamily
+from types import TracebackType
+from typing import Any, Tuple, TypeVar, Union
+
+from .._core._typedattr import (
+ TypedAttributeProvider,
+ TypedAttributeSet,
+ typed_attribute,
+)
+from ._streams import ByteStream, Listener, UnreliableObjectStream
+from ._tasks import TaskGroup
+
+IPAddressType = Union[str, IPv4Address, IPv6Address]
+IPSockAddrType = Tuple[str, int]
+SockAddrType = Union[IPSockAddrType, str]
+UDPPacketType = Tuple[bytes, IPSockAddrType]
+UNIXDatagramPacketType = Tuple[bytes, str]
+T_Retval = TypeVar("T_Retval")
+
+
+class _NullAsyncContextManager:
+ async def __aenter__(self) -> None:
+ pass
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ return None
+
+
+class SocketAttribute(TypedAttributeSet):
+ #: the address family of the underlying socket
+ family: AddressFamily = typed_attribute()
+ #: the local socket address of the underlying socket
+ local_address: SockAddrType = typed_attribute()
+ #: for IP addresses, the local port the underlying socket is bound to
+ local_port: int = typed_attribute()
+ #: the underlying stdlib socket object
+ raw_socket: socket.socket = typed_attribute()
+ #: the remote address the underlying socket is connected to
+ remote_address: SockAddrType = typed_attribute()
+ #: for IP addresses, the remote port the underlying socket is connected to
+ remote_port: int = typed_attribute()
+
+
+class _SocketProvider(TypedAttributeProvider):
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ from .._core._sockets import convert_ipv6_sockaddr as convert
+
+ attributes: dict[Any, Callable[[], Any]] = {
+ SocketAttribute.family: lambda: self._raw_socket.family,
+ SocketAttribute.local_address: lambda: convert(
+ self._raw_socket.getsockname()
+ ),
+ SocketAttribute.raw_socket: lambda: self._raw_socket,
+ }
+ try:
+ peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
+ except OSError:
+ peername = None
+
+ # Provide the remote address for connected sockets
+ if peername is not None:
+ attributes[SocketAttribute.remote_address] = lambda: peername
+
+ # Provide local and remote ports for IP based sockets
+ if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
+ attributes[SocketAttribute.local_port] = (
+ lambda: self._raw_socket.getsockname()[1]
+ )
+ if peername is not None:
+ remote_port = peername[1]
+ attributes[SocketAttribute.remote_port] = lambda: remote_port
+
+ return attributes
+
+ @property
+ @abstractmethod
+ def _raw_socket(self) -> socket.socket:
+ pass
+
+
+class SocketStream(ByteStream, _SocketProvider):
+ """
+ Transports bytes over a socket.
+
+ Supports all relevant extra attributes from :class:`~SocketAttribute`.
+ """
+
+
+class UNIXSocketStream(SocketStream):
+ @abstractmethod
+ async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
+ """
+ Send file descriptors along with a message to the peer.
+
+ :param message: a non-empty bytestring
+ :param fds: a collection of files (either numeric file descriptors or open file
+ or socket objects)
+ """
+
+ @abstractmethod
+ async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
+ """
+ Receive file descriptors along with a message from the peer.
+
+ :param msglen: length of the message to expect from the peer
+ :param maxfds: maximum number of file descriptors to expect from the peer
+ :return: a tuple of (message, file descriptors)
+ """
+
+
+class SocketListener(Listener[SocketStream], _SocketProvider):
+ """
+ Listens to incoming socket connections.
+
+ Supports all relevant extra attributes from :class:`~SocketAttribute`.
+ """
+
+ @abstractmethod
+ async def accept(self) -> SocketStream:
+ """Accept an incoming connection."""
+
+ async def serve(
+ self,
+ handler: Callable[[SocketStream], Any],
+ task_group: TaskGroup | None = None,
+ ) -> None:
+ from .. import create_task_group
+
+ async with AsyncExitStack() as stack:
+ if task_group is None:
+ task_group = await stack.enter_async_context(create_task_group())
+
+ while True:
+ stream = await self.accept()
+ task_group.start_soon(handler, stream)
+
+
+class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
+ """
+ Represents an unconnected UDP socket.
+
+ Supports all relevant extra attributes from :class:`~SocketAttribute`.
+ """
+
+ async def sendto(self, data: bytes, host: str, port: int) -> None:
+ """
+ Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).
+
+ """
+ return await self.send((data, (host, port)))
+
+
+class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
+ """
+ Represents a connected UDP socket.
+
+ Supports all relevant extra attributes from :class:`~SocketAttribute`.
+ """
+
+
+class UNIXDatagramSocket(
+ UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
+):
+ """
+ Represents an unconnected Unix datagram socket.
+
+ Supports all relevant extra attributes from :class:`~SocketAttribute`.
+ """
+
+ async def sendto(self, data: bytes, path: str) -> None:
+ """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
+ return await self.send((data, path))
+
+
+class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
+ """
+ Represents a connected Unix datagram socket.
+
+ Supports all relevant extra attributes from :class:`~SocketAttribute`.
+ """
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/_streams.py b/venv/lib/python3.11/site-packages/anyio/abc/_streams.py
new file mode 100644
index 0000000..8c63868
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/_streams.py
@@ -0,0 +1,203 @@
+from __future__ import annotations
+
+from abc import abstractmethod
+from collections.abc import Callable
+from typing import Any, Generic, TypeVar, Union
+
+from .._core._exceptions import EndOfStream
+from .._core._typedattr import TypedAttributeProvider
+from ._resources import AsyncResource
+from ._tasks import TaskGroup
+
+T_Item = TypeVar("T_Item")
+T_co = TypeVar("T_co", covariant=True)
+T_contra = TypeVar("T_contra", contravariant=True)
+
+
+class UnreliableObjectReceiveStream(
+ Generic[T_co], AsyncResource, TypedAttributeProvider
+):
+ """
+ An interface for receiving objects.
+
+ This interface makes no guarantees that the received messages arrive in the order in
+ which they were sent, or that no messages are missed.
+
+ Asynchronously iterating over objects of this type will yield objects matching the
+ given type parameter.
+ """
+
+ def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
+ return self
+
+ async def __anext__(self) -> T_co:
+ try:
+ return await self.receive()
+ except EndOfStream:
+ raise StopAsyncIteration
+
+ @abstractmethod
+ async def receive(self) -> T_co:
+ """
+ Receive the next item.
+
+ :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
+ closed
+ :raises ~anyio.EndOfStream: if this stream has been closed from the other end
+ :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
+ due to external causes
+ """
+
+
+class UnreliableObjectSendStream(
+ Generic[T_contra], AsyncResource, TypedAttributeProvider
+):
+ """
+ An interface for sending objects.
+
+ This interface makes no guarantees that the messages sent will reach the
+ recipient(s) in the same order in which they were sent, or at all.
+ """
+
+ @abstractmethod
+ async def send(self, item: T_contra) -> None:
+ """
+ Send an item to the peer(s).
+
+ :param item: the item to send
+ :raises ~anyio.ClosedResourceError: if the send stream has been explicitly
+ closed
+ :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
+ due to external causes
+ """
+
+
+class UnreliableObjectStream(
+ UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
+):
+ """
+ A bidirectional message stream which does not guarantee the order or reliability of
+ message delivery.
+ """
+
+
+class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
+ """
+ A receive message stream which guarantees that messages are received in the same
+ order in which they were sent, and that no messages are missed.
+ """
+
+
+class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
+ """
+ A send message stream which guarantees that messages are delivered in the same order
+ in which they were sent, without missing any messages in the middle.
+ """
+
+
+class ObjectStream(
+ ObjectReceiveStream[T_Item],
+ ObjectSendStream[T_Item],
+ UnreliableObjectStream[T_Item],
+):
+ """
+ A bidirectional message stream which guarantees the order and reliability of message
+ delivery.
+ """
+
+ @abstractmethod
+ async def send_eof(self) -> None:
+ """
+ Send an end-of-file indication to the peer.
+
+ You should not try to send any further data to this stream after calling this
+ method. This method is idempotent (does nothing on successive calls).
+ """
+
+
+class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
+ """
+ An interface for receiving bytes from a single peer.
+
+ Iterating this byte stream will yield a byte string of arbitrary length, but no more
+ than 65536 bytes.
+ """
+
+ def __aiter__(self) -> ByteReceiveStream:
+ return self
+
+ async def __anext__(self) -> bytes:
+ try:
+ return await self.receive()
+ except EndOfStream:
+ raise StopAsyncIteration
+
+ @abstractmethod
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ """
+ Receive at most ``max_bytes`` bytes from the peer.
+
+ .. note:: Implementors of this interface should not return an empty
+ :class:`bytes` object, and users should ignore any empty chunks they do
+ receive.
+
+ :param max_bytes: maximum number of bytes to receive
+ :return: the received bytes
+ :raises ~anyio.EndOfStream: if this stream has been closed from the other end
+ """
+
+
+class ByteSendStream(AsyncResource, TypedAttributeProvider):
+ """An interface for sending bytes to a single peer."""
+
+ @abstractmethod
+ async def send(self, item: bytes) -> None:
+ """
+ Send the given bytes to the peer.
+
+ :param item: the bytes to send
+ """
+
+
+class ByteStream(ByteReceiveStream, ByteSendStream):
+ """A bidirectional byte stream."""
+
+ @abstractmethod
+ async def send_eof(self) -> None:
+ """
+ Send an end-of-file indication to the peer.
+
+ You should not try to send any further data to this stream after calling this
+ method. This method is idempotent (does nothing on successive calls).
+ """
+
+
+#: Type alias for all unreliable bytes-oriented receive streams.
+AnyUnreliableByteReceiveStream = Union[
+ UnreliableObjectReceiveStream[bytes], ByteReceiveStream
+]
+#: Type alias for all unreliable bytes-oriented send streams.
+AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream]
+#: Type alias for all unreliable bytes-oriented streams.
+AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream]
+#: Type alias for all bytes-oriented receive streams.
+AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
+#: Type alias for all bytes-oriented send streams.
+AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream]
+#: Type alias for all bytes-oriented streams.
+AnyByteStream = Union[ObjectStream[bytes], ByteStream]
+
+
+class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
+ """An interface for objects that let you accept incoming connections."""
+
+ @abstractmethod
+ async def serve(
+ self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
+ ) -> None:
+ """
+ Accept incoming connections as they come in and start tasks to handle them.
+
+ :param handler: a callable that will be used to handle each accepted connection
+ :param task_group: the task group that will be used to start tasks for handling
+ each accepted connection (if omitted, an ad-hoc task group will be created)
+ """
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/_subprocesses.py b/venv/lib/python3.11/site-packages/anyio/abc/_subprocesses.py
new file mode 100644
index 0000000..ce0564c
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/_subprocesses.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+from abc import abstractmethod
+from signal import Signals
+
+from ._resources import AsyncResource
+from ._streams import ByteReceiveStream, ByteSendStream
+
+
+class Process(AsyncResource):
+ """An asynchronous version of :class:`subprocess.Popen`."""
+
+ @abstractmethod
+ async def wait(self) -> int:
+ """
+ Wait until the process exits.
+
+ :return: the exit code of the process
+ """
+
+ @abstractmethod
+ def terminate(self) -> None:
+ """
+ Terminates the process, gracefully if possible.
+
+ On Windows, this calls ``TerminateProcess()``.
+ On POSIX systems, this sends ``SIGTERM`` to the process.
+
+ .. seealso:: :meth:`subprocess.Popen.terminate`
+ """
+
+ @abstractmethod
+ def kill(self) -> None:
+ """
+ Kills the process.
+
+ On Windows, this calls ``TerminateProcess()``.
+ On POSIX systems, this sends ``SIGKILL`` to the process.
+
+ .. seealso:: :meth:`subprocess.Popen.kill`
+ """
+
+ @abstractmethod
+ def send_signal(self, signal: Signals) -> None:
+ """
+ Send a signal to the subprocess.
+
+ .. seealso:: :meth:`subprocess.Popen.send_signal`
+
+ :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
+ """
+
+ @property
+ @abstractmethod
+ def pid(self) -> int:
+ """The process ID of the process."""
+
+ @property
+ @abstractmethod
+ def returncode(self) -> int | None:
+ """
+ The return code of the process. If the process has not yet terminated, this will
+ be ``None``.
+ """
+
+ @property
+ @abstractmethod
+ def stdin(self) -> ByteSendStream | None:
+ """The stream for the standard input of the process."""
+
+ @property
+ @abstractmethod
+ def stdout(self) -> ByteReceiveStream | None:
+ """The stream for the standard output of the process."""
+
+ @property
+ @abstractmethod
+ def stderr(self) -> ByteReceiveStream | None:
+ """The stream for the standard error output of the process."""
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/_tasks.py b/venv/lib/python3.11/site-packages/anyio/abc/_tasks.py
new file mode 100644
index 0000000..7ad4938
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/_tasks.py
@@ -0,0 +1,97 @@
+from __future__ import annotations
+
+import sys
+from abc import ABCMeta, abstractmethod
+from collections.abc import Awaitable, Callable
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from typing_extensions import TypeVarTuple, Unpack
+
+if TYPE_CHECKING:
+ from .._core._tasks import CancelScope
+
+T_Retval = TypeVar("T_Retval")
+T_contra = TypeVar("T_contra", contravariant=True)
+PosArgsT = TypeVarTuple("PosArgsT")
+
+
+class TaskStatus(Protocol[T_contra]):
+ @overload
+ def started(self: TaskStatus[None]) -> None:
+ ...
+
+ @overload
+ def started(self, value: T_contra) -> None:
+ ...
+
+ def started(self, value: T_contra | None = None) -> None:
+ """
+ Signal that the task has started.
+
+ :param value: object passed back to the starter of the task
+ """
+
+
+class TaskGroup(metaclass=ABCMeta):
+ """
+ Groups several asynchronous tasks together.
+
+ :ivar cancel_scope: the cancel scope inherited by all child tasks
+ :vartype cancel_scope: CancelScope
+ """
+
+ cancel_scope: CancelScope
+
+ @abstractmethod
+ def start_soon(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
+ *args: Unpack[PosArgsT],
+ name: object = None,
+ ) -> None:
+ """
+ Start a new task in this task group.
+
+ :param func: a coroutine function
+ :param args: positional arguments to call the function with
+ :param name: name of the task, for the purposes of introspection and debugging
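+
+        A minimal sketch (``sleep_and_print`` is a hypothetical coroutine
+        function)::
+
+            async with anyio.create_task_group() as tg:
+                tg.start_soon(sleep_and_print, 1)
+                tg.start_soon(sleep_and_print, 2, name="second sleeper")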
+
+ .. versionadded:: 3.0
+ """
+
+ @abstractmethod
+ async def start(
+ self,
+ func: Callable[..., Awaitable[Any]],
+ *args: object,
+ name: object = None,
+ ) -> Any:
+ """
+ Start a new task and wait until it signals for readiness.
+
+ :param func: a coroutine function
+ :param args: positional arguments to call the function with
+ :param name: name of the task, for the purposes of introspection and debugging
+ :return: the value passed to ``task_status.started()``
+ :raises RuntimeError: if the task finishes without calling
+ ``task_status.started()``
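+
+        A sketch of a task that signals readiness (this assumes the
+        ``anyio.TASK_STATUS_IGNORED`` default provided by the library)::
+
+            async def server(*, task_status=anyio.TASK_STATUS_IGNORED):
+                task_status.started("ready")
+                await anyio.sleep_forever()
+
+            async with anyio.create_task_group() as tg:
+                value = await tg.start(server)  # value == "ready"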
+
+ .. versionadded:: 3.0
+ """
+
+ @abstractmethod
+ async def __aenter__(self) -> TaskGroup:
+ """Enter the task group context and allow starting new tasks."""
+
+ @abstractmethod
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ """Exit the task group context waiting for all tasks to finish."""
diff --git a/venv/lib/python3.11/site-packages/anyio/abc/_testing.py b/venv/lib/python3.11/site-packages/anyio/abc/_testing.py
new file mode 100644
index 0000000..4d70b9e
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/abc/_testing.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import types
+from abc import ABCMeta, abstractmethod
+from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable
+from typing import Any, TypeVar
+
+_T = TypeVar("_T")
+
+
+class TestRunner(metaclass=ABCMeta):
+ """
+ Encapsulates a running event loop. Every call made through this object will use the
+ same event loop.
+ """
+
+ def __enter__(self) -> TestRunner:
+ return self
+
+ @abstractmethod
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: types.TracebackType | None,
+ ) -> bool | None:
+ ...
+
+ @abstractmethod
+ def run_asyncgen_fixture(
+ self,
+ fixture_func: Callable[..., AsyncGenerator[_T, Any]],
+ kwargs: dict[str, Any],
+ ) -> Iterable[_T]:
+ """
+ Run an async generator fixture.
+
+ :param fixture_func: the fixture function
+ :param kwargs: keyword arguments to call the fixture function with
+ :return: an iterator yielding the value yielded from the async generator
+ """
+
+ @abstractmethod
+ def run_fixture(
+ self,
+ fixture_func: Callable[..., Coroutine[Any, Any, _T]],
+ kwargs: dict[str, Any],
+ ) -> _T:
+ """
+ Run an async fixture.
+
+ :param fixture_func: the fixture function
+ :param kwargs: keyword arguments to call the fixture function with
+ :return: the return value of the fixture function
+ """
+
+ @abstractmethod
+ def run_test(
+ self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
+ ) -> None:
+ """
+ Run an async test function.
+
+ :param test_func: the test function
+ :param kwargs: keyword arguments to call the test function with
+ """
diff --git a/venv/lib/python3.11/site-packages/anyio/from_thread.py b/venv/lib/python3.11/site-packages/anyio/from_thread.py
new file mode 100644
index 0000000..4a98703
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/from_thread.py
@@ -0,0 +1,476 @@
+from __future__ import annotations
+
+import sys
+import threading
+from collections.abc import Awaitable, Callable, Generator
+from concurrent.futures import FIRST_COMPLETED, Future, ThreadPoolExecutor, wait
+from contextlib import AbstractContextManager, contextmanager
+from inspect import isawaitable
+from types import TracebackType
+from typing import (
+ Any,
+ AsyncContextManager,
+ ContextManager,
+ Generic,
+ Iterable,
+ TypeVar,
+ cast,
+ overload,
+)
+
+from ._core import _eventloop
+from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
+from ._core._synchronization import Event
+from ._core._tasks import CancelScope, create_task_group
+from .abc import AsyncBackend
+from .abc._tasks import TaskStatus
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from typing_extensions import TypeVarTuple, Unpack
+
+T_Retval = TypeVar("T_Retval")
+T_co = TypeVar("T_co", covariant=True)
+PosArgsT = TypeVarTuple("PosArgsT")
+
+
+def run(
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
+) -> T_Retval:
+ """
+ Call a coroutine function from a worker thread.
+
+ :param func: a coroutine function
+ :param args: positional arguments for the callable
+ :return: the return value of the coroutine function
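+
+    A sketch of calling back into the event loop from a worker thread started
+    with ``anyio.to_thread.run_sync`` (``anyio`` is assumed to be imported)::
+
+        def blocking_work() -> None:
+            from_thread.run(anyio.sleep, 1)
+
+        await anyio.to_thread.run_sync(blocking_work)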
+
+ """
+ try:
+ async_backend = threadlocals.current_async_backend
+ token = threadlocals.current_token
+ except AttributeError:
+ raise RuntimeError(
+ "This function can only be run from an AnyIO worker thread"
+ ) from None
+
+ return async_backend.run_async_from_thread(func, args, token=token)
+
+
+def run_sync(
+ func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
+) -> T_Retval:
+ """
+ Call a function in the event loop thread from a worker thread.
+
+ :param func: a callable
+ :param args: positional arguments for the callable
+ :return: the return value of the callable
+
+ """
+ try:
+ async_backend = threadlocals.current_async_backend
+ token = threadlocals.current_token
+ except AttributeError:
+ raise RuntimeError(
+ "This function can only be run from an AnyIO worker thread"
+ ) from None
+
+ return async_backend.run_sync_from_thread(func, args, token=token)
+
+
+class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
+ _enter_future: Future[T_co]
+ _exit_future: Future[bool | None]
+ _exit_event: Event
+ _exit_exc_info: tuple[
+ type[BaseException] | None, BaseException | None, TracebackType | None
+ ] = (None, None, None)
+
+ def __init__(self, async_cm: AsyncContextManager[T_co], portal: BlockingPortal):
+ self._async_cm = async_cm
+ self._portal = portal
+
+ async def run_async_cm(self) -> bool | None:
+ try:
+ self._exit_event = Event()
+ value = await self._async_cm.__aenter__()
+ except BaseException as exc:
+ self._enter_future.set_exception(exc)
+ raise
+ else:
+ self._enter_future.set_result(value)
+
+ try:
+ # Wait for the sync context manager to exit.
+            # This next statement can raise an exception of the class returned
+            # by `get_cancelled_exc_class()` if something went wrong in a task
+            # group inside this async context manager.
+ await self._exit_event.wait()
+ finally:
+            # In case of cancellation, we may end up here before
+            # `_BlockingAsyncContextManager.__exit__` is called, and thus
+            # before `_exit_exc_info` has been set.
+ result = await self._async_cm.__aexit__(*self._exit_exc_info)
+ return result
+
+ def __enter__(self) -> T_co:
+ self._enter_future = Future()
+ self._exit_future = self._portal.start_task_soon(self.run_async_cm)
+ return self._enter_future.result()
+
+ def __exit__(
+ self,
+ __exc_type: type[BaseException] | None,
+ __exc_value: BaseException | None,
+ __traceback: TracebackType | None,
+ ) -> bool | None:
+ self._exit_exc_info = __exc_type, __exc_value, __traceback
+ self._portal.call(self._exit_event.set)
+ return self._exit_future.result()
+
+
+class _BlockingPortalTaskStatus(TaskStatus):
+ def __init__(self, future: Future):
+ self._future = future
+
+ def started(self, value: object = None) -> None:
+ self._future.set_result(value)
+
+
+class BlockingPortal:
+ """An object that lets external threads run code in an asynchronous event loop."""
+
+ def __new__(cls) -> BlockingPortal:
+ return get_async_backend().create_blocking_portal()
+
+ def __init__(self) -> None:
+ self._event_loop_thread_id: int | None = threading.get_ident()
+ self._stop_event = Event()
+ self._task_group = create_task_group()
+ self._cancelled_exc_class = get_cancelled_exc_class()
+
+ async def __aenter__(self) -> BlockingPortal:
+ await self._task_group.__aenter__()
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> bool | None:
+ await self.stop()
+ return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
+
+ def _check_running(self) -> None:
+ if self._event_loop_thread_id is None:
+ raise RuntimeError("This portal is not running")
+ if self._event_loop_thread_id == threading.get_ident():
+ raise RuntimeError(
+ "This method cannot be called from the event loop thread"
+ )
+
+ async def sleep_until_stopped(self) -> None:
+ """Sleep until :meth:`stop` is called."""
+ await self._stop_event.wait()
+
+ async def stop(self, cancel_remaining: bool = False) -> None:
+ """
+ Signal the portal to shut down.
+
+ This marks the portal as no longer accepting new calls and exits from
+ :meth:`sleep_until_stopped`.
+
+ :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
+ to let them finish before returning
+
+ """
+ self._event_loop_thread_id = None
+ self._stop_event.set()
+ if cancel_remaining:
+ self._task_group.cancel_scope.cancel()
+
+ async def _call_func(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ kwargs: dict[str, Any],
+ future: Future[T_Retval],
+ ) -> None:
+ def callback(f: Future[T_Retval]) -> None:
+ if f.cancelled() and self._event_loop_thread_id not in (
+ None,
+ threading.get_ident(),
+ ):
+ self.call(scope.cancel)
+
+ try:
+ retval_or_awaitable = func(*args, **kwargs)
+ if isawaitable(retval_or_awaitable):
+ with CancelScope() as scope:
+ if future.cancelled():
+ scope.cancel()
+ else:
+ future.add_done_callback(callback)
+
+ retval = await retval_or_awaitable
+ else:
+ retval = retval_or_awaitable
+ except self._cancelled_exc_class:
+ future.cancel()
+ future.set_running_or_notify_cancel()
+ except BaseException as exc:
+ if not future.cancelled():
+ future.set_exception(exc)
+
+ # Let base exceptions fall through
+ if not isinstance(exc, Exception):
+ raise
+ else:
+ if not future.cancelled():
+ future.set_result(retval)
+ finally:
+ scope = None # type: ignore[assignment]
+
+ def _spawn_task_from_thread(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
+ args: tuple[Unpack[PosArgsT]],
+ kwargs: dict[str, Any],
+ name: object,
+ future: Future[T_Retval],
+ ) -> None:
+ """
+ Spawn a new task using the given callable.
+
+ Implementors must ensure that the future is resolved when the task finishes.
+
+ :param func: a callable
+ :param args: positional arguments to be passed to the callable
+ :param kwargs: keyword arguments to be passed to the callable
+ :param name: name of the task (will be coerced to a string if not ``None``)
+ :param future: a future that will resolve to the return value of the callable,
+ or the exception raised during its execution
+
+ """
+ raise NotImplementedError
+
+ @overload
+ def call(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ *args: Unpack[PosArgsT],
+ ) -> T_Retval:
+ ...
+
+ @overload
+ def call(
+ self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
+ ) -> T_Retval:
+ ...
+
+ def call(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
+ *args: Unpack[PosArgsT],
+ ) -> T_Retval:
+ """
+ Call the given function in the event loop thread.
+
+ If the callable returns a coroutine object, it is awaited on.
+
+ :param func: any callable
+ :raises RuntimeError: if the portal is not running or if this method is called
+ from within the event loop thread
+
+ """
+ return cast(T_Retval, self.start_task_soon(func, *args).result())
+
+ @overload
+ def start_task_soon(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
+ *args: Unpack[PosArgsT],
+ name: object = None,
+ ) -> Future[T_Retval]:
+ ...
+
+ @overload
+ def start_task_soon(
+ self,
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ *args: Unpack[PosArgsT],
+ name: object = None,
+ ) -> Future[T_Retval]:
+ ...
+
+ def start_task_soon(
+ self,
+ func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
+ *args: Unpack[PosArgsT],
+ name: object = None,
+ ) -> Future[T_Retval]:
+ """
+ Start a task in the portal's task group.
+
+ The task will be run inside a cancel scope which can be cancelled by cancelling
+ the returned future.
+
+ :param func: the target function
+ :param args: positional arguments passed to ``func``
+ :param name: name of the task (will be coerced to a string if not ``None``)
+ :return: a future that resolves with the return value of the callable if the
+ task completes successfully, or with the exception raised in the task
+ :raises RuntimeError: if the portal is not running or if this method is called
+ from within the event loop thread
+ :rtype: concurrent.futures.Future[T_Retval]
+
+ .. versionadded:: 3.0
+
+ """
+ self._check_running()
+ f: Future[T_Retval] = Future()
+ self._spawn_task_from_thread(func, args, {}, name, f)
+ return f
+
+ def start_task(
+ self,
+ func: Callable[..., Awaitable[T_Retval]],
+ *args: object,
+ name: object = None,
+ ) -> tuple[Future[T_Retval], Any]:
+ """
+ Start a task in the portal's task group and wait until it signals for readiness.
+
+ This method works the same way as :meth:`.abc.TaskGroup.start`.
+
+ :param func: the target function
+ :param args: positional arguments passed to ``func``
+ :param name: name of the task (will be coerced to a string if not ``None``)
+ :return: a tuple of (future, task_status_value) where the ``task_status_value``
+ is the value passed to ``task_status.started()`` from within the target
+ function
+ :rtype: tuple[concurrent.futures.Future[T_Retval], Any]
+
+ .. versionadded:: 3.0
+
+ """
+
+ def task_done(future: Future[T_Retval]) -> None:
+ if not task_status_future.done():
+ if future.cancelled():
+ task_status_future.cancel()
+ elif future.exception():
+ task_status_future.set_exception(future.exception())
+ else:
+ exc = RuntimeError(
+ "Task exited without calling task_status.started()"
+ )
+ task_status_future.set_exception(exc)
+
+ self._check_running()
+ task_status_future: Future = Future()
+ task_status = _BlockingPortalTaskStatus(task_status_future)
+ f: Future = Future()
+ f.add_done_callback(task_done)
+ self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
+ return f, task_status_future.result()
+
+ def wrap_async_context_manager(
+ self, cm: AsyncContextManager[T_co]
+ ) -> ContextManager[T_co]:
+ """
+ Wrap an async context manager as a synchronous context manager via this portal.
+
+        Spawns a task that calls ``__aenter__()`` right away and defers
+        ``__aexit__()`` until the synchronous context manager exits.
+
+ :param cm: an asynchronous context manager
+ :return: a synchronous context manager
+
+ .. versionadded:: 2.1
+
+ """
+ return _BlockingAsyncContextManager(cm, self)
+
+
+@contextmanager
+def start_blocking_portal(
+ backend: str = "asyncio", backend_options: dict[str, Any] | None = None
+) -> Generator[BlockingPortal, Any, None]:
+ """
+ Start a new event loop in a new thread and run a blocking portal in its main task.
+
+ The parameters are the same as for :func:`~anyio.run`.
+
+ :param backend: name of the backend
+ :param backend_options: backend options
+ :return: a context manager that yields a blocking portal
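+
+    A minimal usage sketch (``anyio`` is assumed to be imported)::
+
+        with start_blocking_portal() as portal:
+            portal.call(anyio.sleep, 1)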
+
+ .. versionchanged:: 3.0
+ Usage as a context manager is now required.
+
+ """
+
+ async def run_portal() -> None:
+ async with BlockingPortal() as portal_:
+ if future.set_running_or_notify_cancel():
+ future.set_result(portal_)
+ await portal_.sleep_until_stopped()
+
+ future: Future[BlockingPortal] = Future()
+ with ThreadPoolExecutor(1) as executor:
+ run_future = executor.submit(
+ _eventloop.run, # type: ignore[arg-type]
+ run_portal,
+ backend=backend,
+ backend_options=backend_options,
+ )
+ try:
+ wait(
+ cast(Iterable[Future], [run_future, future]),
+ return_when=FIRST_COMPLETED,
+ )
+ except BaseException:
+ future.cancel()
+ run_future.cancel()
+ raise
+
+ if future.done():
+ portal = future.result()
+ cancel_remaining_tasks = False
+ try:
+ yield portal
+ except BaseException:
+ cancel_remaining_tasks = True
+ raise
+ finally:
+ try:
+ portal.call(portal.stop, cancel_remaining_tasks)
+ except RuntimeError:
+ pass
+
+ run_future.result()
+
+
+def check_cancelled() -> None:
+ """
+    Check if the cancel scope of the host task running the current worker thread
+    has been cancelled.
+
+ If the host task's current cancel scope has indeed been cancelled, the
+ backend-specific cancellation exception will be raised.
+
+ :raises RuntimeError: if the current thread was not spawned by
+ :func:`.to_thread.run_sync`
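+
+    A sketch of a blocking worker that polls for cancellation between steps
+    (``do_blocking_step`` is hypothetical)::
+
+        def worker() -> None:
+            while True:
+                check_cancelled()
+                do_blocking_step()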
+
+ """
+ try:
+ async_backend: AsyncBackend = threadlocals.current_async_backend
+ except AttributeError:
+ raise RuntimeError(
+ "This function can only be run from an AnyIO worker thread"
+ ) from None
+
+ async_backend.check_cancelled()
diff --git a/venv/lib/python3.11/site-packages/anyio/lowlevel.py b/venv/lib/python3.11/site-packages/anyio/lowlevel.py
new file mode 100644
index 0000000..a9e10f4
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/lowlevel.py
@@ -0,0 +1,163 @@
+from __future__ import annotations
+
+import enum
+from dataclasses import dataclass
+from typing import Any, Generic, Literal, TypeVar, overload
+from weakref import WeakKeyDictionary
+
+from ._core._eventloop import get_async_backend
+
+T = TypeVar("T")
+D = TypeVar("D")
+
+
+async def checkpoint() -> None:
+ """
+ Check for cancellation and allow the scheduler to switch to another task.
+
+ Equivalent to (but more efficient than)::
+
+ await checkpoint_if_cancelled()
+ await cancel_shielded_checkpoint()
+
+
+ .. versionadded:: 3.0
+
+ """
+ await get_async_backend().checkpoint()
+
+
+async def checkpoint_if_cancelled() -> None:
+ """
+ Enter a checkpoint if the enclosing cancel scope has been cancelled.
+
+ This does not allow the scheduler to switch to a different task.
+
+ .. versionadded:: 3.0
+
+ """
+ await get_async_backend().checkpoint_if_cancelled()
+
+
+async def cancel_shielded_checkpoint() -> None:
+ """
+ Allow the scheduler to switch to another task but without checking for cancellation.
+
+ Equivalent to (but potentially more efficient than)::
+
+ with CancelScope(shield=True):
+ await checkpoint()
+
+
+ .. versionadded:: 3.0
+
+ """
+ await get_async_backend().cancel_shielded_checkpoint()
+
+
+def current_token() -> object:
+ """
+    Return a backend-specific token object that can be used to get back to the event
+ loop.
+
+ """
+ return get_async_backend().current_token()
+
+
+_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
+_token_wrappers: dict[Any, _TokenWrapper] = {}
+
+
+@dataclass(frozen=True)
+class _TokenWrapper:
+ __slots__ = "_token", "__weakref__"
+ _token: object
+
+
+class _NoValueSet(enum.Enum):
+ NO_VALUE_SET = enum.auto()
+
+
+class RunvarToken(Generic[T]):
+ __slots__ = "_var", "_value", "_redeemed"
+
+ def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
+ self._var = var
+ self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
+ self._redeemed = False
+
+
+class RunVar(Generic[T]):
+ """
+ Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
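+
+    A minimal usage sketch (``counter`` is a hypothetical variable name)::
+
+        counter: RunVar[int] = RunVar("counter", default=0)
+
+        async def bump() -> int:
+            value = counter.get() + 1
+            counter.set(value)
+            return value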
+ """
+
+ __slots__ = "_name", "_default"
+
+ NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET
+
+ _token_wrappers: set[_TokenWrapper] = set()
+
+ def __init__(
+ self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
+ ):
+ self._name = name
+ self._default = default
+
+ @property
+ def _current_vars(self) -> dict[str, T]:
+ token = current_token()
+ try:
+ return _run_vars[token]
+ except KeyError:
+ run_vars = _run_vars[token] = {}
+ return run_vars
+
+ @overload
+ def get(self, default: D) -> T | D:
+ ...
+
+ @overload
+ def get(self) -> T:
+ ...
+
+ def get(
+ self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
+ ) -> T | D:
+ try:
+ return self._current_vars[self._name]
+ except KeyError:
+ if default is not RunVar.NO_VALUE_SET:
+ return default
+ elif self._default is not RunVar.NO_VALUE_SET:
+ return self._default
+
+ raise LookupError(
+ f'Run variable "{self._name}" has no value and no default set'
+ )
+
+ def set(self, value: T) -> RunvarToken[T]:
+ current_vars = self._current_vars
+ token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
+ current_vars[self._name] = value
+ return token
+
+ def reset(self, token: RunvarToken[T]) -> None:
+ if token._var is not self:
+ raise ValueError("This token does not belong to this RunVar")
+
+ if token._redeemed:
+ raise ValueError("This token has already been used")
+
+ if token._value is _NoValueSet.NO_VALUE_SET:
+ try:
+ del self._current_vars[self._name]
+ except KeyError:
+ pass
+ else:
+ self._current_vars[self._name] = token._value
+
+ token._redeemed = True
+
+ def __repr__(self) -> str:
+ return f"<RunVar name={self._name!r}>"
diff --git a/venv/lib/python3.11/site-packages/anyio/py.typed b/venv/lib/python3.11/site-packages/anyio/py.typed
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/py.typed
diff --git a/venv/lib/python3.11/site-packages/anyio/pytest_plugin.py b/venv/lib/python3.11/site-packages/anyio/pytest_plugin.py
new file mode 100644
index 0000000..a8dd6f3
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/pytest_plugin.py
@@ -0,0 +1,149 @@
+from __future__ import annotations
+
+from collections.abc import Iterator
+from contextlib import ExitStack, contextmanager
+from inspect import isasyncgenfunction, iscoroutinefunction
+from typing import Any, Dict, Tuple, cast
+
+import pytest
+import sniffio
+
+from ._core._eventloop import get_all_backends, get_async_backend
+from .abc import TestRunner
+
+_current_runner: TestRunner | None = None
+_runner_stack: ExitStack | None = None
+_runner_leases = 0
+
+
+def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
+ if isinstance(backend, str):
+ return backend, {}
+ elif isinstance(backend, tuple) and len(backend) == 2:
+ if isinstance(backend[0], str) and isinstance(backend[1], dict):
+ return cast(Tuple[str, Dict[str, Any]], backend)
+
+ raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")
+
+
+@contextmanager
+def get_runner(
+ backend_name: str, backend_options: dict[str, Any]
+) -> Iterator[TestRunner]:
+ global _current_runner, _runner_leases, _runner_stack
+ if _current_runner is None:
+ asynclib = get_async_backend(backend_name)
+ _runner_stack = ExitStack()
+ if sniffio.current_async_library_cvar.get(None) is None:
+ # Since we're in control of the event loop, we can cache the name of the
+ # async library
+ token = sniffio.current_async_library_cvar.set(backend_name)
+ _runner_stack.callback(sniffio.current_async_library_cvar.reset, token)
+
+ backend_options = backend_options or {}
+ _current_runner = _runner_stack.enter_context(
+ asynclib.create_test_runner(backend_options)
+ )
+
+ _runner_leases += 1
+ try:
+ yield _current_runner
+ finally:
+ _runner_leases -= 1
+ if not _runner_leases:
+ assert _runner_stack is not None
+ _runner_stack.close()
+ _runner_stack = _current_runner = None
+
+
+def pytest_configure(config: Any) -> None:
+ config.addinivalue_line(
+ "markers",
+ "anyio: mark the (coroutine function) test to be run "
+ "asynchronously via anyio.",
+ )
+
+
+def pytest_fixture_setup(fixturedef: Any, request: Any) -> None:
+ def wrapper(*args, anyio_backend, **kwargs): # type: ignore[no-untyped-def]
+ backend_name, backend_options = extract_backend_and_options(anyio_backend)
+ if has_backend_arg:
+ kwargs["anyio_backend"] = anyio_backend
+
+ with get_runner(backend_name, backend_options) as runner:
+ if isasyncgenfunction(func):
+ yield from runner.run_asyncgen_fixture(func, kwargs)
+ else:
+ yield runner.run_fixture(func, kwargs)
+
+ # Only apply this to coroutine functions and async generator functions in requests
+ # that involve the anyio_backend fixture
+ func = fixturedef.func
+ if isasyncgenfunction(func) or iscoroutinefunction(func):
+ if "anyio_backend" in request.fixturenames:
+ has_backend_arg = "anyio_backend" in fixturedef.argnames
+ fixturedef.func = wrapper
+ if not has_backend_arg:
+ fixturedef.argnames += ("anyio_backend",)
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
+ if collector.istestfunction(obj, name):
+ inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
+ if iscoroutinefunction(inner_func):
+ marker = collector.get_closest_marker("anyio")
+ own_markers = getattr(obj, "pytestmark", ())
+ if marker or any(marker.name == "anyio" for marker in own_markers):
+ pytest.mark.usefixtures("anyio_backend")(obj)
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
+ def run_with_hypothesis(**kwargs: Any) -> None:
+ with get_runner(backend_name, backend_options) as runner:
+ runner.run_test(original_func, kwargs)
+
+ backend = pyfuncitem.funcargs.get("anyio_backend")
+ if backend:
+ backend_name, backend_options = extract_backend_and_options(backend)
+
+ if hasattr(pyfuncitem.obj, "hypothesis"):
+ # Wrap the inner test function unless it's already wrapped
+ original_func = pyfuncitem.obj.hypothesis.inner_test
+ if original_func.__qualname__ != run_with_hypothesis.__qualname__:
+ if iscoroutinefunction(original_func):
+ pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis
+
+ return None
+
+ if iscoroutinefunction(pyfuncitem.obj):
+ funcargs = pyfuncitem.funcargs
+ testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
+ with get_runner(backend_name, backend_options) as runner:
+ runner.run_test(pyfuncitem.obj, testargs)
+
+ return True
+
+ return None
+
+
+@pytest.fixture(scope="module", params=get_all_backends())
+def anyio_backend(request: Any) -> Any:
+ return request.param
+
+
+@pytest.fixture
+def anyio_backend_name(anyio_backend: Any) -> str:
+ if isinstance(anyio_backend, str):
+ return anyio_backend
+ else:
+ return anyio_backend[0]
+
+
+@pytest.fixture
+def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
+ if isinstance(anyio_backend, str):
+ return {}
+ else:
+ return anyio_backend[1]
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__init__.py b/venv/lib/python3.11/site-packages/anyio/streams/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__init__.py
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..6e021f2
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/__init__.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/buffered.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/buffered.cpython-311.pyc
new file mode 100644
index 0000000..f092e5e
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/buffered.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/file.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/file.cpython-311.pyc
new file mode 100644
index 0000000..c900e65
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/file.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/memory.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/memory.cpython-311.pyc
new file mode 100644
index 0000000..18b1a6a
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/memory.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/stapled.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/stapled.cpython-311.pyc
new file mode 100644
index 0000000..e87e2c4
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/stapled.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/text.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/text.cpython-311.pyc
new file mode 100644
index 0000000..f43704b
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/text.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/tls.cpython-311.pyc b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/tls.cpython-311.pyc
new file mode 100644
index 0000000..f2b786c
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/__pycache__/tls.cpython-311.pyc
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/buffered.py b/venv/lib/python3.11/site-packages/anyio/streams/buffered.py
new file mode 100644
index 0000000..f5d5e83
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/buffered.py
@@ -0,0 +1,119 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Mapping
+from dataclasses import dataclass, field
+from typing import Any
+
+from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
+from ..abc import AnyByteReceiveStream, ByteReceiveStream
+
+
+@dataclass(eq=False)
+class BufferedByteReceiveStream(ByteReceiveStream):
+ """
+    Wraps any bytes-based receive stream and adds an internal buffer, providing
+    byte stream helpers such as :meth:`receive_exactly` and :meth:`receive_until`.
+ """
+
+ receive_stream: AnyByteReceiveStream
+ _buffer: bytearray = field(init=False, default_factory=bytearray)
+ _closed: bool = field(init=False, default=False)
+
+ async def aclose(self) -> None:
+ await self.receive_stream.aclose()
+ self._closed = True
+
+ @property
+ def buffer(self) -> bytes:
+ """The bytes currently in the buffer."""
+ return bytes(self._buffer)
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return self.receive_stream.extra_attributes
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ if self._closed:
+ raise ClosedResourceError
+
+ if self._buffer:
+ chunk = bytes(self._buffer[:max_bytes])
+ del self._buffer[:max_bytes]
+ return chunk
+ elif isinstance(self.receive_stream, ByteReceiveStream):
+ return await self.receive_stream.receive(max_bytes)
+ else:
+ # With a bytes-oriented object stream, we need to handle any surplus bytes
+ # we get from the receive() call
+ chunk = await self.receive_stream.receive()
+ if len(chunk) > max_bytes:
+ # Save the surplus bytes in the buffer
+ self._buffer.extend(chunk[max_bytes:])
+ return chunk[:max_bytes]
+ else:
+ return chunk
+
+ async def receive_exactly(self, nbytes: int) -> bytes:
+ """
+        Read exactly the given number of bytes from the stream.
+
+ :param nbytes: the number of bytes to read
+ :return: the bytes read
+ :raises ~anyio.IncompleteRead: if the stream was closed before the requested
+ amount of bytes could be read from the stream
+
+ """
+ while True:
+ remaining = nbytes - len(self._buffer)
+ if remaining <= 0:
+ retval = self._buffer[:nbytes]
+ del self._buffer[:nbytes]
+ return bytes(retval)
+
+ try:
+ if isinstance(self.receive_stream, ByteReceiveStream):
+ chunk = await self.receive_stream.receive(remaining)
+ else:
+ chunk = await self.receive_stream.receive()
+ except EndOfStream as exc:
+ raise IncompleteRead from exc
+
+ self._buffer.extend(chunk)
+
+ async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
+ """
+ Read from the stream until the delimiter is found or max_bytes have been read.
+
+ :param delimiter: the marker to look for in the stream
+ :param max_bytes: maximum number of bytes that will be read before raising
+ :exc:`~anyio.DelimiterNotFound`
+ :return: the bytes read (not including the delimiter)
+ :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
+ was found
+ :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
+ bytes read up to the maximum allowed
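+
+        For example, reading one CRLF-terminated line of at most 64 KiB from an
+        instance of this class::
+
+            line = await buffered_stream.receive_until(b"\r\n", 65536)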
+
+ """
+ delimiter_size = len(delimiter)
+ offset = 0
+ while True:
+ # Check if the delimiter can be found in the current buffer
+ index = self._buffer.find(delimiter, offset)
+ if index >= 0:
+ found = self._buffer[:index]
+                del self._buffer[: index + len(delimiter)]
+ return bytes(found)
+
+ # Check if the buffer is already at or over the limit
+ if len(self._buffer) >= max_bytes:
+ raise DelimiterNotFound(max_bytes)
+
+ # Read more data into the buffer from the socket
+ try:
+ data = await self.receive_stream.receive()
+ except EndOfStream as exc:
+ raise IncompleteRead from exc
+
+ # Move the offset forward and add the new data to the buffer
+ offset = max(len(self._buffer) - delimiter_size + 1, 0)
+ self._buffer.extend(data)
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/file.py b/venv/lib/python3.11/site-packages/anyio/streams/file.py
new file mode 100644
index 0000000..f492464
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/file.py
@@ -0,0 +1,148 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Mapping
+from io import SEEK_SET, UnsupportedOperation
+from os import PathLike
+from pathlib import Path
+from typing import Any, BinaryIO, cast
+
+from .. import (
+ BrokenResourceError,
+ ClosedResourceError,
+ EndOfStream,
+ TypedAttributeSet,
+ to_thread,
+ typed_attribute,
+)
+from ..abc import ByteReceiveStream, ByteSendStream
+
+
+class FileStreamAttribute(TypedAttributeSet):
+ #: the open file descriptor
+ file: BinaryIO = typed_attribute()
+ #: the path of the file on the file system, if available (file must be a real file)
+ path: Path = typed_attribute()
+ #: the file number, if available (file must be a real file or a TTY)
+ fileno: int = typed_attribute()
+
+
+class _BaseFileStream:
+ def __init__(self, file: BinaryIO):
+ self._file = file
+
+ async def aclose(self) -> None:
+ await to_thread.run_sync(self._file.close)
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ attributes: dict[Any, Callable[[], Any]] = {
+ FileStreamAttribute.file: lambda: self._file,
+ }
+
+ if hasattr(self._file, "name"):
+ attributes[FileStreamAttribute.path] = lambda: Path(self._file.name)
+
+ try:
+ self._file.fileno()
+ except UnsupportedOperation:
+ pass
+ else:
+ attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno()
+
+ return attributes
+
+
+class FileReadStream(_BaseFileStream, ByteReceiveStream):
+ """
+ A byte stream that reads from a file in the file system.
+
+ :param file: a file that has been opened for reading in binary mode
+
+ .. versionadded:: 3.0
+ """
+
+ @classmethod
+ async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:
+ """
+ Create a file read stream by opening the given file.
+
+ :param path: path of the file to read from
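+
+        A sketch of streaming a file's contents (the path and the ``process``
+        callback are hypothetical)::
+
+            async with await FileReadStream.from_path("/tmp/data.bin") as stream:
+                async for chunk in stream:
+                    process(chunk)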
+
+ """
+ file = await to_thread.run_sync(Path(path).open, "rb")
+ return cls(cast(BinaryIO, file))
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ try:
+ data = await to_thread.run_sync(self._file.read, max_bytes)
+ except ValueError:
+ raise ClosedResourceError from None
+ except OSError as exc:
+ raise BrokenResourceError from exc
+
+ if data:
+ return data
+ else:
+ raise EndOfStream
+
+ async def seek(self, position: int, whence: int = SEEK_SET) -> int:
+ """
+ Seek the file to the given position.
+
+ .. seealso:: :meth:`io.IOBase.seek`
+
+ .. note:: Not all file descriptors are seekable.
+
+ :param position: position to seek the file to
+ :param whence: controls how ``position`` is interpreted
+ :return: the new absolute position
+ :raises OSError: if the file is not seekable
+
+ """
+ return await to_thread.run_sync(self._file.seek, position, whence)
+
+ async def tell(self) -> int:
+ """
+ Return the current stream position.
+
+ .. note:: Not all file descriptors are seekable.
+
+ :return: the current absolute position
+ :raises OSError: if the file is not seekable
+
+ """
+ return await to_thread.run_sync(self._file.tell)
+
+
+class FileWriteStream(_BaseFileStream, ByteSendStream):
+ """
+ A byte stream that writes to a file in the file system.
+
+ :param file: a file that has been opened for writing in binary mode
+
+ .. versionadded:: 3.0
+ """
+
+ @classmethod
+ async def from_path(
+ cls, path: str | PathLike[str], append: bool = False
+ ) -> FileWriteStream:
+ """
+ Create a file write stream by opening the given file for writing.
+
+ :param path: path of the file to write to
+ :param append: if ``True``, open the file for appending; if ``False``, any
+ existing file at the given path will be truncated
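+
+        A sketch of appending to a log file (the path is hypothetical)::
+
+            async with await FileWriteStream.from_path("/tmp/app.log", append=True) as stream:
+                await stream.send(b"started\n")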
+
+ """
+ mode = "ab" if append else "wb"
+ file = await to_thread.run_sync(Path(path).open, mode)
+ return cls(cast(BinaryIO, file))
+
+ async def send(self, item: bytes) -> None:
+ try:
+ await to_thread.run_sync(self._file.write, item)
+ except ValueError:
+ raise ClosedResourceError from None
+ except OSError as exc:
+ raise BrokenResourceError from exc
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/memory.py b/venv/lib/python3.11/site-packages/anyio/streams/memory.py
new file mode 100644
index 0000000..bc2425b
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/memory.py
@@ -0,0 +1,283 @@
+from __future__ import annotations
+
+from collections import OrderedDict, deque
+from dataclasses import dataclass, field
+from types import TracebackType
+from typing import Generic, NamedTuple, TypeVar
+
+from .. import (
+ BrokenResourceError,
+ ClosedResourceError,
+ EndOfStream,
+ WouldBlock,
+)
+from ..abc import Event, ObjectReceiveStream, ObjectSendStream
+from ..lowlevel import checkpoint
+
+T_Item = TypeVar("T_Item")
+T_co = TypeVar("T_co", covariant=True)
+T_contra = TypeVar("T_contra", contravariant=True)
+
+
+class MemoryObjectStreamStatistics(NamedTuple):
+ current_buffer_used: int #: number of items stored in the buffer
+ #: maximum number of items that can be stored on this stream (or :data:`math.inf`)
+ max_buffer_size: float
+ open_send_streams: int #: number of unclosed clones of the send stream
+ open_receive_streams: int #: number of unclosed clones of the receive stream
+ #: number of tasks blocked on :meth:`MemoryObjectSendStream.send`
+ tasks_waiting_send: int
+ #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive`
+ tasks_waiting_receive: int
+
+
+@dataclass(eq=False)
+class MemoryObjectStreamState(Generic[T_Item]):
+ max_buffer_size: float = field()
+ buffer: deque[T_Item] = field(init=False, default_factory=deque)
+ open_send_channels: int = field(init=False, default=0)
+ open_receive_channels: int = field(init=False, default=0)
+ waiting_receivers: OrderedDict[Event, list[T_Item]] = field(
+ init=False, default_factory=OrderedDict
+ )
+ waiting_senders: OrderedDict[Event, T_Item] = field(
+ init=False, default_factory=OrderedDict
+ )
+
+ def statistics(self) -> MemoryObjectStreamStatistics:
+ return MemoryObjectStreamStatistics(
+ len(self.buffer),
+ self.max_buffer_size,
+ self.open_send_channels,
+ self.open_receive_channels,
+ len(self.waiting_senders),
+ len(self.waiting_receivers),
+ )
+
+
+@dataclass(eq=False)
+class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]):
+ _state: MemoryObjectStreamState[T_co]
+ _closed: bool = field(init=False, default=False)
+
+ def __post_init__(self) -> None:
+ self._state.open_receive_channels += 1
+
+ def receive_nowait(self) -> T_co:
+ """
+ Receive the next item if it can be done without waiting.
+
+ :return: the received item
+        :raises ~anyio.ClosedResourceError: if this receive stream has been closed
+ :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been
+ closed from the sending end
+ :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks
+ waiting to send
+
+ """
+ if self._closed:
+ raise ClosedResourceError
+
+ if self._state.waiting_senders:
+ # Get the item from the next sender
+ send_event, item = self._state.waiting_senders.popitem(last=False)
+ self._state.buffer.append(item)
+ send_event.set()
+
+ if self._state.buffer:
+ return self._state.buffer.popleft()
+ elif not self._state.open_send_channels:
+ raise EndOfStream
+
+ raise WouldBlock
+
+ async def receive(self) -> T_co:
+ await checkpoint()
+ try:
+ return self.receive_nowait()
+ except WouldBlock:
+ # Add ourselves in the queue
+ receive_event = Event()
+ container: list[T_co] = []
+ self._state.waiting_receivers[receive_event] = container
+
+ try:
+ await receive_event.wait()
+ finally:
+ self._state.waiting_receivers.pop(receive_event, None)
+
+ if container:
+ return container[0]
+ else:
+ raise EndOfStream
+
+ def clone(self) -> MemoryObjectReceiveStream[T_co]:
+ """
+ Create a clone of this receive stream.
+
+ Each clone can be closed separately. Only when all clones have been closed will
+ the receiving end of the memory stream be considered closed by the sending ends.
+
+ :return: the cloned stream
+
+ """
+ if self._closed:
+ raise ClosedResourceError
+
+ return MemoryObjectReceiveStream(_state=self._state)
+
+ def close(self) -> None:
+ """
+ Close the stream.
+
+        This works exactly the same way as :meth:`aclose`, but is provided as a
+        special case for the benefit of synchronous callbacks.
+
+ """
+ if not self._closed:
+ self._closed = True
+ self._state.open_receive_channels -= 1
+ if self._state.open_receive_channels == 0:
+ send_events = list(self._state.waiting_senders.keys())
+ for event in send_events:
+ event.set()
+
+ async def aclose(self) -> None:
+ self.close()
+
+ def statistics(self) -> MemoryObjectStreamStatistics:
+ """
+ Return statistics about the current state of this stream.
+
+ .. versionadded:: 3.0
+ """
+ return self._state.statistics()
+
+ def __enter__(self) -> MemoryObjectReceiveStream[T_co]:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self.close()
+
+
+@dataclass(eq=False)
+class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]):
+ _state: MemoryObjectStreamState[T_contra]
+ _closed: bool = field(init=False, default=False)
+
+ def __post_init__(self) -> None:
+ self._state.open_send_channels += 1
+
+ def send_nowait(self, item: T_contra) -> None:
+ """
+ Send an item immediately if it can be done without waiting.
+
+ :param item: the item to send
+ :raises ~anyio.ClosedResourceError: if this send stream has been closed
+ :raises ~anyio.BrokenResourceError: if the stream has been closed from the
+ receiving end
+ :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting
+ to receive
+
+ """
+ if self._closed:
+ raise ClosedResourceError
+ if not self._state.open_receive_channels:
+ raise BrokenResourceError
+
+ if self._state.waiting_receivers:
+ receive_event, container = self._state.waiting_receivers.popitem(last=False)
+ container.append(item)
+ receive_event.set()
+ elif len(self._state.buffer) < self._state.max_buffer_size:
+ self._state.buffer.append(item)
+ else:
+ raise WouldBlock
+
+ async def send(self, item: T_contra) -> None:
+ """
+ Send an item to the stream.
+
+        If the buffer is full, this method blocks until there is room in the
+        buffer again or the item can be sent directly to a receiver.
+
+ :param item: the item to send
+ :raises ~anyio.ClosedResourceError: if this send stream has been closed
+ :raises ~anyio.BrokenResourceError: if the stream has been closed from the
+ receiving end
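+
+        A sketch pairing this with a receive stream created by
+        ``anyio.create_memory_object_stream``::
+
+            send_stream, receive_stream = anyio.create_memory_object_stream(1)
+            await send_stream.send("hello")
+            assert await receive_stream.receive() == "hello"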
+
+ """
+ await checkpoint()
+ try:
+ self.send_nowait(item)
+ except WouldBlock:
+ # Wait until there's someone on the receiving end
+ send_event = Event()
+ self._state.waiting_senders[send_event] = item
+ try:
+ await send_event.wait()
+ except BaseException:
+ self._state.waiting_senders.pop(send_event, None)
+ raise
+
+ if self._state.waiting_senders.pop(send_event, None):
+ raise BrokenResourceError from None
+
+ def clone(self) -> MemoryObjectSendStream[T_contra]:
+ """
+ Create a clone of this send stream.
+
+ Each clone can be closed separately. Only when all clones have been closed will
+ the sending end of the memory stream be considered closed by the receiving ends.
+
+ :return: the cloned stream
+
+ """
+ if self._closed:
+ raise ClosedResourceError
+
+ return MemoryObjectSendStream(_state=self._state)
+
+ def close(self) -> None:
+ """
+ Close the stream.
+
+        This works exactly the same way as :meth:`aclose`, but is provided as a
+        special case for the benefit of synchronous callbacks.
+
+ """
+ if not self._closed:
+ self._closed = True
+ self._state.open_send_channels -= 1
+ if self._state.open_send_channels == 0:
+ receive_events = list(self._state.waiting_receivers.keys())
+ self._state.waiting_receivers.clear()
+ for event in receive_events:
+ event.set()
+
+ async def aclose(self) -> None:
+ self.close()
+
+ def statistics(self) -> MemoryObjectStreamStatistics:
+ """
+ Return statistics about the current state of this stream.
+
+ .. versionadded:: 3.0
+ """
+ return self._state.statistics()
+
+ def __enter__(self) -> MemoryObjectSendStream[T_contra]:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_val: BaseException | None,
+ exc_tb: TracebackType | None,
+ ) -> None:
+ self.close()
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/stapled.py b/venv/lib/python3.11/site-packages/anyio/streams/stapled.py
new file mode 100644
index 0000000..80f64a2
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/stapled.py
@@ -0,0 +1,141 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Mapping, Sequence
+from dataclasses import dataclass
+from typing import Any, Generic, TypeVar
+
+from ..abc import (
+ ByteReceiveStream,
+ ByteSendStream,
+ ByteStream,
+ Listener,
+ ObjectReceiveStream,
+ ObjectSendStream,
+ ObjectStream,
+ TaskGroup,
+)
+
+T_Item = TypeVar("T_Item")
+T_Stream = TypeVar("T_Stream")
+
+
+@dataclass(eq=False)
+class StapledByteStream(ByteStream):
+ """
+ Combines two byte streams into a single, bidirectional byte stream.
+
+ Extra attributes will be provided from both streams, with the receive stream
+ providing the values in case of a conflict.
+
+ :param ByteSendStream send_stream: the sending byte stream
+ :param ByteReceiveStream receive_stream: the receiving byte stream
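+
+    A sketch of stapling a subprocess's stdio into one bidirectional stream
+    (``process`` is a hypothetical :class:`~anyio.abc.Process`)::
+
+        stream = StapledByteStream(process.stdin, process.stdout)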
+ """
+
+ send_stream: ByteSendStream
+ receive_stream: ByteReceiveStream
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ return await self.receive_stream.receive(max_bytes)
+
+ async def send(self, item: bytes) -> None:
+ await self.send_stream.send(item)
+
+ async def send_eof(self) -> None:
+ await self.send_stream.aclose()
+
+ async def aclose(self) -> None:
+ await self.send_stream.aclose()
+ await self.receive_stream.aclose()
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return {
+ **self.send_stream.extra_attributes,
+ **self.receive_stream.extra_attributes,
+ }
+
+
+@dataclass(eq=False)
+class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
+ """
+ Combines two object streams into a single, bidirectional object stream.
+
+ Extra attributes will be provided from both streams, with the receive stream
+ providing the values in case of a conflict.
+
+ :param ObjectSendStream send_stream: the sending object stream
+ :param ObjectReceiveStream receive_stream: the receiving object stream
+ """
+
+ send_stream: ObjectSendStream[T_Item]
+ receive_stream: ObjectReceiveStream[T_Item]
+
+ async def receive(self) -> T_Item:
+ return await self.receive_stream.receive()
+
+ async def send(self, item: T_Item) -> None:
+ await self.send_stream.send(item)
+
+ async def send_eof(self) -> None:
+ await self.send_stream.aclose()
+
+ async def aclose(self) -> None:
+ await self.send_stream.aclose()
+ await self.receive_stream.aclose()
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return {
+ **self.send_stream.extra_attributes,
+ **self.receive_stream.extra_attributes,
+ }
+
+
+@dataclass(eq=False)
+class MultiListener(Generic[T_Stream], Listener[T_Stream]):
+ """
+ Combines multiple listeners into one, serving connections from all of them at once.
+
+ Any MultiListeners in the given collection of listeners will have their listeners
+ moved into this one.
+
+ Extra attributes are provided from each listener, with each successive listener
+ overriding any conflicting attributes from the previous one.
+
+ :param listeners: listeners to serve
+ :type listeners: Sequence[Listener[T_Stream]]
+ """
+
+ listeners: Sequence[Listener[T_Stream]]
+
+ def __post_init__(self) -> None:
+ listeners: list[Listener[T_Stream]] = []
+ for listener in self.listeners:
+ if isinstance(listener, MultiListener):
+ listeners.extend(listener.listeners)
+ del listener.listeners[:] # type: ignore[attr-defined]
+ else:
+ listeners.append(listener)
+
+ self.listeners = listeners
+
+ async def serve(
+ self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
+ ) -> None:
+ from .. import create_task_group
+
+ async with create_task_group() as tg:
+ for listener in self.listeners:
+ tg.start_soon(listener.serve, handler, task_group)
+
+ async def aclose(self) -> None:
+ for listener in self.listeners:
+ await listener.aclose()
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ attributes: dict = {}
+ for listener in self.listeners:
+ attributes.update(listener.extra_attributes)
+
+ return attributes
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/text.py b/venv/lib/python3.11/site-packages/anyio/streams/text.py
new file mode 100644
index 0000000..f1a1127
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/text.py
@@ -0,0 +1,147 @@
+from __future__ import annotations
+
+import codecs
+from collections.abc import Callable, Mapping
+from dataclasses import InitVar, dataclass, field
+from typing import Any
+
+from ..abc import (
+ AnyByteReceiveStream,
+ AnyByteSendStream,
+ AnyByteStream,
+ ObjectReceiveStream,
+ ObjectSendStream,
+ ObjectStream,
+)
+
+
+@dataclass(eq=False)
+class TextReceiveStream(ObjectReceiveStream[str]):
+ """
+ Stream wrapper that decodes bytes to strings using the given encoding.
+
+    Decoding is done using :class:`~codecs.IncrementalDecoder`, which returns any
+    completely received Unicode characters as soon as they come in.
+
+ :param transport_stream: any bytes-based receive stream
+ :param encoding: character encoding to use for decoding bytes to strings (defaults
+ to ``utf-8``)
+ :param errors: handling scheme for decoding errors (defaults to ``strict``; see the
+ `codecs module documentation`_ for a comprehensive list of options)
+
+ .. _codecs module documentation:
+ https://docs.python.org/3/library/codecs.html#codec-objects
+ """
+
+ transport_stream: AnyByteReceiveStream
+ encoding: InitVar[str] = "utf-8"
+ errors: InitVar[str] = "strict"
+ _decoder: codecs.IncrementalDecoder = field(init=False)
+
+ def __post_init__(self, encoding: str, errors: str) -> None:
+ decoder_class = codecs.getincrementaldecoder(encoding)
+ self._decoder = decoder_class(errors=errors)
+
+ async def receive(self) -> str:
+ while True:
+ chunk = await self.transport_stream.receive()
+ decoded = self._decoder.decode(chunk)
+ if decoded:
+ return decoded
+
+ async def aclose(self) -> None:
+ await self.transport_stream.aclose()
+ self._decoder.reset()
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return self.transport_stream.extra_attributes
+
+
+@dataclass(eq=False)
+class TextSendStream(ObjectSendStream[str]):
+ """
+ Sends strings to the wrapped stream as bytes using the given encoding.
+
+ :param AnyByteSendStream transport_stream: any bytes-based send stream
+ :param str encoding: character encoding to use for encoding strings to bytes
+ (defaults to ``utf-8``)
+ :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
+ the `codecs module documentation`_ for a comprehensive list of options)
+
+ .. _codecs module documentation:
+ https://docs.python.org/3/library/codecs.html#codec-objects
+ """
+
+ transport_stream: AnyByteSendStream
+ encoding: InitVar[str] = "utf-8"
+ errors: str = "strict"
+ _encoder: Callable[..., tuple[bytes, int]] = field(init=False)
+
+ def __post_init__(self, encoding: str) -> None:
+ self._encoder = codecs.getencoder(encoding)
+
+ async def send(self, item: str) -> None:
+ encoded = self._encoder(item, self.errors)[0]
+ await self.transport_stream.send(encoded)
+
+ async def aclose(self) -> None:
+ await self.transport_stream.aclose()
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return self.transport_stream.extra_attributes
+
+
+@dataclass(eq=False)
+class TextStream(ObjectStream[str]):
+ """
+ A bidirectional stream that decodes bytes to strings on receive and encodes strings
+ to bytes on send.
+
+ Extra attributes will be provided from both streams, with the receive stream
+ providing the values in case of a conflict.
+
+ :param AnyByteStream transport_stream: any bytes-based stream
+ :param str encoding: character encoding to use for encoding/decoding strings to/from
+ bytes (defaults to ``utf-8``)
+ :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
+ the `codecs module documentation`_ for a comprehensive list of options)
+
+ .. _codecs module documentation:
+ https://docs.python.org/3/library/codecs.html#codec-objects
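+
+    A sketch of wrapping an existing byte stream (``byte_stream`` is
+    hypothetical)::
+
+        text_stream = TextStream(byte_stream, encoding="utf-8")
+        await text_stream.send("hello")
+        response = await text_stream.receive()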
+ """
+
+ transport_stream: AnyByteStream
+ encoding: InitVar[str] = "utf-8"
+ errors: InitVar[str] = "strict"
+ _receive_stream: TextReceiveStream = field(init=False)
+ _send_stream: TextSendStream = field(init=False)
+
+ def __post_init__(self, encoding: str, errors: str) -> None:
+ self._receive_stream = TextReceiveStream(
+ self.transport_stream, encoding=encoding, errors=errors
+ )
+ self._send_stream = TextSendStream(
+ self.transport_stream, encoding=encoding, errors=errors
+ )
+
+ async def receive(self) -> str:
+ return await self._receive_stream.receive()
+
+ async def send(self, item: str) -> None:
+ await self._send_stream.send(item)
+
+ async def send_eof(self) -> None:
+ await self.transport_stream.send_eof()
+
+ async def aclose(self) -> None:
+ await self._send_stream.aclose()
+ await self._receive_stream.aclose()
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return {
+ **self._send_stream.extra_attributes,
+ **self._receive_stream.extra_attributes,
+ }
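+
+
+# A minimal usage sketch: because AnyByteStream also covers ObjectStream[bytes],
+# a StapledObjectStream over the two ends of a bytes memory object stream gives
+# an in-memory loopback that TextStream can wrap (assumes anyio 4.x):
+#
+#     import anyio
+#     from anyio.streams.stapled import StapledObjectStream
+#     from anyio.streams.text import TextStream
+#
+#     async def main() -> None:
+#         send, receive = anyio.create_memory_object_stream[bytes](1)
+#         text = TextStream(StapledObjectStream(send, receive))
+#         await text.send("ping")  # encoded to b"ping" and looped back
+#         assert await text.receive() == "ping"
+#
+#     anyio.run(main)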
diff --git a/venv/lib/python3.11/site-packages/anyio/streams/tls.py b/venv/lib/python3.11/site-packages/anyio/streams/tls.py
new file mode 100644
index 0000000..e913eed
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/streams/tls.py
@@ -0,0 +1,338 @@
+from __future__ import annotations
+
+import logging
+import re
+import ssl
+import sys
+from collections.abc import Callable, Mapping
+from dataclasses import dataclass
+from functools import wraps
+from typing import Any, Tuple, TypeVar
+
+from .. import (
+ BrokenResourceError,
+ EndOfStream,
+ aclose_forcefully,
+ get_cancelled_exc_class,
+)
+from .._core._typedattr import TypedAttributeSet, typed_attribute
+from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from typing_extensions import TypeVarTuple, Unpack
+
+T_Retval = TypeVar("T_Retval")
+PosArgsT = TypeVarTuple("PosArgsT")
+_PCTRTT = Tuple[Tuple[str, str], ...]
+_PCTRTTT = Tuple[_PCTRTT, ...]
+
+
+class TLSAttribute(TypedAttributeSet):
+ """Contains Transport Layer Security related attributes."""
+
+ #: the selected ALPN protocol
+ alpn_protocol: str | None = typed_attribute()
+ #: the channel binding for type ``tls-unique``
+ channel_binding_tls_unique: bytes = typed_attribute()
+ #: the selected cipher
+ cipher: tuple[str, str, int] = typed_attribute()
+ #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`
+    #: for more information)
+ peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()
+ #: the peer certificate in binary form
+ peer_certificate_binary: bytes | None = typed_attribute()
+ #: ``True`` if this is the server side of the connection
+ server_side: bool = typed_attribute()
+    #: ciphers shared between the client and the server during the TLS handshake
+    #: (``None`` if this is the client side)
+ shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()
+ #: the :class:`~ssl.SSLObject` used for encryption
+ ssl_object: ssl.SSLObject = typed_attribute()
+ #: ``True`` if this stream does (and expects) a closing TLS handshake when the
+ #: stream is being closed
+ standard_compatible: bool = typed_attribute()
+ #: the TLS protocol version (e.g. ``TLSv1.2``)
+ tls_version: str = typed_attribute()
+
+
+@dataclass(eq=False)
+class TLSStream(ByteStream):
+ """
+ A stream wrapper that encrypts all sent data and decrypts received data.
+
+ This class has no public initializer; use :meth:`wrap` instead.
+ All extra attributes from :class:`~TLSAttribute` are supported.
+
+ :var AnyByteStream transport_stream: the wrapped stream
+
+ """
+
+ transport_stream: AnyByteStream
+ standard_compatible: bool
+ _ssl_object: ssl.SSLObject
+ _read_bio: ssl.MemoryBIO
+ _write_bio: ssl.MemoryBIO
+
+ @classmethod
+ async def wrap(
+ cls,
+ transport_stream: AnyByteStream,
+ *,
+ server_side: bool | None = None,
+ hostname: str | None = None,
+ ssl_context: ssl.SSLContext | None = None,
+ standard_compatible: bool = True,
+ ) -> TLSStream:
+ """
+ Wrap an existing stream with Transport Layer Security.
+
+ This performs a TLS handshake with the peer.
+
+ :param transport_stream: a bytes-transporting stream to wrap
+        :param server_side: ``True`` if this is the server side of the connection,
+            ``False`` if this is the client side (if omitted, will be set to ``False``
+            if ``hostname`` has been provided, ``True`` otherwise). Used only to create
+            a default context when an explicit context has not been provided.
+ :param hostname: host name of the peer (if host name checking is desired)
+ :param ssl_context: the SSLContext object to use (if not provided, a secure
+ default will be created)
+ :param standard_compatible: if ``False``, skip the closing handshake when
+ closing the connection, and don't raise an exception if the peer does the
+ same
+ :raises ~ssl.SSLError: if the TLS handshake fails
+
+ """
+ if server_side is None:
+ server_side = not hostname
+
+ if not ssl_context:
+ purpose = (
+ ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH
+ )
+ ssl_context = ssl.create_default_context(purpose)
+
+ # Re-enable detection of unexpected EOFs if it was disabled by Python
+ if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
+ ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF
+
+ bio_in = ssl.MemoryBIO()
+ bio_out = ssl.MemoryBIO()
+ ssl_object = ssl_context.wrap_bio(
+ bio_in, bio_out, server_side=server_side, server_hostname=hostname
+ )
+ wrapper = cls(
+ transport_stream=transport_stream,
+ standard_compatible=standard_compatible,
+ _ssl_object=ssl_object,
+ _read_bio=bio_in,
+ _write_bio=bio_out,
+ )
+ await wrapper._call_sslobject_method(ssl_object.do_handshake)
+ return wrapper
+
+ async def _call_sslobject_method(
+ self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
+ ) -> T_Retval:
+ while True:
+ try:
+ result = func(*args)
+ except ssl.SSLWantReadError:
+ try:
+ # Flush any pending writes first
+ if self._write_bio.pending:
+ await self.transport_stream.send(self._write_bio.read())
+
+ data = await self.transport_stream.receive()
+ except EndOfStream:
+ self._read_bio.write_eof()
+ except OSError as exc:
+ self._read_bio.write_eof()
+ self._write_bio.write_eof()
+ raise BrokenResourceError from exc
+ else:
+ self._read_bio.write(data)
+ except ssl.SSLWantWriteError:
+ await self.transport_stream.send(self._write_bio.read())
+ except ssl.SSLSyscallError as exc:
+ self._read_bio.write_eof()
+ self._write_bio.write_eof()
+ raise BrokenResourceError from exc
+ except ssl.SSLError as exc:
+ self._read_bio.write_eof()
+ self._write_bio.write_eof()
+ if (
+ isinstance(exc, ssl.SSLEOFError)
+ or "UNEXPECTED_EOF_WHILE_READING" in exc.strerror
+ ):
+ if self.standard_compatible:
+ raise BrokenResourceError from exc
+ else:
+ raise EndOfStream from None
+
+ raise
+ else:
+ # Flush any pending writes first
+ if self._write_bio.pending:
+ await self.transport_stream.send(self._write_bio.read())
+
+ return result
+
+ async def unwrap(self) -> tuple[AnyByteStream, bytes]:
+ """
+ Does the TLS closing handshake.
+
+ :return: a tuple of (wrapped byte stream, bytes left in the read buffer)
+
+ """
+ await self._call_sslobject_method(self._ssl_object.unwrap)
+ self._read_bio.write_eof()
+ self._write_bio.write_eof()
+ return self.transport_stream, self._read_bio.read()
+
+ async def aclose(self) -> None:
+ if self.standard_compatible:
+ try:
+ await self.unwrap()
+ except BaseException:
+ await aclose_forcefully(self.transport_stream)
+ raise
+
+ await self.transport_stream.aclose()
+
+ async def receive(self, max_bytes: int = 65536) -> bytes:
+ data = await self._call_sslobject_method(self._ssl_object.read, max_bytes)
+ if not data:
+ raise EndOfStream
+
+ return data
+
+ async def send(self, item: bytes) -> None:
+ await self._call_sslobject_method(self._ssl_object.write, item)
+
+ async def send_eof(self) -> None:
+ tls_version = self.extra(TLSAttribute.tls_version)
+ match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version)
+ if match:
+ major, minor = int(match.group(1)), int(match.group(2) or 0)
+ if (major, minor) < (1, 3):
+ raise NotImplementedError(
+ f"send_eof() requires at least TLSv1.3; current "
+ f"session uses {tls_version}"
+ )
+
+ raise NotImplementedError(
+ "send_eof() has not yet been implemented for TLS streams"
+ )
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return {
+ **self.transport_stream.extra_attributes,
+ TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol,
+ TLSAttribute.channel_binding_tls_unique: (
+ self._ssl_object.get_channel_binding
+ ),
+ TLSAttribute.cipher: self._ssl_object.cipher,
+ TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False),
+ TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert(
+ True
+ ),
+ TLSAttribute.server_side: lambda: self._ssl_object.server_side,
+ TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers()
+ if self._ssl_object.server_side
+ else None,
+ TLSAttribute.standard_compatible: lambda: self.standard_compatible,
+ TLSAttribute.ssl_object: lambda: self._ssl_object,
+ TLSAttribute.tls_version: self._ssl_object.version,
+ }
+
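+# A minimal client-side usage sketch: wrap a TCP connection, then query the
+# negotiated session through the typed extra attributes (the host name
+# "example.com" is only illustrative):
+#
+#     import anyio
+#     from anyio.streams.tls import TLSAttribute, TLSStream
+#
+#     async def main() -> None:
+#         tcp = await anyio.connect_tcp("example.com", 443)
+#         tls = await TLSStream.wrap(tcp, hostname="example.com")
+#         print(tls.extra(TLSAttribute.tls_version))
+#         print(tls.extra(TLSAttribute.alpn_protocol))  # None if no ALPN offered
+#         await tls.aclose()
+#
+#     anyio.run(main)
+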
+
+@dataclass(eq=False)
+class TLSListener(Listener[TLSStream]):
+ """
+ A convenience listener that wraps another listener and auto-negotiates a TLS session
+ on every accepted connection.
+
+ If the TLS handshake times out or raises an exception,
+ :meth:`handle_handshake_error` is called to do whatever post-mortem processing is
+ deemed necessary.
+
+ Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute.
+
+ :param Listener listener: the listener to wrap
+ :param ssl_context: the SSL context object
+ :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap`
+ :param handshake_timeout: time limit for the TLS handshake
+ (passed to :func:`~anyio.fail_after`)
+ """
+
+ listener: Listener[Any]
+ ssl_context: ssl.SSLContext
+ standard_compatible: bool = True
+ handshake_timeout: float = 30
+
+ @staticmethod
+ async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None:
+ """
+ Handle an exception raised during the TLS handshake.
+
+ This method does 3 things:
+
+ #. Forcefully closes the original stream
+ #. Logs the exception (unless it was a cancellation exception) using the
+ ``anyio.streams.tls`` logger
+ #. Reraises the exception if it was a base exception or a cancellation exception
+
+ :param exc: the exception
+ :param stream: the original stream
+
+ """
+ await aclose_forcefully(stream)
+
+ # Log all except cancellation exceptions
+ if not isinstance(exc, get_cancelled_exc_class()):
+ # CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using
+ # any asyncio implementation, so we explicitly pass the exception to log
+ # (https://github.com/python/cpython/issues/108668). Trio does not have this
+ # issue because it works around the CPython bug.
+ logging.getLogger(__name__).exception(
+ "Error during TLS handshake", exc_info=exc
+ )
+
+ # Only reraise base exceptions and cancellation exceptions
+ if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()):
+ raise
+
+ async def serve(
+ self,
+ handler: Callable[[TLSStream], Any],
+ task_group: TaskGroup | None = None,
+ ) -> None:
+ @wraps(handler)
+ async def handler_wrapper(stream: AnyByteStream) -> None:
+ from .. import fail_after
+
+ try:
+ with fail_after(self.handshake_timeout):
+ wrapped_stream = await TLSStream.wrap(
+ stream,
+ ssl_context=self.ssl_context,
+ standard_compatible=self.standard_compatible,
+ )
+ except BaseException as exc:
+ await self.handle_handshake_error(exc, stream)
+ else:
+ await handler(wrapped_stream)
+
+ await self.listener.serve(handler_wrapper, task_group)
+
+ async def aclose(self) -> None:
+ await self.listener.aclose()
+
+ @property
+ def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
+ return {
+ TLSAttribute.standard_compatible: lambda: self.standard_compatible,
+ }
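+
+
+# A minimal server-side usage sketch: wrap a plain TCP listener so every
+# accepted connection is TLS-negotiated before the handler runs (the
+# certificate and key paths are illustrative placeholders):
+#
+#     import ssl
+#     import anyio
+#     from anyio.streams.tls import TLSListener, TLSStream
+#
+#     async def handle(stream: TLSStream) -> None:
+#         await stream.send(b"hello\n")
+#         await stream.aclose()
+#
+#     async def main() -> None:
+#         context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
+#         context.load_cert_chain("cert.pem", "key.pem")
+#         listener = await anyio.create_tcp_listener(local_port=8443)
+#         await TLSListener(listener, context).serve(handle)
+#
+#     anyio.run(main)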
diff --git a/venv/lib/python3.11/site-packages/anyio/to_process.py b/venv/lib/python3.11/site-packages/anyio/to_process.py
new file mode 100644
index 0000000..1ff06f0
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/to_process.py
@@ -0,0 +1,259 @@
+from __future__ import annotations
+
+import os
+import pickle
+import subprocess
+import sys
+from collections import deque
+from collections.abc import Callable
+from importlib.util import module_from_spec, spec_from_file_location
+from typing import TypeVar, cast
+
+from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
+from ._core._exceptions import BrokenWorkerProcess
+from ._core._subprocesses import open_process
+from ._core._synchronization import CapacityLimiter
+from ._core._tasks import CancelScope, fail_after
+from .abc import ByteReceiveStream, ByteSendStream, Process
+from .lowlevel import RunVar, checkpoint_if_cancelled
+from .streams.buffered import BufferedByteReceiveStream
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from typing_extensions import TypeVarTuple, Unpack
+
+WORKER_MAX_IDLE_TIME = 300 # 5 minutes
+
+T_Retval = TypeVar("T_Retval")
+PosArgsT = TypeVarTuple("PosArgsT")
+
+_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
+_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
+ "_process_pool_idle_workers"
+)
+_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")
+
+
+async def run_sync(
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ *args: Unpack[PosArgsT],
+ cancellable: bool = False,
+ limiter: CapacityLimiter | None = None,
+) -> T_Retval:
+ """
+ Call the given function with the given arguments in a worker process.
+
+ If the ``cancellable`` option is enabled and the task waiting for its completion is
+ cancelled, the worker process running it will be abruptly terminated using SIGKILL
+    (or ``TerminateProcess()`` on Windows).
+
+ :param func: a callable
+ :param args: positional arguments for the callable
+ :param cancellable: ``True`` to allow cancellation of the operation while it's
+ running
+    :param limiter: capacity limiter to use to limit the total number of processes
+ running (if omitted, the default limiter is used)
+ :return: an awaitable that yields the return value of the function.
+
+ """
+
+ async def send_raw_command(pickled_cmd: bytes) -> object:
+ try:
+ await stdin.send(pickled_cmd)
+ response = await buffered.receive_until(b"\n", 50)
+ status, length = response.split(b" ")
+ if status not in (b"RETURN", b"EXCEPTION"):
+ raise RuntimeError(
+ f"Worker process returned unexpected response: {response!r}"
+ )
+
+ pickled_response = await buffered.receive_exactly(int(length))
+ except BaseException as exc:
+ workers.discard(process)
+ try:
+ process.kill()
+ with CancelScope(shield=True):
+ await process.aclose()
+ except ProcessLookupError:
+ pass
+
+ if isinstance(exc, get_cancelled_exc_class()):
+ raise
+ else:
+ raise BrokenWorkerProcess from exc
+
+ retval = pickle.loads(pickled_response)
+ if status == b"EXCEPTION":
+ assert isinstance(retval, BaseException)
+ raise retval
+ else:
+ return retval
+
+ # First pickle the request before trying to reserve a worker process
+ await checkpoint_if_cancelled()
+ request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)
+
+ # If this is the first run in this event loop thread, set up the necessary variables
+ try:
+ workers = _process_pool_workers.get()
+ idle_workers = _process_pool_idle_workers.get()
+ except LookupError:
+ workers = set()
+ idle_workers = deque()
+ _process_pool_workers.set(workers)
+ _process_pool_idle_workers.set(idle_workers)
+ get_async_backend().setup_process_pool_exit_at_shutdown(workers)
+
+ async with limiter or current_default_process_limiter():
+ # Pop processes from the pool (starting from the most recently used) until we
+ # find one that hasn't exited yet
+ process: Process
+ while idle_workers:
+ process, idle_since = idle_workers.pop()
+ if process.returncode is None:
+ stdin = cast(ByteSendStream, process.stdin)
+ buffered = BufferedByteReceiveStream(
+ cast(ByteReceiveStream, process.stdout)
+ )
+
+ # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
+ # seconds or longer
+ now = current_time()
+ killed_processes: list[Process] = []
+ while idle_workers:
+ if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
+ break
+
+ process_to_kill, idle_since = idle_workers.popleft()
+ process_to_kill.kill()
+ workers.remove(process_to_kill)
+ killed_processes.append(process_to_kill)
+
+ with CancelScope(shield=True):
+ for killed_process in killed_processes:
+ await killed_process.aclose()
+
+ break
+
+ workers.remove(process)
+ else:
+ command = [sys.executable, "-u", "-m", __name__]
+ process = await open_process(
+ command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
+ )
+ try:
+ stdin = cast(ByteSendStream, process.stdin)
+ buffered = BufferedByteReceiveStream(
+ cast(ByteReceiveStream, process.stdout)
+ )
+ with fail_after(20):
+ message = await buffered.receive(6)
+
+ if message != b"READY\n":
+ raise BrokenWorkerProcess(
+ f"Worker process returned unexpected response: {message!r}"
+ )
+
+ main_module_path = getattr(sys.modules["__main__"], "__file__", None)
+ pickled = pickle.dumps(
+ ("init", sys.path, main_module_path),
+ protocol=pickle.HIGHEST_PROTOCOL,
+ )
+ await send_raw_command(pickled)
+ except (BrokenWorkerProcess, get_cancelled_exc_class()):
+ raise
+ except BaseException as exc:
+ process.kill()
+ raise BrokenWorkerProcess(
+ "Error during worker process initialization"
+ ) from exc
+
+ workers.add(process)
+
+ with CancelScope(shield=not cancellable):
+ try:
+ return cast(T_Retval, await send_raw_command(request))
+ finally:
+ if process in workers:
+ idle_workers.append((process, current_time()))
+
+
+def current_default_process_limiter() -> CapacityLimiter:
+ """
+ Return the capacity limiter that is used by default to limit the number of worker
+ processes.
+
+ :return: a capacity limiter object
+
+ """
+ try:
+ return _default_process_limiter.get()
+ except LookupError:
+ limiter = CapacityLimiter(os.cpu_count() or 2)
+ _default_process_limiter.set(limiter)
+ return limiter
+
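+# A minimal usage sketch: the default limiter is an ordinary CapacityLimiter,
+# so its total_tokens attribute can be resized to cap how many worker
+# processes this event loop may keep running:
+#
+#     from anyio import to_process
+#
+#     async def main() -> None:
+#         to_process.current_default_process_limiter().total_tokens = 2
+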
+
+def process_worker() -> None:
+ # Redirect standard streams to os.devnull so that user code won't interfere with the
+ # parent-worker communication
+ stdin = sys.stdin
+ stdout = sys.stdout
+ sys.stdin = open(os.devnull)
+ sys.stdout = open(os.devnull, "w")
+
+ stdout.buffer.write(b"READY\n")
+ while True:
+ retval = exception = None
+ try:
+ command, *args = pickle.load(stdin.buffer)
+ except EOFError:
+ return
+ except BaseException as exc:
+ exception = exc
+ else:
+ if command == "run":
+ func, args = args
+ try:
+ retval = func(*args)
+ except BaseException as exc:
+ exception = exc
+ elif command == "init":
+ main_module_path: str | None
+ sys.path, main_module_path = args
+ del sys.modules["__main__"]
+ if main_module_path:
+ # Load the parent's main module but as __mp_main__ instead of
+ # __main__ (like multiprocessing does) to avoid infinite recursion
+ try:
+ spec = spec_from_file_location("__mp_main__", main_module_path)
+ if spec and spec.loader:
+ main = module_from_spec(spec)
+ spec.loader.exec_module(main)
+ sys.modules["__main__"] = main
+ except BaseException as exc:
+ exception = exc
+
+ try:
+ if exception is not None:
+ status = b"EXCEPTION"
+ pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
+ else:
+ status = b"RETURN"
+ pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
+ except BaseException as exc:
+ exception = exc
+ status = b"EXCEPTION"
+ pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)
+
+ stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
+ stdout.buffer.write(pickled)
+
+ # Respect SIGTERM
+ if isinstance(exception, SystemExit):
+ raise exception
+
+
+if __name__ == "__main__":
+ process_worker()
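+
+
+# A minimal usage sketch: offload a CPU-bound call to a worker process. The
+# callable and its arguments must be picklable, and the script needs the usual
+# ``if __name__ == "__main__"`` guard, because workers re-import the main
+# module (as __mp_main__, like multiprocessing does):
+#
+#     import anyio
+#     from anyio import to_process
+#
+#     def fib(n: int) -> int:
+#         return n if n < 2 else fib(n - 1) + fib(n - 2)
+#
+#     async def main() -> None:
+#         print(await to_process.run_sync(fib, 30))
+#
+#     if __name__ == "__main__":
+#         anyio.run(main)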
diff --git a/venv/lib/python3.11/site-packages/anyio/to_thread.py b/venv/lib/python3.11/site-packages/anyio/to_thread.py
new file mode 100644
index 0000000..5070516
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/anyio/to_thread.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+import sys
+from collections.abc import Callable
+from typing import TypeVar
+from warnings import warn
+
+from ._core._eventloop import get_async_backend
+from .abc import CapacityLimiter
+
+if sys.version_info >= (3, 11):
+ from typing import TypeVarTuple, Unpack
+else:
+ from typing_extensions import TypeVarTuple, Unpack
+
+T_Retval = TypeVar("T_Retval")
+PosArgsT = TypeVarTuple("PosArgsT")
+
+
+async def run_sync(
+ func: Callable[[Unpack[PosArgsT]], T_Retval],
+ *args: Unpack[PosArgsT],
+ abandon_on_cancel: bool = False,
+ cancellable: bool | None = None,
+ limiter: CapacityLimiter | None = None,
+) -> T_Retval:
+ """
+ Call the given function with the given arguments in a worker thread.
+
+    If the ``abandon_on_cancel`` option is enabled and the task waiting for its
+    completion is cancelled, the thread will still run its course but its return
+    value (or any raised exception) will be ignored.
+
+ :param func: a callable
+ :param args: positional arguments for the callable
+ :param abandon_on_cancel: ``True`` to abandon the thread (leaving it to run
+        unchecked on its own) if the host task is cancelled, ``False`` to ignore
+ cancellations in the host task until the operation has completed in the worker
+ thread
+ :param cancellable: deprecated alias of ``abandon_on_cancel``; will override
+ ``abandon_on_cancel`` if both parameters are passed
+    :param limiter: capacity limiter to use to limit the total number of threads running
+ (if omitted, the default limiter is used)
+ :return: an awaitable that yields the return value of the function.
+
+ """
+ if cancellable is not None:
+ abandon_on_cancel = cancellable
+ warn(
+ "The `cancellable=` keyword argument to `anyio.to_thread.run_sync` is "
+ "deprecated since AnyIO 4.1.0; use `abandon_on_cancel=` instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ return await get_async_backend().run_sync_in_worker_thread(
+ func, args, abandon_on_cancel=abandon_on_cancel, limiter=limiter
+ )
+
+
+def current_default_thread_limiter() -> CapacityLimiter:
+ """
+ Return the capacity limiter that is used by default to limit the number of
+ concurrent threads.
+
+ :return: a capacity limiter object
+
+ """
+ return get_async_backend().current_default_thread_limiter()
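+
+
+# A minimal usage sketch: offload blocking calls to worker threads, with a
+# private CapacityLimiter capping concurrency; functools.partial is used
+# because start_soon() does not forward keyword arguments:
+#
+#     import time
+#     from functools import partial
+#
+#     import anyio
+#     from anyio import to_thread
+#
+#     async def main() -> None:
+#         limiter = anyio.CapacityLimiter(4)  # at most 4 threads at once
+#         async with anyio.create_task_group() as tg:
+#             for _ in range(10):
+#                 tg.start_soon(
+#                     partial(to_thread.run_sync, time.sleep, 1, limiter=limiter)
+#                 )
+#
+#     anyio.run(main)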