+
+# Click
+
+Click is a Python package for creating beautiful command line interfaces
+in a composable way with as little code as necessary. It's the "Command
+Line Interface Creation Kit". It's highly configurable but comes with
+sensible defaults out of the box.
+
+It aims to make the process of writing command line tools quick and fun
+while also preventing any frustration caused by the inability to
+implement an intended CLI API.
+
+Click in three points:
+
+- Arbitrary nesting of commands (sketched below)
+- Automatic help page generation
+- Lazy loading of subcommands at runtime
+
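+The first of these, command nesting, is worth a quick sketch. A
+`click.group()` acts as a parent command and registers other functions as
+its subcommands (the file name and subcommand names below are illustrative):
+
+```python
+import click
+
+@click.group()
+def cli():
+    """A tool with nested subcommands."""
+
+@cli.command()
+def init():
+    """Create a new project."""
+    click.echo("Initialized.")
+
+@cli.command()
+def status():
+    """Show project status."""
+    click.echo("All clean.")
+
+if __name__ == "__main__":
+    cli()
+```
+
+`python tool.py init` dispatches to the `init` subcommand, and groups can be
+attached to other groups for arbitrarily deep nesting. Lazy loading (the
+third point) builds on the same mechanism, by subclassing `Group` and
+overriding `list_commands()` and `get_command()` to import subcommands on
+demand.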
+
+## A Simple Example
+
+```python
+import click
+
+@click.command()
+@click.option("--count", default=1, help="Number of greetings.")
+@click.option("--name", prompt="Your name", help="The person to greet.")
+def hello(count, name):
+ """Simple program that greets NAME for a total of COUNT times."""
+ for _ in range(count):
+ click.echo(f"Hello, {name}!")
+
+if __name__ == "__main__":
+ hello()
+```
+
+```
+$ python hello.py --count=3
+Your name: Click
+Hello, Click!
+Hello, Click!
+Hello, Click!
+```
+
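+The help page for the same program is generated automatically from the
+decorators and the docstring; it looks roughly like this:
+
+```
+$ python hello.py --help
+Usage: hello.py [OPTIONS]
+
+  Simple program that greets NAME for a total of COUNT times.
+
+Options:
+  --count INTEGER  Number of greetings.
+  --name TEXT      The person to greet.
+  --help           Show this message and exit.
+```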
+
+## Donate
+
+The Pallets organization develops and supports Click and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, [please
+donate today][].
+
+[please donate today]: https://palletsprojects.com/donate
+
+## Contributing
+
+See our [detailed contributing documentation][contrib] for many ways to
+contribute, including reporting issues, requesting features, asking or answering
+questions, and making PRs.
+
+[contrib]: https://palletsprojects.com/contributing/
+
diff --git a/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/RECORD b/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/RECORD
new file mode 100644
index 0000000..9a1cb36
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/RECORD
@@ -0,0 +1,40 @@
+click-8.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+click-8.3.0.dist-info/METADATA,sha256=P6vpEHZ_MLBt4SO2eB-QaadcOdiznkzaZtJImRo7_V4,2621
+click-8.3.0.dist-info/RECORD,,
+click-8.3.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+click-8.3.0.dist-info/licenses/LICENSE.txt,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475
+click/__init__.py,sha256=6YyS1aeyknZ0LYweWozNZy0A9nZ_11wmYIhv3cbQrYo,4473
+click/__pycache__/__init__.cpython-311.pyc,,
+click/__pycache__/_compat.cpython-311.pyc,,
+click/__pycache__/_termui_impl.cpython-311.pyc,,
+click/__pycache__/_textwrap.cpython-311.pyc,,
+click/__pycache__/_utils.cpython-311.pyc,,
+click/__pycache__/_winconsole.cpython-311.pyc,,
+click/__pycache__/core.cpython-311.pyc,,
+click/__pycache__/decorators.cpython-311.pyc,,
+click/__pycache__/exceptions.cpython-311.pyc,,
+click/__pycache__/formatting.cpython-311.pyc,,
+click/__pycache__/globals.cpython-311.pyc,,
+click/__pycache__/parser.cpython-311.pyc,,
+click/__pycache__/shell_completion.cpython-311.pyc,,
+click/__pycache__/termui.cpython-311.pyc,,
+click/__pycache__/testing.cpython-311.pyc,,
+click/__pycache__/types.cpython-311.pyc,,
+click/__pycache__/utils.cpython-311.pyc,,
+click/_compat.py,sha256=v3xBZkFbvA1BXPRkFfBJc6-pIwPI7345m-kQEnpVAs4,18693
+click/_termui_impl.py,sha256=ktpAHyJtNkhyR-x64CQFD6xJQI11fTA3qg2AV3iCToU,26799
+click/_textwrap.py,sha256=BOae0RQ6vg3FkNgSJyOoGzG1meGMxJ_ukWVZKx_v-0o,1400
+click/_utils.py,sha256=kZwtTf5gMuCilJJceS2iTCvRvCY-0aN5rJq8gKw7p8g,943
+click/_winconsole.py,sha256=_vxUuUaxwBhoR0vUWCNuHY8VUefiMdCIyU2SXPqoF-A,8465
+click/core.py,sha256=1A5T8UoAXklIGPTJ83_DJbVi35ehtJS2FTkP_wQ7es0,128855
+click/decorators.py,sha256=5P7abhJtAQYp_KHgjUvhMv464ERwOzrv2enNknlwHyQ,18461
+click/exceptions.py,sha256=8utf8w6V5hJXMnO_ic1FNrtbwuEn1NUu1aDwV8UqnG4,9954
+click/formatting.py,sha256=RVfwwr0rwWNpgGr8NaHodPzkIr7_tUyVh_nDdanLMNc,9730
+click/globals.py,sha256=gM-Nh6A4M0HB_SgkaF5M4ncGGMDHc_flHXu9_oh4GEU,1923
+click/parser.py,sha256=Q31pH0FlQZEq-UXE_ABRzlygEfvxPTuZbWNh4xfXmzw,19010
+click/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+click/shell_completion.py,sha256=Cc4GQUFuWpfQBa9sF5qXeeYI7n3tI_1k6ZdSn4BZbT0,20994
+click/termui.py,sha256=vAYrKC2a7f_NfEIhAThEVYfa__ib5XQbTSCGtJlABRA,30847
+click/testing.py,sha256=EERbzcl1br0mW0qBS9EqkknfNfXB9WQEW0ELIpkvuSs,19102
+click/types.py,sha256=ek54BNSFwPKsqtfT7jsqcc4WHui8AIFVMKM4oVZIXhc,39927
+click/utils.py,sha256=gCUoewdAhA-QLBUUHxrLh4uj6m7T1WjZZMNPvR0I7YA,20257
diff --git a/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/WHEEL b/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/WHEEL
new file mode 100644
index 0000000..d8b9936
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.12.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/licenses/LICENSE.txt b/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/licenses/LICENSE.txt
new file mode 100644
index 0000000..d12a849
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click-8.3.0.dist-info/licenses/LICENSE.txt
@@ -0,0 +1,28 @@
+Copyright 2014 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/tapdown/lib/python3.11/site-packages/click/__init__.py b/tapdown/lib/python3.11/site-packages/click/__init__.py
new file mode 100644
index 0000000..1aa547c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/__init__.py
@@ -0,0 +1,123 @@
+"""
+Click is a simple Python module inspired by the stdlib optparse to make
+writing command line scripts fun. Unlike other modules, it's based
+around a simple API that does not come with too much magic and is
+composable.
+"""
+
+from __future__ import annotations
+
+from .core import Argument as Argument
+from .core import Command as Command
+from .core import CommandCollection as CommandCollection
+from .core import Context as Context
+from .core import Group as Group
+from .core import Option as Option
+from .core import Parameter as Parameter
+from .decorators import argument as argument
+from .decorators import command as command
+from .decorators import confirmation_option as confirmation_option
+from .decorators import group as group
+from .decorators import help_option as help_option
+from .decorators import make_pass_decorator as make_pass_decorator
+from .decorators import option as option
+from .decorators import pass_context as pass_context
+from .decorators import pass_obj as pass_obj
+from .decorators import password_option as password_option
+from .decorators import version_option as version_option
+from .exceptions import Abort as Abort
+from .exceptions import BadArgumentUsage as BadArgumentUsage
+from .exceptions import BadOptionUsage as BadOptionUsage
+from .exceptions import BadParameter as BadParameter
+from .exceptions import ClickException as ClickException
+from .exceptions import FileError as FileError
+from .exceptions import MissingParameter as MissingParameter
+from .exceptions import NoSuchOption as NoSuchOption
+from .exceptions import UsageError as UsageError
+from .formatting import HelpFormatter as HelpFormatter
+from .formatting import wrap_text as wrap_text
+from .globals import get_current_context as get_current_context
+from .termui import clear as clear
+from .termui import confirm as confirm
+from .termui import echo_via_pager as echo_via_pager
+from .termui import edit as edit
+from .termui import getchar as getchar
+from .termui import launch as launch
+from .termui import pause as pause
+from .termui import progressbar as progressbar
+from .termui import prompt as prompt
+from .termui import secho as secho
+from .termui import style as style
+from .termui import unstyle as unstyle
+from .types import BOOL as BOOL
+from .types import Choice as Choice
+from .types import DateTime as DateTime
+from .types import File as File
+from .types import FLOAT as FLOAT
+from .types import FloatRange as FloatRange
+from .types import INT as INT
+from .types import IntRange as IntRange
+from .types import ParamType as ParamType
+from .types import Path as Path
+from .types import STRING as STRING
+from .types import Tuple as Tuple
+from .types import UNPROCESSED as UNPROCESSED
+from .types import UUID as UUID
+from .utils import echo as echo
+from .utils import format_filename as format_filename
+from .utils import get_app_dir as get_app_dir
+from .utils import get_binary_stream as get_binary_stream
+from .utils import get_text_stream as get_text_stream
+from .utils import open_file as open_file
+
+
+def __getattr__(name: str) -> object:
+ import warnings
+
+ if name == "BaseCommand":
+ from .core import _BaseCommand
+
+ warnings.warn(
+ "'BaseCommand' is deprecated and will be removed in Click 9.0. Use"
+ " 'Command' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _BaseCommand
+
+ if name == "MultiCommand":
+ from .core import _MultiCommand
+
+ warnings.warn(
+ "'MultiCommand' is deprecated and will be removed in Click 9.0. Use"
+ " 'Group' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _MultiCommand
+
+ if name == "OptionParser":
+ from .parser import _OptionParser
+
+ warnings.warn(
+ "'OptionParser' is deprecated and will be removed in Click 9.0. The"
+ " old parser is available in 'optparse'.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _OptionParser
+
+ if name == "__version__":
+ import importlib.metadata
+ import warnings
+
+ warnings.warn(
+ "The '__version__' attribute is deprecated and will be removed in"
+ " Click 9.1. Use feature detection or"
+ " 'importlib.metadata.version(\"click\")' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return importlib.metadata.version("click")
+
+ raise AttributeError(name)
diff --git a/tapdown/lib/python3.11/site-packages/click/_compat.py b/tapdown/lib/python3.11/site-packages/click/_compat.py
new file mode 100644
index 0000000..f2726b9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/_compat.py
@@ -0,0 +1,622 @@
+from __future__ import annotations
+
+import codecs
+import collections.abc as cabc
+import io
+import os
+import re
+import sys
+import typing as t
+from types import TracebackType
+from weakref import WeakKeyDictionary
+
+CYGWIN = sys.platform.startswith("cygwin")
+WIN = sys.platform.startswith("win")
+auto_wrap_for_ansi: t.Callable[[t.TextIO], t.TextIO] | None = None
+_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]")
+
+
+def _make_text_stream(
+ stream: t.BinaryIO,
+ encoding: str | None,
+ errors: str | None,
+ force_readable: bool = False,
+ force_writable: bool = False,
+) -> t.TextIO:
+ if encoding is None:
+ encoding = get_best_encoding(stream)
+ if errors is None:
+ errors = "replace"
+ return _NonClosingTextIOWrapper(
+ stream,
+ encoding,
+ errors,
+ line_buffering=True,
+ force_readable=force_readable,
+ force_writable=force_writable,
+ )
+
+
+def is_ascii_encoding(encoding: str) -> bool:
+ """Checks if a given encoding is ascii."""
+ try:
+ return codecs.lookup(encoding).name == "ascii"
+ except LookupError:
+ return False
+
+
+def get_best_encoding(stream: t.IO[t.Any]) -> str:
+ """Returns the default stream encoding if not found."""
+ rv = getattr(stream, "encoding", None) or sys.getdefaultencoding()
+ if is_ascii_encoding(rv):
+ return "utf-8"
+ return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+ def __init__(
+ self,
+ stream: t.BinaryIO,
+ encoding: str | None,
+ errors: str | None,
+ force_readable: bool = False,
+ force_writable: bool = False,
+ **extra: t.Any,
+ ) -> None:
+ self._stream = stream = t.cast(
+ t.BinaryIO, _FixupStream(stream, force_readable, force_writable)
+ )
+ super().__init__(stream, encoding, errors, **extra)
+
+ def __del__(self) -> None:
+ try:
+ self.detach()
+ except Exception:
+ pass
+
+ def isatty(self) -> bool:
+ # https://bitbucket.org/pypy/pypy/issue/1803
+ return self._stream.isatty()
+
+
+class _FixupStream:
+ """The new io interface needs more from streams than streams
+ traditionally implement. As such, this fix-up code is necessary in
+ some circumstances.
+
+    The forcing of the readable and writable flags is there because some
+    tools put badly patched objects on sys (one such offender being certain
+    versions of Jupyter Notebook).
+ """
+
+ def __init__(
+ self,
+ stream: t.BinaryIO,
+ force_readable: bool = False,
+ force_writable: bool = False,
+ ):
+ self._stream = stream
+ self._force_readable = force_readable
+ self._force_writable = force_writable
+
+ def __getattr__(self, name: str) -> t.Any:
+ return getattr(self._stream, name)
+
+ def read1(self, size: int) -> bytes:
+ f = getattr(self._stream, "read1", None)
+
+ if f is not None:
+ return t.cast(bytes, f(size))
+
+ return self._stream.read(size)
+
+ def readable(self) -> bool:
+ if self._force_readable:
+ return True
+ x = getattr(self._stream, "readable", None)
+ if x is not None:
+ return t.cast(bool, x())
+ try:
+ self._stream.read(0)
+ except Exception:
+ return False
+ return True
+
+ def writable(self) -> bool:
+ if self._force_writable:
+ return True
+ x = getattr(self._stream, "writable", None)
+ if x is not None:
+ return t.cast(bool, x())
+ try:
+            self._stream.write("")  # type: ignore
+ except Exception:
+ try:
+ self._stream.write(b"")
+ except Exception:
+ return False
+ return True
+
+ def seekable(self) -> bool:
+ x = getattr(self._stream, "seekable", None)
+ if x is not None:
+ return t.cast(bool, x())
+ try:
+ self._stream.seek(self._stream.tell())
+ except Exception:
+ return False
+ return True
+
+
+def _is_binary_reader(stream: t.IO[t.Any], default: bool = False) -> bool:
+ try:
+ return isinstance(stream.read(0), bytes)
+ except Exception:
+        # This happens in some cases where the stream was already
+        # closed. In this case, we assume the default.
+        return default
+
+
+def _is_binary_writer(stream: t.IO[t.Any], default: bool = False) -> bool:
+ try:
+ stream.write(b"")
+ except Exception:
+ try:
+ stream.write("")
+ return False
+ except Exception:
+ pass
+ return default
+ return True
+
+
+def _find_binary_reader(stream: t.IO[t.Any]) -> t.BinaryIO | None:
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_reader(stream, False):
+ return t.cast(t.BinaryIO, stream)
+
+ buf = getattr(stream, "buffer", None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_reader(buf, True):
+ return t.cast(t.BinaryIO, buf)
+
+ return None
+
+
+def _find_binary_writer(stream: t.IO[t.Any]) -> t.BinaryIO | None:
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_writer(stream, False):
+ return t.cast(t.BinaryIO, stream)
+
+ buf = getattr(stream, "buffer", None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_writer(buf, True):
+ return t.cast(t.BinaryIO, buf)
+
+ return None
+
+
+def _stream_is_misconfigured(stream: t.TextIO) -> bool:
+ """A stream is misconfigured if its encoding is ASCII."""
+ # If the stream does not have an encoding set, we assume it's set
+ # to ASCII. This appears to happen in certain unittest
+ # environments. It's not quite clear what the correct behavior is
+ # but this at least will force Click to recover somehow.
+ return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
+
+
+def _is_compat_stream_attr(stream: t.TextIO, attr: str, value: str | None) -> bool:
+ """A stream attribute is compatible if it is equal to the
+ desired value or the desired value is unset and the attribute
+ has a value.
+ """
+ stream_value = getattr(stream, attr, None)
+ return stream_value == value or (value is None and stream_value is not None)
+
+
+def _is_compatible_text_stream(
+ stream: t.TextIO, encoding: str | None, errors: str | None
+) -> bool:
+ """Check if a stream's encoding and errors attributes are
+ compatible with the desired values.
+ """
+ return _is_compat_stream_attr(
+ stream, "encoding", encoding
+ ) and _is_compat_stream_attr(stream, "errors", errors)
+
+
+def _force_correct_text_stream(
+ text_stream: t.IO[t.Any],
+ encoding: str | None,
+ errors: str | None,
+ is_binary: t.Callable[[t.IO[t.Any], bool], bool],
+ find_binary: t.Callable[[t.IO[t.Any]], t.BinaryIO | None],
+ force_readable: bool = False,
+ force_writable: bool = False,
+) -> t.TextIO:
+ if is_binary(text_stream, False):
+ binary_reader = t.cast(t.BinaryIO, text_stream)
+ else:
+ text_stream = t.cast(t.TextIO, text_stream)
+ # If the stream looks compatible, and won't default to a
+ # misconfigured ascii encoding, return it as-is.
+ if _is_compatible_text_stream(text_stream, encoding, errors) and not (
+ encoding is None and _stream_is_misconfigured(text_stream)
+ ):
+ return text_stream
+
+ # Otherwise, get the underlying binary reader.
+ possible_binary_reader = find_binary(text_stream)
+
+ # If that's not possible, silently use the original reader
+ # and get mojibake instead of exceptions.
+ if possible_binary_reader is None:
+ return text_stream
+
+ binary_reader = possible_binary_reader
+
+ # Default errors to replace instead of strict in order to get
+ # something that works.
+ if errors is None:
+ errors = "replace"
+
+ # Wrap the binary stream in a text stream with the correct
+ # encoding parameters.
+ return _make_text_stream(
+ binary_reader,
+ encoding,
+ errors,
+ force_readable=force_readable,
+ force_writable=force_writable,
+ )
+
+
+def _force_correct_text_reader(
+ text_reader: t.IO[t.Any],
+ encoding: str | None,
+ errors: str | None,
+ force_readable: bool = False,
+) -> t.TextIO:
+ return _force_correct_text_stream(
+ text_reader,
+ encoding,
+ errors,
+ _is_binary_reader,
+ _find_binary_reader,
+ force_readable=force_readable,
+ )
+
+
+def _force_correct_text_writer(
+ text_writer: t.IO[t.Any],
+ encoding: str | None,
+ errors: str | None,
+ force_writable: bool = False,
+) -> t.TextIO:
+ return _force_correct_text_stream(
+ text_writer,
+ encoding,
+ errors,
+ _is_binary_writer,
+ _find_binary_writer,
+ force_writable=force_writable,
+ )
+
+
+def get_binary_stdin() -> t.BinaryIO:
+ reader = _find_binary_reader(sys.stdin)
+ if reader is None:
+ raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
+ return reader
+
+
+def get_binary_stdout() -> t.BinaryIO:
+ writer = _find_binary_writer(sys.stdout)
+ if writer is None:
+ raise RuntimeError("Was not able to determine binary stream for sys.stdout.")
+ return writer
+
+
+def get_binary_stderr() -> t.BinaryIO:
+ writer = _find_binary_writer(sys.stderr)
+ if writer is None:
+ raise RuntimeError("Was not able to determine binary stream for sys.stderr.")
+ return writer
+
+
+def get_text_stdin(encoding: str | None = None, errors: str | None = None) -> t.TextIO:
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_reader(sys.stdin, encoding, errors, force_readable=True)
+
+
+def get_text_stdout(encoding: str | None = None, errors: str | None = None) -> t.TextIO:
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stdout, encoding, errors, force_writable=True)
+
+
+def get_text_stderr(encoding: str | None = None, errors: str | None = None) -> t.TextIO:
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(sys.stderr, encoding, errors, force_writable=True)
+
+
+def _wrap_io_open(
+ file: str | os.PathLike[str] | int,
+ mode: str,
+ encoding: str | None,
+ errors: str | None,
+) -> t.IO[t.Any]:
+ """Handles not passing ``encoding`` and ``errors`` in binary mode."""
+ if "b" in mode:
+ return open(file, mode)
+
+ return open(file, mode, encoding=encoding, errors=errors)
+
+
+def open_stream(
+ filename: str | os.PathLike[str],
+ mode: str = "r",
+ encoding: str | None = None,
+ errors: str | None = "strict",
+ atomic: bool = False,
+) -> tuple[t.IO[t.Any], bool]:
+ binary = "b" in mode
+ filename = os.fspath(filename)
+
+ # Standard streams first. These are simple because they ignore the
+ # atomic flag. Use fsdecode to handle Path("-").
+ if os.fsdecode(filename) == "-":
+ if any(m in mode for m in ["w", "a", "x"]):
+ if binary:
+ return get_binary_stdout(), False
+ return get_text_stdout(encoding=encoding, errors=errors), False
+ if binary:
+ return get_binary_stdin(), False
+ return get_text_stdin(encoding=encoding, errors=errors), False
+
+ # Non-atomic writes directly go out through the regular open functions.
+ if not atomic:
+ return _wrap_io_open(filename, mode, encoding, errors), True
+
+ # Some usability stuff for atomic writes
+ if "a" in mode:
+ raise ValueError(
+ "Appending to an existing file is not supported, because that"
+ " would involve an expensive `copy`-operation to a temporary"
+ " file. Open the file in normal `w`-mode and copy explicitly"
+ " if that's what you're after."
+ )
+ if "x" in mode:
+ raise ValueError("Use the `overwrite`-parameter instead.")
+ if "w" not in mode:
+ raise ValueError("Atomic writes only make sense with `w`-mode.")
+
+ # Atomic writes are more complicated. They work by opening a file
+ # as a proxy in the same folder and then using the fdopen
+ # functionality to wrap it in a Python file. Then we wrap it in an
+ # atomic file that moves the file over on close.
+ import errno
+ import random
+
+ try:
+ perm: int | None = os.stat(filename).st_mode
+ except OSError:
+ perm = None
+
+ flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
+
+ if binary:
+ flags |= getattr(os, "O_BINARY", 0)
+
+ while True:
+ tmp_filename = os.path.join(
+ os.path.dirname(filename),
+ f".__atomic-write{random.randrange(1 << 32):08x}",
+ )
+ try:
+ fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
+ break
+ except OSError as e:
+ if e.errno == errno.EEXIST or (
+ os.name == "nt"
+ and e.errno == errno.EACCES
+ and os.path.isdir(e.filename)
+ and os.access(e.filename, os.W_OK)
+ ):
+ continue
+ raise
+
+ if perm is not None:
+ os.chmod(tmp_filename, perm) # in case perm includes bits in umask
+
+ f = _wrap_io_open(fd, mode, encoding, errors)
+ af = _AtomicFile(f, tmp_filename, os.path.realpath(filename))
+ return t.cast(t.IO[t.Any], af), True
+
+
+class _AtomicFile:
+ def __init__(self, f: t.IO[t.Any], tmp_filename: str, real_filename: str) -> None:
+ self._f = f
+ self._tmp_filename = tmp_filename
+ self._real_filename = real_filename
+ self.closed = False
+
+ @property
+ def name(self) -> str:
+ return self._real_filename
+
+ def close(self, delete: bool = False) -> None:
+ if self.closed:
+ return
+ self._f.close()
+ os.replace(self._tmp_filename, self._real_filename)
+ self.closed = True
+
+ def __getattr__(self, name: str) -> t.Any:
+ return getattr(self._f, name)
+
+ def __enter__(self) -> _AtomicFile:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> None:
+ self.close(delete=exc_type is not None)
+
+ def __repr__(self) -> str:
+ return repr(self._f)
+
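+# Usage sketch for the atomic path above: writes land in a hidden temporary
+# file in the target's directory, and os.replace() renames it over the real
+# file on close, so readers never observe a half-written file. For example
+# (illustrative file name):
+#
+#     f, _should_close = open_stream("out.txt", "w", atomic=True)
+#     f.write("all or nothing\n")
+#     f.close()  # _AtomicFile.close() runs os.replace() here
+#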
+
+def strip_ansi(value: str) -> str:
+ return _ansi_re.sub("", value)
+
+
+def _is_jupyter_kernel_output(stream: t.IO[t.Any]) -> bool:
+ while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
+ stream = stream._stream
+
+ return stream.__class__.__module__.startswith("ipykernel.")
+
+
+def should_strip_ansi(
+ stream: t.IO[t.Any] | None = None, color: bool | None = None
+) -> bool:
+ if color is None:
+ if stream is None:
+ stream = sys.stdin
+ return not isatty(stream) and not _is_jupyter_kernel_output(stream)
+ return not color
+
+
+# On Windows, wrap the output streams with colorama to support ANSI
+# color codes.
+# NOTE: the double check is needed so mypy does not analyze this on Linux.
+if sys.platform.startswith("win") and WIN:
+ from ._winconsole import _get_windows_console_stream
+
+ def _get_argv_encoding() -> str:
+ import locale
+
+ return locale.getpreferredencoding()
+
+ _ansi_stream_wrappers: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
+
+ def auto_wrap_for_ansi(stream: t.TextIO, color: bool | None = None) -> t.TextIO:
+ """Support ANSI color and style codes on Windows by wrapping a
+ stream with colorama.
+ """
+ try:
+ cached = _ansi_stream_wrappers.get(stream)
+ except Exception:
+ cached = None
+
+ if cached is not None:
+ return cached
+
+ import colorama
+
+ strip = should_strip_ansi(stream, color)
+ ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
+ rv = t.cast(t.TextIO, ansi_wrapper.stream)
+ _write = rv.write
+
+ def _safe_write(s: str) -> int:
+ try:
+ return _write(s)
+ except BaseException:
+ ansi_wrapper.reset_all()
+ raise
+
+ rv.write = _safe_write # type: ignore[method-assign]
+
+ try:
+ _ansi_stream_wrappers[stream] = rv
+ except Exception:
+ pass
+
+ return rv
+
+else:
+
+ def _get_argv_encoding() -> str:
+ return getattr(sys.stdin, "encoding", None) or sys.getfilesystemencoding()
+
+ def _get_windows_console_stream(
+ f: t.TextIO, encoding: str | None, errors: str | None
+ ) -> t.TextIO | None:
+ return None
+
+
+def term_len(x: str) -> int:
+ return len(strip_ansi(x))
+
+
+def isatty(stream: t.IO[t.Any]) -> bool:
+ try:
+ return stream.isatty()
+ except Exception:
+ return False
+
+
+def _make_cached_stream_func(
+ src_func: t.Callable[[], t.TextIO | None],
+ wrapper_func: t.Callable[[], t.TextIO],
+) -> t.Callable[[], t.TextIO | None]:
+ cache: cabc.MutableMapping[t.TextIO, t.TextIO] = WeakKeyDictionary()
+
+ def func() -> t.TextIO | None:
+ stream = src_func()
+
+ if stream is None:
+ return None
+
+ try:
+ rv = cache.get(stream)
+ except Exception:
+ rv = None
+ if rv is not None:
+ return rv
+ rv = wrapper_func()
+ try:
+ cache[stream] = rv
+ except Exception:
+ pass
+ return rv
+
+ return func
+
+
+_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
+_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
+_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
+
+
+binary_streams: cabc.Mapping[str, t.Callable[[], t.BinaryIO]] = {
+ "stdin": get_binary_stdin,
+ "stdout": get_binary_stdout,
+ "stderr": get_binary_stderr,
+}
+
+text_streams: cabc.Mapping[str, t.Callable[[str | None, str | None], t.TextIO]] = {
+ "stdin": get_text_stdin,
+ "stdout": get_text_stdout,
+ "stderr": get_text_stderr,
+}
diff --git a/tapdown/lib/python3.11/site-packages/click/_termui_impl.py b/tapdown/lib/python3.11/site-packages/click/_termui_impl.py
new file mode 100644
index 0000000..47f87b8
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/_termui_impl.py
@@ -0,0 +1,847 @@
+"""
+This module contains implementations for the termui module. To keep the
+import time of Click down, some infrequently used functionality is
+placed in this module and only imported as needed.
+"""
+
+from __future__ import annotations
+
+import collections.abc as cabc
+import contextlib
+import math
+import os
+import shlex
+import sys
+import time
+import typing as t
+from gettext import gettext as _
+from io import StringIO
+from pathlib import Path
+from types import TracebackType
+
+from ._compat import _default_text_stdout
+from ._compat import CYGWIN
+from ._compat import get_best_encoding
+from ._compat import isatty
+from ._compat import open_stream
+from ._compat import strip_ansi
+from ._compat import term_len
+from ._compat import WIN
+from .exceptions import ClickException
+from .utils import echo
+
+V = t.TypeVar("V")
+
+if os.name == "nt":
+ BEFORE_BAR = "\r"
+ AFTER_BAR = "\n"
+else:
+ BEFORE_BAR = "\r\033[?25l"
+ AFTER_BAR = "\033[?25h\n"
+
+
+class ProgressBar(t.Generic[V]):
+ def __init__(
+ self,
+ iterable: cabc.Iterable[V] | None,
+ length: int | None = None,
+ fill_char: str = "#",
+ empty_char: str = " ",
+ bar_template: str = "%(bar)s",
+ info_sep: str = " ",
+ hidden: bool = False,
+ show_eta: bool = True,
+ show_percent: bool | None = None,
+ show_pos: bool = False,
+ item_show_func: t.Callable[[V | None], str | None] | None = None,
+ label: str | None = None,
+ file: t.TextIO | None = None,
+ color: bool | None = None,
+ update_min_steps: int = 1,
+ width: int = 30,
+ ) -> None:
+ self.fill_char = fill_char
+ self.empty_char = empty_char
+ self.bar_template = bar_template
+ self.info_sep = info_sep
+ self.hidden = hidden
+ self.show_eta = show_eta
+ self.show_percent = show_percent
+ self.show_pos = show_pos
+ self.item_show_func = item_show_func
+ self.label: str = label or ""
+
+ if file is None:
+ file = _default_text_stdout()
+
+ # There are no standard streams attached to write to. For example,
+ # pythonw on Windows.
+ if file is None:
+ file = StringIO()
+
+ self.file = file
+ self.color = color
+ self.update_min_steps = update_min_steps
+ self._completed_intervals = 0
+ self.width: int = width
+ self.autowidth: bool = width == 0
+
+ if length is None:
+ from operator import length_hint
+
+ length = length_hint(iterable, -1)
+
+ if length == -1:
+ length = None
+ if iterable is None:
+ if length is None:
+ raise TypeError("iterable or length is required")
+ iterable = t.cast("cabc.Iterable[V]", range(length))
+ self.iter: cabc.Iterable[V] = iter(iterable)
+ self.length = length
+ self.pos: int = 0
+ self.avg: list[float] = []
+ self.last_eta: float
+ self.start: float
+ self.start = self.last_eta = time.time()
+ self.eta_known: bool = False
+ self.finished: bool = False
+ self.max_width: int | None = None
+ self.entered: bool = False
+ self.current_item: V | None = None
+ self._is_atty = isatty(self.file)
+ self._last_line: str | None = None
+
+ def __enter__(self) -> ProgressBar[V]:
+ self.entered = True
+ self.render_progress()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> None:
+ self.render_finish()
+
+ def __iter__(self) -> cabc.Iterator[V]:
+ if not self.entered:
+ raise RuntimeError("You need to use progress bars in a with block.")
+ self.render_progress()
+ return self.generator()
+
+ def __next__(self) -> V:
+ # Iteration is defined in terms of a generator function,
+ # returned by iter(self); use that to define next(). This works
+ # because `self.iter` is an iterable consumed by that generator,
+ # so it is re-entry safe. Calling `next(self.generator())`
+ # twice works and does "what you want".
+ return next(iter(self))
+
+ def render_finish(self) -> None:
+ if self.hidden or not self._is_atty:
+ return
+ self.file.write(AFTER_BAR)
+ self.file.flush()
+
+ @property
+ def pct(self) -> float:
+ if self.finished:
+ return 1.0
+ return min(self.pos / (float(self.length or 1) or 1), 1.0)
+
+ @property
+ def time_per_iteration(self) -> float:
+ if not self.avg:
+ return 0.0
+ return sum(self.avg) / float(len(self.avg))
+
+ @property
+ def eta(self) -> float:
+ if self.length is not None and not self.finished:
+ return self.time_per_iteration * (self.length - self.pos)
+ return 0.0
+
+ def format_eta(self) -> str:
+ if self.eta_known:
+ t = int(self.eta)
+ seconds = t % 60
+ t //= 60
+ minutes = t % 60
+ t //= 60
+ hours = t % 24
+ t //= 24
+ if t > 0:
+ return f"{t}d {hours:02}:{minutes:02}:{seconds:02}"
+ else:
+ return f"{hours:02}:{minutes:02}:{seconds:02}"
+ return ""
+
+ def format_pos(self) -> str:
+ pos = str(self.pos)
+ if self.length is not None:
+ pos += f"/{self.length}"
+ return pos
+
+ def format_pct(self) -> str:
+ return f"{int(self.pct * 100): 4}%"[1:]
+
+ def format_bar(self) -> str:
+ if self.length is not None:
+ bar_length = int(self.pct * self.width)
+ bar = self.fill_char * bar_length
+ bar += self.empty_char * (self.width - bar_length)
+ elif self.finished:
+ bar = self.fill_char * self.width
+ else:
+ chars = list(self.empty_char * (self.width or 1))
+ if self.time_per_iteration != 0:
+ chars[
+ int(
+ (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
+ * self.width
+ )
+ ] = self.fill_char
+ bar = "".join(chars)
+ return bar
+
+ def format_progress_line(self) -> str:
+ show_percent = self.show_percent
+
+ info_bits = []
+ if self.length is not None and show_percent is None:
+ show_percent = not self.show_pos
+
+ if self.show_pos:
+ info_bits.append(self.format_pos())
+ if show_percent:
+ info_bits.append(self.format_pct())
+ if self.show_eta and self.eta_known and not self.finished:
+ info_bits.append(self.format_eta())
+ if self.item_show_func is not None:
+ item_info = self.item_show_func(self.current_item)
+ if item_info is not None:
+ info_bits.append(item_info)
+
+ return (
+ self.bar_template
+ % {
+ "label": self.label,
+ "bar": self.format_bar(),
+ "info": self.info_sep.join(info_bits),
+ }
+ ).rstrip()
+
+ def render_progress(self) -> None:
+ if self.hidden:
+ return
+
+ if not self._is_atty:
+ # Only output the label once if the output is not a TTY.
+ if self._last_line != self.label:
+ self._last_line = self.label
+ echo(self.label, file=self.file, color=self.color)
+ return
+
+ buf = []
+ # Update width in case the terminal has been resized
+ if self.autowidth:
+ import shutil
+
+ old_width = self.width
+ self.width = 0
+ clutter_length = term_len(self.format_progress_line())
+ new_width = max(0, shutil.get_terminal_size().columns - clutter_length)
+ if new_width < old_width and self.max_width is not None:
+ buf.append(BEFORE_BAR)
+ buf.append(" " * self.max_width)
+ self.max_width = new_width
+ self.width = new_width
+
+ clear_width = self.width
+ if self.max_width is not None:
+ clear_width = self.max_width
+
+ buf.append(BEFORE_BAR)
+ line = self.format_progress_line()
+ line_len = term_len(line)
+ if self.max_width is None or self.max_width < line_len:
+ self.max_width = line_len
+
+ buf.append(line)
+ buf.append(" " * (clear_width - line_len))
+ line = "".join(buf)
+ # Render the line only if it changed.
+
+ if line != self._last_line:
+ self._last_line = line
+ echo(line, file=self.file, color=self.color, nl=False)
+ self.file.flush()
+
+ def make_step(self, n_steps: int) -> None:
+ self.pos += n_steps
+ if self.length is not None and self.pos >= self.length:
+ self.finished = True
+
+ if (time.time() - self.last_eta) < 1.0:
+ return
+
+ self.last_eta = time.time()
+
+        # self.avg is a rolling list of at most 7 samples, where each sample
+        # is the average time per step so far: the time elapsed divided by
+        # the current position.
+ if self.pos:
+ step = (time.time() - self.start) / self.pos
+ else:
+ step = time.time() - self.start
+
+ self.avg = self.avg[-6:] + [step]
+
+ self.eta_known = self.length is not None
+
+ def update(self, n_steps: int, current_item: V | None = None) -> None:
+ """Update the progress bar by advancing a specified number of
+ steps, and optionally set the ``current_item`` for this new
+ position.
+
+ :param n_steps: Number of steps to advance.
+ :param current_item: Optional item to set as ``current_item``
+ for the updated position.
+
+ .. versionchanged:: 8.0
+ Added the ``current_item`` optional parameter.
+
+ .. versionchanged:: 8.0
+ Only render when the number of steps meets the
+ ``update_min_steps`` threshold.
+ """
+ if current_item is not None:
+ self.current_item = current_item
+
+ self._completed_intervals += n_steps
+
+ if self._completed_intervals >= self.update_min_steps:
+ self.make_step(self._completed_intervals)
+ self.render_progress()
+ self._completed_intervals = 0
+
+ def finish(self) -> None:
+ self.eta_known = False
+ self.current_item = None
+ self.finished = True
+
+ def generator(self) -> cabc.Iterator[V]:
+ """Return a generator which yields the items added to the bar
+ during construction, and updates the progress bar *after* the
+ yielded block returns.
+ """
+ # WARNING: the iterator interface for `ProgressBar` relies on
+ # this and only works because this is a simple generator which
+ # doesn't create or manage additional state. If this function
+ # changes, the impact should be evaluated both against
+ # `iter(bar)` and `next(bar)`. `next()` in particular may call
+ # `self.generator()` repeatedly, and this must remain safe in
+ # order for that interface to work.
+ if not self.entered:
+ raise RuntimeError("You need to use progress bars in a with block.")
+
+ if not self._is_atty:
+ yield from self.iter
+ else:
+ for rv in self.iter:
+ self.current_item = rv
+
+ # This allows show_item_func to be updated before the
+ # item is processed. Only trigger at the beginning of
+ # the update interval.
+ if self._completed_intervals == 0:
+ self.render_progress()
+
+ yield rv
+ self.update(1)
+
+ self.finish()
+ self.render_progress()
+
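+# Usage sketch: the public `click.progressbar` helper (defined in termui)
+# constructs this class, and the `entered` flag above is why it must be
+# used as a context manager:
+#
+#     import time
+#     import click
+#
+#     with click.progressbar(range(100), label="Processing") as bar:
+#         for _ in bar:
+#             time.sleep(0.01)  # each loop iteration advances the bar
+#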
+
+def pager(generator: cabc.Iterable[str], color: bool | None = None) -> None:
+ """Decide what method to use for paging through text."""
+ stdout = _default_text_stdout()
+
+ # There are no standard streams attached to write to. For example,
+ # pythonw on Windows.
+ if stdout is None:
+ stdout = StringIO()
+
+ if not isatty(sys.stdin) or not isatty(stdout):
+ return _nullpager(stdout, generator, color)
+
+ # Split and normalize the pager command into parts.
+ pager_cmd_parts = shlex.split(os.environ.get("PAGER", ""), posix=False)
+ if pager_cmd_parts:
+ if WIN:
+ if _tempfilepager(generator, pager_cmd_parts, color):
+ return
+ elif _pipepager(generator, pager_cmd_parts, color):
+ return
+
+ if os.environ.get("TERM") in ("dumb", "emacs"):
+ return _nullpager(stdout, generator, color)
+ if (WIN or sys.platform.startswith("os2")) and _tempfilepager(
+ generator, ["more"], color
+ ):
+ return
+ if _pipepager(generator, ["less"], color):
+ return
+
+ import tempfile
+
+ fd, filename = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ if _pipepager(generator, ["more"], color):
+ return
+ return _nullpager(stdout, generator, color)
+ finally:
+ os.unlink(filename)
+
+
+def _pipepager(
+ generator: cabc.Iterable[str], cmd_parts: list[str], color: bool | None
+) -> bool:
+ """Page through text by feeding it to another program. Invoking a
+ pager through this might support colors.
+
+ Returns `True` if the command was found, `False` otherwise and thus another
+ pager should be attempted.
+ """
+ # Split the command into the invoked CLI and its parameters.
+ if not cmd_parts:
+ return False
+
+ import shutil
+
+ cmd = cmd_parts[0]
+ cmd_params = cmd_parts[1:]
+
+ cmd_filepath = shutil.which(cmd)
+ if not cmd_filepath:
+ return False
+ # Resolves symlinks and produces a normalized absolute path string.
+ cmd_path = Path(cmd_filepath).resolve()
+ cmd_name = cmd_path.name
+
+ import subprocess
+
+ # Make a local copy of the environment to not affect the global one.
+ env = dict(os.environ)
+
+    # If we're piping to less and the user hasn't decided on colors, we enable
+    # them by default if we find the -R flag in the command line arguments.
+ if color is None and cmd_name == "less":
+ less_flags = f"{os.environ.get('LESS', '')}{' '.join(cmd_params)}"
+ if not less_flags:
+ env["LESS"] = "-R"
+ color = True
+ elif "r" in less_flags or "R" in less_flags:
+ color = True
+
+ c = subprocess.Popen(
+ [str(cmd_path)] + cmd_params,
+ shell=True,
+ stdin=subprocess.PIPE,
+ env=env,
+ errors="replace",
+ text=True,
+ )
+ assert c.stdin is not None
+ try:
+ for text in generator:
+ if not color:
+ text = strip_ansi(text)
+
+ c.stdin.write(text)
+ except BrokenPipeError:
+ # In case the pager exited unexpectedly, ignore the broken pipe error.
+ pass
+ except Exception as e:
+ # In case there is an exception we want to close the pager immediately
+ # and let the caller handle it.
+ # Otherwise the pager will keep running, and the user may not notice
+ # the error message, or worse yet it may leave the terminal in a broken state.
+ c.terminate()
+ raise e
+ finally:
+ # We must close stdin and wait for the pager to exit before we continue
+ try:
+ c.stdin.close()
+ # Close implies flush, so it might throw a BrokenPipeError if the pager
+ # process exited already.
+ except BrokenPipeError:
+ pass
+
+ # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+ # search or other commands inside less).
+ #
+ # That means when the user hits ^C, the parent process (click) terminates,
+ # but less is still alive, paging the output and messing up the terminal.
+ #
+ # If the user wants to make the pager exit on ^C, they should set
+ # `LESS='-K'`. It's not our decision to make.
+ while True:
+ try:
+ c.wait()
+ except KeyboardInterrupt:
+ pass
+ else:
+ break
+
+ return True
+
+
+def _tempfilepager(
+ generator: cabc.Iterable[str], cmd_parts: list[str], color: bool | None
+) -> bool:
+ """Page through text by invoking a program on a temporary file.
+
+ Returns `True` if the command was found, `False` otherwise and thus another
+ pager should be attempted.
+ """
+ # Split the command into the invoked CLI and its parameters.
+ if not cmd_parts:
+ return False
+
+ import shutil
+
+ cmd = cmd_parts[0]
+
+ cmd_filepath = shutil.which(cmd)
+ if not cmd_filepath:
+ return False
+ # Resolves symlinks and produces a normalized absolute path string.
+ cmd_path = Path(cmd_filepath).resolve()
+
+ import subprocess
+ import tempfile
+
+ fd, filename = tempfile.mkstemp()
+ # TODO: This never terminates if the passed generator never terminates.
+ text = "".join(generator)
+ if not color:
+ text = strip_ansi(text)
+ encoding = get_best_encoding(sys.stdout)
+ with open_stream(filename, "wb")[0] as f:
+ f.write(text.encode(encoding))
+ try:
+ subprocess.call([str(cmd_path), filename])
+ except OSError:
+ # Command not found
+ pass
+ finally:
+ os.close(fd)
+ os.unlink(filename)
+
+ return True
+
+
+def _nullpager(
+ stream: t.TextIO, generator: cabc.Iterable[str], color: bool | None
+) -> None:
+ """Simply print unformatted text. This is the ultimate fallback."""
+ for text in generator:
+ if not color:
+ text = strip_ansi(text)
+ stream.write(text)
+
+
+class Editor:
+ def __init__(
+ self,
+ editor: str | None = None,
+ env: cabc.Mapping[str, str] | None = None,
+ require_save: bool = True,
+ extension: str = ".txt",
+ ) -> None:
+ self.editor = editor
+ self.env = env
+ self.require_save = require_save
+ self.extension = extension
+
+ def get_editor(self) -> str:
+ if self.editor is not None:
+ return self.editor
+ for key in "VISUAL", "EDITOR":
+ rv = os.environ.get(key)
+ if rv:
+ return rv
+ if WIN:
+ return "notepad"
+
+ from shutil import which
+
+ for editor in "sensible-editor", "vim", "nano":
+ if which(editor) is not None:
+ return editor
+ return "vi"
+
+ def edit_files(self, filenames: cabc.Iterable[str]) -> None:
+ import subprocess
+
+ editor = self.get_editor()
+ environ: dict[str, str] | None = None
+
+ if self.env:
+ environ = os.environ.copy()
+ environ.update(self.env)
+
+ exc_filename = " ".join(f'"{filename}"' for filename in filenames)
+
+ try:
+ c = subprocess.Popen(
+ args=f"{editor} {exc_filename}", env=environ, shell=True
+ )
+ exit_code = c.wait()
+ if exit_code != 0:
+ raise ClickException(
+ _("{editor}: Editing failed").format(editor=editor)
+ )
+ except OSError as e:
+ raise ClickException(
+ _("{editor}: Editing failed: {e}").format(editor=editor, e=e)
+ ) from e
+
+ @t.overload
+ def edit(self, text: bytes | bytearray) -> bytes | None: ...
+
+ # We cannot know whether or not the type expected is str or bytes when None
+ # is passed, so str is returned as that was what was done before.
+ @t.overload
+ def edit(self, text: str | None) -> str | None: ...
+
+ def edit(self, text: str | bytes | bytearray | None) -> str | bytes | None:
+ import tempfile
+
+ if text is None:
+ data: bytes | bytearray = b""
+ elif isinstance(text, (bytes, bytearray)):
+ data = text
+ else:
+ if text and not text.endswith("\n"):
+ text += "\n"
+
+ if WIN:
+ data = text.replace("\n", "\r\n").encode("utf-8-sig")
+ else:
+ data = text.encode("utf-8")
+
+ fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
+ f: t.BinaryIO
+
+ try:
+ with os.fdopen(fd, "wb") as f:
+ f.write(data)
+
+ # If the filesystem resolution is 1 second, like Mac OS
+ # 10.12 Extended, or 2 seconds, like FAT32, and the editor
+ # closes very fast, require_save can fail. Set the modified
+ # time to be 2 seconds in the past to work around this.
+ os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))
+ # Depending on the resolution, the exact value might not be
+ # recorded, so get the new recorded value.
+ timestamp = os.path.getmtime(name)
+
+ self.edit_files((name,))
+
+ if self.require_save and os.path.getmtime(name) == timestamp:
+ return None
+
+ with open(name, "rb") as f:
+ rv = f.read()
+
+ if isinstance(text, (bytes, bytearray)):
+ return rv
+
+ return rv.decode("utf-8-sig").replace("\r\n", "\n")
+ finally:
+ os.unlink(name)
+
+
+def open_url(url: str, wait: bool = False, locate: bool = False) -> int:
+ import subprocess
+
+ def _unquote_file(url: str) -> str:
+ from urllib.parse import unquote
+
+ if url.startswith("file://"):
+ url = unquote(url[7:])
+
+ return url
+
+ if sys.platform == "darwin":
+ args = ["open"]
+ if wait:
+ args.append("-W")
+ if locate:
+ args.append("-R")
+ args.append(_unquote_file(url))
+ null = open("/dev/null", "w")
+ try:
+ return subprocess.Popen(args, stderr=null).wait()
+ finally:
+ null.close()
+ elif WIN:
+ if locate:
+ url = _unquote_file(url)
+ args = ["explorer", f"/select,{url}"]
+ else:
+ args = ["start"]
+ if wait:
+ args.append("/WAIT")
+ args.append("")
+ args.append(url)
+ try:
+ return subprocess.call(args)
+ except OSError:
+ # Command not found
+ return 127
+ elif CYGWIN:
+ if locate:
+ url = _unquote_file(url)
+ args = ["cygstart", os.path.dirname(url)]
+ else:
+ args = ["cygstart"]
+ if wait:
+ args.append("-w")
+ args.append(url)
+ try:
+ return subprocess.call(args)
+ except OSError:
+ # Command not found
+ return 127
+
+ try:
+ if locate:
+ url = os.path.dirname(_unquote_file(url)) or "."
+ else:
+ url = _unquote_file(url)
+ c = subprocess.Popen(["xdg-open", url])
+ if wait:
+ return c.wait()
+ return 0
+ except OSError:
+ if url.startswith(("http://", "https://")) and not locate and not wait:
+ import webbrowser
+
+ webbrowser.open(url)
+ return 0
+ return 1
+
+
+def _translate_ch_to_exc(ch: str) -> None:
+ if ch == "\x03":
+ raise KeyboardInterrupt()
+
+ if ch == "\x04" and not WIN: # Unix-like, Ctrl+D
+ raise EOFError()
+
+ if ch == "\x1a" and WIN: # Windows, Ctrl+Z
+ raise EOFError()
+
+ return None
+
+
+if sys.platform == "win32":
+ import msvcrt
+
+ @contextlib.contextmanager
+ def raw_terminal() -> cabc.Iterator[int]:
+ yield -1
+
+ def getchar(echo: bool) -> str:
+ # The function `getch` will return a bytes object corresponding to
+ # the pressed character. Since Windows 10 build 1803, it will also
+ # return \x00 when called a second time after pressing a regular key.
+ #
+ # `getwch` does not share this probably-bugged behavior. Moreover, it
+ # returns a Unicode object by default, which is what we want.
+ #
+ # Either of these functions will return \x00 or \xe0 to indicate
+ # a special key, and you need to call the same function again to get
+ # the "rest" of the code. The fun part is that \u00e0 is
+ # "latin small letter a with grave", so if you type that on a French
+ # keyboard, you _also_ get a \xe0.
+ # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
+ # resulting Unicode string reads as "a with grave" + "capital H".
+ # This is indistinguishable from when the user actually types
+ # "a with grave" and then "capital H".
+ #
+ # When \xe0 is returned, we assume it's part of a special-key sequence
+ # and call `getwch` again, but that means that when the user types
+ # the \u00e0 character, `getchar` doesn't return until a second
+ # character is typed.
+ # The alternative is returning immediately, but that would mess up
+ # cross-platform handling of arrow keys and others that start with
+ # \xe0. Another option is using `getch`, but then we can't reliably
+ # read non-ASCII characters, because return values of `getch` are
+ # limited to the current 8-bit codepage.
+ #
+ # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
+ # is doing the right thing in more situations than with `getch`.
+
+ if echo:
+ func = t.cast(t.Callable[[], str], msvcrt.getwche)
+ else:
+ func = t.cast(t.Callable[[], str], msvcrt.getwch)
+
+ rv = func()
+
+ if rv in ("\x00", "\xe0"):
+ # \x00 and \xe0 are control characters that indicate special key,
+ # see above.
+ rv += func()
+
+ _translate_ch_to_exc(rv)
+ return rv
+
+else:
+ import termios
+ import tty
+
+ @contextlib.contextmanager
+ def raw_terminal() -> cabc.Iterator[int]:
+ f: t.TextIO | None
+ fd: int
+
+ if not isatty(sys.stdin):
+ f = open("/dev/tty")
+ fd = f.fileno()
+ else:
+ fd = sys.stdin.fileno()
+ f = None
+
+ try:
+ old_settings = termios.tcgetattr(fd)
+
+ try:
+ tty.setraw(fd)
+ yield fd
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ sys.stdout.flush()
+
+ if f is not None:
+ f.close()
+ except termios.error:
+ pass
+
+ def getchar(echo: bool) -> str:
+ with raw_terminal() as fd:
+ ch = os.read(fd, 32).decode(get_best_encoding(sys.stdin), "replace")
+
+ if echo and isatty(sys.stdout):
+ sys.stdout.write(ch)
+
+ _translate_ch_to_exc(ch)
+ return ch
diff --git a/tapdown/lib/python3.11/site-packages/click/_textwrap.py b/tapdown/lib/python3.11/site-packages/click/_textwrap.py
new file mode 100644
index 0000000..97fbee3
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/_textwrap.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import textwrap
+from contextlib import contextmanager
+
+
+class TextWrapper(textwrap.TextWrapper):
+ def _handle_long_word(
+ self,
+ reversed_chunks: list[str],
+ cur_line: list[str],
+ cur_len: int,
+ width: int,
+ ) -> None:
+ space_left = max(width - cur_len, 1)
+
+ if self.break_long_words:
+ last = reversed_chunks[-1]
+ cut = last[:space_left]
+ res = last[space_left:]
+ cur_line.append(cut)
+ reversed_chunks[-1] = res
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ @contextmanager
+ def extra_indent(self, indent: str) -> cabc.Iterator[None]:
+ old_initial_indent = self.initial_indent
+ old_subsequent_indent = self.subsequent_indent
+ self.initial_indent += indent
+ self.subsequent_indent += indent
+
+ try:
+ yield
+ finally:
+ self.initial_indent = old_initial_indent
+ self.subsequent_indent = old_subsequent_indent
+
+ def indent_only(self, text: str) -> str:
+ rv = []
+
+ for idx, line in enumerate(text.splitlines()):
+ indent = self.initial_indent
+
+ if idx > 0:
+ indent = self.subsequent_indent
+
+ rv.append(f"{indent}{line}")
+
+ return "\n".join(rv)
diff --git a/tapdown/lib/python3.11/site-packages/click/_utils.py b/tapdown/lib/python3.11/site-packages/click/_utils.py
new file mode 100644
index 0000000..09fb008
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/_utils.py
@@ -0,0 +1,36 @@
+from __future__ import annotations
+
+import enum
+import typing as t
+
+
+class Sentinel(enum.Enum):
+ """Enum used to define sentinel values.
+
+ .. seealso::
+
+        `PEP 661 - Sentinel Values <https://peps.python.org/pep-0661/>`_.
+ """
+
+ UNSET = object()
+ FLAG_NEEDS_VALUE = object()
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}.{self.name}"
+
+
+UNSET = Sentinel.UNSET
+"""Sentinel used to indicate that a value is not set."""
+
+FLAG_NEEDS_VALUE = Sentinel.FLAG_NEEDS_VALUE
+"""Sentinel used to indicate an option was passed as a flag without a
+value but is not a flag option.
+
+``Option.consume_value`` uses this to prompt or use the ``flag_value``.
+"""
+
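+# Usage sketch: a unique sentinel distinguishes "argument omitted" from an
+# explicit None, which None itself cannot (hypothetical helper, for
+# illustration only):
+#
+#     def resolve(value: object = UNSET) -> object:
+#         if value is UNSET:  # nothing was passed at all
+#             return "fallback"
+#         return value  # an explicit None flows through
+#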
+T_UNSET = t.Literal[UNSET] # type: ignore[valid-type]
+"""Type hint for the :data:`UNSET` sentinel value."""
+
+T_FLAG_NEEDS_VALUE = t.Literal[FLAG_NEEDS_VALUE] # type: ignore[valid-type]
+"""Type hint for the :data:`FLAG_NEEDS_VALUE` sentinel value."""
diff --git a/tapdown/lib/python3.11/site-packages/click/_winconsole.py b/tapdown/lib/python3.11/site-packages/click/_winconsole.py
new file mode 100644
index 0000000..e56c7c6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/_winconsole.py
@@ -0,0 +1,296 @@
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion to issue1602 in the Python bug tracker.
+#
+# There are some general differences in regards to how this works
+# compared to the original patches as we do not need to patch
+# the entire interpreter but just work in our little world of
+# echo and prompt.
+from __future__ import annotations
+
+import collections.abc as cabc
+import io
+import sys
+import time
+import typing as t
+from ctypes import Array
+from ctypes import byref
+from ctypes import c_char
+from ctypes import c_char_p
+from ctypes import c_int
+from ctypes import c_ssize_t
+from ctypes import c_ulong
+from ctypes import c_void_p
+from ctypes import POINTER
+from ctypes import py_object
+from ctypes import Structure
+from ctypes.wintypes import DWORD
+from ctypes.wintypes import HANDLE
+from ctypes.wintypes import LPCWSTR
+from ctypes.wintypes import LPWSTR
+
+from ._compat import _NonClosingTextIOWrapper
+
+assert sys.platform == "win32"
+import msvcrt # noqa: E402
+from ctypes import windll # noqa: E402
+from ctypes import WINFUNCTYPE # noqa: E402
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetConsoleMode = kernel32.GetConsoleMode
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+ ("CommandLineToArgvW", windll.shell32)
+)
+LocalFree = WINFUNCTYPE(c_void_p, c_void_p)(("LocalFree", windll.kernel32))
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b"\x1a"
+MAX_BYTES_WRITTEN = 32767
+
+if t.TYPE_CHECKING:
+ try:
+        # Prefer `collections.abc.Buffer` (Python 3.12+); fall back to
+        # `typing_extensions.Buffer` on older versions.
+ from collections.abc import Buffer # type: ignore
+ except ImportError:
+ from typing_extensions import Buffer
+
+try:
+ from ctypes import pythonapi
+except ImportError:
+ # On PyPy we cannot get buffers so our ability to operate here is
+ # severely limited.
+ get_buffer = None
+else:
+
+ class Py_buffer(Structure):
+ _fields_ = [ # noqa: RUF012
+ ("buf", c_void_p),
+ ("obj", py_object),
+ ("len", c_ssize_t),
+ ("itemsize", c_ssize_t),
+ ("readonly", c_int),
+ ("ndim", c_int),
+ ("format", c_char_p),
+ ("shape", c_ssize_p),
+ ("strides", c_ssize_p),
+ ("suboffsets", c_ssize_p),
+ ("internal", c_void_p),
+ ]
+
+ PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+ PyBuffer_Release = pythonapi.PyBuffer_Release
+
+ def get_buffer(obj: Buffer, writable: bool = False) -> Array[c_char]:
+ buf = Py_buffer()
+ flags: int = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+ PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+
+ try:
+ buffer_type = c_char * buf.len
+ out: Array[c_char] = buffer_type.from_address(buf.buf)
+ return out
+ finally:
+ PyBuffer_Release(byref(buf))
+
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+ def __init__(self, handle: int | None) -> None:
+ self.handle = handle
+
+ def isatty(self) -> t.Literal[True]:
+ super().isatty()
+ return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+ def readable(self) -> t.Literal[True]:
+ return True
+
+ def readinto(self, b: Buffer) -> int:
+ bytes_to_be_read = len(b)
+ if not bytes_to_be_read:
+ return 0
+ elif bytes_to_be_read % 2:
+ raise ValueError(
+ "cannot read odd number of bytes from UTF-16-LE encoded console"
+ )
+
+ buffer = get_buffer(b, writable=True)
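+        # ReadConsoleW works in UTF-16-LE code units, i.e. 2 bytes each.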
+ code_units_to_be_read = bytes_to_be_read // 2
+ code_units_read = c_ulong()
+
+ rv = ReadConsoleW(
+ HANDLE(self.handle),
+ buffer,
+ code_units_to_be_read,
+ byref(code_units_read),
+ None,
+ )
+ if GetLastError() == ERROR_OPERATION_ABORTED:
+ # wait for KeyboardInterrupt
+ time.sleep(0.1)
+ if not rv:
+ raise OSError(f"Windows error: {GetLastError()}")
+
+ if buffer[0] == EOF:
+ return 0
+ return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+ def writable(self) -> t.Literal[True]:
+ return True
+
+ @staticmethod
+ def _get_error_message(errno: int) -> str:
+ if errno == ERROR_SUCCESS:
+ return "ERROR_SUCCESS"
+ elif errno == ERROR_NOT_ENOUGH_MEMORY:
+ return "ERROR_NOT_ENOUGH_MEMORY"
+ return f"Windows error {errno}"
+
+ def write(self, b: Buffer) -> int:
+ bytes_to_be_written = len(b)
+ buf = get_buffer(b)
+ code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
+ code_units_written = c_ulong()
+
+ WriteConsoleW(
+ HANDLE(self.handle),
+ buf,
+ code_units_to_be_written,
+ byref(code_units_written),
+ None,
+ )
+ bytes_written = 2 * code_units_written.value
+
+ if bytes_written == 0 and bytes_to_be_written > 0:
+ raise OSError(self._get_error_message(GetLastError()))
+ return bytes_written
+
+
+class ConsoleStream:
+ def __init__(self, text_stream: t.TextIO, byte_stream: t.BinaryIO) -> None:
+ self._text_stream = text_stream
+ self.buffer = byte_stream
+
+ @property
+ def name(self) -> str:
+ return self.buffer.name
+
+ def write(self, x: t.AnyStr) -> int:
+ if isinstance(x, str):
+ return self._text_stream.write(x)
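+        # Bytes bypass the text wrapper: flush any pending text first so
+        # output ordering is preserved, then write to the raw buffer.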
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(x)
+
+ def writelines(self, lines: cabc.Iterable[t.AnyStr]) -> None:
+ for line in lines:
+ self.write(line)
+
+ def __getattr__(self, name: str) -> t.Any:
+ return getattr(self._text_stream, name)
+
+ def isatty(self) -> bool:
+ return self.buffer.isatty()
+
+ def __repr__(self) -> str:
+ return f""
+
+
+def _get_text_stdin(buffer_stream: t.BinaryIO) -> t.TextIO:
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
+
+
+def _get_text_stdout(buffer_stream: t.BinaryIO) -> t.TextIO:
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
+
+
+def _get_text_stderr(buffer_stream: t.BinaryIO) -> t.TextIO:
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return t.cast(t.TextIO, ConsoleStream(text_stream, buffer_stream))
+
+
+_stream_factories: cabc.Mapping[int, t.Callable[[t.BinaryIO], t.TextIO]] = {
+    STDIN_FILENO: _get_text_stdin,
+    STDOUT_FILENO: _get_text_stdout,
+    STDERR_FILENO: _get_text_stderr,
+}
+
+
+def _is_console(f: t.TextIO) -> bool:
+ if not hasattr(f, "fileno"):
+ return False
+
+ try:
+ fileno = f.fileno()
+ except (OSError, io.UnsupportedOperation):
+ return False
+
+ handle = msvcrt.get_osfhandle(fileno)
+ return bool(GetConsoleMode(handle, byref(DWORD())))
+
+
+def _get_windows_console_stream(
+ f: t.TextIO, encoding: str | None, errors: str | None
+) -> t.TextIO | None:
+ if (
+ get_buffer is None
+ or encoding not in {"utf-16-le", None}
+ or errors not in {"strict", None}
+ or not _is_console(f)
+ ):
+ return None
+
+ func = _stream_factories.get(f.fileno())
+ if func is None:
+ return None
+
+ b = getattr(f, "buffer", None)
+
+ if b is None:
+ return None
+
+ return func(b)
diff --git a/tapdown/lib/python3.11/site-packages/click/core.py b/tapdown/lib/python3.11/site-packages/click/core.py
new file mode 100644
index 0000000..ff2f74a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/core.py
@@ -0,0 +1,3347 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import enum
+import errno
+import inspect
+import os
+import sys
+import typing as t
+from collections import abc
+from collections import Counter
+from contextlib import AbstractContextManager
+from contextlib import contextmanager
+from contextlib import ExitStack
+from functools import update_wrapper
+from gettext import gettext as _
+from gettext import ngettext
+from itertools import repeat
+from types import TracebackType
+
+from . import types
+from ._utils import FLAG_NEEDS_VALUE
+from ._utils import UNSET
+from .exceptions import Abort
+from .exceptions import BadParameter
+from .exceptions import ClickException
+from .exceptions import Exit
+from .exceptions import MissingParameter
+from .exceptions import NoArgsIsHelpError
+from .exceptions import UsageError
+from .formatting import HelpFormatter
+from .formatting import join_options
+from .globals import pop_context
+from .globals import push_context
+from .parser import _OptionParser
+from .parser import _split_opt
+from .termui import confirm
+from .termui import prompt
+from .termui import style
+from .utils import _detect_program_name
+from .utils import _expand_args
+from .utils import echo
+from .utils import make_default_short_help
+from .utils import make_str
+from .utils import PacifyFlushWrapper
+
+if t.TYPE_CHECKING:
+ from .shell_completion import CompletionItem
+
+F = t.TypeVar("F", bound="t.Callable[..., t.Any]")
+V = t.TypeVar("V")
+
+
+def _complete_visible_commands(
+ ctx: Context, incomplete: str
+) -> cabc.Iterator[tuple[str, Command]]:
+ """List all the subcommands of a group that start with the
+ incomplete value and aren't hidden.
+
+ :param ctx: Invocation context for the group.
+ :param incomplete: Value being completed. May be empty.
+ """
+ multi = t.cast(Group, ctx.command)
+
+ for name in multi.list_commands(ctx):
+ if name.startswith(incomplete):
+ command = multi.get_command(ctx, name)
+
+ if command is not None and not command.hidden:
+ yield name, command
+
+
+def _check_nested_chain(
+ base_command: Group, cmd_name: str, cmd: Command, register: bool = False
+) -> None:
+ if not base_command.chain or not isinstance(cmd, Group):
+ return
+
+ if register:
+ message = (
+ f"It is not possible to add the group {cmd_name!r} to another"
+ f" group {base_command.name!r} that is in chain mode."
+ )
+ else:
+ message = (
+ f"Found the group {cmd_name!r} as subcommand to another group "
+ f" {base_command.name!r} that is in chain mode. This is not supported."
+ )
+
+ raise RuntimeError(message)
+
+
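+# Group *iterable* into ``batch_size``-sized tuples; a trailing remainder
+# that does not fill a complete batch is dropped.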
+def batch(iterable: cabc.Iterable[V], batch_size: int) -> list[tuple[V, ...]]:
+ return list(zip(*repeat(iter(iterable), batch_size), strict=False))
+
+
+@contextmanager
+def augment_usage_errors(
+ ctx: Context, param: Parameter | None = None
+) -> cabc.Iterator[None]:
+ """Context manager that attaches extra information to exceptions."""
+ try:
+ yield
+ except BadParameter as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ if param is not None and e.param is None:
+ e.param = param
+ raise
+ except UsageError as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ raise
+
+
+def iter_params_for_processing(
+ invocation_order: cabc.Sequence[Parameter],
+ declaration_order: cabc.Sequence[Parameter],
+) -> list[Parameter]:
+ """Returns all declared parameters in the order they should be processed.
+
+    The declared parameters are re-shuffled depending on the order in which
+    they were invoked, as well as the eagerness of each parameter.
+
+ The invocation order takes precedence over the declaration order. I.e. the
+ order in which the user provided them to the CLI is respected.
+
+ This behavior and its effect on callback evaluation is detailed at:
+ https://click.palletsprojects.com/en/stable/advanced/#callback-evaluation-order
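+
+    A minimal sketch (assuming ``--a`` and ``--b`` are plain options while
+    ``--help`` is eager, as it is by default)::
+
+        a, b = Option(["--a"]), Option(["--b"])
+        h = Option(["--help"], is_eager=True, expose_value=False)
+
+        # The user typed: cmd --b --help --a
+        iter_params_for_processing([b, h, a], [a, b, h])
+        # -> [h, b, a]: eager parameters are processed first, then the
+        # rest in invocation order.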
+ """
+
+ def sort_key(item: Parameter) -> tuple[bool, float]:
+ try:
+ idx: float = invocation_order.index(item)
+ except ValueError:
+ idx = float("inf")
+
+ return not item.is_eager, idx
+
+ return sorted(declaration_order, key=sort_key)
+
+
+class ParameterSource(enum.Enum):
+ """This is an :class:`~enum.Enum` that indicates the source of a
+ parameter's value.
+
+ Use :meth:`click.Context.get_parameter_source` to get the
+ source for a parameter by name.
+
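+    Example usage, a minimal sketch (the ``--port`` option is hypothetical)::
+
+        @click.command()
+        @click.option("--port", default=8080)
+        @click.pass_context
+        def serve(ctx, port):
+            if ctx.get_parameter_source("port") is ParameterSource.DEFAULT:
+                click.echo("using the default port")
+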
+ .. versionchanged:: 8.0
+ Use :class:`~enum.Enum` and drop the ``validate`` method.
+
+ .. versionchanged:: 8.0
+ Added the ``PROMPT`` value.
+ """
+
+ COMMANDLINE = enum.auto()
+ """The value was provided by the command line args."""
+ ENVIRONMENT = enum.auto()
+ """The value was provided with an environment variable."""
+ DEFAULT = enum.auto()
+ """Used the default specified by the parameter."""
+ DEFAULT_MAP = enum.auto()
+ """Used a default provided by :attr:`Context.default_map`."""
+ PROMPT = enum.auto()
+ """Used a prompt to confirm a default or provide a value."""
+
+
+class Context:
+ """The context is a special internal object that holds state relevant
+ for the script execution at every single level. It's normally invisible
+ to commands unless they opt-in to getting access to it.
+
+ The context is useful as it can pass internal objects around and can
+ control special execution features such as reading data from
+ environment variables.
+
+ A context can be used as context manager in which case it will call
+ :meth:`close` on teardown.
+
+ :param command: the command class for this context.
+ :param parent: the parent context.
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it is usually
+                      the name of the script; for commands below it, it's
+                      the name of the command.
+ :param obj: an arbitrary object of user data.
+ :param auto_envvar_prefix: the prefix to use for automatic environment
+ variables. If this is `None` then reading
+ from environment variables is disabled. This
+ does not affect manually set environment
+ variables which are always read.
+ :param default_map: a dictionary (like object) with default values
+ for parameters.
+ :param terminal_width: the width of the terminal. The default is
+                           to inherit from the parent context. If no
+                           context defines the terminal width then
+                           auto-detection will be applied.
+ :param max_content_width: the maximum width for content rendered by
+ Click (this currently only affects help
+ pages). This defaults to 80 characters if
+ not overridden. In other words: even if the
+ terminal is larger than that, Click will not
+ format things wider than 80 characters by
+                              default. In addition to that, formatters might
+                              add some safety margin on the right.
+ :param resilient_parsing: if this flag is enabled then Click will
+ parse without any interactivity or callback
+ invocation. Default values will also be
+ ignored. This is useful for implementing
+ things such as completion support.
+ :param allow_extra_args: if this is set to `True` then extra arguments
+ at the end will not raise an error and will be
+ kept on the context. The default is to inherit
+ from the command.
+ :param allow_interspersed_args: if this is set to `False` then options
+ and arguments cannot be mixed. The
+ default is to inherit from the command.
+ :param ignore_unknown_options: instructs click to ignore options it does
+ not know and keeps them for later
+ processing.
+ :param help_option_names: optionally a list of strings that define how
+ the default help parameter is named. The
+ default is ``['--help']``.
+ :param token_normalize_func: an optional function that is used to
+ normalize tokens (options, choices,
+ etc.). This for instance can be used to
+ implement case insensitive behavior.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+                  codes are used in texts that Click prints, which is by
+ default not the case. This for instance would affect
+ help output.
+ :param show_default: Show the default value for commands. If this
+ value is not set, it defaults to the value from the parent
+ context. ``Command.show_default`` overrides this default for the
+ specific command.
+
+ .. versionchanged:: 8.2
+ The ``protected_args`` attribute is deprecated and will be removed in
+ Click 9.0. ``args`` will contain remaining unparsed tokens.
+
+ .. versionchanged:: 8.1
+ The ``show_default`` parameter is overridden by
+ ``Command.show_default``, instead of the other way around.
+
+ .. versionchanged:: 8.0
+ The ``show_default`` parameter defaults to the value from the
+ parent context.
+
+ .. versionchanged:: 7.1
+ Added the ``show_default`` parameter.
+
+ .. versionchanged:: 4.0
+ Added the ``color``, ``ignore_unknown_options``, and
+ ``max_content_width`` parameters.
+
+ .. versionchanged:: 3.0
+ Added the ``allow_extra_args`` and ``allow_interspersed_args``
+ parameters.
+
+ .. versionchanged:: 2.0
+ Added the ``resilient_parsing``, ``help_option_names``, and
+ ``token_normalize_func`` parameters.
+ """
+
+ #: The formatter class to create with :meth:`make_formatter`.
+ #:
+ #: .. versionadded:: 8.0
+ formatter_class: type[HelpFormatter] = HelpFormatter
+
+ def __init__(
+ self,
+ command: Command,
+ parent: Context | None = None,
+ info_name: str | None = None,
+ obj: t.Any | None = None,
+ auto_envvar_prefix: str | None = None,
+ default_map: cabc.MutableMapping[str, t.Any] | None = None,
+ terminal_width: int | None = None,
+ max_content_width: int | None = None,
+ resilient_parsing: bool = False,
+ allow_extra_args: bool | None = None,
+ allow_interspersed_args: bool | None = None,
+ ignore_unknown_options: bool | None = None,
+ help_option_names: list[str] | None = None,
+ token_normalize_func: t.Callable[[str], str] | None = None,
+ color: bool | None = None,
+ show_default: bool | None = None,
+ ) -> None:
+ #: the parent context or `None` if none exists.
+ self.parent = parent
+ #: the :class:`Command` for this context.
+ self.command = command
+ #: the descriptive information name
+ self.info_name = info_name
+ #: Map of parameter names to their parsed values. Parameters
+ #: with ``expose_value=False`` are not stored.
+ self.params: dict[str, t.Any] = {}
+ #: the leftover arguments.
+ self.args: list[str] = []
+ #: protected arguments. These are arguments that are prepended
+ #: to `args` when certain parsing scenarios are encountered but
+    #: must never be propagated to other arguments. This is used
+ #: to implement nested parsing.
+ self._protected_args: list[str] = []
+ #: the collected prefixes of the command's options.
+ self._opt_prefixes: set[str] = set(parent._opt_prefixes) if parent else set()
+
+ if obj is None and parent is not None:
+ obj = parent.obj
+
+ #: the user object stored.
+ self.obj: t.Any = obj
+ self._meta: dict[str, t.Any] = getattr(parent, "meta", {})
+
+ #: A dictionary (-like object) with defaults for parameters.
+ if (
+ default_map is None
+ and info_name is not None
+ and parent is not None
+ and parent.default_map is not None
+ ):
+ default_map = parent.default_map.get(info_name)
+
+ self.default_map: cabc.MutableMapping[str, t.Any] | None = default_map
+
+ #: This flag indicates if a subcommand is going to be executed. A
+ #: group callback can use this information to figure out if it's
+ #: being executed directly or because the execution flow passes
+ #: onwards to a subcommand. By default it's None, but it can be
+ #: the name of the subcommand to execute.
+ #:
+ #: If chaining is enabled this will be set to ``'*'`` in case
+ #: any commands are executed. It is however not possible to
+ #: figure out which ones. If you require this knowledge you
+ #: should use a :func:`result_callback`.
+ self.invoked_subcommand: str | None = None
+
+ if terminal_width is None and parent is not None:
+ terminal_width = parent.terminal_width
+
+ #: The width of the terminal (None is autodetection).
+ self.terminal_width: int | None = terminal_width
+
+ if max_content_width is None and parent is not None:
+ max_content_width = parent.max_content_width
+
+ #: The maximum width of formatted content (None implies a sensible
+ #: default which is 80 for most things).
+ self.max_content_width: int | None = max_content_width
+
+ if allow_extra_args is None:
+ allow_extra_args = command.allow_extra_args
+
+ #: Indicates if the context allows extra args or if it should
+ #: fail on parsing.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_extra_args = allow_extra_args
+
+ if allow_interspersed_args is None:
+ allow_interspersed_args = command.allow_interspersed_args
+
+ #: Indicates if the context allows mixing of arguments and
+ #: options or not.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_interspersed_args: bool = allow_interspersed_args
+
+ if ignore_unknown_options is None:
+ ignore_unknown_options = command.ignore_unknown_options
+
+    #: Instructs click to ignore options that a command does not
+    #: understand and store them on the context for later
+    #: processing. This is primarily useful for situations where you
+    #: want to call into external programs. Generally this pattern is
+    #: strongly discouraged because it's not possible to losslessly
+    #: forward all arguments.
+ #:
+ #: .. versionadded:: 4.0
+ self.ignore_unknown_options: bool = ignore_unknown_options
+
+ if help_option_names is None:
+ if parent is not None:
+ help_option_names = parent.help_option_names
+ else:
+ help_option_names = ["--help"]
+
+ #: The names for the help options.
+ self.help_option_names: list[str] = help_option_names
+
+ if token_normalize_func is None and parent is not None:
+ token_normalize_func = parent.token_normalize_func
+
+ #: An optional normalization function for tokens. This is
+ #: options, choices, commands etc.
+ self.token_normalize_func: t.Callable[[str], str] | None = token_normalize_func
+
+ #: Indicates if resilient parsing is enabled. In that case Click
+ #: will do its best to not cause any failures and default values
+ #: will be ignored. Useful for completion.
+ self.resilient_parsing: bool = resilient_parsing
+
+ # If there is no envvar prefix yet, but the parent has one and
+ # the command on this level has a name, we can expand the envvar
+ # prefix automatically.
+ if auto_envvar_prefix is None:
+ if (
+ parent is not None
+ and parent.auto_envvar_prefix is not None
+ and self.info_name is not None
+ ):
+ auto_envvar_prefix = (
+ f"{parent.auto_envvar_prefix}_{self.info_name.upper()}"
+ )
+ else:
+ auto_envvar_prefix = auto_envvar_prefix.upper()
+
+ if auto_envvar_prefix is not None:
+ auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
+
+ self.auto_envvar_prefix: str | None = auto_envvar_prefix
+
+ if color is None and parent is not None:
+ color = parent.color
+
+ #: Controls if styling output is wanted or not.
+ self.color: bool | None = color
+
+ if show_default is None and parent is not None:
+ show_default = parent.show_default
+
+ #: Show option default values when formatting help text.
+ self.show_default: bool | None = show_default
+
+ self._close_callbacks: list[t.Callable[[], t.Any]] = []
+ self._depth = 0
+ self._parameter_source: dict[str, ParameterSource] = {}
+ self._exit_stack = ExitStack()
+
+ @property
+ def protected_args(self) -> list[str]:
+ import warnings
+
+ warnings.warn(
+ "'protected_args' is deprecated and will be removed in Click 9.0."
+ " 'args' will contain remaining unparsed tokens.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self._protected_args
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ """Gather information that could be useful for a tool generating
+ user-facing documentation. This traverses the entire CLI
+ structure.
+
+ .. code-block:: python
+
+ with Context(cli) as ctx:
+ info = ctx.to_info_dict()
+
+ .. versionadded:: 8.0
+ """
+ return {
+ "command": self.command.to_info_dict(self),
+ "info_name": self.info_name,
+ "allow_extra_args": self.allow_extra_args,
+ "allow_interspersed_args": self.allow_interspersed_args,
+ "ignore_unknown_options": self.ignore_unknown_options,
+ "auto_envvar_prefix": self.auto_envvar_prefix,
+ }
+
+ def __enter__(self) -> Context:
+ self._depth += 1
+ push_context(self)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> bool | None:
+ self._depth -= 1
+ exit_result: bool | None = None
+ if self._depth == 0:
+ exit_result = self._close_with_exception_info(exc_type, exc_value, tb)
+ pop_context()
+
+ return exit_result
+
+ @contextmanager
+ def scope(self, cleanup: bool = True) -> cabc.Iterator[Context]:
+ """This helper method can be used with the context object to promote
+ it to the current thread local (see :func:`get_current_context`).
+ The default behavior of this is to invoke the cleanup functions which
+ can be disabled by setting `cleanup` to `False`. The cleanup
+ functions are typically used for things such as closing file handles.
+
+ If the cleanup is intended the context object can also be directly
+ used as a context manager.
+
+ Example usage::
+
+ with ctx.scope():
+ assert get_current_context() is ctx
+
+        This is equivalent to::
+
+ with ctx:
+ assert get_current_context() is ctx
+
+ .. versionadded:: 5.0
+
+ :param cleanup: controls if the cleanup functions should be run or
+ not. The default is to run these functions. In
+ some situations the context only wants to be
+ temporarily pushed in which case this can be disabled.
+ Nested pushes automatically defer the cleanup.
+ """
+ if not cleanup:
+ self._depth += 1
+ try:
+ with self as rv:
+ yield rv
+ finally:
+ if not cleanup:
+ self._depth -= 1
+
+ @property
+ def meta(self) -> dict[str, t.Any]:
+ """This is a dictionary which is shared with all the contexts
+ that are nested. It exists so that click utilities can store some
+ state here if they need to. It is however the responsibility of
+ that code to manage this dictionary well.
+
+ The keys are supposed to be unique dotted strings. For instance
+ module paths are a good choice for it. What is stored in there is
+ irrelevant for the operation of click. However what is important is
+ that code that places data here adheres to the general semantics of
+ the system.
+
+ Example usage::
+
+ LANG_KEY = f'{__name__}.lang'
+
+ def set_language(value):
+ ctx = get_current_context()
+ ctx.meta[LANG_KEY] = value
+
+ def get_language():
+ return get_current_context().meta.get(LANG_KEY, 'en_US')
+
+ .. versionadded:: 5.0
+ """
+ return self._meta
+
+ def make_formatter(self) -> HelpFormatter:
+ """Creates the :class:`~click.HelpFormatter` for the help and
+ usage output.
+
+ To quickly customize the formatter class used without overriding
+ this method, set the :attr:`formatter_class` attribute.
+
+ .. versionchanged:: 8.0
+ Added the :attr:`formatter_class` attribute.
+ """
+ return self.formatter_class(
+ width=self.terminal_width, max_width=self.max_content_width
+ )
+
+ def with_resource(self, context_manager: AbstractContextManager[V]) -> V:
+ """Register a resource as if it were used in a ``with``
+ statement. The resource will be cleaned up when the context is
+ popped.
+
+ Uses :meth:`contextlib.ExitStack.enter_context`. It calls the
+ resource's ``__enter__()`` method and returns the result. When
+ the context is popped, it closes the stack, which calls the
+ resource's ``__exit__()`` method.
+
+ To register a cleanup function for something that isn't a
+ context manager, use :meth:`call_on_close`. Or use something
+ from :mod:`contextlib` to turn it into a context manager first.
+
+ .. code-block:: python
+
+ @click.group()
+ @click.option("--name")
+ @click.pass_context
+ def cli(ctx):
+ ctx.obj = ctx.with_resource(connect_db(name))
+
+ :param context_manager: The context manager to enter.
+ :return: Whatever ``context_manager.__enter__()`` returns.
+
+ .. versionadded:: 8.0
+ """
+ return self._exit_stack.enter_context(context_manager)
+
+ def call_on_close(self, f: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
+ """Register a function to be called when the context tears down.
+
+ This can be used to close resources opened during the script
+ execution. Resources that support Python's context manager
+ protocol which would be used in a ``with`` statement should be
+ registered with :meth:`with_resource` instead.
+
+ :param f: The function to execute on teardown.
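+
+        Example usage, a minimal sketch (the log file is hypothetical)::
+
+            log = open("app.log", "a")
+            ctx.call_on_close(log.close)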
+ """
+ return self._exit_stack.callback(f)
+
+ def close(self) -> None:
+ """Invoke all close callbacks registered with
+ :meth:`call_on_close`, and exit all context managers entered
+ with :meth:`with_resource`.
+ """
+ self._close_with_exception_info(None, None, None)
+
+ def _close_with_exception_info(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> bool | None:
+ """Unwind the exit stack by calling its :meth:`__exit__` providing the exception
+ information to allow for exception handling by the various resources registered
+ using :meth;`with_resource`
+
+ :return: Whatever ``exit_stack.__exit__()`` returns.
+ """
+ exit_result = self._exit_stack.__exit__(exc_type, exc_value, tb)
+ # In case the context is reused, create a new exit stack.
+ self._exit_stack = ExitStack()
+
+ return exit_result
+
+ @property
+ def command_path(self) -> str:
+ """The computed command path. This is used for the ``usage``
+ information on the help page. It's automatically created by
+ combining the info names of the chain of contexts to the root.
+ """
+ rv = ""
+ if self.info_name is not None:
+ rv = self.info_name
+ if self.parent is not None:
+ parent_command_path = [self.parent.command_path]
+
+ if isinstance(self.parent.command, Command):
+ for param in self.parent.command.get_params(self):
+ parent_command_path.extend(param.get_usage_pieces(self))
+
+ rv = f"{' '.join(parent_command_path)} {rv}"
+ return rv.lstrip()
+
+ def find_root(self) -> Context:
+ """Finds the outermost context."""
+ node = self
+ while node.parent is not None:
+ node = node.parent
+ return node
+
+ def find_object(self, object_type: type[V]) -> V | None:
+ """Finds the closest object of a given type."""
+ node: Context | None = self
+
+ while node is not None:
+ if isinstance(node.obj, object_type):
+ return node.obj
+
+ node = node.parent
+
+ return None
+
+ def ensure_object(self, object_type: type[V]) -> V:
+ """Like :meth:`find_object` but sets the innermost object to a
+ new instance of `object_type` if it does not exist.
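+
+        Example usage, a minimal sketch::
+
+            @click.group()
+            @click.pass_context
+            def cli(ctx):
+                ctx.ensure_object(dict)
+                ctx.obj["verbose"] = False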
+ """
+ rv = self.find_object(object_type)
+ if rv is None:
+ self.obj = rv = object_type()
+ return rv
+
+ @t.overload
+ def lookup_default(
+ self, name: str, call: t.Literal[True] = True
+ ) -> t.Any | None: ...
+
+ @t.overload
+ def lookup_default(
+ self, name: str, call: t.Literal[False] = ...
+ ) -> t.Any | t.Callable[[], t.Any] | None: ...
+
+ def lookup_default(self, name: str, call: bool = True) -> t.Any | None:
+ """Get the default for a parameter from :attr:`default_map`.
+
+ :param name: Name of the parameter.
+ :param call: If the default is a callable, call it. Disable to
+ return the callable instead.
+
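+        Example usage, a minimal sketch (the ``port`` key is hypothetical)::
+
+            ctx.default_map = {"port": lambda: 8080}
+            ctx.lookup_default("port")              # -> 8080
+            ctx.lookup_default("port", call=False)  # -> the lambda itself
+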
+ .. versionchanged:: 8.0
+ Added the ``call`` parameter.
+ """
+ if self.default_map is not None:
+ value = self.default_map.get(name, UNSET)
+
+ if call and callable(value):
+ return value()
+
+ return value
+
+ return UNSET
+
+ def fail(self, message: str) -> t.NoReturn:
+ """Aborts the execution of the program with a specific error
+ message.
+
+ :param message: the error message to fail with.
+ """
+ raise UsageError(message, self)
+
+ def abort(self) -> t.NoReturn:
+ """Aborts the script."""
+ raise Abort()
+
+ def exit(self, code: int = 0) -> t.NoReturn:
+ """Exits the application with a given exit code.
+
+ .. versionchanged:: 8.2
+ Callbacks and context managers registered with :meth:`call_on_close`
+ and :meth:`with_resource` are closed before exiting.
+ """
+ self.close()
+ raise Exit(code)
+
+ def get_usage(self) -> str:
+ """Helper method to get formatted usage string for the current
+ context and command.
+ """
+ return self.command.get_usage(self)
+
+ def get_help(self) -> str:
+ """Helper method to get formatted help page for the current
+ context and command.
+ """
+ return self.command.get_help(self)
+
+ def _make_sub_context(self, command: Command) -> Context:
+ """Create a new context of the same type as this context, but
+ for a new command.
+
+ :meta private:
+ """
+ return type(self)(command, info_name=command.name, parent=self)
+
+ @t.overload
+ def invoke(
+ self, callback: t.Callable[..., V], /, *args: t.Any, **kwargs: t.Any
+ ) -> V: ...
+
+ @t.overload
+ def invoke(self, callback: Command, /, *args: t.Any, **kwargs: t.Any) -> t.Any: ...
+
+ def invoke(
+ self, callback: Command | t.Callable[..., V], /, *args: t.Any, **kwargs: t.Any
+ ) -> t.Any | V:
+ """Invokes a command callback in exactly the way it expects. There
+ are two ways to invoke this method:
+
+ 1. the first argument can be a callback and all other arguments and
+ keyword arguments are forwarded directly to the function.
+ 2. the first argument is a click command object. In that case all
+ arguments are forwarded as well but proper click parameters
+ (options and click arguments) must be keyword arguments and Click
+ will fill in defaults.
+
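+        For example, a minimal sketch with a hypothetical sibling command::
+
+            @click.command()
+            @click.option("--count", default=1)
+            def sync(count):
+                click.echo(f"syncing {count}")
+
+            # inside another command's callback:
+            ctx.invoke(sync, count=3)
+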
+ .. versionchanged:: 8.0
+ All ``kwargs`` are tracked in :attr:`params` so they will be
+ passed if :meth:`forward` is called at multiple levels.
+
+ .. versionchanged:: 3.2
+ A new context is created, and missing arguments use default values.
+ """
+ if isinstance(callback, Command):
+ other_cmd = callback
+
+ if other_cmd.callback is None:
+ raise TypeError(
+ "The given command does not have a callback that can be invoked."
+ )
+ else:
+ callback = t.cast("t.Callable[..., V]", other_cmd.callback)
+
+ ctx = self._make_sub_context(other_cmd)
+
+ for param in other_cmd.params:
+ if param.name not in kwargs and param.expose_value:
+ kwargs[param.name] = param.type_cast_value( # type: ignore
+ ctx, param.get_default(ctx)
+ )
+
+ # Track all kwargs as params, so that forward() will pass
+ # them on in subsequent calls.
+ ctx.params.update(kwargs)
+ else:
+ ctx = self
+
+ with augment_usage_errors(self):
+ with ctx:
+ return callback(*args, **kwargs)
+
+ def forward(self, cmd: Command, /, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ """Similar to :meth:`invoke` but fills in default keyword
+ arguments from the current context if the other command expects
+ it. This cannot invoke callbacks directly, only other commands.
+
+ .. versionchanged:: 8.0
+ All ``kwargs`` are tracked in :attr:`params` so they will be
+ passed if ``forward`` is called at multiple levels.
+ """
+ # Can only forward to other commands, not direct callbacks.
+ if not isinstance(cmd, Command):
+ raise TypeError("Callback is not a command.")
+
+ for param in self.params:
+ if param not in kwargs:
+ kwargs[param] = self.params[param]
+
+ return self.invoke(cmd, *args, **kwargs)
+
+ def set_parameter_source(self, name: str, source: ParameterSource) -> None:
+ """Set the source of a parameter. This indicates the location
+ from which the value of the parameter was obtained.
+
+ :param name: The name of the parameter.
+ :param source: A member of :class:`~click.core.ParameterSource`.
+ """
+ self._parameter_source[name] = source
+
+ def get_parameter_source(self, name: str) -> ParameterSource | None:
+ """Get the source of a parameter. This indicates the location
+ from which the value of the parameter was obtained.
+
+ This can be useful for determining when a user specified a value
+ on the command line that is the same as the default value. It
+ will be :attr:`~click.core.ParameterSource.DEFAULT` only if the
+ value was actually taken from the default.
+
+ :param name: The name of the parameter.
+ :rtype: ParameterSource
+
+ .. versionchanged:: 8.0
+ Returns ``None`` if the parameter was not provided from any
+ source.
+ """
+ return self._parameter_source.get(name)
+
+
+class Command:
+ """Commands are the basic building block of command line interfaces in
+ Click. A basic command handles command line parsing and might dispatch
+ more parsing to commands nested below it.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ :param callback: the callback to invoke. This is optional.
+ :param params: the parameters to register with this command. This can
+ be either :class:`Option` or :class:`Argument` objects.
+ :param help: the help string to use for this command.
+ :param epilog: like the help string but it's printed at the end of the
+ help page after everything else.
+ :param short_help: the short help to use for this command. This is
+ shown on the command listing of the parent command.
+ :param add_help_option: by default each command registers a ``--help``
+ option. This can be disabled by this parameter.
+ :param no_args_is_help: this controls what happens if no arguments are
+ provided. This option is disabled by default.
+                            If enabled, ``--help`` is added as the argument
+                            when no arguments are passed.
+ :param hidden: hide this command from help outputs.
+ :param deprecated: If ``True`` or non-empty string, issues a message
+ indicating that the command is deprecated and highlights
+ its deprecation in --help. The message can be customized
+ by using a string as the value.
+
+ .. versionchanged:: 8.2
+ This is the base class for all commands, not ``BaseCommand``.
+ ``deprecated`` can be set to a string as well to customize the
+ deprecation message.
+
+ .. versionchanged:: 8.1
+ ``help``, ``epilog``, and ``short_help`` are stored unprocessed,
+ all formatting is done when outputting help text, not at init,
+ and is done even if not using the ``@command`` decorator.
+
+ .. versionchanged:: 8.0
+ Added a ``repr`` showing the command name.
+
+ .. versionchanged:: 7.1
+ Added the ``no_args_is_help`` parameter.
+
+ .. versionchanged:: 2.0
+ Added the ``context_settings`` parameter.
+ """
+
+ #: The context class to create with :meth:`make_context`.
+ #:
+ #: .. versionadded:: 8.0
+ context_class: type[Context] = Context
+
+ #: the default for the :attr:`Context.allow_extra_args` flag.
+ allow_extra_args = False
+
+ #: the default for the :attr:`Context.allow_interspersed_args` flag.
+ allow_interspersed_args = True
+
+ #: the default for the :attr:`Context.ignore_unknown_options` flag.
+ ignore_unknown_options = False
+
+ def __init__(
+ self,
+ name: str | None,
+ context_settings: cabc.MutableMapping[str, t.Any] | None = None,
+ callback: t.Callable[..., t.Any] | None = None,
+ params: list[Parameter] | None = None,
+ help: str | None = None,
+ epilog: str | None = None,
+ short_help: str | None = None,
+ options_metavar: str | None = "[OPTIONS]",
+ add_help_option: bool = True,
+ no_args_is_help: bool = False,
+ hidden: bool = False,
+ deprecated: bool | str = False,
+ ) -> None:
+ #: the name the command thinks it has. Upon registering a command
+        #: on a :class:`Group` the group will use this as the default
+        #: command name. You should instead use the
+ #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
+ self.name = name
+
+ if context_settings is None:
+ context_settings = {}
+
+ #: an optional dictionary with defaults passed to the context.
+ self.context_settings: cabc.MutableMapping[str, t.Any] = context_settings
+
+ #: the callback to execute when the command fires. This might be
+ #: `None` in which case nothing happens.
+ self.callback = callback
+ #: the list of parameters for this command in the order they
+ #: should show up in the help page and execute. Eager parameters
+ #: will automatically be handled before non eager ones.
+ self.params: list[Parameter] = params or []
+ self.help = help
+ self.epilog = epilog
+ self.options_metavar = options_metavar
+ self.short_help = short_help
+ self.add_help_option = add_help_option
+ self._help_option = None
+ self.no_args_is_help = no_args_is_help
+ self.hidden = hidden
+ self.deprecated = deprecated
+
+ def to_info_dict(self, ctx: Context) -> dict[str, t.Any]:
+ return {
+ "name": self.name,
+ "params": [param.to_info_dict() for param in self.get_params(ctx)],
+ "help": self.help,
+ "epilog": self.epilog,
+ "short_help": self.short_help,
+ "hidden": self.hidden,
+ "deprecated": self.deprecated,
+ }
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__} {self.name}>"
+
+ def get_usage(self, ctx: Context) -> str:
+ """Formats the usage line into a string and returns it.
+
+ Calls :meth:`format_usage` internally.
+ """
+ formatter = ctx.make_formatter()
+ self.format_usage(ctx, formatter)
+ return formatter.getvalue().rstrip("\n")
+
+ def get_params(self, ctx: Context) -> list[Parameter]:
+ params = self.params
+ help_option = self.get_help_option(ctx)
+
+ if help_option is not None:
+ params = [*params, help_option]
+
+ if __debug__:
+ import warnings
+
+ opts = [opt for param in params for opt in param.opts]
+ opts_counter = Counter(opts)
+ duplicate_opts = (opt for opt, count in opts_counter.items() if count > 1)
+
+ for duplicate_opt in duplicate_opts:
+ warnings.warn(
+ (
+ f"The parameter {duplicate_opt} is used more than once. "
+ "Remove its duplicate as parameters should be unique."
+ ),
+ stacklevel=3,
+ )
+
+ return params
+
+ def format_usage(self, ctx: Context, formatter: HelpFormatter) -> None:
+ """Writes the usage line into the formatter.
+
+ This is a low-level method called by :meth:`get_usage`.
+ """
+ pieces = self.collect_usage_pieces(ctx)
+ formatter.write_usage(ctx.command_path, " ".join(pieces))
+
+ def collect_usage_pieces(self, ctx: Context) -> list[str]:
+ """Returns all the pieces that go into the usage line and returns
+ it as a list of strings.
+ """
+ rv = [self.options_metavar] if self.options_metavar else []
+
+ for param in self.get_params(ctx):
+ rv.extend(param.get_usage_pieces(ctx))
+
+ return rv
+
+ def get_help_option_names(self, ctx: Context) -> list[str]:
+ """Returns the names for the help option."""
+ all_names = set(ctx.help_option_names)
+ for param in self.params:
+ all_names.difference_update(param.opts)
+ all_names.difference_update(param.secondary_opts)
+ return list(all_names)
+
+ def get_help_option(self, ctx: Context) -> Option | None:
+ """Returns the help option object.
+
+ Skipped if :attr:`add_help_option` is ``False``.
+
+ .. versionchanged:: 8.1.8
+ The help option is now cached to avoid creating it multiple times.
+ """
+ help_option_names = self.get_help_option_names(ctx)
+
+ if not help_option_names or not self.add_help_option:
+ return None
+
+ # Cache the help option object in private _help_option attribute to
+ # avoid creating it multiple times. Not doing this will break the
+        # callback ordering by iter_params_for_processing(), which relies on
+ # object comparison.
+ if self._help_option is None:
+ # Avoid circular import.
+ from .decorators import help_option
+
+ # Apply help_option decorator and pop resulting option
+ help_option(*help_option_names)(self)
+ self._help_option = self.params.pop() # type: ignore[assignment]
+
+ return self._help_option
+
+ def make_parser(self, ctx: Context) -> _OptionParser:
+ """Creates the underlying option parser for this command."""
+ parser = _OptionParser(ctx)
+ for param in self.get_params(ctx):
+ param.add_to_parser(parser, ctx)
+ return parser
+
+ def get_help(self, ctx: Context) -> str:
+ """Formats the help into a string and returns it.
+
+ Calls :meth:`format_help` internally.
+ """
+ formatter = ctx.make_formatter()
+ self.format_help(ctx, formatter)
+ return formatter.getvalue().rstrip("\n")
+
+ def get_short_help_str(self, limit: int = 45) -> str:
+ """Gets short help for the command or makes it by shortening the
+ long help string.
+ """
+ if self.short_help:
+ text = inspect.cleandoc(self.short_help)
+ elif self.help:
+ text = make_default_short_help(self.help, limit)
+ else:
+ text = ""
+
+ if self.deprecated:
+ deprecated_message = (
+ f"(DEPRECATED: {self.deprecated})"
+ if isinstance(self.deprecated, str)
+ else "(DEPRECATED)"
+ )
+ text = _("{text} {deprecated_message}").format(
+ text=text, deprecated_message=deprecated_message
+ )
+
+ return text.strip()
+
+ def format_help(self, ctx: Context, formatter: HelpFormatter) -> None:
+ """Writes the help into the formatter if it exists.
+
+ This is a low-level method called by :meth:`get_help`.
+
+ This calls the following methods:
+
+ - :meth:`format_usage`
+ - :meth:`format_help_text`
+ - :meth:`format_options`
+ - :meth:`format_epilog`
+ """
+ self.format_usage(ctx, formatter)
+ self.format_help_text(ctx, formatter)
+ self.format_options(ctx, formatter)
+ self.format_epilog(ctx, formatter)
+
+ def format_help_text(self, ctx: Context, formatter: HelpFormatter) -> None:
+ """Writes the help text to the formatter if it exists."""
+ if self.help is not None:
+ # truncate the help text to the first form feed
+ text = inspect.cleandoc(self.help).partition("\f")[0]
+ else:
+ text = ""
+
+ if self.deprecated:
+ deprecated_message = (
+ f"(DEPRECATED: {self.deprecated})"
+ if isinstance(self.deprecated, str)
+ else "(DEPRECATED)"
+ )
+ text = _("{text} {deprecated_message}").format(
+ text=text, deprecated_message=deprecated_message
+ )
+
+ if text:
+ formatter.write_paragraph()
+
+ with formatter.indentation():
+ formatter.write_text(text)
+
+ def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
+ """Writes all the options into the formatter if they exist."""
+ opts = []
+ for param in self.get_params(ctx):
+ rv = param.get_help_record(ctx)
+ if rv is not None:
+ opts.append(rv)
+
+ if opts:
+ with formatter.section(_("Options")):
+ formatter.write_dl(opts)
+
+ def format_epilog(self, ctx: Context, formatter: HelpFormatter) -> None:
+ """Writes the epilog into the formatter if it exists."""
+ if self.epilog:
+ epilog = inspect.cleandoc(self.epilog)
+ formatter.write_paragraph()
+
+ with formatter.indentation():
+ formatter.write_text(epilog)
+
+ def make_context(
+ self,
+ info_name: str | None,
+ args: list[str],
+ parent: Context | None = None,
+ **extra: t.Any,
+ ) -> Context:
+ """This function when given an info name and arguments will kick
+ off the parsing and create a new :class:`Context`. It does not
+ invoke the actual command callback though.
+
+ To quickly customize the context class used without overriding
+ this method, set the :attr:`context_class` attribute.
+
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it's usually
+ the name of the script, for commands below it's
+ the name of the command.
+ :param args: the arguments to parse as list of strings.
+ :param parent: the parent context if available.
+ :param extra: extra keyword arguments forwarded to the context
+ constructor.
+
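+        Example usage, a minimal sketch (``cli`` is a hypothetical command)::
+
+            with cli.make_context("cli", ["--count", "2"]) as ctx:
+                cli.invoke(ctx)
+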
+ .. versionchanged:: 8.0
+ Added the :attr:`context_class` attribute.
+ """
+ for key, value in self.context_settings.items():
+ if key not in extra:
+ extra[key] = value
+
+ ctx = self.context_class(self, info_name=info_name, parent=parent, **extra)
+
+ with ctx.scope(cleanup=False):
+ self.parse_args(ctx, args)
+ return ctx
+
+ def parse_args(self, ctx: Context, args: list[str]) -> list[str]:
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ raise NoArgsIsHelpError(ctx)
+
+ parser = self.make_parser(ctx)
+ opts, args, param_order = parser.parse_args(args=args)
+
+ for param in iter_params_for_processing(param_order, self.get_params(ctx)):
+ _, args = param.handle_parse_result(ctx, opts, args)
+
+ if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
+ ctx.fail(
+ ngettext(
+ "Got unexpected extra argument ({args})",
+ "Got unexpected extra arguments ({args})",
+ len(args),
+ ).format(args=" ".join(map(str, args)))
+ )
+
+ ctx.args = args
+ ctx._opt_prefixes.update(parser._opt_prefixes)
+ return args
+
+ def invoke(self, ctx: Context) -> t.Any:
+ """Given a context, this invokes the attached callback (if it exists)
+ in the right way.
+ """
+ if self.deprecated:
+ extra_message = (
+ f" {self.deprecated}" if isinstance(self.deprecated, str) else ""
+ )
+ message = _(
+ "DeprecationWarning: The command {name!r} is deprecated.{extra_message}"
+ ).format(name=self.name, extra_message=extra_message)
+ echo(style(message, fg="red"), err=True)
+
+ if self.callback is not None:
+ return ctx.invoke(self.callback, **ctx.params)
+
+ def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]:
+ """Return a list of completions for the incomplete value. Looks
+ at the names of options and chained multi-commands.
+
+ Any command could be part of a chained multi-command, so sibling
+ commands are valid at any point during command completion.
+
+ :param ctx: Invocation context for this command.
+ :param incomplete: Value being completed. May be empty.
+
+ .. versionadded:: 8.0
+ """
+ from click.shell_completion import CompletionItem
+
+ results: list[CompletionItem] = []
+
+ if incomplete and not incomplete[0].isalnum():
+ for param in self.get_params(ctx):
+ if (
+ not isinstance(param, Option)
+ or param.hidden
+ or (
+ not param.multiple
+ and ctx.get_parameter_source(param.name) # type: ignore
+ is ParameterSource.COMMANDLINE
+ )
+ ):
+ continue
+
+ results.extend(
+ CompletionItem(name, help=param.help)
+ for name in [*param.opts, *param.secondary_opts]
+ if name.startswith(incomplete)
+ )
+
+ while ctx.parent is not None:
+ ctx = ctx.parent
+
+ if isinstance(ctx.command, Group) and ctx.command.chain:
+ results.extend(
+ CompletionItem(name, help=command.get_short_help_str())
+ for name, command in _complete_visible_commands(ctx, incomplete)
+ if name not in ctx._protected_args
+ )
+
+ return results
+
+ @t.overload
+ def main(
+ self,
+ args: cabc.Sequence[str] | None = None,
+ prog_name: str | None = None,
+ complete_var: str | None = None,
+ standalone_mode: t.Literal[True] = True,
+ **extra: t.Any,
+ ) -> t.NoReturn: ...
+
+ @t.overload
+ def main(
+ self,
+ args: cabc.Sequence[str] | None = None,
+ prog_name: str | None = None,
+ complete_var: str | None = None,
+ standalone_mode: bool = ...,
+ **extra: t.Any,
+ ) -> t.Any: ...
+
+ def main(
+ self,
+ args: cabc.Sequence[str] | None = None,
+ prog_name: str | None = None,
+ complete_var: str | None = None,
+ standalone_mode: bool = True,
+ windows_expand_args: bool = True,
+ **extra: t.Any,
+ ) -> t.Any:
+ """This is the way to invoke a script with all the bells and
+ whistles as a command line application. This will always terminate
+ the application after a call. If this is not wanted, ``SystemExit``
+ needs to be caught.
+
+ This method is also available by directly calling the instance of
+ a :class:`Command`.
+
+ :param args: the arguments that should be used for parsing. If not
+ provided, ``sys.argv[1:]`` is used.
+ :param prog_name: the program name that should be used. By default
+ the program name is constructed by taking the file
+ name from ``sys.argv[0]``.
+ :param complete_var: the environment variable that controls the
+ bash completion support. The default is
+ ``"__COMPLETE"`` with prog_name in
+ uppercase.
+ :param standalone_mode: the default behavior is to invoke the script
+ in standalone mode. Click will then
+ handle exceptions and convert them into
+ error messages and the function will never
+ return but shut down the interpreter. If
+ this is set to `False` they will be
+ propagated to the caller and the return
+ value of this function is the return value
+ of :meth:`invoke`.
+ :param windows_expand_args: Expand glob patterns, user dir, and
+ env vars in command line args on Windows.
+ :param extra: extra keyword arguments are forwarded to the context
+ constructor. See :class:`Context` for more information.
+
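+        For example, to propagate exceptions and capture the callback's
+        return value instead of exiting (``cli`` is a hypothetical command)::
+
+            rv = cli.main(["--count", "2"], standalone_mode=False)
+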
+ .. versionchanged:: 8.0.1
+ Added the ``windows_expand_args`` parameter to allow
+ disabling command line arg expansion on Windows.
+
+ .. versionchanged:: 8.0
+ When taking arguments from ``sys.argv`` on Windows, glob
+ patterns, user dir, and env vars are expanded.
+
+ .. versionchanged:: 3.0
+ Added the ``standalone_mode`` parameter.
+ """
+ if args is None:
+ args = sys.argv[1:]
+
+ if os.name == "nt" and windows_expand_args:
+ args = _expand_args(args)
+ else:
+ args = list(args)
+
+ if prog_name is None:
+ prog_name = _detect_program_name()
+
+ # Process shell completion requests and exit early.
+ self._main_shell_completion(extra, prog_name, complete_var)
+
+ try:
+ try:
+ with self.make_context(prog_name, args, **extra) as ctx:
+ rv = self.invoke(ctx)
+ if not standalone_mode:
+ return rv
+ # it's not safe to `ctx.exit(rv)` here!
+ # note that `rv` may actually contain data like "1" which
+ # has obvious effects
+ # more subtle case: `rv=[None, None]` can come out of
+ # chained commands which all returned `None` -- so it's not
+ # even always obvious that `rv` indicates success/failure
+ # by its truthiness/falsiness
+ ctx.exit()
+ except (EOFError, KeyboardInterrupt) as e:
+ echo(file=sys.stderr)
+ raise Abort() from e
+ except ClickException as e:
+ if not standalone_mode:
+ raise
+ e.show()
+ sys.exit(e.exit_code)
+ except OSError as e:
+ if e.errno == errno.EPIPE:
+ sys.stdout = t.cast(t.TextIO, PacifyFlushWrapper(sys.stdout))
+ sys.stderr = t.cast(t.TextIO, PacifyFlushWrapper(sys.stderr))
+ sys.exit(1)
+ else:
+ raise
+ except Exit as e:
+ if standalone_mode:
+ sys.exit(e.exit_code)
+ else:
+ # in non-standalone mode, return the exit code
+ # note that this is only reached if `self.invoke` above raises
+ # an Exit explicitly -- thus bypassing the check there which
+ # would return its result
+ # the results of non-standalone execution may therefore be
+ # somewhat ambiguous: if there are codepaths which lead to
+ # `ctx.exit(1)` and to `return 1`, the caller won't be able to
+ # tell the difference between the two
+ return e.exit_code
+ except Abort:
+ if not standalone_mode:
+ raise
+ echo(_("Aborted!"), file=sys.stderr)
+ sys.exit(1)
+
+ def _main_shell_completion(
+ self,
+ ctx_args: cabc.MutableMapping[str, t.Any],
+ prog_name: str,
+ complete_var: str | None = None,
+ ) -> None:
+ """Check if the shell is asking for tab completion, process
+ that, then exit early. Called from :meth:`main` before the
+ program is invoked.
+
+ :param prog_name: Name of the executable in the shell.
+ :param complete_var: Name of the environment variable that holds
+ the completion instruction. Defaults to
+ ``_{PROG_NAME}_COMPLETE``.
+
+ .. versionchanged:: 8.2.0
+ Dots (``.``) in ``prog_name`` are replaced with underscores (``_``).
+ """
+ if complete_var is None:
+ complete_name = prog_name.replace("-", "_").replace(".", "_")
+ complete_var = f"_{complete_name}_COMPLETE".upper()
+
+ instruction = os.environ.get(complete_var)
+
+ if not instruction:
+ return
+
+ from .shell_completion import shell_complete
+
+ rv = shell_complete(self, ctx_args, prog_name, complete_var, instruction)
+ sys.exit(rv)
+
+ def __call__(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ """Alias for :meth:`main`."""
+ return self.main(*args, **kwargs)
+
+
+class _FakeSubclassCheck(type):
+ def __subclasscheck__(cls, subclass: type) -> bool:
+ return issubclass(subclass, cls.__bases__[0])
+
+ def __instancecheck__(cls, instance: t.Any) -> bool:
+ return isinstance(instance, cls.__bases__[0])
+
+
+class _BaseCommand(Command, metaclass=_FakeSubclassCheck):
+ """
+ .. deprecated:: 8.2
+ Will be removed in Click 9.0. Use ``Command`` instead.
+ """
+
+
+class Group(Command):
+ """A group is a command that nests other commands (or more groups).
+
+ :param name: The name of the group command.
+ :param commands: Map names to :class:`Command` objects. Can be a list, which
+ will use :attr:`Command.name` as the keys.
+ :param invoke_without_command: Invoke the group's callback even if a
+ subcommand is not given.
+ :param no_args_is_help: If no arguments are given, show the group's help and
+ exit. Defaults to the opposite of ``invoke_without_command``.
+ :param subcommand_metavar: How to represent the subcommand argument in help.
+ The default will represent whether ``chain`` is set or not.
+ :param chain: Allow passing more than one subcommand argument. After parsing
+ a command's arguments, if any arguments remain another command will be
+ matched, and so on.
+ :param result_callback: A function to call after the group's and
+ subcommand's callbacks. The value returned by the subcommand is passed.
+ If ``chain`` is enabled, the value will be a list of values returned by
+ all the commands. If ``invoke_without_command`` is enabled, the value
+ will be the value returned by the group's callback, or an empty list if
+ ``chain`` is enabled.
+ :param kwargs: Other arguments passed to :class:`Command`.
+
+ .. versionchanged:: 8.0
+ The ``commands`` argument can be a list of command objects.
+
+ .. versionchanged:: 8.2
+ Merged with and replaces the ``MultiCommand`` base class.
+ """
+
+ allow_extra_args = True
+ allow_interspersed_args = False
+
+ #: If set, this is used by the group's :meth:`command` decorator
+ #: as the default :class:`Command` class. This is useful to make all
+ #: subcommands use a custom command class.
+ #:
+ #: .. versionadded:: 8.0
+ command_class: type[Command] | None = None
+
+ #: If set, this is used by the group's :meth:`group` decorator
+ #: as the default :class:`Group` class. This is useful to make all
+ #: subgroups use a custom group class.
+ #:
+ #: If set to the special value :class:`type` (literally
+ #: ``group_class = type``), this group's class will be used as the
+ #: default class. This makes a custom group class continue to make
+ #: custom groups.
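+    #:
+    #: Example usage, a minimal sketch::
+    #:
+    #:     class RepoGroup(Group):
+    #:         group_class = type  # subgroups are created as RepoGroup too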
+ #:
+ #: .. versionadded:: 8.0
+ group_class: type[Group] | type[type] | None = None
+ # Literal[type] isn't valid, so use Type[type]
+
+ def __init__(
+ self,
+ name: str | None = None,
+ commands: cabc.MutableMapping[str, Command]
+ | cabc.Sequence[Command]
+ | None = None,
+ invoke_without_command: bool = False,
+ no_args_is_help: bool | None = None,
+ subcommand_metavar: str | None = None,
+ chain: bool = False,
+ result_callback: t.Callable[..., t.Any] | None = None,
+ **kwargs: t.Any,
+ ) -> None:
+ super().__init__(name, **kwargs)
+
+ if commands is None:
+ commands = {}
+ elif isinstance(commands, abc.Sequence):
+ commands = {c.name: c for c in commands if c.name is not None}
+
+ #: The registered subcommands by their exported names.
+ self.commands: cabc.MutableMapping[str, Command] = commands
+
+ if no_args_is_help is None:
+ no_args_is_help = not invoke_without_command
+
+ self.no_args_is_help = no_args_is_help
+ self.invoke_without_command = invoke_without_command
+
+ if subcommand_metavar is None:
+ if chain:
+ subcommand_metavar = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
+ else:
+ subcommand_metavar = "COMMAND [ARGS]..."
+
+ self.subcommand_metavar = subcommand_metavar
+ self.chain = chain
+ # The result callback that is stored. This can be set or
+ # overridden with the :func:`result_callback` decorator.
+ self._result_callback = result_callback
+
+ if self.chain:
+ for param in self.params:
+ if isinstance(param, Argument) and not param.required:
+ raise RuntimeError(
+ "A group in chain mode cannot have optional arguments."
+ )
+
+ def to_info_dict(self, ctx: Context) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict(ctx)
+ commands = {}
+
+ for name in self.list_commands(ctx):
+ command = self.get_command(ctx, name)
+
+ if command is None:
+ continue
+
+ sub_ctx = ctx._make_sub_context(command)
+
+ with sub_ctx.scope(cleanup=False):
+ commands[name] = command.to_info_dict(sub_ctx)
+
+ info_dict.update(commands=commands, chain=self.chain)
+ return info_dict
+
+ def add_command(self, cmd: Command, name: str | None = None) -> None:
+ """Registers another :class:`Command` with this group. If the name
+ is not provided, the name of the command is used.
+ """
+ name = name or cmd.name
+ if name is None:
+ raise TypeError("Command has no name.")
+ _check_nested_chain(self, name, cmd, register=True)
+ self.commands[name] = cmd
+
+ @t.overload
+ def command(self, __func: t.Callable[..., t.Any]) -> Command: ...
+
+ @t.overload
+ def command(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.Callable[[t.Callable[..., t.Any]], Command]: ...
+
+ def command(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.Callable[[t.Callable[..., t.Any]], Command] | Command:
+ """A shortcut decorator for declaring and attaching a command to
+ the group. This takes the same arguments as :func:`command` and
+ immediately registers the created command with this group by
+ calling :meth:`add_command`.
+
+ To customize the command class used, set the
+ :attr:`command_class` attribute.
+
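+ For example (``cli`` and ``sync`` are illustrative names)::
+
+ @click.group()
+ def cli():
+ pass
+
+ @cli.command()
+ def sync():
+ click.echo("Syncing") # illustrative body
+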
+ .. versionchanged:: 8.1
+ This decorator can be applied without parentheses.
+
+ .. versionchanged:: 8.0
+ Added the :attr:`command_class` attribute.
+ """
+ from .decorators import command
+
+ func: t.Callable[..., t.Any] | None = None
+
+ if args and callable(args[0]):
+ assert len(args) == 1 and not kwargs, (
+ "Use 'command(**kwargs)(callable)' to provide arguments."
+ )
+ (func,) = args
+ args = ()
+
+ if self.command_class and kwargs.get("cls") is None:
+ kwargs["cls"] = self.command_class
+
+ def decorator(f: t.Callable[..., t.Any]) -> Command:
+ cmd: Command = command(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+
+ if func is not None:
+ return decorator(func)
+
+ return decorator
+
+ @t.overload
+ def group(self, __func: t.Callable[..., t.Any]) -> Group: ...
+
+ @t.overload
+ def group(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.Callable[[t.Callable[..., t.Any]], Group]: ...
+
+ def group(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.Callable[[t.Callable[..., t.Any]], Group] | Group:
+ """A shortcut decorator for declaring and attaching a group to
+ the group. This takes the same arguments as :func:`group` and
+ immediately registers the created group with this group by
+ calling :meth:`add_command`.
+
+ To customize the group class used, set the :attr:`group_class`
+ attribute.
+
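+ For example (``cli``, ``db``, and ``migrate`` are illustrative names)::
+
+ @cli.group()
+ def db():
+ pass
+
+ @db.command()
+ def migrate():
+ click.echo("Migrating") # illustrative body
+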
+ .. versionchanged:: 8.1
+ This decorator can be applied without parentheses.
+
+ .. versionchanged:: 8.0
+ Added the :attr:`group_class` attribute.
+ """
+ from .decorators import group
+
+ func: t.Callable[..., t.Any] | None = None
+
+ if args and callable(args[0]):
+ assert len(args) == 1 and not kwargs, (
+ "Use 'group(**kwargs)(callable)' to provide arguments."
+ )
+ (func,) = args
+ args = ()
+
+ if self.group_class is not None and kwargs.get("cls") is None:
+ if self.group_class is type:
+ kwargs["cls"] = type(self)
+ else:
+ kwargs["cls"] = self.group_class
+
+ def decorator(f: t.Callable[..., t.Any]) -> Group:
+ cmd: Group = group(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+
+ if func is not None:
+ return decorator(func)
+
+ return decorator
+
+ def result_callback(self, replace: bool = False) -> t.Callable[[F], F]:
+ """Adds a result callback to the command. By default if a
+ result callback is already registered this will chain them but
+ this can be disabled with the `replace` parameter. The result
+ callback is invoked with the return value of the subcommand
+ (or the list of return values from all subcommands if chaining
+ is enabled) as well as the parameters as they would be passed
+ to the main callback.
+
+ Example::
+
+ @click.group()
+ @click.option('-i', '--input', default=23)
+ def cli(input):
+ return 42
+
+ @cli.result_callback()
+ def process_result(result, input):
+ return result + input
+
+ :param replace: if set to `True` an already existing result
+ callback will be removed.
+
+ .. versionchanged:: 8.0
+ Renamed from ``resultcallback``.
+
+ .. versionadded:: 3.0
+ """
+
+ def decorator(f: F) -> F:
+ old_callback = self._result_callback
+
+ if old_callback is None or replace:
+ self._result_callback = f
+ return f
+
+ def function(value: t.Any, /, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ inner = old_callback(value, *args, **kwargs)
+ return f(inner, *args, **kwargs)
+
+ self._result_callback = rv = update_wrapper(t.cast(F, function), f)
+ return rv # type: ignore[return-value]
+
+ return decorator
+
+ def get_command(self, ctx: Context, cmd_name: str) -> Command | None:
+ """Given a context and a command name, this returns a :class:`Command`
+ object if it exists or returns ``None``.
+ """
+ return self.commands.get(cmd_name)
+
+ def list_commands(self, ctx: Context) -> list[str]:
+ """Returns a list of subcommand names in the order they should appear."""
+ return sorted(self.commands)
+
+ def collect_usage_pieces(self, ctx: Context) -> list[str]:
+ rv = super().collect_usage_pieces(ctx)
+ rv.append(self.subcommand_metavar)
+ return rv
+
+ def format_options(self, ctx: Context, formatter: HelpFormatter) -> None:
+ super().format_options(ctx, formatter)
+ self.format_commands(ctx, formatter)
+
+ def format_commands(self, ctx: Context, formatter: HelpFormatter) -> None:
+ """Extra format methods for multi methods that adds all the commands
+ after the options.
+ """
+ commands = []
+ for subcommand in self.list_commands(ctx):
+ cmd = self.get_command(ctx, subcommand)
+ # list_commands() returned a name that get_command() cannot resolve; skip it.
+ if cmd is None:
+ continue
+ if cmd.hidden:
+ continue
+
+ commands.append((subcommand, cmd))
+
+ # allow for 3 times the default spacing
+ if len(commands):
+ limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
+
+ rows = []
+ for subcommand, cmd in commands:
+ help = cmd.get_short_help_str(limit)
+ rows.append((subcommand, help))
+
+ if rows:
+ with formatter.section(_("Commands")):
+ formatter.write_dl(rows)
+
+ def parse_args(self, ctx: Context, args: list[str]) -> list[str]:
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ raise NoArgsIsHelpError(ctx)
+
+ rest = super().parse_args(ctx, args)
+
+ if self.chain:
+ ctx._protected_args = rest
+ ctx.args = []
+ elif rest:
+ ctx._protected_args, ctx.args = rest[:1], rest[1:]
+
+ return ctx.args
+
+ def invoke(self, ctx: Context) -> t.Any:
+ def _process_result(value: t.Any) -> t.Any:
+ if self._result_callback is not None:
+ value = ctx.invoke(self._result_callback, value, **ctx.params)
+ return value
+
+ if not ctx._protected_args:
+ if self.invoke_without_command:
+ # No subcommand was invoked, so the result callback is
+ # invoked with the group return value for regular
+ # groups, or an empty list for chained groups.
+ with ctx:
+ rv = super().invoke(ctx)
+ return _process_result([] if self.chain else rv)
+ ctx.fail(_("Missing command."))
+
+ # Fetch args back out
+ args = [*ctx._protected_args, *ctx.args]
+ ctx.args = []
+ ctx._protected_args = []
+
+ # If we're not in chain mode, we only allow the invocation of a
+ # single command but we also inform the current context about the
+ # name of the command to invoke.
+ if not self.chain:
+ # Make sure the context is entered so we do not clean up
+ # resources until the result processor has worked.
+ with ctx:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ assert cmd is not None
+ ctx.invoked_subcommand = cmd_name
+ super().invoke(ctx)
+ sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
+ with sub_ctx:
+ return _process_result(sub_ctx.command.invoke(sub_ctx))
+
+ # In chain mode we create the contexts step by step, but after the
+ # base command has been invoked. Because at that point we do not
+ # know the subcommands yet, the invoked subcommand attribute is
+ # set to ``*`` to inform the command that subcommands are executed
+ # but nothing else.
+ with ctx:
+ ctx.invoked_subcommand = "*" if args else None
+ super().invoke(ctx)
+
+ # Otherwise we make every single context and invoke them in a
+ # chain. In that case the return value to the result processor
+ # is the list of all invoked subcommands' results.
+ contexts = []
+ while args:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ assert cmd is not None
+ sub_ctx = cmd.make_context(
+ cmd_name,
+ args,
+ parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False,
+ )
+ contexts.append(sub_ctx)
+ args, sub_ctx.args = sub_ctx.args, []
+
+ rv = []
+ for sub_ctx in contexts:
+ with sub_ctx:
+ rv.append(sub_ctx.command.invoke(sub_ctx))
+ return _process_result(rv)
+
+ def resolve_command(
+ self, ctx: Context, args: list[str]
+ ) -> tuple[str | None, Command | None, list[str]]:
+ cmd_name = make_str(args[0])
+ original_cmd_name = cmd_name
+
+ # Get the command
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we can't find the command but there is a normalization
+ # function available, we try with that one.
+ if cmd is None and ctx.token_normalize_func is not None:
+ cmd_name = ctx.token_normalize_func(cmd_name)
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we don't find the command we want to show an error message
+ # to the user that it was not provided. However, there is
+ # something else we should do: if the first argument looks like
+ # an option we want to kick off parsing again for arguments to
+ # resolve things like --help which should now be handled by the
+ # group itself.
+ if cmd is None and not ctx.resilient_parsing:
+ if _split_opt(cmd_name)[0]:
+ self.parse_args(ctx, args)
+ ctx.fail(_("No such command {name!r}.").format(name=original_cmd_name))
+ return cmd_name if cmd else None, cmd, args[1:]
+
+ def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]:
+ """Return a list of completions for the incomplete value. Looks
+ at the names of options, subcommands, and chained
+ multi-commands.
+
+ :param ctx: Invocation context for this command.
+ :param incomplete: Value being completed. May be empty.
+
+ .. versionadded:: 8.0
+ """
+ from click.shell_completion import CompletionItem
+
+ results = [
+ CompletionItem(name, help=command.get_short_help_str())
+ for name, command in _complete_visible_commands(ctx, incomplete)
+ ]
+ results.extend(super().shell_complete(ctx, incomplete))
+ return results
+
+
+class _MultiCommand(Group, metaclass=_FakeSubclassCheck):
+ """
+ .. deprecated:: 8.2
+ Will be removed in Click 9.0. Use ``Group`` instead.
+ """
+
+
+class CommandCollection(Group):
+ """A :class:`Group` that looks up subcommands on other groups. If a command
+ is not found on this group, each registered source is checked in order.
+ Parameters on a source are not added to this group, and a source's callback
+ is not invoked when invoking its commands. In other words, this "flattens"
+ commands in many groups into this one group.
+
+ :param name: The name of the group command.
+ :param sources: A list of :class:`Group` objects to look up commands from.
+ :param kwargs: Other arguments passed to :class:`Group`.
+
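+ A minimal sketch (``group_a`` and ``group_b`` are illustrative groups)::
+
+ cli = CommandCollection(sources=[group_a, group_b])
+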
+ .. versionchanged:: 8.2
+ This is a subclass of ``Group``. Commands are looked up first on this
+ group, then each of its sources.
+ """
+
+ def __init__(
+ self,
+ name: str | None = None,
+ sources: list[Group] | None = None,
+ **kwargs: t.Any,
+ ) -> None:
+ super().__init__(name, **kwargs)
+ #: The list of registered groups.
+ self.sources: list[Group] = sources or []
+
+ def add_source(self, group: Group) -> None:
+ """Add a group as a source of commands."""
+ self.sources.append(group)
+
+ def get_command(self, ctx: Context, cmd_name: str) -> Command | None:
+ rv = super().get_command(ctx, cmd_name)
+
+ if rv is not None:
+ return rv
+
+ for source in self.sources:
+ rv = source.get_command(ctx, cmd_name)
+
+ if rv is not None:
+ if self.chain:
+ _check_nested_chain(self, cmd_name, rv)
+
+ return rv
+
+ return None
+
+ def list_commands(self, ctx: Context) -> list[str]:
+ rv: set[str] = set(super().list_commands(ctx))
+
+ for source in self.sources:
+ rv.update(source.list_commands(ctx))
+
+ return sorted(rv)
+
+
+def _check_iter(value: t.Any) -> cabc.Iterator[t.Any]:
+ """Check if the value is iterable but not a string. Raises a type
+ error, or return an iterator over the value.
+ """
+ if isinstance(value, str):
+ raise TypeError
+
+ return iter(value)
+
+
+class Parameter:
+ r"""A parameter to a command comes in two versions: they are either
+ :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
+ not supported by design as some of the internals for parsing are
+ intentionally not finalized.
+
+ Some settings are supported by both options and arguments.
+
+ :param param_decls: the parameter declarations for this option or
+ argument. This is a list of flags or argument
+ names.
+ :param type: the type that should be used. Either a :class:`ParamType`
+ or a Python type. The latter is converted into the former
+ automatically if supported.
+ :param required: controls if this is optional or not.
+ :param default: the default value if omitted. This can also be a callable,
+ in which case it's invoked when the default is needed
+ without any arguments.
+ :param callback: A function to further process or validate the value
+ after type conversion. It is called as ``f(ctx, param, value)``
+ and must return the value. It is called for all sources,
+ including prompts.
+ :param nargs: the number of arguments to match. If not ``1`` the return
+ value is a tuple instead of a single value. The default for
+ nargs is ``1`` (except if the type is a tuple, then it's
+ the arity of the tuple). If ``nargs=-1``, all remaining
+ parameters are collected.
+ :param metavar: how the value is represented in the help page.
+ :param expose_value: if this is `True` then the value is passed onwards
+ to the command callback and stored on the context,
+ otherwise it's skipped.
+ :param is_eager: eager values are processed before non-eager ones. This
+ should not be set for arguments or it will invert the
+ order of processing.
+ :param envvar: environment variable(s) that are used to provide a default value for
+ this parameter. This can be a string or a sequence of strings. If a sequence is
+ given, only the first non-empty environment variable is used for the parameter.
+ :param shell_complete: A function that returns custom shell
+ completions. Used instead of the param's type completion if
+ given. Takes ``ctx, param, incomplete`` and must return a list
+ of :class:`~click.shell_completion.CompletionItem` or a list of
+ strings.
+ :param deprecated: If ``True`` or a non-empty string, issues a message
+ indicating that the parameter is deprecated and highlights
+ its deprecation in ``--help``. The message can be customized
+ by using a string as the value. A deprecated parameter
+ cannot be required; a ``ValueError`` is raised otherwise.
+
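+ For example, a sketch combining ``nargs`` and ``envvar`` (all names are
+ illustrative)::
+
+ @click.command()
+ @click.option("--point", nargs=2, type=float, envvar="APP_POINT")
+ def cli(point):
+ click.echo(point) # a tuple of two floats
+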
+ .. versionchanged:: 8.2.0
+ Introduction of ``deprecated``.
+
+ .. versionchanged:: 8.2
+ Adding duplicate parameter names to a :class:`~click.core.Command` will
+ result in a ``UserWarning`` being shown.
+
+ .. versionchanged:: 8.0
+ ``process_value`` validates required parameters and bounded
+ ``nargs``, and invokes the parameter callback before returning
+ the value. This allows the callback to validate prompts.
+ ``full_process_value`` is removed.
+
+ .. versionchanged:: 8.0
+ ``autocompletion`` is renamed to ``shell_complete`` and has new
+ semantics described above. The old name is deprecated and will
+ be removed in 8.1, until then it will be wrapped to match the
+ new requirements.
+
+ .. versionchanged:: 8.0
+ For ``multiple=True, nargs>1``, the default must be a list of
+ tuples.
+
+ .. versionchanged:: 8.0
+ Setting a default is no longer required for ``nargs>1``, it will
+ default to ``None``. ``multiple=True`` or ``nargs=-1`` will
+ default to ``()``.
+
+ .. versionchanged:: 7.1
+ Empty environment variables are ignored rather than taking the
+ empty string value. This makes it possible for scripts to clear
+ variables if they can't unset them.
+
+ .. versionchanged:: 2.0
+ Changed signature for parameter callback to also be passed the
+ parameter. The old callback format will still work, but it will
+ raise a warning to give you a chance to migrate the code easier.
+ """
+
+ param_type_name = "parameter"
+
+ def __init__(
+ self,
+ param_decls: cabc.Sequence[str] | None = None,
+ type: types.ParamType | t.Any | None = None,
+ required: bool = False,
+ # XXX The default historically embeds two concepts:
+ # - the declaration of a Parameter object carrying the default (handy to
+ # arbitrate the default value of coupled Parameters sharing the same
+ # self.name, like flag options),
+ # - and the actual value of the default.
+ # It is confusing and is the source of many issues discussed in:
+ # https://github.com/pallets/click/pull/3030
+ # In the future, we might think of splitting it in two, not unlike
+ # Option.is_flag and Option.flag_value: we could have something like
+ # Parameter.is_default and Parameter.default_value.
+ default: t.Any | t.Callable[[], t.Any] | None = UNSET,
+ callback: t.Callable[[Context, Parameter, t.Any], t.Any] | None = None,
+ nargs: int | None = None,
+ multiple: bool = False,
+ metavar: str | None = None,
+ expose_value: bool = True,
+ is_eager: bool = False,
+ envvar: str | cabc.Sequence[str] | None = None,
+ shell_complete: t.Callable[
+ [Context, Parameter, str], list[CompletionItem] | list[str]
+ ]
+ | None = None,
+ deprecated: bool | str = False,
+ ) -> None:
+ self.name: str | None
+ self.opts: list[str]
+ self.secondary_opts: list[str]
+ self.name, self.opts, self.secondary_opts = self._parse_decls(
+ param_decls or (), expose_value
+ )
+ self.type: types.ParamType = types.convert_type(type, default)
+
+ # Default nargs to what the type tells us if we have that
+ # information available.
+ if nargs is None:
+ if self.type.is_composite:
+ nargs = self.type.arity
+ else:
+ nargs = 1
+
+ self.required = required
+ self.callback = callback
+ self.nargs = nargs
+ self.multiple = multiple
+ self.expose_value = expose_value
+ self.default = default
+ self.is_eager = is_eager
+ self.metavar = metavar
+ self.envvar = envvar
+ self._custom_shell_complete = shell_complete
+ self.deprecated = deprecated
+
+ if __debug__:
+ if self.type.is_composite and nargs != self.type.arity:
+ raise ValueError(
+ f"'nargs' must be {self.type.arity} (or None) for"
+ f" type {self.type!r}, but it was {nargs}."
+ )
+
+ if required and deprecated:
+ raise ValueError(
+ f"The {self.param_type_name} '{self.human_readable_name}' "
+ "is deprecated and still required. A deprecated "
+ f"{self.param_type_name} cannot be required."
+ )
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ """Gather information that could be useful for a tool generating
+ user-facing documentation.
+
+ Use :meth:`click.Context.to_info_dict` to traverse the entire
+ CLI structure.
+
+ .. versionchanged:: 8.3.0
+ Returns ``None`` for the :attr:`default` if it was not set.
+
+ .. versionadded:: 8.0
+ """
+ return {
+ "name": self.name,
+ "param_type_name": self.param_type_name,
+ "opts": self.opts,
+ "secondary_opts": self.secondary_opts,
+ "type": self.type.to_info_dict(),
+ "required": self.required,
+ "nargs": self.nargs,
+ "multiple": self.multiple,
+ # We explicitly hide the :attr:`UNSET` value from the user, as we choose to
+ # make it an implementation detail. And because ``to_info_dict`` has been
+ # designed for documentation purposes, we return ``None`` instead.
+ "default": self.default if self.default is not UNSET else None,
+ "envvar": self.envvar,
+ }
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__} {self.name}>"
+
+ def _parse_decls(
+ self, decls: cabc.Sequence[str], expose_value: bool
+ ) -> tuple[str | None, list[str], list[str]]:
+ raise NotImplementedError()
+
+ @property
+ def human_readable_name(self) -> str:
+ """Returns the human readable name of this parameter. This is the
+ same as the name for options, but the metavar for arguments.
+ """
+ return self.name # type: ignore
+
+ def make_metavar(self, ctx: Context) -> str:
+ if self.metavar is not None:
+ return self.metavar
+
+ metavar = self.type.get_metavar(param=self, ctx=ctx)
+
+ if metavar is None:
+ metavar = self.type.name.upper()
+
+ if self.nargs != 1:
+ metavar += "..."
+
+ return metavar
+
+ @t.overload
+ def get_default(
+ self, ctx: Context, call: t.Literal[True] = True
+ ) -> t.Any | None: ...
+
+ @t.overload
+ def get_default(
+ self, ctx: Context, call: bool = ...
+ ) -> t.Any | t.Callable[[], t.Any] | None: ...
+
+ def get_default(
+ self, ctx: Context, call: bool = True
+ ) -> t.Any | t.Callable[[], t.Any] | None:
+ """Get the default for the parameter. Tries
+ :meth:`Context.lookup_default` first, then the local default.
+
+ :param ctx: Current context.
+ :param call: If the default is a callable, call it. Disable to
+ return the callable instead.
+
+ .. versionchanged:: 8.0.2
+ Type casting is no longer performed when getting a default.
+
+ .. versionchanged:: 8.0.1
+ Type casting can fail in resilient parsing mode. Invalid
+ defaults will not prevent showing help text.
+
+ .. versionchanged:: 8.0
+ Looks at ``ctx.default_map`` first.
+
+ .. versionchanged:: 8.0
+ Added the ``call`` parameter.
+ """
+ value = ctx.lookup_default(self.name, call=False) # type: ignore
+
+ if value is UNSET:
+ value = self.default
+
+ if call and callable(value):
+ value = value()
+
+ return value
+
+ def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None:
+ raise NotImplementedError()
+
+ def consume_value(
+ self, ctx: Context, opts: cabc.Mapping[str, t.Any]
+ ) -> tuple[t.Any, ParameterSource]:
+ """Returns the parameter value produced by the parser.
+
+ If the parser did not produce a value from user input, the value is either
+ sourced from the environment variable, the default map, or the parameter's
+ default value, in that order of precedence.
+
+ If no value is found, an internal sentinel value is returned.
+
+ :meta private:
+ """
+ # Collect from the parser the value passed by the user on the CLI.
+ value = opts.get(self.name, UNSET) # type: ignore
+ # If the value is set, it means it was sourced from the command line by the
+ # parser; otherwise it was left unset.
+ source = (
+ ParameterSource.COMMANDLINE
+ if value is not UNSET
+ else ParameterSource.DEFAULT
+ )
+
+ if value is UNSET:
+ envvar_value = self.value_from_envvar(ctx)
+ if envvar_value is not None:
+ value = envvar_value
+ source = ParameterSource.ENVIRONMENT
+
+ if value is UNSET:
+ default_map_value = ctx.lookup_default(self.name) # type: ignore
+ if default_map_value is not UNSET:
+ value = default_map_value
+ source = ParameterSource.DEFAULT_MAP
+
+ if value is UNSET:
+ default_value = self.get_default(ctx)
+ if default_value is not UNSET:
+ value = default_value
+ source = ParameterSource.DEFAULT
+
+ return value, source
+
+ def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any:
+ """Convert and validate a value against the parameter's
+ :attr:`type`, :attr:`multiple`, and :attr:`nargs`.
+ """
+ if value in (None, UNSET):
+ if self.multiple or self.nargs == -1:
+ return ()
+ else:
+ return value
+
+ def check_iter(value: t.Any) -> cabc.Iterator[t.Any]:
+ try:
+ return _check_iter(value)
+ except TypeError:
+ # This should only happen when passing in args manually,
+ # the parser should construct an iterable when parsing
+ # the command line.
+ raise BadParameter(
+ _("Value must be an iterable."), ctx=ctx, param=self
+ ) from None
+
+ # Define the conversion function based on nargs and type.
+
+ if self.nargs == 1 or self.type.is_composite:
+
+ def convert(value: t.Any) -> t.Any:
+ return self.type(value, param=self, ctx=ctx)
+
+ elif self.nargs == -1:
+
+ def convert(value: t.Any) -> t.Any: # tuple[t.Any, ...]
+ return tuple(self.type(x, self, ctx) for x in check_iter(value))
+
+ else: # nargs > 1
+
+ def convert(value: t.Any) -> t.Any: # tuple[t.Any, ...]
+ value = tuple(check_iter(value))
+
+ if len(value) != self.nargs:
+ raise BadParameter(
+ ngettext(
+ "Takes {nargs} values but 1 was given.",
+ "Takes {nargs} values but {len} were given.",
+ len(value),
+ ).format(nargs=self.nargs, len=len(value)),
+ ctx=ctx,
+ param=self,
+ )
+
+ return tuple(self.type(x, self, ctx) for x in value)
+
+ if self.multiple:
+ return tuple(convert(x) for x in check_iter(value))
+
+ return convert(value)
+
+ def value_is_missing(self, value: t.Any) -> bool:
+ """A value is considered missing if:
+
+ - it is :attr:`UNSET`,
+ - or if it is an empty sequence while the parameter is supposed to have a
+ non-single value (i.e. :attr:`nargs` is not ``1`` or :attr:`multiple` is
+ set).
+
+ :meta private:
+ """
+ if value is UNSET:
+ return True
+
+ if (self.nargs != 1 or self.multiple) and value == ():
+ return True
+
+ return False
+
+ def process_value(self, ctx: Context, value: t.Any) -> t.Any:
+ """Process the value of this parameter:
+
+ 1. Type cast the value using :meth:`type_cast_value`.
+ 2. Check if the value is missing (see: :meth:`value_is_missing`), and raise
+ :exc:`MissingParameter` if it is required.
+ 3. If a :attr:`callback` is set, call it to have the value replaced by the
+ result of the callback. If the value was not set, the callback receives
+ ``None``. This keeps the legacy behavior as it was before the introduction of
+ the :attr:`UNSET` sentinel.
+
+ :meta private:
+ """
+ value = self.type_cast_value(ctx, value)
+
+ if self.required and self.value_is_missing(value):
+ raise MissingParameter(ctx=ctx, param=self)
+
+ if self.callback is not None:
+ # Legacy case: UNSET is not exposed directly to the callback, but converted
+ # to None.
+ if value is UNSET:
+ value = None
+ value = self.callback(ctx, self, value)
+
+ return value
+
+ def resolve_envvar_value(self, ctx: Context) -> str | None:
+ """Returns the value found in the environment variable(s) attached to this
+ parameter.
+
+ Environment variable values are always returned as strings.
+
+ This method returns ``None`` if:
+
+ - the :attr:`envvar` property is not set on the :class:`Parameter`,
+ - the environment variable is not found in the environment,
+ - the variable is found in the environment but its value is empty (i.e. the
+ environment variable is present but has an empty string).
+
+ If :attr:`envvar` is set up with multiple environment variables,
+ then only the first non-empty value is returned.
+
+ .. caution::
+
+ The raw value extracted from the environment is not normalized and is
+ returned as-is. Any normalization or reconciliation is performed later by
+ the :class:`Parameter`'s :attr:`type`.
+
+ :meta private:
+ """
+ if not self.envvar:
+ return None
+
+ if isinstance(self.envvar, str):
+ rv = os.environ.get(self.envvar)
+
+ if rv:
+ return rv
+ else:
+ for envvar in self.envvar:
+ rv = os.environ.get(envvar)
+
+ # Return the first non-empty value of the list of environment variables.
+ if rv:
+ return rv
+ # Otherwise, the absence of a value is interpreted as an environment
+ # variable that is not set, so proceed to the next one.
+
+ return None
+
+ def value_from_envvar(self, ctx: Context) -> str | cabc.Sequence[str] | None:
+ """Process the raw environment variable string for this parameter.
+
+ Returns the string as-is or splits it into a sequence of strings if the
+ parameter is expecting multiple values (i.e. its :attr:`nargs` property is set
+ to a value other than ``1``).
+
+ :meta private:
+ """
+ rv = self.resolve_envvar_value(ctx)
+
+ if rv is not None and self.nargs != 1:
+ return self.type.split_envvar_value(rv)
+
+ return rv
+
+ def handle_parse_result(
+ self, ctx: Context, opts: cabc.Mapping[str, t.Any], args: list[str]
+ ) -> tuple[t.Any, list[str]]:
+ """Process the value produced by the parser from user input.
+
+ Always process the value through the Parameter's :attr:`type`, wherever it
+ comes from.
+
+ If the parameter is deprecated, this method warns the user about it, but only
+ if the value has been explicitly set by the user (and as such, is not coming
+ from a default).
+
+ :meta private:
+ """
+ with augment_usage_errors(ctx, param=self):
+ value, source = self.consume_value(ctx, opts)
+
+ ctx.set_parameter_source(self.name, source) # type: ignore
+
+ # Display a deprecation warning if necessary.
+ if (
+ self.deprecated
+ and value is not UNSET
+ and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP)
+ ):
+ extra_message = (
+ f" {self.deprecated}" if isinstance(self.deprecated, str) else ""
+ )
+ message = _(
+ "DeprecationWarning: The {param_type} {name!r} is deprecated."
+ "{extra_message}"
+ ).format(
+ param_type=self.param_type_name,
+ name=self.human_readable_name,
+ extra_message=extra_message,
+ )
+ echo(style(message, fg="red"), err=True)
+
+ # Process the value through the parameter's type.
+ try:
+ value = self.process_value(ctx, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+ # In resilient parsing mode, we do not want to fail the command if the
+ # value is incompatible with the parameter type, so we reset the value
+ # to UNSET, which will be interpreted as a missing value.
+ value = UNSET
+
+ # Add parameter's value to the context.
+ if (
+ self.expose_value
+ # We skip adding the value if it was previously set by another parameter
+ # targeting the same variable name. This prevents parameters competing for
+ # the same name to override each other.
+ and self.name not in ctx.params
+ ):
+ # Click logically enforces that the name is only None when the parameter is
+ # not to be exposed. We still assert it here to please the type checker.
+ assert self.name is not None, (
+ f"{self!r} parameter's name should not be None when exposing value."
+ )
+ # Normalize UNSET values to None, as we're about to pass them to the
+ # command function and move them to the pure-Python realm of user-written
+ # code.
+ ctx.params[self.name] = value if value is not UNSET else None
+
+ return value, args
+
+ def get_help_record(self, ctx: Context) -> tuple[str, str] | None:
+ pass
+
+ def get_usage_pieces(self, ctx: Context) -> list[str]:
+ return []
+
+ def get_error_hint(self, ctx: Context) -> str:
+ """Get a stringified version of the param for use in error messages to
+ indicate which param caused the error.
+ """
+ hint_list = self.opts or [self.human_readable_name]
+ return " / ".join(f"'{x}'" for x in hint_list)
+
+ def shell_complete(self, ctx: Context, incomplete: str) -> list[CompletionItem]:
+ """Return a list of completions for the incomplete value. If a
+ ``shell_complete`` function was given during init, it is used.
+ Otherwise, the :attr:`type`
+ :meth:`~click.types.ParamType.shell_complete` function is used.
+
+ :param ctx: Invocation context for this command.
+ :param incomplete: Value being completed. May be empty.
+
+ .. versionadded:: 8.0
+ """
+ if self._custom_shell_complete is not None:
+ results = self._custom_shell_complete(ctx, self, incomplete)
+
+ if results and isinstance(results[0], str):
+ from click.shell_completion import CompletionItem
+
+ results = [CompletionItem(c) for c in results]
+
+ return t.cast("list[CompletionItem]", results)
+
+ return self.type.shell_complete(ctx, self, incomplete)
+
+
+class Option(Parameter):
+ """Options are usually optional values on the command line and
+ have some extra features that arguments don't have.
+
+ All other parameters are passed onwards to the parameter constructor.
+
+ :param show_default: Show the default value for this option in its
+ help text. Values are not shown by default, unless
+ :attr:`Context.show_default` is ``True``. If this value is a
+ string, it shows that string in parentheses instead of the
+ actual value. This is particularly useful for dynamic options.
+ For single option boolean flags, the default remains hidden if
+ its value is ``False``.
+ :param show_envvar: Controls if an environment variable should be
+ shown on the help page and error messages.
+ Normally, environment variables are not shown.
+ :param prompt: If set to ``True`` or a non-empty string then the
+ user will be prompted for input. If set to ``True`` the prompt
+ will be the option name capitalized. A deprecated option cannot be
+ prompted.
+ :param confirmation_prompt: Prompt a second time to confirm the
+ value if it was prompted for. Can be set to a string instead of
+ ``True`` to customize the message.
+ :param prompt_required: If set to ``False``, the user will be
+ prompted for input only when the option was specified as a flag
+ without a value.
+ :param hide_input: If this is ``True`` then the input on the prompt
+ will be hidden from the user. This is useful for password input.
+ :param is_flag: forces this option to act as a flag. The default is
+ auto-detection.
+ :param flag_value: which value should be used for this flag if it's
+ enabled. This is set to a boolean automatically if
+ the option string contains a slash to mark two options.
+ :param multiple: if this is set to `True` then the argument is accepted
+ multiple times and recorded. This is similar to ``nargs``
+ in how it works but supports an arbitrary number of
+ arguments.
+ :param count: this flag makes an option increment an integer.
+ :param allow_from_autoenv: if this is enabled then the value of this
+ parameter will be pulled from an environment
+ variable in case a prefix is defined on the
+ context.
+ :param help: the help string.
+ :param hidden: hide this option from help outputs.
+ :param attrs: Other command arguments described in :class:`Parameter`.
+
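+ For example, a sketch of a flag and an option with a visible default (all
+ names are illustrative)::
+
+ @click.command()
+ @click.option("--verbose", "-v", is_flag=True, help="Enable verbose output.")
+ @click.option("--retries", default=3, show_default=True)
+ def cli(verbose, retries):
+ pass
+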
+ .. versionchanged:: 8.2
+ ``envvar`` used with ``flag_value`` will always use the ``flag_value``,
+ previously it would use the value of the environment variable.
+
+ .. versionchanged:: 8.1
+ Help text indentation is cleaned here instead of only in the
+ ``@option`` decorator.
+
+ .. versionchanged:: 8.1
+ The ``show_default`` parameter overrides
+ ``Context.show_default``.
+
+ .. versionchanged:: 8.1
+ The default of a single option boolean flag is not shown if the
+ default value is ``False``.
+
+ .. versionchanged:: 8.0.1
+ ``type`` is detected from ``flag_value`` if given.
+ """
+
+ param_type_name = "option"
+
+ def __init__(
+ self,
+ param_decls: cabc.Sequence[str] | None = None,
+ show_default: bool | str | None = None,
+ prompt: bool | str = False,
+ confirmation_prompt: bool | str = False,
+ prompt_required: bool = True,
+ hide_input: bool = False,
+ is_flag: bool | None = None,
+ flag_value: t.Any = UNSET,
+ multiple: bool = False,
+ count: bool = False,
+ allow_from_autoenv: bool = True,
+ type: types.ParamType | t.Any | None = None,
+ help: str | None = None,
+ hidden: bool = False,
+ show_choices: bool = True,
+ show_envvar: bool = False,
+ deprecated: bool | str = False,
+ **attrs: t.Any,
+ ) -> None:
+ if help:
+ help = inspect.cleandoc(help)
+
+ super().__init__(
+ param_decls, type=type, multiple=multiple, deprecated=deprecated, **attrs
+ )
+
+ if prompt is True:
+ if self.name is None:
+ raise TypeError("'name' is required with 'prompt=True'.")
+
+ prompt_text: str | None = self.name.replace("_", " ").capitalize()
+ elif prompt is False:
+ prompt_text = None
+ else:
+ prompt_text = prompt
+
+ if deprecated:
+ deprecated_message = (
+ f"(DEPRECATED: {deprecated})"
+ if isinstance(deprecated, str)
+ else "(DEPRECATED)"
+ )
+ help = help + deprecated_message if help is not None else deprecated_message
+
+ self.prompt = prompt_text
+ self.confirmation_prompt = confirmation_prompt
+ self.prompt_required = prompt_required
+ self.hide_input = hide_input
+ self.hidden = hidden
+
+ # The _flag_needs_value property tells the parser that this option is a flag
+ # that cannot be used standalone and needs a value. With this information, the
+ # parser can determine whether to consider the next user-provided argument in
+ # the CLI as a value for this flag or as a new option.
+ # If prompt is enabled but not required, then it opens the possibility for the
+ # option to get its value from the user.
+ self._flag_needs_value = self.prompt is not None and not self.prompt_required
+
+ # Auto-detect if this is a flag or not.
+ if is_flag is None:
+ # Implicitly a flag because flag_value was set.
+ if flag_value is not UNSET:
+ is_flag = True
+ # Not a flag, but when used as a flag it shows a prompt.
+ elif self._flag_needs_value:
+ is_flag = False
+ # Implicitly a flag because secondary options names were given.
+ elif self.secondary_opts:
+ is_flag = True
+ # The option is explicitly not a flag. But we do not know yet if it needs a
+ # value or not. So we look at the default value to determine it.
+ elif is_flag is False and not self._flag_needs_value:
+ self._flag_needs_value = self.default is UNSET
+
+ if is_flag:
+ # Set missing default for flags if not explicitly required or prompted.
+ if self.default is UNSET and not self.required and not self.prompt:
+ if multiple:
+ self.default = ()
+
+ # Auto-detect the type of the flag based on the flag_value.
+ if type is None:
+ # A flag without a flag_value is a boolean flag.
+ if flag_value is UNSET:
+ self.type = types.BoolParamType()
+ # If the flag value is a boolean, use BoolParamType.
+ elif isinstance(flag_value, bool):
+ self.type = types.BoolParamType()
+ # Otherwise, guess the type from the flag value.
+ else:
+ self.type = types.convert_type(None, flag_value)
+
+ self.is_flag: bool = bool(is_flag)
+ self.is_bool_flag: bool = bool(
+ is_flag and isinstance(self.type, types.BoolParamType)
+ )
+ self.flag_value: t.Any = flag_value
+
+ # Set boolean flag default to False if unset and not required.
+ if self.is_bool_flag:
+ if self.default is UNSET and not self.required:
+ self.default = False
+
+ # Support the special case of aligning the default value with the flag_value
+ # for flags whose default is explicitly set to True. Note that as long as we
+ # have this condition, there is no way a flag can have a default set to True,
+ # and a flag_value set to something else. Refs:
+ # https://github.com/pallets/click/issues/3024#issuecomment-3146199461
+ # https://github.com/pallets/click/pull/3030/commits/06847da
+ if self.default is True and self.flag_value is not UNSET:
+ self.default = self.flag_value
+
+ # Set the default flag_value if it is not set.
+ if self.flag_value is UNSET:
+ if self.is_flag:
+ self.flag_value = True
+ else:
+ self.flag_value = None
+
+ # Counting.
+ self.count = count
+ if count:
+ if type is None:
+ self.type = types.IntRange(min=0)
+ if self.default is UNSET:
+ self.default = 0
+
+ self.allow_from_autoenv = allow_from_autoenv
+ self.help = help
+ self.show_default = show_default
+ self.show_choices = show_choices
+ self.show_envvar = show_envvar
+
+ if __debug__:
+ if deprecated and prompt:
+ raise ValueError("`deprecated` options cannot use `prompt`.")
+
+ if self.nargs == -1:
+ raise TypeError("nargs=-1 is not supported for options.")
+
+ if not self.is_bool_flag and self.secondary_opts:
+ raise TypeError("Secondary flag is not valid for non-boolean flag.")
+
+ if self.is_bool_flag and self.hide_input and self.prompt is not None:
+ raise TypeError(
+ "'prompt' with 'hide_input' is not valid for boolean flag."
+ )
+
+ if self.count:
+ if self.multiple:
+ raise TypeError("'count' is not valid with 'multiple'.")
+
+ if self.is_flag:
+ raise TypeError("'count' is not valid with 'is_flag'.")
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ """
+ .. versionchanged:: 8.3.0
+ Returns ``None`` for the :attr:`flag_value` if it was not set.
+ """
+ info_dict = super().to_info_dict()
+ info_dict.update(
+ help=self.help,
+ prompt=self.prompt,
+ is_flag=self.is_flag,
+ # We explicitly hide the :attr:`UNSET` value from the user, as we choose to
+ # make it an implementation detail. And because ``to_info_dict`` has been
+ # designed for documentation purposes, we return ``None`` instead.
+ flag_value=self.flag_value if self.flag_value is not UNSET else None,
+ count=self.count,
+ hidden=self.hidden,
+ )
+ return info_dict
+
+ def get_error_hint(self, ctx: Context) -> str:
+ result = super().get_error_hint(ctx)
+ if self.show_envvar and self.envvar is not None:
+ result += f" (env var: '{self.envvar}')"
+ return result
+
+ def _parse_decls(
+ self, decls: cabc.Sequence[str], expose_value: bool
+ ) -> tuple[str | None, list[str], list[str]]:
+ opts = []
+ secondary_opts = []
+ name = None
+ possible_names = []
+
+ for decl in decls:
+ if decl.isidentifier():
+ if name is not None:
+ raise TypeError(f"Name '{name}' defined twice")
+ name = decl
+ else:
+ split_char = ";" if decl[:1] == "/" else "/"
+ if split_char in decl:
+ first, second = decl.split(split_char, 1)
+ first = first.rstrip()
+ if first:
+ possible_names.append(_split_opt(first))
+ opts.append(first)
+ second = second.lstrip()
+ if second:
+ secondary_opts.append(second.lstrip())
+ if first == second:
+ raise ValueError(
+ f"Boolean option {decl!r} cannot use the"
+ " same flag for true/false."
+ )
+ else:
+ possible_names.append(_split_opt(decl))
+ opts.append(decl)
+
+ if name is None and possible_names:
+ possible_names.sort(key=lambda x: -len(x[0])) # group long options first
+ name = possible_names[0][1].replace("-", "_").lower()
+ if not name.isidentifier():
+ name = None
+
+ if name is None:
+ if not expose_value:
+ return None, opts, secondary_opts
+ raise TypeError(
+ f"Could not determine name for option with declarations {decls!r}"
+ )
+
+ if not opts and not secondary_opts:
+ raise TypeError(
+ f"No options defined but a name was passed ({name})."
+ " Did you mean to declare an argument instead? Did"
+ f" you mean to pass '--{name}'?"
+ )
+
+ return name, opts, secondary_opts
+
+ def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None:
+ if self.multiple:
+ action = "append"
+ elif self.count:
+ action = "count"
+ else:
+ action = "store"
+
+ if self.is_flag:
+ action = f"{action}_const"
+
+ if self.is_bool_flag and self.secondary_opts:
+ parser.add_option(
+ obj=self, opts=self.opts, dest=self.name, action=action, const=True
+ )
+ parser.add_option(
+ obj=self,
+ opts=self.secondary_opts,
+ dest=self.name,
+ action=action,
+ const=False,
+ )
+ else:
+ parser.add_option(
+ obj=self,
+ opts=self.opts,
+ dest=self.name,
+ action=action,
+ const=self.flag_value,
+ )
+ else:
+ parser.add_option(
+ obj=self,
+ opts=self.opts,
+ dest=self.name,
+ action=action,
+ nargs=self.nargs,
+ )
+
+ def get_help_record(self, ctx: Context) -> tuple[str, str] | None:
+ if self.hidden:
+ return None
+
+ any_prefix_is_slash = False
+
+ def _write_opts(opts: cabc.Sequence[str]) -> str:
+ nonlocal any_prefix_is_slash
+
+ rv, any_slashes = join_options(opts)
+
+ if any_slashes:
+ any_prefix_is_slash = True
+
+ if not self.is_flag and not self.count:
+ rv += f" {self.make_metavar(ctx=ctx)}"
+
+ return rv
+
+ rv = [_write_opts(self.opts)]
+
+ if self.secondary_opts:
+ rv.append(_write_opts(self.secondary_opts))
+
+ help = self.help or ""
+
+ extra = self.get_help_extra(ctx)
+ extra_items = []
+ if "envvars" in extra:
+ extra_items.append(
+ _("env var: {var}").format(var=", ".join(extra["envvars"]))
+ )
+ if "default" in extra:
+ extra_items.append(_("default: {default}").format(default=extra["default"]))
+ if "range" in extra:
+ extra_items.append(extra["range"])
+ if "required" in extra:
+ extra_items.append(_(extra["required"]))
+
+ if extra_items:
+ extra_str = "; ".join(extra_items)
+ help = f"{help} [{extra_str}]" if help else f"[{extra_str}]"
+
+ return ("; " if any_prefix_is_slash else " / ").join(rv), help
+
+ def get_help_extra(self, ctx: Context) -> types.OptionHelpExtra:
+ extra: types.OptionHelpExtra = {}
+
+ if self.show_envvar:
+ envvar = self.envvar
+
+ if envvar is None:
+ if (
+ self.allow_from_autoenv
+ and ctx.auto_envvar_prefix is not None
+ and self.name is not None
+ ):
+ envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
+
+ if envvar is not None:
+ if isinstance(envvar, str):
+ extra["envvars"] = (envvar,)
+ else:
+ extra["envvars"] = tuple(str(d) for d in envvar)
+
+ # Temporarily enable resilient parsing to avoid type casting
+ # failing for the default. Might be possible to extend this to
+ # help formatting in general.
+ resilient = ctx.resilient_parsing
+ ctx.resilient_parsing = True
+
+ try:
+ default_value = self.get_default(ctx, call=False)
+ finally:
+ ctx.resilient_parsing = resilient
+
+ show_default = False
+ show_default_is_str = False
+
+ if self.show_default is not None:
+ if isinstance(self.show_default, str):
+ show_default_is_str = show_default = True
+ else:
+ show_default = self.show_default
+ elif ctx.show_default is not None:
+ show_default = ctx.show_default
+
+ if show_default_is_str or (
+ show_default and (default_value not in (None, UNSET))
+ ):
+ if show_default_is_str:
+ default_string = f"({self.show_default})"
+ elif isinstance(default_value, (list, tuple)):
+ default_string = ", ".join(str(d) for d in default_value)
+ elif isinstance(default_value, enum.Enum):
+ default_string = default_value.name
+ elif inspect.isfunction(default_value):
+ default_string = _("(dynamic)")
+ elif self.is_bool_flag and self.secondary_opts:
+ # For boolean flags that have distinct True/False opts,
+ # use the opt without prefix instead of the value.
+ default_string = _split_opt(
+ (self.opts if default_value else self.secondary_opts)[0]
+ )[1]
+ elif self.is_bool_flag and not self.secondary_opts and not default_value:
+ default_string = ""
+ elif default_value == "":
+ default_string = '""'
+ else:
+ default_string = str(default_value)
+
+ if default_string:
+ extra["default"] = default_string
+
+ if (
+ isinstance(self.type, types._NumberRangeBase)
+ # skip count with default range type
+ and not (self.count and self.type.min == 0 and self.type.max is None)
+ ):
+ range_str = self.type._describe_range()
+
+ if range_str:
+ extra["range"] = range_str
+
+ if self.required:
+ extra["required"] = "required"
+
+ return extra
+
+ def prompt_for_value(self, ctx: Context) -> t.Any:
+ """This is an alternative flow that can be activated in the full
+ value processing if a value does not exist. It will prompt the
+ user until a valid value exists and then return the processed
+ value as the result.
+ """
+ assert self.prompt is not None
+
+ # Calculate the default before prompting anything to lock in the value before
+ # attempting any user interaction.
+ default = self.get_default(ctx)
+
+ # A boolean flag can use a simplified [y/n] confirmation prompt.
+ if self.is_bool_flag:
+ # If we have no boolean default, we force the user to explicitly provide
+ # one.
+ if default in (UNSET, None):
+ default = None
+ # Nothing prevents you from declaring an option that is simultaneously:
+ # 1) auto-detected as a boolean flag,
+ # 2) allowed to prompt, and
+ # 3) still declare a non-boolean default.
+ # This forced casting into a boolean is necessary to align any non-boolean
+ # default to the prompt, which is going to be a [y/n]-style confirmation
+ # because the option is still a boolean flag. That way, instead of [y/n],
+ # we get [Y/n] or [y/N] depending on the truthy value of the default.
+ # Refs: https://github.com/pallets/click/pull/3030#discussion_r2289180249
+ else:
+ default = bool(default)
+ return confirm(self.prompt, default)
+
+ # If show_default is set to True/False, provide this to `prompt` as well. For
+ # non-bool values of `show_default`, we use `prompt`'s default behavior.
+ prompt_kwargs: t.Any = {}
+ if isinstance(self.show_default, bool):
+ prompt_kwargs["show_default"] = self.show_default
+
+ return prompt(
+ self.prompt,
+ # Use ``None`` to inform the prompt() function to keep prompting until a
+ # valid value is provided by the user if we have no default.
+ default=None if default is UNSET else default,
+ type=self.type,
+ hide_input=self.hide_input,
+ show_choices=self.show_choices,
+ confirmation_prompt=self.confirmation_prompt,
+ value_proc=lambda x: self.process_value(ctx, x),
+ **prompt_kwargs,
+ )
+
+ def resolve_envvar_value(self, ctx: Context) -> str | None:
+ """:class:`Option` resolves its environment variable the same way as
+ :func:`Parameter.resolve_envvar_value`, but it also supports
+ :attr:`Context.auto_envvar_prefix`. If no value could be found via the
+ :attr:`envvar` property, we fall back on :attr:`Context.auto_envvar_prefix`
+ to dynamically build the environment variable name using the
+ ``{ctx.auto_envvar_prefix}_{self.name.upper()}`` template. For example, with
+ ``auto_envvar_prefix="APP"`` and an option named ``debug``, the variable
+ ``APP_DEBUG`` is looked up.
+
+ :meta private:
+ """
+ rv = super().resolve_envvar_value(ctx)
+
+ if rv is not None:
+ return rv
+
+ if (
+ self.allow_from_autoenv
+ and ctx.auto_envvar_prefix is not None
+ and self.name is not None
+ ):
+ envvar = f"{ctx.auto_envvar_prefix}_{self.name.upper()}"
+ rv = os.environ.get(envvar)
+
+ if rv:
+ return rv
+
+ return None
+
+ def value_from_envvar(self, ctx: Context) -> t.Any:
+ """For :class:`Option`, this method processes the raw environment variable
+ string the same way as :func:`Parameter.value_from_envvar` does.
+
+ But in the case of non-boolean flags, the value is analyzed to determine
+ whether the flag is activated, and a boolean indicating its activation (or the
+ :attr:`flag_value`, if the latter is set) is returned.
+
+ This method also takes care of repeated options (i.e. options with
+ :attr:`multiple` set to ``True``).
+
+ :meta private:
+ """
+ rv = self.resolve_envvar_value(ctx)
+
+ # Absent environment variable or an empty string is interpreted as unset.
+ if rv is None:
+ return None
+
+ # Non-boolean flags are more liberal in what they accept. But a flag being a
+ # flag, its envvar value still needs to be analyzed to determine whether the
+ # flag is activated.
+ if self.is_flag and not self.is_bool_flag:
+ # If the flag_value is set and matches the envvar value, return it
+ # directly.
+ if self.flag_value is not UNSET and rv == self.flag_value:
+ return self.flag_value
+ # Analyze the envvar value as a boolean to know if the flag is
+ # activated or not.
+ return types.BoolParamType.str_to_bool(rv)
+
+ # Split the envvar value if it is allowed to be repeated.
+ value_depth = (self.nargs != 1) + bool(self.multiple)
+ if value_depth > 0:
+ multi_rv = self.type.split_envvar_value(rv)
+ if self.multiple and self.nargs != 1:
+ multi_rv = batch(multi_rv, self.nargs) # type: ignore[assignment]
+
+ return multi_rv
+
+ return rv
+
+ def consume_value(
+ self, ctx: Context, opts: cabc.Mapping[str, Parameter]
+ ) -> tuple[t.Any, ParameterSource]:
+ """For :class:`Option`, the value can be collected from an interactive prompt
+ if the option is a flag that needs a value (and the :attr:`prompt` property is
+ set).
+
+ Additionally, this method handles flag options that are activated without a
+ value, in which case the :attr:`flag_value` is returned.
+
+ :meta private:
+ """
+ value, source = super().consume_value(ctx, opts)
+
+ # The parser will emit a sentinel value if the option is allowed to be used
+ # as a flag without a value.
+ if value is FLAG_NEEDS_VALUE:
+ # If the option allows for a prompt, we start an interaction with the user.
+ if self.prompt is not None and not ctx.resilient_parsing:
+ value = self.prompt_for_value(ctx)
+ source = ParameterSource.PROMPT
+ # Else the flag takes its flag_value as value.
+ else:
+ value = self.flag_value
+ source = ParameterSource.COMMANDLINE
+
+ # A flag which is activated always returns the flag value, unless the value
+ # comes from an explicitly set default.
+ elif (
+ self.is_flag
+ and value is True
+ and not self.is_bool_flag
+ and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP)
+ ):
+ value = self.flag_value
+
+ # Re-interpret a multiple option which has been sent as-is by the parser.
+ # Here we replace each occurrence of value-less flags (marked by the
+ # FLAG_NEEDS_VALUE sentinel) with the flag_value.
+ elif (
+ self.multiple
+ and value is not UNSET
+ and source not in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP)
+ and any(v is FLAG_NEEDS_VALUE for v in value)
+ ):
+ value = [self.flag_value if v is FLAG_NEEDS_VALUE else v for v in value]
+ source = ParameterSource.COMMANDLINE
+
+ # If the value wasn't set, or came from the param's default, prompt the user
+ # for one if prompting is enabled.
+ elif (
+ (
+ value is UNSET
+ or source in (ParameterSource.DEFAULT, ParameterSource.DEFAULT_MAP)
+ )
+ and self.prompt is not None
+ and (self.required or self.prompt_required)
+ and not ctx.resilient_parsing
+ ):
+ value = self.prompt_for_value(ctx)
+ source = ParameterSource.PROMPT
+
+ return value, source
+
+ def type_cast_value(self, ctx: Context, value: t.Any) -> t.Any:
+ if self.is_flag and not self.required:
+ if value is UNSET:
+ if self.is_bool_flag:
+ # A boolean flag that was not set defaults to False.
+ value = False
+ return super().type_cast_value(ctx, value)
+
+
+class Argument(Parameter):
+ """Arguments are positional parameters to a command. They generally
+ provide fewer features than options but can have infinite ``nargs``
+ and are required by default.
+
+ All parameters are passed onwards to the constructor of :class:`Parameter`.
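+
+ For example, a variadic source and a single destination (names are
+ illustrative)::
+
+ @click.command()
+ @click.argument("src", nargs=-1)
+ @click.argument("dst", nargs=1)
+ def copy(src, dst):
+ click.echo(f"{src} -> {dst}")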
+ """
+
+ param_type_name = "argument"
+
+ def __init__(
+ self,
+ param_decls: cabc.Sequence[str],
+ required: bool | None = None,
+ **attrs: t.Any,
+ ) -> None:
+ # Auto-detect the requirement status of the argument if not explicitly set.
+ if required is None:
+ # The argument automatically becomes required if it has no explicit default
+ # value set and is set up to match at least one value.
+ if attrs.get("default", UNSET) is UNSET:
+ required = attrs.get("nargs", 1) > 0
+ # If the argument has a default value, it is not required.
+ else:
+ required = False
+
+ if "multiple" in attrs:
+ raise TypeError("__init__() got an unexpected keyword argument 'multiple'.")
+
+ super().__init__(param_decls, required=required, **attrs)
+
+ @property
+ def human_readable_name(self) -> str:
+ if self.metavar is not None:
+ return self.metavar
+ return self.name.upper() # type: ignore
+
+ def make_metavar(self, ctx: Context) -> str:
+ if self.metavar is not None:
+ return self.metavar
+ var = self.type.get_metavar(param=self, ctx=ctx)
+ if not var:
+ var = self.name.upper() # type: ignore
+ if self.deprecated:
+ var += "!"
+ if not self.required:
+ var = f"[{var}]"
+ if self.nargs != 1:
+ var += "..."
+ return var
+
+ def _parse_decls(
+ self, decls: cabc.Sequence[str], expose_value: bool
+ ) -> tuple[str | None, list[str], list[str]]:
+ if not decls:
+ if not expose_value:
+ return None, [], []
+ raise TypeError("Argument is marked as exposed, but does not have a name.")
+ if len(decls) == 1:
+ name = arg = decls[0]
+ name = name.replace("-", "_").lower()
+ else:
+ raise TypeError(
+ "Arguments take exactly one parameter declaration, got"
+ f" {len(decls)}: {decls}."
+ )
+ return name, [arg], []
+
+ def get_usage_pieces(self, ctx: Context) -> list[str]:
+ return [self.make_metavar(ctx)]
+
+ def get_error_hint(self, ctx: Context) -> str:
+ return f"'{self.make_metavar(ctx)}'"
+
+ def add_to_parser(self, parser: _OptionParser, ctx: Context) -> None:
+ parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
+
+
+def __getattr__(name: str) -> object:
+ import warnings
+
+ if name == "BaseCommand":
+ warnings.warn(
+ "'BaseCommand' is deprecated and will be removed in Click 9.0. Use"
+ " 'Command' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _BaseCommand
+
+ if name == "MultiCommand":
+ warnings.warn(
+ "'MultiCommand' is deprecated and will be removed in Click 9.0. Use"
+ " 'Group' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _MultiCommand
+
+ raise AttributeError(name)
diff --git a/tapdown/lib/python3.11/site-packages/click/decorators.py b/tapdown/lib/python3.11/site-packages/click/decorators.py
new file mode 100644
index 0000000..21f4c34
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/decorators.py
@@ -0,0 +1,551 @@
+from __future__ import annotations
+
+import inspect
+import typing as t
+from functools import update_wrapper
+from gettext import gettext as _
+
+from .core import Argument
+from .core import Command
+from .core import Context
+from .core import Group
+from .core import Option
+from .core import Parameter
+from .globals import get_current_context
+from .utils import echo
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ P = te.ParamSpec("P")
+
+R = t.TypeVar("R")
+T = t.TypeVar("T")
+_AnyCallable = t.Callable[..., t.Any]
+FC = t.TypeVar("FC", bound="_AnyCallable | Command")
+
+
+def pass_context(f: t.Callable[te.Concatenate[Context, P], R]) -> t.Callable[P, R]:
+ """Marks a callback as wanting to receive the current context
+ object as first argument.
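+
+ A minimal sketch (``cli`` is an illustrative name)::
+
+ @click.command()
+ @click.pass_context
+ def cli(ctx):
+ click.echo(ctx.info_name)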
+ """
+
+ def new_func(*args: P.args, **kwargs: P.kwargs) -> R:
+ return f(get_current_context(), *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+
+def pass_obj(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]:
+ """Similar to :func:`pass_context`, but only pass the object on the
+ context onwards (:attr:`Context.obj`). This is useful if that object
+ represents the state of a nested system.
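+
+ A sketch, assuming a parent group stored its state on ``ctx.obj``::
+
+ @cli.command() # "cli" is an illustrative parent group
+ @click.pass_obj
+ def show(obj):
+ click.echo(obj)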
+ """
+
+ def new_func(*args: P.args, **kwargs: P.kwargs) -> R:
+ return f(get_current_context().obj, *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+
+def make_pass_decorator(
+ object_type: type[T], ensure: bool = False
+) -> t.Callable[[t.Callable[te.Concatenate[T, P], R]], t.Callable[P, R]]:
+ """Given an object type this creates a decorator that will work
+ similar to :func:`pass_obj` but instead of passing the object of the
+ current context, it will find the innermost context of type
+ :func:`object_type`.
+
+ This generates a decorator that works roughly like this::
+
+ from functools import update_wrapper
+
+ def decorator(f):
+ @pass_context
+ def new_func(ctx, *args, **kwargs):
+ obj = ctx.find_object(object_type)
+ return ctx.invoke(f, obj, *args, **kwargs)
+ return update_wrapper(new_func, f)
+ return decorator
+
+ :param object_type: the type of the object to pass.
+ :param ensure: if set to `True`, a new object will be created and
+ remembered on the context if it's not there yet.
+ """
+
+ def decorator(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]:
+ def new_func(*args: P.args, **kwargs: P.kwargs) -> R:
+ ctx = get_current_context()
+
+ obj: T | None
+ if ensure:
+ obj = ctx.ensure_object(object_type)
+ else:
+ obj = ctx.find_object(object_type)
+
+ if obj is None:
+ raise RuntimeError(
+ "Managed to invoke callback without a context"
+ f" object of type {object_type.__name__!r}"
+ " existing."
+ )
+
+ return ctx.invoke(f, obj, *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+ return decorator
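+
+# Illustrative sketch with a hypothetical ``Repo`` state object:
+#
+#     pass_repo = make_pass_decorator(Repo, ensure=True)
+#
+#     @click.command()
+#     @pass_repo
+#     def show(repo):
+#         click.echo(type(repo).__name__)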
+
+
+def pass_meta_key(
+ key: str, *, doc_description: str | None = None
+) -> t.Callable[[t.Callable[te.Concatenate[T, P], R]], t.Callable[P, R]]:
+ """Create a decorator that passes a key from
+ :attr:`click.Context.meta` as the first argument to the decorated
+ function.
+
+ :param key: Key in ``Context.meta`` to pass.
+ :param doc_description: Description of the object being passed,
+ inserted into the decorator's docstring. Defaults to "the 'key'
+ key from Context.meta".
+
+ .. versionadded:: 8.0
+ """
+
+ def decorator(f: t.Callable[te.Concatenate[T, P], R]) -> t.Callable[P, R]:
+ def new_func(*args: P.args, **kwargs: P.kwargs) -> R:
+ ctx = get_current_context()
+ obj = ctx.meta[key]
+ return ctx.invoke(f, obj, *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+ if doc_description is None:
+ doc_description = f"the {key!r} key from :attr:`click.Context.meta`"
+
+ decorator.__doc__ = (
+ f"Decorator that passes {doc_description} as the first argument"
+ " to the decorated function."
+ )
+ return decorator
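+
+# Illustrative sketch with a hypothetical ``"config"`` meta key:
+#
+#     pass_config = pass_meta_key("config", doc_description="the config object")
+#
+#     @click.command()
+#     @pass_config
+#     def dump(config):
+#         click.echo(repr(config))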
+
+
+CmdType = t.TypeVar("CmdType", bound=Command)
+
+
+# variant: no call, directly as decorator for a function.
+@t.overload
+def command(name: _AnyCallable) -> Command: ...
+
+
+# variant: with positional name and with positional or keyword cls argument:
+# @command(namearg, CommandCls, ...) or @command(namearg, cls=CommandCls, ...)
+@t.overload
+def command(
+ name: str | None,
+ cls: type[CmdType],
+ **attrs: t.Any,
+) -> t.Callable[[_AnyCallable], CmdType]: ...
+
+
+# variant: name omitted, cls _must_ be a keyword argument, @command(cls=CommandCls, ...)
+@t.overload
+def command(
+ name: None = None,
+ *,
+ cls: type[CmdType],
+ **attrs: t.Any,
+) -> t.Callable[[_AnyCallable], CmdType]: ...
+
+
+# variant: with optional string name, no cls argument provided.
+@t.overload
+def command(
+ name: str | None = ..., cls: None = None, **attrs: t.Any
+) -> t.Callable[[_AnyCallable], Command]: ...
+
+
+def command(
+ name: str | _AnyCallable | None = None,
+ cls: type[CmdType] | None = None,
+ **attrs: t.Any,
+) -> Command | t.Callable[[_AnyCallable], Command | CmdType]:
+ r"""Creates a new :class:`Command` and uses the decorated function as
+ callback. This will also automatically attach all decorated
+ :func:`option`\s and :func:`argument`\s as parameters to the command.
+
+ The name of the command defaults to the name of the function, converted to
+ lowercase, with underscores ``_`` replaced by dashes ``-``, and the suffixes
+ ``_command``, ``_cmd``, ``_group``, and ``_grp`` are removed. For example,
+ ``init_data_command`` becomes ``init-data``.
+
+ All keyword arguments are forwarded to the underlying command class.
+ For the ``params`` argument, any decorated params are appended to
+ the end of the list.
+
+ Once decorated the function turns into a :class:`Command` instance
+ that can be invoked as a command line utility or be attached to a
+ command :class:`Group`.
+
+ :param name: The name of the command. Defaults to modifying the function's
+ name as described above.
+ :param cls: The command class to create. Defaults to :class:`Command`.
+
+ .. versionchanged:: 8.2
+ The suffixes ``_command``, ``_cmd``, ``_group``, and ``_grp`` are
+ removed when generating the name.
+
+ .. versionchanged:: 8.1
+ This decorator can be applied without parentheses.
+
+ .. versionchanged:: 8.1
+ The ``params`` argument can be used. Decorated params are
+ appended to the end of the list.
+ """
+
+ func: t.Callable[[_AnyCallable], t.Any] | None = None
+
+ if callable(name):
+ func = name
+ name = None
+ assert cls is None, "Use 'command(cls=cls)(callable)' to specify a class."
+ assert not attrs, "Use 'command(**kwargs)(callable)' to provide arguments."
+
+ if cls is None:
+ cls = t.cast("type[CmdType]", Command)
+
+ def decorator(f: _AnyCallable) -> CmdType:
+ if isinstance(f, Command):
+ raise TypeError("Attempted to convert a callback into a command twice.")
+
+ attr_params = attrs.pop("params", None)
+ params = attr_params if attr_params is not None else []
+
+ try:
+ decorator_params = f.__click_params__ # type: ignore
+ except AttributeError:
+ pass
+ else:
+ del f.__click_params__ # type: ignore
+ params.extend(reversed(decorator_params))
+
+ if attrs.get("help") is None:
+ attrs["help"] = f.__doc__
+
+ if t.TYPE_CHECKING:
+ assert cls is not None
+ assert not callable(name)
+
+ if name is not None:
+ cmd_name = name
+ else:
+ cmd_name = f.__name__.lower().replace("_", "-")
+ cmd_left, sep, suffix = cmd_name.rpartition("-")
+
+ if sep and suffix in {"command", "cmd", "group", "grp"}:
+ cmd_name = cmd_left
+
+ cmd = cls(name=cmd_name, callback=f, params=params, **attrs)
+ cmd.__doc__ = f.__doc__
+ return cmd
+
+ if func is not None:
+ return decorator(func)
+
+ return decorator
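+
+# Illustrative examples of the naming rules above (not executed;
+# ``MyCommand`` is a hypothetical subclass):
+#
+#     @command                  # def init_data_command() -> name "init-data"
+#     @command("run")           # explicit name "run"
+#     @command(cls=MyCommand)   # custom command class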
+
+
+GrpType = t.TypeVar("GrpType", bound=Group)
+
+
+# variant: no call, directly as decorator for a function.
+@t.overload
+def group(name: _AnyCallable) -> Group: ...
+
+
+# variant: with positional name and with positional or keyword cls argument:
+# @group(namearg, GroupCls, ...) or @group(namearg, cls=GroupCls, ...)
+@t.overload
+def group(
+ name: str | None,
+ cls: type[GrpType],
+ **attrs: t.Any,
+) -> t.Callable[[_AnyCallable], GrpType]: ...
+
+
+# variant: name omitted, cls _must_ be a keyword argument, @group(cls=GroupCls, ...)
+@t.overload
+def group(
+ name: None = None,
+ *,
+ cls: type[GrpType],
+ **attrs: t.Any,
+) -> t.Callable[[_AnyCallable], GrpType]: ...
+
+
+# variant: with optional string name, no cls argument provided.
+@t.overload
+def group(
+ name: str | None = ..., cls: None = None, **attrs: t.Any
+) -> t.Callable[[_AnyCallable], Group]: ...
+
+
+def group(
+ name: str | _AnyCallable | None = None,
+ cls: type[GrpType] | None = None,
+ **attrs: t.Any,
+) -> Group | t.Callable[[_AnyCallable], Group | GrpType]:
+ """Creates a new :class:`Group` with a function as callback. This
+ works otherwise the same as :func:`command` just that the `cls`
+ parameter is set to :class:`Group`.
+
+ .. versionchanged:: 8.1
+ This decorator can be applied without parentheses.
+ """
+ if cls is None:
+ cls = t.cast("type[GrpType]", Group)
+
+ if callable(name):
+ return command(cls=cls, **attrs)(name)
+
+ return command(name, cls, **attrs)
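+
+# Illustrative sketch: a group with one subcommand attached.
+#
+#     @click.group()
+#     def cli():
+#         pass
+#
+#     @cli.command()
+#     def build():
+#         click.echo("building")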
+
+
+def _param_memo(f: t.Callable[..., t.Any], param: Parameter) -> None:
+ if isinstance(f, Command):
+ f.params.append(param)
+ else:
+ if not hasattr(f, "__click_params__"):
+ f.__click_params__ = [] # type: ignore
+
+ f.__click_params__.append(param) # type: ignore
+
+
+def argument(
+ *param_decls: str, cls: type[Argument] | None = None, **attrs: t.Any
+) -> t.Callable[[FC], FC]:
+ """Attaches an argument to the command. All positional arguments are
+ passed as parameter declarations to :class:`Argument`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Argument` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ For the default argument class, refer to :class:`Argument` and
+ :class:`Parameter` for descriptions of parameters.
+
+ :param cls: the argument class to instantiate. This defaults to
+ :class:`Argument`.
+ :param param_decls: Passed as positional arguments to the constructor of
+ ``cls``.
+ :param attrs: Passed as keyword arguments to the constructor of ``cls``.
+ """
+ if cls is None:
+ cls = Argument
+
+ def decorator(f: FC) -> FC:
+ _param_memo(f, cls(param_decls, **attrs))
+ return f
+
+ return decorator
+
+
+def option(
+ *param_decls: str, cls: type[Option] | None = None, **attrs: t.Any
+) -> t.Callable[[FC], FC]:
+ """Attaches an option to the command. All positional arguments are
+ passed as parameter declarations to :class:`Option`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Option` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ For the default option class, refer to :class:`Option` and
+ :class:`Parameter` for descriptions of parameters.
+
+ :param cls: the option class to instantiate. This defaults to
+ :class:`Option`.
+ :param param_decls: Passed as positional arguments to the constructor of
+ ``cls``.
+ :param attrs: Passed as keyword arguments to the constructor of ``cls``.
+ """
+ if cls is None:
+ cls = Option
+
+ def decorator(f: FC) -> FC:
+ _param_memo(f, cls(param_decls, **attrs))
+ return f
+
+ return decorator
+
+
+def confirmation_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
+ """Add a ``--yes`` option which shows a prompt before continuing if
+ not passed. If the prompt is declined, the program will exit.
+
+ :param param_decls: One or more option names. Defaults to the single
+ value ``"--yes"``.
+ :param kwargs: Extra arguments are passed to :func:`option`.
+ """
+
+ def callback(ctx: Context, param: Parameter, value: bool) -> None:
+ if not value:
+ ctx.abort()
+
+ if not param_decls:
+ param_decls = ("--yes",)
+
+ kwargs.setdefault("is_flag", True)
+ kwargs.setdefault("callback", callback)
+ kwargs.setdefault("expose_value", False)
+ kwargs.setdefault("prompt", "Do you want to continue?")
+ kwargs.setdefault("help", "Confirm the action without prompting.")
+ return option(*param_decls, **kwargs)
+
+
+def password_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
+ """Add a ``--password`` option which prompts for a password, hiding
+ input and asking to enter the value again for confirmation.
+
+ :param param_decls: One or more option names. Defaults to the single
+ value ``"--password"``.
+ :param kwargs: Extra arguments are passed to :func:`option`.
+ """
+ if not param_decls:
+ param_decls = ("--password",)
+
+ kwargs.setdefault("prompt", True)
+ kwargs.setdefault("confirmation_prompt", True)
+ kwargs.setdefault("hide_input", True)
+ return option(*param_decls, **kwargs)
+
+
+def version_option(
+ version: str | None = None,
+ *param_decls: str,
+ package_name: str | None = None,
+ prog_name: str | None = None,
+ message: str | None = None,
+ **kwargs: t.Any,
+) -> t.Callable[[FC], FC]:
+ """Add a ``--version`` option which immediately prints the version
+ number and exits the program.
+
+ If ``version`` is not provided, Click will try to detect it using
+ :func:`importlib.metadata.version` to get the version for the
+ ``package_name``.
+
+ If ``package_name`` is not provided, Click will try to detect it by
+ inspecting the stack frames. This will be used to detect the
+ version, so it must match the name of the installed package.
+
+ :param version: The version number to show. If not provided, Click
+ will try to detect it.
+ :param param_decls: One or more option names. Defaults to the single
+ value ``"--version"``.
+ :param package_name: The package name to detect the version from. If
+ not provided, Click will try to detect it.
+ :param prog_name: The name of the CLI to show in the message. If not
+ provided, it will be detected from the command.
+ :param message: The message to show. The values ``%(prog)s``,
+ ``%(package)s``, and ``%(version)s`` are available. Defaults to
+ ``"%(prog)s, version %(version)s"``.
+ :param kwargs: Extra arguments are passed to :func:`option`.
+ :raise RuntimeError: ``version`` could not be detected.
+
+ .. versionchanged:: 8.0
+ Add the ``package_name`` parameter, and the ``%(package)s``
+ value for messages.
+
+ .. versionchanged:: 8.0
+ Use :mod:`importlib.metadata` instead of ``pkg_resources``. The
+ version is detected based on the package name, not the entry
+ point name. The Python package name must match the installed
+ package name, or be passed with ``package_name=``.
+ """
+ if message is None:
+ message = _("%(prog)s, version %(version)s")
+
+ if version is None and package_name is None:
+ frame = inspect.currentframe()
+ f_back = frame.f_back if frame is not None else None
+ f_globals = f_back.f_globals if f_back is not None else None
+ # break reference cycle
+ # https://docs.python.org/3/library/inspect.html#the-interpreter-stack
+ del frame
+
+ if f_globals is not None:
+ package_name = f_globals.get("__name__")
+
+ if package_name == "__main__":
+ package_name = f_globals.get("__package__")
+
+ if package_name:
+ package_name = package_name.partition(".")[0]
+
+ def callback(ctx: Context, param: Parameter, value: bool) -> None:
+ if not value or ctx.resilient_parsing:
+ return
+
+ nonlocal prog_name
+ nonlocal version
+
+ if prog_name is None:
+ prog_name = ctx.find_root().info_name
+
+ if version is None and package_name is not None:
+ import importlib.metadata
+
+ try:
+ version = importlib.metadata.version(package_name)
+ except importlib.metadata.PackageNotFoundError:
+ raise RuntimeError(
+ f"{package_name!r} is not installed. Try passing"
+ " 'package_name' instead."
+ ) from None
+
+ if version is None:
+ raise RuntimeError(
+ f"Could not determine the version for {package_name!r} automatically."
+ )
+
+ echo(
+ message % {"prog": prog_name, "package": package_name, "version": version},
+ color=ctx.color,
+ )
+ ctx.exit()
+
+ if not param_decls:
+ param_decls = ("--version",)
+
+ kwargs.setdefault("is_flag", True)
+ kwargs.setdefault("expose_value", False)
+ kwargs.setdefault("is_eager", True)
+ kwargs.setdefault("help", _("Show the version and exit."))
+ kwargs["callback"] = callback
+ return option(*param_decls, **kwargs)
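+
+# Illustrative sketch (hypothetical installed package "mypkg"):
+#
+#     @click.command()
+#     @click.version_option(package_name="mypkg")
+#     def cli():
+#         pass
+#
+# Running ``cli --version`` would print e.g. "cli, version 1.0.0" and exit.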
+
+
+def help_option(*param_decls: str, **kwargs: t.Any) -> t.Callable[[FC], FC]:
+ """Pre-configured ``--help`` option which immediately prints the help page
+ and exits the program.
+
+ :param param_decls: One or more option names. Defaults to the single
+ value ``"--help"``.
+ :param kwargs: Extra arguments are passed to :func:`option`.
+ """
+
+ def show_help(ctx: Context, param: Parameter, value: bool) -> None:
+ """Callback that print the help page on ```` and exits."""
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ if not param_decls:
+ param_decls = ("--help",)
+
+ kwargs.setdefault("is_flag", True)
+ kwargs.setdefault("expose_value", False)
+ kwargs.setdefault("is_eager", True)
+ kwargs.setdefault("help", _("Show this message and exit."))
+ kwargs.setdefault("callback", show_help)
+
+ return option(*param_decls, **kwargs)
diff --git a/tapdown/lib/python3.11/site-packages/click/exceptions.py b/tapdown/lib/python3.11/site-packages/click/exceptions.py
new file mode 100644
index 0000000..4d782ee
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/exceptions.py
@@ -0,0 +1,308 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import typing as t
+from gettext import gettext as _
+from gettext import ngettext
+
+from ._compat import get_text_stderr
+from .globals import resolve_color_default
+from .utils import echo
+from .utils import format_filename
+
+if t.TYPE_CHECKING:
+ from .core import Command
+ from .core import Context
+ from .core import Parameter
+
+
+def _join_param_hints(param_hint: cabc.Sequence[str] | str | None) -> str | None:
+ if param_hint is not None and not isinstance(param_hint, str):
+ return " / ".join(repr(x) for x in param_hint)
+
+ return param_hint
+
+
+class ClickException(Exception):
+ """An exception that Click can handle and show to the user."""
+
+ #: The exit code for this exception.
+ exit_code = 1
+
+ def __init__(self, message: str) -> None:
+ super().__init__(message)
+ # The context will be removed by the time we print the message, so cache
+ # the color settings here to be used later on (in `show`)
+ self.show_color: bool | None = resolve_color_default()
+ self.message = message
+
+ def format_message(self) -> str:
+ return self.message
+
+ def __str__(self) -> str:
+ return self.message
+
+ def show(self, file: t.IO[t.Any] | None = None) -> None:
+ if file is None:
+ file = get_text_stderr()
+
+ echo(
+ _("Error: {message}").format(message=self.format_message()),
+ file=file,
+ color=self.show_color,
+ )
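+
+# Illustrative sketch: raising a ClickException from a command callback
+# prints "Error: <message>" to stderr and exits with code 1.
+#
+#     @click.command()
+#     def run():
+#         raise click.ClickException("something went wrong")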
+
+
+class UsageError(ClickException):
+ """An internal exception that signals a usage error. This typically
+ aborts any further handling.
+
+ :param message: the error message to display.
+ :param ctx: optionally the context that caused this error. Click will
+ fill in the context automatically in some situations.
+ """
+
+ exit_code = 2
+
+ def __init__(self, message: str, ctx: Context | None = None) -> None:
+ super().__init__(message)
+ self.ctx = ctx
+ self.cmd: Command | None = self.ctx.command if self.ctx else None
+
+ def show(self, file: t.IO[t.Any] | None = None) -> None:
+ if file is None:
+ file = get_text_stderr()
+ color = None
+ hint = ""
+ if (
+ self.ctx is not None
+ and self.ctx.command.get_help_option(self.ctx) is not None
+ ):
+ hint = _("Try '{command} {option}' for help.").format(
+ command=self.ctx.command_path, option=self.ctx.help_option_names[0]
+ )
+ hint = f"{hint}\n"
+ if self.ctx is not None:
+ color = self.ctx.color
+ echo(f"{self.ctx.get_usage()}\n{hint}", file=file, color=color)
+ echo(
+ _("Error: {message}").format(message=self.format_message()),
+ file=file,
+ color=color,
+ )
+
+
+class BadParameter(UsageError):
+ """An exception that formats out a standardized error message for a
+ bad parameter. This is useful when thrown from a callback or type as
+ Click will attach contextual information to it (for instance, which
+ parameter it is).
+
+ .. versionadded:: 2.0
+
+ :param param: the parameter object that caused this error. This can
+ be left out, and Click will attach this info itself
+ if possible.
+ :param param_hint: a string that shows up as the parameter name. This
+ can be used as an alternative to `param` in cases
+ where custom validation should happen. If it is
+ a string it's used as such; if it's a list, each
+ item is quoted and separated.
+ """
+
+ def __init__(
+ self,
+ message: str,
+ ctx: Context | None = None,
+ param: Parameter | None = None,
+ param_hint: cabc.Sequence[str] | str | None = None,
+ ) -> None:
+ super().__init__(message, ctx)
+ self.param = param
+ self.param_hint = param_hint
+
+ def format_message(self) -> str:
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.get_error_hint(self.ctx) # type: ignore
+ else:
+ return _("Invalid value: {message}").format(message=self.message)
+
+ return _("Invalid value for {param_hint}: {message}").format(
+ param_hint=_join_param_hints(param_hint), message=self.message
+ )
+
+
+class MissingParameter(BadParameter):
+ """Raised if click required an option or argument but it was not
+ provided when invoking the script.
+
+ .. versionadded:: 4.0
+
+ :param param_type: a string that indicates the type of the parameter.
+ The default is to inherit the parameter type from
+ the given `param`. Valid values are ``'parameter'``,
+ ``'option'`` or ``'argument'``.
+ """
+
+ def __init__(
+ self,
+ message: str | None = None,
+ ctx: Context | None = None,
+ param: Parameter | None = None,
+ param_hint: cabc.Sequence[str] | str | None = None,
+ param_type: str | None = None,
+ ) -> None:
+ super().__init__(message or "", ctx, param, param_hint)
+ self.param_type = param_type
+
+ def format_message(self) -> str:
+ if self.param_hint is not None:
+ param_hint: cabc.Sequence[str] | str | None = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.get_error_hint(self.ctx) # type: ignore
+ else:
+ param_hint = None
+
+ param_hint = _join_param_hints(param_hint)
+ param_hint = f" {param_hint}" if param_hint else ""
+
+ param_type = self.param_type
+ if param_type is None and self.param is not None:
+ param_type = self.param.param_type_name
+
+ msg = self.message
+ if self.param is not None:
+ msg_extra = self.param.type.get_missing_message(
+ param=self.param, ctx=self.ctx
+ )
+ if msg_extra:
+ if msg:
+ msg += f". {msg_extra}"
+ else:
+ msg = msg_extra
+
+ msg = f" {msg}" if msg else ""
+
+ # Translate param_type for known types.
+ if param_type == "argument":
+ missing = _("Missing argument")
+ elif param_type == "option":
+ missing = _("Missing option")
+ elif param_type == "parameter":
+ missing = _("Missing parameter")
+ else:
+ missing = _("Missing {param_type}").format(param_type=param_type)
+
+ return f"{missing}{param_hint}.{msg}"
+
+ def __str__(self) -> str:
+ if not self.message:
+ param_name = self.param.name if self.param else None
+ return _("Missing parameter: {param_name}").format(param_name=param_name)
+ else:
+ return self.message
+
+
+class NoSuchOption(UsageError):
+ """Raised if click attempted to handle an option that does not
+ exist.
+
+ .. versionadded:: 4.0
+ """
+
+ def __init__(
+ self,
+ option_name: str,
+ message: str | None = None,
+ possibilities: cabc.Sequence[str] | None = None,
+ ctx: Context | None = None,
+ ) -> None:
+ if message is None:
+ message = _("No such option: {name}").format(name=option_name)
+
+ super().__init__(message, ctx)
+ self.option_name = option_name
+ self.possibilities = possibilities
+
+ def format_message(self) -> str:
+ if not self.possibilities:
+ return self.message
+
+ possibility_str = ", ".join(sorted(self.possibilities))
+ suggest = ngettext(
+ "Did you mean {possibility}?",
+ "(Possible options: {possibilities})",
+ len(self.possibilities),
+ ).format(possibility=possibility_str, possibilities=possibility_str)
+ return f"{self.message} {suggest}"
+
+
+class BadOptionUsage(UsageError):
+ """Raised if an option is generally supplied but the use of the option
+ was incorrect. This is for instance raised if the number of arguments
+ for an option is not correct.
+
+ .. versionadded:: 4.0
+
+ :param option_name: the name of the option being used incorrectly.
+ """
+
+ def __init__(
+ self, option_name: str, message: str, ctx: Context | None = None
+ ) -> None:
+ super().__init__(message, ctx)
+ self.option_name = option_name
+
+
+class BadArgumentUsage(UsageError):
+ """Raised if an argument is generally supplied but the use of the argument
+ was incorrect. This is for instance raised if the number of values
+ for an argument is not correct.
+
+ .. versionadded:: 6.0
+ """
+
+
+class NoArgsIsHelpError(UsageError):
+ def __init__(self, ctx: Context) -> None:
+ self.ctx: Context
+ super().__init__(ctx.get_help(), ctx=ctx)
+
+ def show(self, file: t.IO[t.Any] | None = None) -> None:
+ echo(self.format_message(), file=file, err=True, color=self.ctx.color)
+
+
+class FileError(ClickException):
+ """Raised if a file cannot be opened."""
+
+ def __init__(self, filename: str, hint: str | None = None) -> None:
+ if hint is None:
+ hint = _("unknown error")
+
+ super().__init__(hint)
+ self.ui_filename: str = format_filename(filename)
+ self.filename = filename
+
+ def format_message(self) -> str:
+ return _("Could not open file {filename!r}: {message}").format(
+ filename=self.ui_filename, message=self.message
+ )
+
+
+class Abort(RuntimeError):
+ """An internal signalling exception that signals Click to abort."""
+
+
+class Exit(RuntimeError):
+ """An exception that indicates that the application should exit with some
+ status code.
+
+ :param code: the status code to exit with.
+ """
+
+ __slots__ = ("exit_code",)
+
+ def __init__(self, code: int = 0) -> None:
+ self.exit_code: int = code
diff --git a/tapdown/lib/python3.11/site-packages/click/formatting.py b/tapdown/lib/python3.11/site-packages/click/formatting.py
new file mode 100644
index 0000000..0b64f83
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/formatting.py
@@ -0,0 +1,301 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+from contextlib import contextmanager
+from gettext import gettext as _
+
+from ._compat import term_len
+from .parser import _split_opt
+
+# Can force a width. This is used by the test system
+FORCED_WIDTH: int | None = None
+
+
+def measure_table(rows: cabc.Iterable[tuple[str, str]]) -> tuple[int, ...]:
+ widths: dict[int, int] = {}
+
+ for row in rows:
+ for idx, col in enumerate(row):
+ widths[idx] = max(widths.get(idx, 0), term_len(col))
+
+ return tuple(y for x, y in sorted(widths.items()))
+
+
+def iter_rows(
+ rows: cabc.Iterable[tuple[str, str]], col_count: int
+) -> cabc.Iterator[tuple[str, ...]]:
+ for row in rows:
+ yield row + ("",) * (col_count - len(row))
+
+
+def wrap_text(
+ text: str,
+ width: int = 78,
+ initial_indent: str = "",
+ subsequent_indent: str = "",
+ preserve_paragraphs: bool = False,
+) -> str:
+ """A helper function that intelligently wraps text. By default, it
+ assumes that it operates on a single paragraph of text but if the
+ `preserve_paragraphs` parameter is provided it will intelligently
+ handle paragraphs (separated by empty lines).
+
+ If paragraphs are handled, a paragraph can be prefixed with an empty
+ line containing the ``\\b`` character (``\\x08``) to indicate that
+ no rewrapping should happen in that block.
+
+ :param text: the text that should be rewrapped.
+ :param width: the maximum width for the text.
+ :param initial_indent: the initial indent that should be placed on the
+ first line as a string.
+ :param subsequent_indent: the indent string that should be placed on
+ each consecutive line.
+ :param preserve_paragraphs: if this flag is set then the wrapping will
+ intelligently handle paragraphs.
+ """
+ from ._textwrap import TextWrapper
+
+ text = text.expandtabs()
+ wrapper = TextWrapper(
+ width,
+ initial_indent=initial_indent,
+ subsequent_indent=subsequent_indent,
+ replace_whitespace=False,
+ )
+ if not preserve_paragraphs:
+ return wrapper.fill(text)
+
+ p: list[tuple[int, bool, str]] = []
+ buf: list[str] = []
+ indent = None
+
+ def _flush_par() -> None:
+ if not buf:
+ return
+ if buf[0].strip() == "\b":
+ p.append((indent or 0, True, "\n".join(buf[1:])))
+ else:
+ p.append((indent or 0, False, " ".join(buf)))
+ del buf[:]
+
+ for line in text.splitlines():
+ if not line:
+ _flush_par()
+ indent = None
+ else:
+ if indent is None:
+ orig_len = term_len(line)
+ line = line.lstrip()
+ indent = orig_len - term_len(line)
+ buf.append(line)
+ _flush_par()
+
+ rv = []
+ for indent, raw, text in p:
+ with wrapper.extra_indent(" " * indent):
+ if raw:
+ rv.append(wrapper.indent_only(text))
+ else:
+ rv.append(wrapper.fill(text))
+
+ return "\n\n".join(rv)
+
+
+class HelpFormatter:
+ """This class helps with formatting text-based help pages. It's
+ usually just needed for very special internal cases, but it's also
+ exposed so that developers can write their own fancy outputs.
+
+ At present, it always writes into memory.
+
+ :param indent_increment: the additional increment for each level.
+ :param width: the width for the text. This defaults to the terminal
+ width clamped to a maximum of 78.
+ """
+
+ def __init__(
+ self,
+ indent_increment: int = 2,
+ width: int | None = None,
+ max_width: int | None = None,
+ ) -> None:
+ self.indent_increment = indent_increment
+ if max_width is None:
+ max_width = 80
+ if width is None:
+ import shutil
+
+ width = FORCED_WIDTH
+ if width is None:
+ width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)
+ self.width = width
+ self.current_indent: int = 0
+ self.buffer: list[str] = []
+
+ def write(self, string: str) -> None:
+ """Writes a unicode string into the internal buffer."""
+ self.buffer.append(string)
+
+ def indent(self) -> None:
+ """Increases the indentation."""
+ self.current_indent += self.indent_increment
+
+ def dedent(self) -> None:
+ """Decreases the indentation."""
+ self.current_indent -= self.indent_increment
+
+ def write_usage(self, prog: str, args: str = "", prefix: str | None = None) -> None:
+ """Writes a usage line into the buffer.
+
+ :param prog: the program name.
+ :param args: whitespace separated list of arguments.
+ :param prefix: The prefix for the first line. Defaults to
+ ``"Usage: "``.
+ """
+ if prefix is None:
+ prefix = f"{_('Usage:')} "
+
+ usage_prefix = f"{prefix:>{self.current_indent}}{prog} "
+ text_width = self.width - self.current_indent
+
+ if text_width >= (term_len(usage_prefix) + 20):
+ # The arguments will fit to the right of the prefix.
+ indent = " " * term_len(usage_prefix)
+ self.write(
+ wrap_text(
+ args,
+ text_width,
+ initial_indent=usage_prefix,
+ subsequent_indent=indent,
+ )
+ )
+ else:
+ # The prefix is too long, put the arguments on the next line.
+ self.write(usage_prefix)
+ self.write("\n")
+ indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
+ self.write(
+ wrap_text(
+ args, text_width, initial_indent=indent, subsequent_indent=indent
+ )
+ )
+
+ self.write("\n")
+
+ def write_heading(self, heading: str) -> None:
+ """Writes a heading into the buffer."""
+ self.write(f"{'':>{self.current_indent}}{heading}:\n")
+
+ def write_paragraph(self) -> None:
+ """Writes a paragraph into the buffer."""
+ if self.buffer:
+ self.write("\n")
+
+ def write_text(self, text: str) -> None:
+ """Writes re-indented text into the buffer. This rewraps and
+ preserves paragraphs.
+ """
+ indent = " " * self.current_indent
+ self.write(
+ wrap_text(
+ text,
+ self.width,
+ initial_indent=indent,
+ subsequent_indent=indent,
+ preserve_paragraphs=True,
+ )
+ )
+ self.write("\n")
+
+ def write_dl(
+ self,
+ rows: cabc.Sequence[tuple[str, str]],
+ col_max: int = 30,
+ col_spacing: int = 2,
+ ) -> None:
+ """Writes a definition list into the buffer. This is how options
+ and commands are usually formatted.
+
+ :param rows: a list of two item tuples for the terms and values.
+ :param col_max: the maximum width of the first column.
+ :param col_spacing: the number of spaces between the first and
+ second column.
+ """
+ rows = list(rows)
+ widths = measure_table(rows)
+ if len(widths) != 2:
+ raise TypeError("Expected two columns for definition list")
+
+ first_col = min(widths[0], col_max) + col_spacing
+
+ for first, second in iter_rows(rows, len(widths)):
+ self.write(f"{'':>{self.current_indent}}{first}")
+ if not second:
+ self.write("\n")
+ continue
+ if term_len(first) <= first_col - col_spacing:
+ self.write(" " * (first_col - term_len(first)))
+ else:
+ self.write("\n")
+ self.write(" " * (first_col + self.current_indent))
+
+ text_width = max(self.width - first_col - 2, 10)
+ wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
+ lines = wrapped_text.splitlines()
+
+ if lines:
+ self.write(f"{lines[0]}\n")
+
+ for line in lines[1:]:
+ self.write(f"{'':>{first_col + self.current_indent}}{line}\n")
+ else:
+ self.write("\n")
+
+ @contextmanager
+ def section(self, name: str) -> cabc.Iterator[None]:
+ """Helpful context manager that writes a paragraph, a heading,
+ and the indents.
+
+ :param name: the section name that is written as heading.
+ """
+ self.write_paragraph()
+ self.write_heading(name)
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ @contextmanager
+ def indentation(self) -> cabc.Iterator[None]:
+ """A context manager that increases the indentation."""
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ def getvalue(self) -> str:
+ """Returns the buffer contents."""
+ return "".join(self.buffer)
+
+
+def join_options(options: cabc.Sequence[str]) -> tuple[str, bool]:
+ """Given a list of option strings this joins them in the most appropriate
+ way and returns them in the form ``(formatted_string,
+ any_prefix_is_slash)`` where the second item in the tuple is a flag that
+ indicates if any of the option prefixes was a slash.
+ """
+ rv = []
+ any_prefix_is_slash = False
+
+ for opt in options:
+ prefix = _split_opt(opt)[0]
+
+ if prefix == "/":
+ any_prefix_is_slash = True
+
+ rv.append((len(prefix), opt))
+
+ rv.sort(key=lambda x: x[0])
+ return ", ".join(x[1] for x in rv), any_prefix_is_slash
diff --git a/tapdown/lib/python3.11/site-packages/click/globals.py b/tapdown/lib/python3.11/site-packages/click/globals.py
new file mode 100644
index 0000000..a2f9172
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/globals.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+import typing as t
+from threading import local
+
+if t.TYPE_CHECKING:
+ from .core import Context
+
+_local = local()
+
+
+@t.overload
+def get_current_context(silent: t.Literal[False] = False) -> Context: ...
+
+
+@t.overload
+def get_current_context(silent: bool = ...) -> Context | None: ...
+
+
+def get_current_context(silent: bool = False) -> Context | None:
+ """Returns the current click context. This can be used as a way to
+ access the current context object from anywhere. This is a more implicit
+ alternative to the :func:`pass_context` decorator. This function is
+ primarily useful for helpers such as :func:`echo` which might be
+ interested in changing its behavior based on the current context.
+
+ To push the current context, :meth:`Context.scope` can be used.
+
+ .. versionadded:: 5.0
+
+ :param silent: if set to `True` the return value is `None` if no context
+ is available. The default behavior is to raise a
+ :exc:`RuntimeError`.
+ """
+ try:
+ return t.cast("Context", _local.stack[-1])
+ except (AttributeError, IndexError) as e:
+ if not silent:
+ raise RuntimeError("There is no active click context.") from e
+
+ return None
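+
+# Illustrative sketch: helpers can look up the active context lazily and
+# fall back gracefully when called outside of a command.
+#
+#     ctx = get_current_context(silent=True)
+#     color = ctx.color if ctx is not None else None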
+
+
+def push_context(ctx: Context) -> None:
+ """Pushes a new context to the current stack."""
+ _local.__dict__.setdefault("stack", []).append(ctx)
+
+
+def pop_context() -> None:
+ """Removes the top level from the stack."""
+ _local.stack.pop()
+
+
+def resolve_color_default(color: bool | None = None) -> bool | None:
+ """Internal helper to get the default value of the color flag. If a
+ value is passed it's returned unchanged, otherwise it's looked up from
+ the current context.
+ """
+ if color is not None:
+ return color
+
+ ctx = get_current_context(silent=True)
+
+ if ctx is not None:
+ return ctx.color
+
+ return None
diff --git a/tapdown/lib/python3.11/site-packages/click/parser.py b/tapdown/lib/python3.11/site-packages/click/parser.py
new file mode 100644
index 0000000..1ea1f71
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/parser.py
@@ -0,0 +1,532 @@
+"""
+This module started out largely as a copy-paste of the stdlib's
+optparse module, with the features removed that we do not need from
+optparse because we implement them in Click at a higher level (for
+instance type handling, help formatting, and a lot more).
+
+The plan is to remove more and more from here over time.
+
+The reason this is a different module and not optparse from the stdlib
+is that there are differences between 2.x and 3.x in the error messages
+generated, and optparse in the stdlib uses gettext for no good reason,
+which might cause us issues.
+
+Click uses parts of optparse written by Gregory P. Ward and maintained
+by the Python Software Foundation. This is limited to code in parser.py.
+
+Copyright 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright 2002-2006 Python Software Foundation. All rights reserved.
+"""
+
+# This code uses parts of optparse written by Gregory P. Ward and
+# maintained by the Python Software Foundation.
+# Copyright 2001-2006 Gregory P. Ward
+# Copyright 2002-2006 Python Software Foundation
+from __future__ import annotations
+
+import collections.abc as cabc
+import typing as t
+from collections import deque
+from gettext import gettext as _
+from gettext import ngettext
+
+from ._utils import FLAG_NEEDS_VALUE
+from ._utils import UNSET
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+
+if t.TYPE_CHECKING:
+ from ._utils import T_FLAG_NEEDS_VALUE
+ from ._utils import T_UNSET
+ from .core import Argument as CoreArgument
+ from .core import Context
+ from .core import Option as CoreOption
+ from .core import Parameter as CoreParameter
+
+V = t.TypeVar("V")
+
+
+def _unpack_args(
+ args: cabc.Sequence[str], nargs_spec: cabc.Sequence[int]
+) -> tuple[cabc.Sequence[str | cabc.Sequence[str | None] | None], list[str]]:
+ """Given an iterable of arguments and an iterable of nargs specifications,
+ it returns a tuple with all the unpacked arguments at the first index
+ and all remaining arguments as the second.
+
+ The nargs specification is the number of arguments that should be consumed
+ or `-1` to indicate that this position should eat up all the remainders.
+
+ Missing items are filled with ``UNSET``.
+ """
+ args = deque(args)
+ nargs_spec = deque(nargs_spec)
+ rv: list[str | tuple[str | T_UNSET, ...] | T_UNSET] = []
+ spos: int | None = None
+
+ def _fetch(c: deque[V]) -> V | T_UNSET:
+ try:
+ if spos is None:
+ return c.popleft()
+ else:
+ return c.pop()
+ except IndexError:
+ return UNSET
+
+ while nargs_spec:
+ nargs = _fetch(nargs_spec)
+
+ if nargs is None:
+ continue
+
+ if nargs == 1:
+ rv.append(_fetch(args)) # type: ignore[arg-type]
+ elif nargs > 1:
+ x = [_fetch(args) for _ in range(nargs)]
+
+ # If we're reversed, we're pulling in the arguments in reverse,
+ # so we need to turn them around.
+ if spos is not None:
+ x.reverse()
+
+ rv.append(tuple(x))
+ elif nargs < 0:
+ if spos is not None:
+ raise TypeError("Cannot have two nargs < 0")
+
+ spos = len(rv)
+ rv.append(UNSET)
+
+ # spos is the position of the wildcard (star). If it's not `None`,
+ # we fill it with the remainder.
+ if spos is not None:
+ rv[spos] = tuple(args)
+ args = []
+ rv[spos + 1 :] = reversed(rv[spos + 1 :])
+
+ return tuple(rv), list(args)
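+
+# Illustrative example (not executed): one greedy spec in the middle
+# pulls its trailing values from the right.
+#
+#     _unpack_args(["a", "b", "c", "d"], [1, -1, 1])
+#     # -> (("a", ("b", "c"), "d"), [])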
+
+
+def _split_opt(opt: str) -> tuple[str, str]:
+ first = opt[:1]
+ if first.isalnum():
+ return "", opt
+ if opt[1:2] == first:
+ return opt[:2], opt[2:]
+ return first, opt[1:]
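+
+# Illustrative examples (not executed):
+#
+#     _split_opt("--foo")   # -> ("--", "foo")
+#     _split_opt("-f")      # -> ("-", "f")
+#     _split_opt("/debug")  # -> ("/", "debug")
+#     _split_opt("foo")     # -> ("", "foo")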
+
+
+def _normalize_opt(opt: str, ctx: Context | None) -> str:
+ if ctx is None or ctx.token_normalize_func is None:
+ return opt
+ prefix, opt = _split_opt(opt)
+ return f"{prefix}{ctx.token_normalize_func(opt)}"
+
+
+class _Option:
+ def __init__(
+ self,
+ obj: CoreOption,
+ opts: cabc.Sequence[str],
+ dest: str | None,
+ action: str | None = None,
+ nargs: int = 1,
+ const: t.Any | None = None,
+ ):
+ self._short_opts = []
+ self._long_opts = []
+ self.prefixes: set[str] = set()
+
+ for opt in opts:
+ prefix, value = _split_opt(opt)
+ if not prefix:
+ raise ValueError(f"Invalid start character for option ({opt})")
+ self.prefixes.add(prefix[0])
+ if len(prefix) == 1 and len(value) == 1:
+ self._short_opts.append(opt)
+ else:
+ self._long_opts.append(opt)
+ self.prefixes.add(prefix)
+
+ if action is None:
+ action = "store"
+
+ self.dest = dest
+ self.action = action
+ self.nargs = nargs
+ self.const = const
+ self.obj = obj
+
+ @property
+ def takes_value(self) -> bool:
+ return self.action in ("store", "append")
+
+ def process(self, value: t.Any, state: _ParsingState) -> None:
+ if self.action == "store":
+ state.opts[self.dest] = value # type: ignore
+ elif self.action == "store_const":
+ state.opts[self.dest] = self.const # type: ignore
+ elif self.action == "append":
+ state.opts.setdefault(self.dest, []).append(value) # type: ignore
+ elif self.action == "append_const":
+ state.opts.setdefault(self.dest, []).append(self.const) # type: ignore
+ elif self.action == "count":
+ state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 # type: ignore
+ else:
+ raise ValueError(f"unknown action '{self.action}'")
+ state.order.append(self.obj)
+
+
+class _Argument:
+ def __init__(self, obj: CoreArgument, dest: str | None, nargs: int = 1):
+ self.dest = dest
+ self.nargs = nargs
+ self.obj = obj
+
+ def process(
+ self,
+ value: str | cabc.Sequence[str | None] | None | T_UNSET,
+ state: _ParsingState,
+ ) -> None:
+ if self.nargs > 1:
+ assert isinstance(value, cabc.Sequence)
+ holes = sum(1 for x in value if x is UNSET)
+ if holes == len(value):
+ value = UNSET
+ elif holes != 0:
+ raise BadArgumentUsage(
+ _("Argument {name!r} takes {nargs} values.").format(
+ name=self.dest, nargs=self.nargs
+ )
+ )
+
+ # We failed to collect any argument value so we consider the argument as unset.
+ if value == ():
+ value = UNSET
+
+ state.opts[self.dest] = value # type: ignore
+ state.order.append(self.obj)
+
+
+class _ParsingState:
+ def __init__(self, rargs: list[str]) -> None:
+ self.opts: dict[str, t.Any] = {}
+ self.largs: list[str] = []
+ self.rargs = rargs
+ self.order: list[CoreParameter] = []
+
+
+class _OptionParser:
+ """The option parser is an internal class that is ultimately used to
+ parse options and arguments. It's modelled after optparse and brings
+ a similar but vastly simplified API. It should generally not be used
+ directly as the high level Click classes wrap it for you.
+
+ It's not nearly as extensible as optparse or argparse as it does not
+ implement features that are implemented on a higher level (such as
+ types or defaults).
+
+ :param ctx: optionally the :class:`~click.Context` that this parser
+ is used with.
+
+ .. deprecated:: 8.2
+ Will be removed in Click 9.0.
+ """
+
+ def __init__(self, ctx: Context | None = None) -> None:
+ #: The :class:`~click.Context` for this parser. This might be
+ #: `None` for some advanced use cases.
+ self.ctx = ctx
+ #: This controls how the parser deals with interspersed arguments.
+ #: If this is set to `False`, the parser will stop on the first
+ #: non-option. Click uses this to implement nested subcommands
+ #: safely.
+ self.allow_interspersed_args: bool = True
+ #: This tells the parser how to deal with unknown options. By
+ #: default it will error out (which is sensible), but there is a
+ #: second mode where it will ignore it and continue processing
+ #: after shifting all the unknown options into the resulting args.
+ self.ignore_unknown_options: bool = False
+
+ if ctx is not None:
+ self.allow_interspersed_args = ctx.allow_interspersed_args
+ self.ignore_unknown_options = ctx.ignore_unknown_options
+
+ self._short_opt: dict[str, _Option] = {}
+ self._long_opt: dict[str, _Option] = {}
+ self._opt_prefixes = {"-", "--"}
+ self._args: list[_Argument] = []
+
+ def add_option(
+ self,
+ obj: CoreOption,
+ opts: cabc.Sequence[str],
+ dest: str | None,
+ action: str | None = None,
+ nargs: int = 1,
+ const: t.Any | None = None,
+ ) -> None:
+ """Adds a new option named `dest` to the parser. The destination
+ is not inferred (unlike with optparse) and needs to be explicitly
+ provided. Action can be any of ``store``, ``store_const``,
+ ``append``, ``append_const`` or ``count``.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ opts = [_normalize_opt(opt, self.ctx) for opt in opts]
+ option = _Option(obj, opts, dest, action=action, nargs=nargs, const=const)
+ self._opt_prefixes.update(option.prefixes)
+ for opt in option._short_opts:
+ self._short_opt[opt] = option
+ for opt in option._long_opts:
+ self._long_opt[opt] = option
+
+ def add_argument(self, obj: CoreArgument, dest: str | None, nargs: int = 1) -> None:
+ """Adds a positional argument named `dest` to the parser.
+
+ The `obj` can be used to identify the argument in the order list
+ that is returned from the parser.
+ """
+ self._args.append(_Argument(obj, dest=dest, nargs=nargs))
+
+ def parse_args(
+ self, args: list[str]
+ ) -> tuple[dict[str, t.Any], list[str], list[CoreParameter]]:
+ """Parses positional arguments and returns ``(values, args, order)``
+ for the parsed options and arguments as well as the leftover
+ arguments if there are any. The order is a list of objects as they
+ appear on the command line. If arguments appear multiple times they
+ will be memorized multiple times as well.
+ """
+ state = _ParsingState(args)
+ try:
+ self._process_args_for_options(state)
+ self._process_args_for_args(state)
+ except UsageError:
+ if self.ctx is None or not self.ctx.resilient_parsing:
+ raise
+ return state.opts, state.largs, state.order
+
+ def _process_args_for_args(self, state: _ParsingState) -> None:
+ pargs, args = _unpack_args(
+ state.largs + state.rargs, [x.nargs for x in self._args]
+ )
+
+ for idx, arg in enumerate(self._args):
+ arg.process(pargs[idx], state)
+
+ state.largs = args
+ state.rargs = []
+
+ def _process_args_for_options(self, state: _ParsingState) -> None:
+ while state.rargs:
+ arg = state.rargs.pop(0)
+ arglen = len(arg)
+ # Double dashes are always handled explicitly regardless of what
+ # prefixes are valid.
+ if arg == "--":
+ return
+ elif arg[:1] in self._opt_prefixes and arglen > 1:
+ self._process_opts(arg, state)
+ elif self.allow_interspersed_args:
+ state.largs.append(arg)
+ else:
+ state.rargs.insert(0, arg)
+ return
+
+ # Say this is the original argument list:
+ # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+ # ^
+ # (we are about to process arg(i)).
+ #
+ # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+ # [arg0, ..., arg(i-1)] (any options and their arguments will have
+ # been removed from largs).
+ #
+ # The while loop will usually consume 1 or more arguments per pass.
+ # If it consumes 1 (eg. arg is an option that takes no arguments),
+ # then after _process_arg() is done the situation is:
+ #
+ # largs = subset of [arg0, ..., arg(i)]
+ # rargs = [arg(i+1), ..., arg(N-1)]
+ #
+ # If allow_interspersed_args is false, largs will always be
+ # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+ # not a very interesting subset!
+
+ def _match_long_opt(
+ self, opt: str, explicit_value: str | None, state: _ParsingState
+ ) -> None:
+ if opt not in self._long_opt:
+ from difflib import get_close_matches
+
+ possibilities = get_close_matches(opt, self._long_opt)
+ raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
+
+ option = self._long_opt[opt]
+ if option.takes_value:
+ # At this point it's safe to modify rargs by injecting the
+ # explicit value, because no exception is raised in this
+ # branch. This means that the inserted value will be fully
+ # consumed.
+ if explicit_value is not None:
+ state.rargs.insert(0, explicit_value)
+
+ value = self._get_value_from_state(opt, option, state)
+
+ elif explicit_value is not None:
+ raise BadOptionUsage(
+ opt, _("Option {name!r} does not take a value.").format(name=opt)
+ )
+
+ else:
+ value = UNSET
+
+ option.process(value, state)
+
+ def _match_short_opt(self, arg: str, state: _ParsingState) -> None:
+ stop = False
+ i = 1
+ prefix = arg[0]
+ unknown_options = []
+
+ for ch in arg[1:]:
+ opt = _normalize_opt(f"{prefix}{ch}", self.ctx)
+ option = self._short_opt.get(opt)
+ i += 1
+
+ if not option:
+ if self.ignore_unknown_options:
+ unknown_options.append(ch)
+ continue
+ raise NoSuchOption(opt, ctx=self.ctx)
+ if option.takes_value:
+ # Any characters left in arg? Pretend they're the
+ # next arg, and stop consuming characters of arg.
+ if i < len(arg):
+ state.rargs.insert(0, arg[i:])
+ stop = True
+
+ value = self._get_value_from_state(opt, option, state)
+
+ else:
+ value = UNSET
+
+ option.process(value, state)
+
+ if stop:
+ break
+
+ # If we got any unknown options we recombine the string of the
+ # remaining options and re-attach the prefix, then report that
+ # to the state as a new larg. This way basic option combining
+ # still works while unknown arguments are ignored.
+ if self.ignore_unknown_options and unknown_options:
+ state.largs.append(f"{prefix}{''.join(unknown_options)}")
+
+ def _get_value_from_state(
+ self, option_name: str, option: _Option, state: _ParsingState
+ ) -> str | cabc.Sequence[str] | T_FLAG_NEEDS_VALUE:
+ nargs = option.nargs
+
+ value: str | cabc.Sequence[str] | T_FLAG_NEEDS_VALUE
+
+ if len(state.rargs) < nargs:
+ if option.obj._flag_needs_value:
+ # Option allows omitting the value.
+ value = FLAG_NEEDS_VALUE
+ else:
+ raise BadOptionUsage(
+ option_name,
+ ngettext(
+ "Option {name!r} requires an argument.",
+ "Option {name!r} requires {nargs} arguments.",
+ nargs,
+ ).format(name=option_name, nargs=nargs),
+ )
+ elif nargs == 1:
+ next_rarg = state.rargs[0]
+
+ if (
+ option.obj._flag_needs_value
+ and isinstance(next_rarg, str)
+ and next_rarg[:1] in self._opt_prefixes
+ and len(next_rarg) > 1
+ ):
+ # The next arg looks like the start of an option, don't
+ # use it as the value if omitting the value is allowed.
+ value = FLAG_NEEDS_VALUE
+ else:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ return value
+
+ def _process_opts(self, arg: str, state: _ParsingState) -> None:
+ explicit_value = None
+ # Long option handling happens in two parts. The first part is
+ # supporting explicitly attached values. In any case, we will try
+ # to long match the option first.
+ if "=" in arg:
+ long_opt, explicit_value = arg.split("=", 1)
+ else:
+ long_opt = arg
+ norm_long_opt = _normalize_opt(long_opt, self.ctx)
+
+ # At this point we will match the (assumed) long option through
+ # the long option matching code. Note that this allows options
+ # like "-foo" to be matched as long options.
+ try:
+ self._match_long_opt(norm_long_opt, explicit_value, state)
+ except NoSuchOption:
+ # At this point the long option matching failed, and we need
+ # to try with short options. However there is a special rule
+ # which says, that if we have a two character options prefix
+ # (applies to "--foo" for instance), we do not dispatch to the
+ # short option code and will instead raise the no option
+ # error.
+ if arg[:2] not in self._opt_prefixes:
+ self._match_short_opt(arg, state)
+ return
+
+ if not self.ignore_unknown_options:
+ raise
+
+ state.largs.append(arg)
+
+
+def __getattr__(name: str) -> object:
+ import warnings
+
+ if name in {
+ "OptionParser",
+ "Argument",
+ "Option",
+ "split_opt",
+ "normalize_opt",
+ "ParsingState",
+ }:
+ warnings.warn(
+ f"'parser.{name}' is deprecated and will be removed in Click 9.0."
+ " The old parser is available in 'optparse'.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return globals()[f"_{name}"]
+
+ if name == "split_arg_string":
+ from .shell_completion import split_arg_string
+
+ warnings.warn(
+ "Importing 'parser.split_arg_string' is deprecated, it will only be"
+ " available in 'shell_completion' in Click 9.0.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return split_arg_string
+
+ raise AttributeError(name)
diff --git a/tapdown/lib/python3.11/site-packages/click/py.typed b/tapdown/lib/python3.11/site-packages/click/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/click/shell_completion.py b/tapdown/lib/python3.11/site-packages/click/shell_completion.py
new file mode 100644
index 0000000..8f1564c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/shell_completion.py
@@ -0,0 +1,667 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import os
+import re
+import typing as t
+from gettext import gettext as _
+
+from .core import Argument
+from .core import Command
+from .core import Context
+from .core import Group
+from .core import Option
+from .core import Parameter
+from .core import ParameterSource
+from .utils import echo
+
+
+def shell_complete(
+ cli: Command,
+ ctx_args: cabc.MutableMapping[str, t.Any],
+ prog_name: str,
+ complete_var: str,
+ instruction: str,
+) -> int:
+ """Perform shell completion for the given CLI program.
+
+ :param cli: Command being called.
+ :param ctx_args: Extra arguments to pass to
+ ``cli.make_context``.
+ :param prog_name: Name of the executable in the shell.
+ :param complete_var: Name of the environment variable that holds
+ the completion instruction.
+ :param instruction: Value of ``complete_var`` with the completion
+ instruction and shell, in the form ``shell_instruction``
+ (for example ``bash_complete``).
+ :return: Status code to exit with.
+ """
+ shell, _, instruction = instruction.partition("_")
+ comp_cls = get_completion_class(shell)
+
+ if comp_cls is None:
+ return 1
+
+ comp = comp_cls(cli, ctx_args, prog_name, complete_var)
+
+ if instruction == "source":
+ echo(comp.source())
+ return 0
+
+ if instruction == "complete":
+ echo(comp.complete())
+ return 0
+
+ return 1
+
+
+class CompletionItem:
+ """Represents a completion value and metadata about the value. The
+ default metadata is ``type`` to indicate special shell handling,
+ and ``help`` if a shell supports showing a help string next to the
+ value.
+
+ Arbitrary parameters can be passed when creating the object, and
+ accessed using ``item.attr``. If an attribute wasn't passed,
+ accessing it returns ``None``.
+
+ :param value: The completion suggestion.
+ :param type: Tells the shell script to provide special completion
+ support for the type. Click uses ``"dir"`` and ``"file"``.
+ :param help: String shown next to the value if supported.
+ :param kwargs: Arbitrary metadata. The built-in implementations
+ don't use this, but custom type completions paired with custom
+ shell support could use it.
+ """
+
+ __slots__ = ("value", "type", "help", "_info")
+
+ def __init__(
+ self,
+ value: t.Any,
+ type: str = "plain",
+ help: str | None = None,
+ **kwargs: t.Any,
+ ) -> None:
+ self.value: t.Any = value
+ self.type: str = type
+ self.help: str | None = help
+ self._info = kwargs
+
+ def __getattr__(self, name: str) -> t.Any:
+ return self._info.get(name)
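+
+# Illustrative sketch: arbitrary metadata round-trips through __getattr__.
+#
+#     item = CompletionItem("build", help="Build the project", priority=1)
+#     item.priority  # -> 1 (custom key passed above)
+#     item.missing   # -> None (unset keys return None)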
+
+
+# Only Bash >= 4.4 has the nosort option.
+_SOURCE_BASH = """\
+%(complete_func)s() {
+ local IFS=$'\\n'
+ local response
+
+ response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \
+%(complete_var)s=bash_complete $1)
+
+ for completion in $response; do
+ IFS=',' read type value <<< "$completion"
+
+ if [[ $type == 'dir' ]]; then
+ COMPREPLY=()
+ compopt -o dirnames
+ elif [[ $type == 'file' ]]; then
+ COMPREPLY=()
+ compopt -o default
+ elif [[ $type == 'plain' ]]; then
+ COMPREPLY+=($value)
+ fi
+ done
+
+ return 0
+}
+
+%(complete_func)s_setup() {
+ complete -o nosort -F %(complete_func)s %(prog_name)s
+}
+
+%(complete_func)s_setup;
+"""
+
+# See ZshComplete.format_completion below, and issue #2703, before
+# changing this script.
+#
+# (TL;DR: _describe is picky about the format, but this Zsh script snippet
+# is already widely deployed. So freeze this script, and use clever-ish
+# handling of colons in ZshComplete.format_completion.)
+_SOURCE_ZSH = """\
+#compdef %(prog_name)s
+
+%(complete_func)s() {
+ local -a completions
+ local -a completions_with_descriptions
+ local -a response
+ (( ! $+commands[%(prog_name)s] )) && return 1
+
+ response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \
+%(complete_var)s=zsh_complete %(prog_name)s)}")
+
+ for type key descr in ${response}; do
+ if [[ "$type" == "plain" ]]; then
+ if [[ "$descr" == "_" ]]; then
+ completions+=("$key")
+ else
+ completions_with_descriptions+=("$key":"$descr")
+ fi
+ elif [[ "$type" == "dir" ]]; then
+ _path_files -/
+ elif [[ "$type" == "file" ]]; then
+ _path_files -f
+ fi
+ done
+
+ if [ -n "$completions_with_descriptions" ]; then
+ _describe -V unsorted completions_with_descriptions -U
+ fi
+
+ if [ -n "$completions" ]; then
+ compadd -U -V unsorted -a completions
+ fi
+}
+
+if [[ $zsh_eval_context[-1] == loadautofunc ]]; then
+ # autoload from fpath, call function directly
+ %(complete_func)s "$@"
+else
+ # eval/source/. command, register function for later
+ compdef %(complete_func)s %(prog_name)s
+fi
+"""
+
+_SOURCE_FISH = """\
+function %(complete_func)s;
+ set -l response (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \
+COMP_CWORD=(commandline -t) %(prog_name)s);
+
+ for completion in $response;
+ set -l metadata (string split "," $completion);
+
+ if test $metadata[1] = "dir";
+ __fish_complete_directories $metadata[2];
+ else if test $metadata[1] = "file";
+ __fish_complete_path $metadata[2];
+ else if test $metadata[1] = "plain";
+ echo $metadata[2];
+ end;
+ end;
+end;
+
+complete --no-files --command %(prog_name)s --arguments \
+"(%(complete_func)s)";
+"""
+
+
+class ShellComplete:
+ """Base class for providing shell completion support. A subclass for
+ a given shell will override attributes and methods to implement the
+ completion instructions (``source`` and ``complete``).
+
+ :param cli: Command being called.
+ :param prog_name: Name of the executable in the shell.
+ :param complete_var: Name of the environment variable that holds
+ the completion instruction.
+
+ .. versionadded:: 8.0
+ """
+
+ name: t.ClassVar[str]
+ """Name to register the shell as with :func:`add_completion_class`.
+ This is used in completion instructions (``{name}_source`` and
+ ``{name}_complete``).
+ """
+
+ source_template: t.ClassVar[str]
+ """Completion script template formatted by :meth:`source`. This must
+ be provided by subclasses.
+ """
+
+ def __init__(
+ self,
+ cli: Command,
+ ctx_args: cabc.MutableMapping[str, t.Any],
+ prog_name: str,
+ complete_var: str,
+ ) -> None:
+ self.cli = cli
+ self.ctx_args = ctx_args
+ self.prog_name = prog_name
+ self.complete_var = complete_var
+
+ @property
+ def func_name(self) -> str:
+ """The name of the shell function defined by the completion
+ script.
+ """
+ safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), flags=re.ASCII)
+ return f"_{safe_name}_completion"
+
+ def source_vars(self) -> dict[str, t.Any]:
+ """Vars for formatting :attr:`source_template`.
+
+ By default this provides ``complete_func``, ``complete_var``,
+ and ``prog_name``.
+ """
+ return {
+ "complete_func": self.func_name,
+ "complete_var": self.complete_var,
+ "prog_name": self.prog_name,
+ }
+
+ def source(self) -> str:
+ """Produce the shell script that defines the completion
+ function. By default this ``%``-style formats
+ :attr:`source_template` with the dict returned by
+ :meth:`source_vars`.
+ """
+ return self.source_template % self.source_vars()
+
+ def get_completion_args(self) -> tuple[list[str], str]:
+ """Use the env vars defined by the shell script to return a
+ tuple of ``args, incomplete``. This must be implemented by
+ subclasses.
+ """
+ raise NotImplementedError
+
+ def get_completions(self, args: list[str], incomplete: str) -> list[CompletionItem]:
+ """Determine the context and last complete command or parameter
+ from the complete args. Call that object's ``shell_complete``
+ method to get the completions for the incomplete value.
+
+ :param args: List of complete args before the incomplete value.
+ :param incomplete: Value being completed. May be empty.
+ """
+ ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)
+ obj, incomplete = _resolve_incomplete(ctx, args, incomplete)
+ return obj.shell_complete(ctx, incomplete)
+
+ def format_completion(self, item: CompletionItem) -> str:
+ """Format a completion item into the form recognized by the
+ shell script. This must be implemented by subclasses.
+
+ :param item: Completion item to format.
+ """
+ raise NotImplementedError
+
+ def complete(self) -> str:
+ """Produce the completion data to send back to the shell.
+
+ By default this calls :meth:`get_completion_args`, gets the
+ completions, then calls :meth:`format_completion` for each
+ completion.
+ """
+ args, incomplete = self.get_completion_args()
+ completions = self.get_completions(args, incomplete)
+ out = [self.format_completion(item) for item in completions]
+ return "\n".join(out)
+
+
+class BashComplete(ShellComplete):
+ """Shell completion for Bash."""
+
+ name = "bash"
+ source_template = _SOURCE_BASH
+
+ @staticmethod
+ def _check_version() -> None:
+ import shutil
+ import subprocess
+
+ bash_exe = shutil.which("bash")
+
+ if bash_exe is None:
+ match = None
+ else:
+ output = subprocess.run(
+ [bash_exe, "--norc", "-c", 'echo "${BASH_VERSION}"'],
+ stdout=subprocess.PIPE,
+ )
+ match = re.search(r"^(\d+)\.(\d+)\.\d+", output.stdout.decode())
+
+ if match is not None:
+ major, minor = match.groups()
+
+ if major < "4" or major == "4" and minor < "4":
+ echo(
+ _(
+ "Shell completion is not supported for Bash"
+ " versions older than 4.4."
+ ),
+ err=True,
+ )
+ else:
+ echo(
+ _("Couldn't detect Bash version, shell completion is not supported."),
+ err=True,
+ )
+
+ def source(self) -> str:
+ self._check_version()
+ return super().source()
+
+ def get_completion_args(self) -> tuple[list[str], str]:
+ cwords = split_arg_string(os.environ["COMP_WORDS"])
+ cword = int(os.environ["COMP_CWORD"])
+ args = cwords[1:cword]
+
+ try:
+ incomplete = cwords[cword]
+ except IndexError:
+ incomplete = ""
+
+ return args, incomplete
+
+ def format_completion(self, item: CompletionItem) -> str:
+ return f"{item.type},{item.value}"
+
+
+class ZshComplete(ShellComplete):
+ """Shell completion for Zsh."""
+
+ name = "zsh"
+ source_template = _SOURCE_ZSH
+
+ def get_completion_args(self) -> tuple[list[str], str]:
+ cwords = split_arg_string(os.environ["COMP_WORDS"])
+ cword = int(os.environ["COMP_CWORD"])
+ args = cwords[1:cword]
+
+ try:
+ incomplete = cwords[cword]
+ except IndexError:
+ incomplete = ""
+
+ return args, incomplete
+
+ def format_completion(self, item: CompletionItem) -> str:
+ help_ = item.help or "_"
+ # The zsh completion script uses `_describe` on items with help
+ # texts (which splits the item help from the item value at the
+ # first unescaped colon) and `compadd` on items without help
+ # text (which uses the item value as-is and does not support
+ # colon escaping). So escape colons in the item value if and
+ # only if the item help is not the sentinel "_" value, as used
+ # by the completion script.
+ #
+ # (The zsh completion script is potentially widely deployed, and
+ # thus harder to fix than this method.)
+ #
+ # See issue #1812 and issue #2703 for further context.
+ value = item.value.replace(":", r"\:") if help_ != "_" else item.value
+ return f"{item.type}\n{value}\n{help_}"
+
+
+class FishComplete(ShellComplete):
+ """Shell completion for Fish."""
+
+ name = "fish"
+ source_template = _SOURCE_FISH
+
+ def get_completion_args(self) -> tuple[list[str], str]:
+ cwords = split_arg_string(os.environ["COMP_WORDS"])
+ incomplete = os.environ["COMP_CWORD"]
+ if incomplete:
+ incomplete = split_arg_string(incomplete)[0]
+ args = cwords[1:]
+
+ # Fish stores the partial word in both COMP_WORDS and
+ # COMP_CWORD, remove it from complete args.
+ if incomplete and args and args[-1] == incomplete:
+ args.pop()
+
+ return args, incomplete
+
+ def format_completion(self, item: CompletionItem) -> str:
+ if item.help:
+ return f"{item.type},{item.value}\t{item.help}"
+
+ return f"{item.type},{item.value}"
+
+
+ShellCompleteType = t.TypeVar("ShellCompleteType", bound="type[ShellComplete]")
+
+
+_available_shells: dict[str, type[ShellComplete]] = {
+ "bash": BashComplete,
+ "fish": FishComplete,
+ "zsh": ZshComplete,
+}
+
+
+def add_completion_class(
+ cls: ShellCompleteType, name: str | None = None
+) -> ShellCompleteType:
+ """Register a :class:`ShellComplete` subclass under the given name.
+ The name will be provided by the completion instruction environment
+ variable during completion.
+
+ :param cls: The completion class that will handle completion for the
+ shell.
+ :param name: Name to register the class under. Defaults to the
+ class's ``name`` attribute.
+ """
+ if name is None:
+ name = cls.name
+
+ _available_shells[name] = cls
+
+ return cls
+
+
+def get_completion_class(shell: str) -> type[ShellComplete] | None:
+ """Look up a registered :class:`ShellComplete` subclass by the name
+ provided by the completion instruction environment variable. If the
+ name isn't registered, returns ``None``.
+
+ :param shell: Name the class is registered under.
+ """
+ return _available_shells.get(shell)
+
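+
+# Editorial sketch (not part of Click): wiring completion support for a
+# hypothetical shell. The shell name "myshell", its script template, and
+# the MYSHELL_WORDS variable are all invented; only the ShellComplete API
+# calls are real.
+class _MyShellComplete(ShellComplete):
+    name = "myshell"
+    source_template = "register-completion %(prog_name)s %(complete_var)s"
+
+    def get_completion_args(self) -> tuple[list[str], str]:
+        # Assume myshell exports the words typed so far in MYSHELL_WORDS.
+        cwords = split_arg_string(os.environ.get("MYSHELL_WORDS", ""))
+        args = cwords[1:-1]
+        incomplete = cwords[-1] if len(cwords) > 1 else ""
+        return args, incomplete
+
+    def format_completion(self, item: CompletionItem) -> str:
+        return str(item.value)
+
+
+# Registering it would make the ``myshell_source`` and ``myshell_complete``
+# completion instructions work:
+#
+#     add_completion_class(_MyShellComplete)
+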
+
+def split_arg_string(string: str) -> list[str]:
+ """Split an argument string as with :func:`shlex.split`, but don't
+ fail if the string is incomplete. Ignores a missing closing quote or
+ incomplete escape sequence and uses the partial token as-is.
+
+ .. code-block:: python
+
+ split_arg_string("example 'my file")
+ ["example", "my file"]
+
+ split_arg_string("example my\\")
+ ["example", "my"]
+
+ :param string: String to split.
+
+ .. versionchanged:: 8.2
+ Moved to ``shell_completion`` from ``parser``.
+ """
+ import shlex
+
+ lex = shlex.shlex(string, posix=True)
+ lex.whitespace_split = True
+ lex.commenters = ""
+ out = []
+
+ try:
+ for token in lex:
+ out.append(token)
+ except ValueError:
+ # Raised when end-of-string is reached in an invalid state. Use
+ # the partial token as-is. The quote or escape character is in
+ # lex.state, not lex.token.
+ out.append(lex.token)
+
+ return out
+
+
+def _is_incomplete_argument(ctx: Context, param: Parameter) -> bool:
+ """Determine if the given parameter is an argument that can still
+ accept values.
+
+ :param ctx: Invocation context for the command represented by the
+ parsed complete args.
+ :param param: Argument object being checked.
+ """
+ if not isinstance(param, Argument):
+ return False
+
+ assert param.name is not None
+ # Will be None if expose_value is False.
+ value = ctx.params.get(param.name)
+ return (
+ param.nargs == -1
+ or ctx.get_parameter_source(param.name) is not ParameterSource.COMMANDLINE
+ or (
+ param.nargs > 1
+ and isinstance(value, (tuple, list))
+ and len(value) < param.nargs
+ )
+ )
+
+
+def _start_of_option(ctx: Context, value: str) -> bool:
+ """Check if the value looks like the start of an option."""
+ if not value:
+ return False
+
+ c = value[0]
+ return c in ctx._opt_prefixes
+
+
+def _is_incomplete_option(ctx: Context, args: list[str], param: Parameter) -> bool:
+ """Determine if the given parameter is an option that needs a value.
+
+ :param args: List of complete args before the incomplete value.
+ :param param: Option object being checked.
+ """
+ if not isinstance(param, Option):
+ return False
+
+ if param.is_flag or param.count:
+ return False
+
+ last_option = None
+
+ for index, arg in enumerate(reversed(args)):
+ if index + 1 > param.nargs:
+ break
+
+ if _start_of_option(ctx, arg):
+ last_option = arg
+ break
+
+ return last_option is not None and last_option in param.opts
+
+
+def _resolve_context(
+ cli: Command,
+ ctx_args: cabc.MutableMapping[str, t.Any],
+ prog_name: str,
+ args: list[str],
+) -> Context:
+ """Produce the context hierarchy starting with the command and
+ traversing the complete arguments. This only follows the commands,
+ it doesn't trigger input prompts or callbacks.
+
+ :param cli: Command being called.
+ :param prog_name: Name of the executable in the shell.
+ :param args: List of complete args before the incomplete value.
+ """
+ ctx_args["resilient_parsing"] = True
+ with cli.make_context(prog_name, args.copy(), **ctx_args) as ctx:
+ args = ctx._protected_args + ctx.args
+
+ while args:
+ command = ctx.command
+
+ if isinstance(command, Group):
+ if not command.chain:
+ name, cmd, args = command.resolve_command(ctx, args)
+
+ if cmd is None:
+ return ctx
+
+ with cmd.make_context(
+ name, args, parent=ctx, resilient_parsing=True
+ ) as sub_ctx:
+ ctx = sub_ctx
+ args = ctx._protected_args + ctx.args
+ else:
+ sub_ctx = ctx
+
+ while args:
+ name, cmd, args = command.resolve_command(ctx, args)
+
+ if cmd is None:
+ return ctx
+
+ with cmd.make_context(
+ name,
+ args,
+ parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False,
+ resilient_parsing=True,
+ ) as sub_sub_ctx:
+ sub_ctx = sub_sub_ctx
+ args = sub_ctx.args
+
+ ctx = sub_ctx
+ args = [*sub_ctx._protected_args, *sub_ctx.args]
+ else:
+ break
+
+ return ctx
+
+
+def _resolve_incomplete(
+ ctx: Context, args: list[str], incomplete: str
+) -> tuple[Command | Parameter, str]:
+ """Find the Click object that will handle the completion of the
+ incomplete value. Return the object and the incomplete value.
+
+ :param ctx: Invocation context for the command represented by
+ the parsed complete args.
+ :param args: List of complete args before the incomplete value.
+ :param incomplete: Value being completed. May be empty.
+ """
+ # Different shells treat an "=" between a long option name and
+ # value differently. Might keep the value joined, return the "="
+ # as a separate item, or return the split name and value. Always
+ # split and discard the "=" to make completion easier.
+ if incomplete == "=":
+ incomplete = ""
+ elif "=" in incomplete and _start_of_option(ctx, incomplete):
+ name, _, incomplete = incomplete.partition("=")
+ args.append(name)
+
+ # The "--" marker tells Click to stop treating values as options
+ # even if they start with the option character. If it hasn't been
+ # given and the incomplete arg looks like an option, the current
+ # command will provide option name completions.
+ if "--" not in args and _start_of_option(ctx, incomplete):
+ return ctx.command, incomplete
+
+ params = ctx.command.get_params(ctx)
+
+ # If the last complete arg is an option name with an incomplete
+ # value, the option will provide value completions.
+ for param in params:
+ if _is_incomplete_option(ctx, args, param):
+ return param, incomplete
+
+ # It's not an option name or value. The first argument without a
+ # parsed value will provide value completions.
+ for param in params:
+ if _is_incomplete_argument(ctx, param):
+ return param, incomplete
+
+ # There were no unparsed arguments, the command may be a group that
+ # will provide command name completions.
+ return ctx.command, incomplete
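+
+
+# Editorial sketch (not part of Click): exercising the completion machinery
+# directly, the way the generated bash script does through env vars. The
+# tiny CLI here is invented for illustration.
+def _example_bash_complete() -> None:
+    import click
+
+    @click.group()
+    def cli() -> None:
+        """Example root group."""
+
+    @cli.command()
+    def greet() -> None:
+        click.echo("hi")
+
+    os.environ["COMP_WORDS"] = "cli gr"  # the line typed so far
+    os.environ["COMP_CWORD"] = "1"  # index of the word being completed
+    comp = BashComplete(cli, {}, "cli", "_CLI_COMPLETE")
+    print(comp.complete())  # -> "plain,greet"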
diff --git a/tapdown/lib/python3.11/site-packages/click/termui.py b/tapdown/lib/python3.11/site-packages/click/termui.py
new file mode 100644
index 0000000..dcbb222
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/termui.py
@@ -0,0 +1,877 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import inspect
+import io
+import itertools
+import sys
+import typing as t
+from contextlib import AbstractContextManager
+from gettext import gettext as _
+
+from ._compat import isatty
+from ._compat import strip_ansi
+from .exceptions import Abort
+from .exceptions import UsageError
+from .globals import resolve_color_default
+from .types import Choice
+from .types import convert_type
+from .types import ParamType
+from .utils import echo
+from .utils import LazyFile
+
+if t.TYPE_CHECKING:
+ from ._termui_impl import ProgressBar
+
+V = t.TypeVar("V")
+
+# The prompt functions to use. The doc tools currently override these
+# functions to customize how they work.
+visible_prompt_func: t.Callable[[str], str] = input
+
+_ansi_colors = {
+ "black": 30,
+ "red": 31,
+ "green": 32,
+ "yellow": 33,
+ "blue": 34,
+ "magenta": 35,
+ "cyan": 36,
+ "white": 37,
+ "reset": 39,
+ "bright_black": 90,
+ "bright_red": 91,
+ "bright_green": 92,
+ "bright_yellow": 93,
+ "bright_blue": 94,
+ "bright_magenta": 95,
+ "bright_cyan": 96,
+ "bright_white": 97,
+}
+_ansi_reset_all = "\033[0m"
+
+
+def hidden_prompt_func(prompt: str) -> str:
+ import getpass
+
+ return getpass.getpass(prompt)
+
+
+def _build_prompt(
+ text: str,
+ suffix: str,
+ show_default: bool = False,
+ default: t.Any | None = None,
+ show_choices: bool = True,
+ type: ParamType | None = None,
+) -> str:
+ prompt = text
+ if type is not None and show_choices and isinstance(type, Choice):
+ prompt += f" ({', '.join(map(str, type.choices))})"
+ if default is not None and show_default:
+ prompt = f"{prompt} [{_format_default(default)}]"
+ return f"{prompt}{suffix}"
+
+
+def _format_default(default: t.Any) -> t.Any:
+ if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
+ return default.name
+
+ return default
+
+
+def prompt(
+ text: str,
+ default: t.Any | None = None,
+ hide_input: bool = False,
+ confirmation_prompt: bool | str = False,
+ type: ParamType | t.Any | None = None,
+ value_proc: t.Callable[[str], t.Any] | None = None,
+ prompt_suffix: str = ": ",
+ show_default: bool = True,
+ err: bool = False,
+ show_choices: bool = True,
+) -> t.Any:
+ """Prompts a user for input. This is a convenience function that can
+    be used to prompt a user for input.
+
+ If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ :param text: the text to show for the prompt.
+ :param default: the default value to use if no input happens. If this
+ is not given it will prompt until it's aborted.
+ :param hide_input: if this is set to true then the input value will
+ be hidden.
+ :param confirmation_prompt: Prompt a second time to confirm the
+ value. Can be set to a string instead of ``True`` to customize
+ the message.
+ :param type: the type to use to check the value against.
+ :param value_proc: if this parameter is provided it's a function that
+ is invoked instead of the type conversion to
+ convert a value.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ :param show_choices: Show or hide choices if the passed type is a Choice.
+ For example if type is a Choice of either day or week,
+ show_choices is true and text is "Group by" then the
+ prompt will be "Group by (day, week): ".
+
+ .. versionadded:: 8.0
+ ``confirmation_prompt`` can be a custom string.
+
+ .. versionadded:: 7.0
+ Added the ``show_choices`` parameter.
+
+ .. versionadded:: 6.0
+ Added unicode support for cmd.exe on Windows.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ """
+
+ def prompt_func(text: str) -> str:
+ f = hidden_prompt_func if hide_input else visible_prompt_func
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(text.rstrip(" "), nl=False, err=err)
+ # Echo a space to stdout to work around an issue where
+ # readline causes backspace to clear the whole line.
+ return f(" ")
+ except (KeyboardInterrupt, EOFError):
+ # getpass doesn't print a newline if the user aborts input with ^C.
+ # Allegedly this behavior is inherited from getpass(3).
+ # A doc bug has been filed at https://bugs.python.org/issue24711
+ if hide_input:
+ echo(None, err=err)
+ raise Abort() from None
+
+ if value_proc is None:
+ value_proc = convert_type(type, default)
+
+ prompt = _build_prompt(
+ text, prompt_suffix, show_default, default, show_choices, type
+ )
+
+ if confirmation_prompt:
+ if confirmation_prompt is True:
+ confirmation_prompt = _("Repeat for confirmation")
+
+ confirmation_prompt = _build_prompt(confirmation_prompt, prompt_suffix)
+
+ while True:
+ while True:
+ value = prompt_func(prompt)
+ if value:
+ break
+ elif default is not None:
+ value = default
+ break
+ try:
+ result = value_proc(value)
+ except UsageError as e:
+ if hide_input:
+ echo(_("Error: The value you entered was invalid."), err=err)
+ else:
+ echo(_("Error: {e.message}").format(e=e), err=err)
+ continue
+ if not confirmation_prompt:
+ return result
+ while True:
+ value2 = prompt_func(confirmation_prompt)
+ is_empty = not value and not value2
+ if value2 or is_empty:
+ break
+ if value == value2:
+ return result
+ echo(_("Error: The two entered values do not match."), err=err)
+
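+
+# Editorial sketch: typical prompt()/confirm() usage. Wrapped in a function
+# (and not executed at import time) because it blocks on user input.
+def _example_prompts() -> None:
+    port = prompt("Port", default=8080, type=int)
+    if confirm(f"Start server on port {port}?", default=True):
+        echo("Starting...")
+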
+
+def confirm(
+ text: str,
+ default: bool | None = False,
+ abort: bool = False,
+ prompt_suffix: str = ": ",
+ show_default: bool = True,
+ err: bool = False,
+) -> bool:
+ """Prompts for confirmation (yes/no question).
+
+    If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ :param text: the question to ask.
+ :param default: The default value to use when no input is given. If
+ ``None``, repeat until input is given.
+    :param abort: if this is set to `True` a negative answer aborts the
+        execution by raising :exc:`Abort`.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+
+ .. versionchanged:: 8.0
+ Repeat until input is given if ``default`` is ``None``.
+
+ .. versionadded:: 4.0
+ Added the ``err`` parameter.
+ """
+ prompt = _build_prompt(
+ text,
+ prompt_suffix,
+ show_default,
+ "y/n" if default is None else ("Y/n" if default else "y/N"),
+ )
+
+ while True:
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(prompt.rstrip(" "), nl=False, err=err)
+ # Echo a space to stdout to work around an issue where
+ # readline causes backspace to clear the whole line.
+ value = visible_prompt_func(" ").lower().strip()
+ except (KeyboardInterrupt, EOFError):
+ raise Abort() from None
+ if value in ("y", "yes"):
+ rv = True
+ elif value in ("n", "no"):
+ rv = False
+ elif default is not None and value == "":
+ rv = default
+ else:
+ echo(_("Error: invalid input"), err=err)
+ continue
+ break
+ if abort and not rv:
+ raise Abort()
+ return rv
+
+
+def echo_via_pager(
+ text_or_generator: cabc.Iterable[str] | t.Callable[[], cabc.Iterable[str]] | str,
+ color: bool | None = None,
+) -> None:
+ """This function takes a text and shows it via an environment specific
+ pager on stdout.
+
+ .. versionchanged:: 3.0
+ Added the `color` flag.
+
+ :param text_or_generator: the text to page, or alternatively, a
+ generator emitting the text to page.
+ :param color: controls if the pager supports ANSI colors or not. The
+ default is autodetection.
+ """
+ color = resolve_color_default(color)
+
+ if inspect.isgeneratorfunction(text_or_generator):
+ i = t.cast("t.Callable[[], cabc.Iterable[str]]", text_or_generator)()
+ elif isinstance(text_or_generator, str):
+ i = [text_or_generator]
+ else:
+ i = iter(t.cast("cabc.Iterable[str]", text_or_generator))
+
+ # convert every element of i to a text type if necessary
+ text_generator = (el if isinstance(el, str) else str(el) for el in i)
+
+ from ._termui_impl import pager
+
+ return pager(itertools.chain(text_generator, "\n"), color)
+
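+
+# Editorial sketch: paging lazily produced text. Passing the generator
+# function itself (not a call) lets the pager pull lines on demand.
+def _example_pager() -> None:
+    def lines() -> cabc.Iterator[str]:
+        for i in range(1, 201):
+            yield f"line {i}\n"
+
+    echo_via_pager(lines)
+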
+
+@t.overload
+def progressbar(
+ *,
+ length: int,
+ label: str | None = None,
+ hidden: bool = False,
+ show_eta: bool = True,
+ show_percent: bool | None = None,
+ show_pos: bool = False,
+ fill_char: str = "#",
+ empty_char: str = "-",
+ bar_template: str = "%(label)s [%(bar)s] %(info)s",
+ info_sep: str = " ",
+ width: int = 36,
+ file: t.TextIO | None = None,
+ color: bool | None = None,
+ update_min_steps: int = 1,
+) -> ProgressBar[int]: ...
+
+
+@t.overload
+def progressbar(
+ iterable: cabc.Iterable[V] | None = None,
+ length: int | None = None,
+ label: str | None = None,
+ hidden: bool = False,
+ show_eta: bool = True,
+ show_percent: bool | None = None,
+ show_pos: bool = False,
+ item_show_func: t.Callable[[V | None], str | None] | None = None,
+ fill_char: str = "#",
+ empty_char: str = "-",
+ bar_template: str = "%(label)s [%(bar)s] %(info)s",
+ info_sep: str = " ",
+ width: int = 36,
+ file: t.TextIO | None = None,
+ color: bool | None = None,
+ update_min_steps: int = 1,
+) -> ProgressBar[V]: ...
+
+
+def progressbar(
+ iterable: cabc.Iterable[V] | None = None,
+ length: int | None = None,
+ label: str | None = None,
+ hidden: bool = False,
+ show_eta: bool = True,
+ show_percent: bool | None = None,
+ show_pos: bool = False,
+ item_show_func: t.Callable[[V | None], str | None] | None = None,
+ fill_char: str = "#",
+ empty_char: str = "-",
+ bar_template: str = "%(label)s [%(bar)s] %(info)s",
+ info_sep: str = " ",
+ width: int = 36,
+ file: t.TextIO | None = None,
+ color: bool | None = None,
+ update_min_steps: int = 1,
+) -> ProgressBar[V]:
+ """This function creates an iterable context manager that can be used
+ to iterate over something while showing a progress bar. It will
+ either iterate over the `iterable` or `length` items (that are counted
+ up). While iteration happens, this function will print a rendered
+ progress bar to the given `file` (defaults to stdout) and will attempt
+ to calculate remaining time and more. By default, this progress bar
+ will not be rendered if the file is not a terminal.
+
+ The context manager creates the progress bar. When the context
+ manager is entered the progress bar is already created. With every
+ iteration over the progress bar, the iterable passed to the bar is
+ advanced and the bar is updated. When the context manager exits,
+ a newline is printed and the progress bar is finalized on screen.
+
+ Note: The progress bar is currently designed for use cases where the
+ total progress can be expected to take at least several seconds.
+    Because of this, the ProgressBar class won't display progress that
+    is considered too fast, i.e. when the time between steps is less
+    than a second.
+
+    Nothing else should be printed to the same output while the bar is
+    active, or the progress bar will be unintentionally destroyed.
+
+ Example usage::
+
+ with progressbar(items) as bar:
+ for item in bar:
+ do_something_with(item)
+
+ Alternatively, if no iterable is specified, one can manually update the
+ progress bar through the `update()` method instead of directly
+ iterating over the progress bar. The update method accepts the number
+ of steps to increment the bar with::
+
+ with progressbar(length=chunks.total_bytes) as bar:
+ for chunk in chunks:
+ process_chunk(chunk)
+ bar.update(chunks.bytes)
+
+ The ``update()`` method also takes an optional value specifying the
+ ``current_item`` at the new position. This is useful when used
+ together with ``item_show_func`` to customize the output for each
+ manual step::
+
+ with click.progressbar(
+ length=total_size,
+ label='Unzipping archive',
+ item_show_func=lambda a: a.filename
+ ) as bar:
+ for archive in zip_file:
+ archive.extract()
+ bar.update(archive.size, archive)
+
+ :param iterable: an iterable to iterate over. If not provided the length
+ is required.
+ :param length: the number of items to iterate over. By default the
+ progressbar will attempt to ask the iterator about its
+ length, which might or might not work. If an iterable is
+ also provided this parameter can be used to override the
+ length. If an iterable is not provided the progress bar
+ will iterate over a range of that length.
+ :param label: the label to show next to the progress bar.
+    :param hidden: hide the progressbar. Defaults to ``False``. When no tty is
+        detected, it will only print the progressbar label. Setting this to
+        ``True`` also disables that.
+ :param show_eta: enables or disables the estimated time display. This is
+ automatically disabled if the length cannot be
+ determined.
+ :param show_percent: enables or disables the percentage display. The
+ default is `True` if the iterable has a length or
+ `False` if not.
+ :param show_pos: enables or disables the absolute position display. The
+ default is `False`.
+ :param item_show_func: A function called with the current item which
+ can return a string to show next to the progress bar. If the
+ function returns ``None`` nothing is shown. The current item can
+ be ``None``, such as when entering and exiting the bar.
+ :param fill_char: the character to use to show the filled part of the
+ progress bar.
+ :param empty_char: the character to use to show the non-filled part of
+ the progress bar.
+ :param bar_template: the format string to use as template for the bar.
+ The parameters in it are ``label`` for the label,
+ ``bar`` for the progress bar and ``info`` for the
+ info section.
+ :param info_sep: the separator between multiple info items (eta etc.)
+ :param width: the width of the progress bar in characters, 0 means full
+ terminal width
+ :param file: The file to write to. If this is not a terminal then
+ only the label is printed.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are included anywhere in the progress bar output
+ which is not the case by default.
+ :param update_min_steps: Render only when this many updates have
+ completed. This allows tuning for very fast iterators.
+
+ .. versionadded:: 8.2
+ The ``hidden`` argument.
+
+ .. versionchanged:: 8.0
+ Output is shown even if execution time is less than 0.5 seconds.
+
+ .. versionchanged:: 8.0
+ ``item_show_func`` shows the current item, not the previous one.
+
+ .. versionchanged:: 8.0
+ Labels are echoed if the output is not a TTY. Reverts a change
+ in 7.0 that removed all output.
+
+ .. versionadded:: 8.0
+ The ``update_min_steps`` parameter.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter and ``update`` method.
+
+ .. versionadded:: 2.0
+ """
+ from ._termui_impl import ProgressBar
+
+ color = resolve_color_default(color)
+ return ProgressBar(
+ iterable=iterable,
+ length=length,
+ hidden=hidden,
+ show_eta=show_eta,
+ show_percent=show_percent,
+ show_pos=show_pos,
+ item_show_func=item_show_func,
+ fill_char=fill_char,
+ empty_char=empty_char,
+ bar_template=bar_template,
+ info_sep=info_sep,
+ file=file,
+ label=label,
+ width=width,
+ color=color,
+ update_min_steps=update_min_steps,
+ )
+
+
+def clear() -> None:
+ """Clears the terminal screen. This will have the effect of clearing
+ the whole visible space of the terminal and moving the cursor to the
+ top left. This does not do anything if not connected to a terminal.
+
+ .. versionadded:: 2.0
+ """
+ if not isatty(sys.stdout):
+ return
+
+ # ANSI escape \033[2J clears the screen, \033[1;1H moves the cursor
+ echo("\033[2J\033[1;1H", nl=False)
+
+
+def _interpret_color(color: int | tuple[int, int, int] | str, offset: int = 0) -> str:
+ if isinstance(color, int):
+ return f"{38 + offset};5;{color:d}"
+
+ if isinstance(color, (tuple, list)):
+ r, g, b = color
+ return f"{38 + offset};2;{r:d};{g:d};{b:d}"
+
+ return str(_ansi_colors[color] + offset)
+
+
+def style(
+ text: t.Any,
+ fg: int | tuple[int, int, int] | str | None = None,
+ bg: int | tuple[int, int, int] | str | None = None,
+ bold: bool | None = None,
+ dim: bool | None = None,
+ underline: bool | None = None,
+ overline: bool | None = None,
+ italic: bool | None = None,
+ blink: bool | None = None,
+ reverse: bool | None = None,
+ strikethrough: bool | None = None,
+ reset: bool = True,
+) -> str:
+ """Styles a text with ANSI styles and returns the new string. By
+ default the styling is self contained which means that at the end
+ of the string a reset code is issued. This can be prevented by
+ passing ``reset=False``.
+
+ Examples::
+
+ click.echo(click.style('Hello World!', fg='green'))
+ click.echo(click.style('ATTENTION!', blink=True))
+ click.echo(click.style('Some things', reverse=True, fg='cyan'))
+ click.echo(click.style('More colors', fg=(255, 12, 128), bg=117))
+
+ Supported color names:
+
+ * ``black`` (might be a gray)
+ * ``red``
+ * ``green``
+ * ``yellow`` (might be an orange)
+ * ``blue``
+ * ``magenta``
+ * ``cyan``
+ * ``white`` (might be light gray)
+ * ``bright_black``
+ * ``bright_red``
+ * ``bright_green``
+ * ``bright_yellow``
+ * ``bright_blue``
+ * ``bright_magenta``
+ * ``bright_cyan``
+ * ``bright_white``
+ * ``reset`` (reset the color code only)
+
+ If the terminal supports it, color may also be specified as:
+
+ - An integer in the interval [0, 255]. The terminal must support
+ 8-bit/256-color mode.
+ - An RGB tuple of three integers in [0, 255]. The terminal must
+ support 24-bit/true-color mode.
+
+ See https://en.wikipedia.org/wiki/ANSI_color and
+ https://gist.github.com/XVilka/8346728 for more information.
+
+ :param text: the string to style with ansi codes.
+ :param fg: if provided this will become the foreground color.
+ :param bg: if provided this will become the background color.
+ :param bold: if provided this will enable or disable bold mode.
+ :param dim: if provided this will enable or disable dim mode. This is
+ badly supported.
+ :param underline: if provided this will enable or disable underline.
+ :param overline: if provided this will enable or disable overline.
+ :param italic: if provided this will enable or disable italic.
+ :param blink: if provided this will enable or disable blinking.
+ :param reverse: if provided this will enable or disable inverse
+ rendering (foreground becomes background and the
+ other way round).
+ :param strikethrough: if provided this will enable or disable
+ striking through text.
+ :param reset: by default a reset-all code is added at the end of the
+ string which means that styles do not carry over. This
+ can be disabled to compose styles.
+
+ .. versionchanged:: 8.0
+ A non-string ``message`` is converted to a string.
+
+ .. versionchanged:: 8.0
+ Added support for 256 and RGB color codes.
+
+ .. versionchanged:: 8.0
+ Added the ``strikethrough``, ``italic``, and ``overline``
+ parameters.
+
+ .. versionchanged:: 7.0
+ Added support for bright colors.
+
+ .. versionadded:: 2.0
+ """
+ if not isinstance(text, str):
+ text = str(text)
+
+ bits = []
+
+ if fg:
+ try:
+ bits.append(f"\033[{_interpret_color(fg)}m")
+ except KeyError:
+ raise TypeError(f"Unknown color {fg!r}") from None
+
+ if bg:
+ try:
+ bits.append(f"\033[{_interpret_color(bg, 10)}m")
+ except KeyError:
+ raise TypeError(f"Unknown color {bg!r}") from None
+
+ if bold is not None:
+ bits.append(f"\033[{1 if bold else 22}m")
+ if dim is not None:
+ bits.append(f"\033[{2 if dim else 22}m")
+ if underline is not None:
+ bits.append(f"\033[{4 if underline else 24}m")
+ if overline is not None:
+ bits.append(f"\033[{53 if overline else 55}m")
+ if italic is not None:
+ bits.append(f"\033[{3 if italic else 23}m")
+ if blink is not None:
+ bits.append(f"\033[{5 if blink else 25}m")
+ if reverse is not None:
+ bits.append(f"\033[{7 if reverse else 27}m")
+ if strikethrough is not None:
+ bits.append(f"\033[{9 if strikethrough else 29}m")
+ bits.append(text)
+ if reset:
+ bits.append(_ansi_reset_all)
+ return "".join(bits)
+
+
+def unstyle(text: str) -> str:
+ """Removes ANSI styling information from a string. Usually it's not
+ necessary to use this function as Click's echo function will
+ automatically remove styling if necessary.
+
+ .. versionadded:: 2.0
+
+ :param text: the text to remove style information from.
+ """
+ return strip_ansi(text)
+
+
+def secho(
+ message: t.Any | None = None,
+ file: t.IO[t.AnyStr] | None = None,
+ nl: bool = True,
+ err: bool = False,
+ color: bool | None = None,
+ **styles: t.Any,
+) -> None:
+ """This function combines :func:`echo` and :func:`style` into one
+ call. As such the following two calls are the same::
+
+ click.secho('Hello World!', fg='green')
+ click.echo(click.style('Hello World!', fg='green'))
+
+ All keyword arguments are forwarded to the underlying functions
+ depending on which one they go with.
+
+ Non-string types will be converted to :class:`str`. However,
+ :class:`bytes` are passed directly to :meth:`echo` without applying
+ style. If you want to style bytes that represent text, call
+ :meth:`bytes.decode` first.
+
+ .. versionchanged:: 8.0
+ A non-string ``message`` is converted to a string. Bytes are
+ passed through without style applied.
+
+ .. versionadded:: 2.0
+ """
+ if message is not None and not isinstance(message, (bytes, bytearray)):
+ message = style(message, **styles)
+
+ return echo(message, file=file, nl=nl, err=err, color=color)
+
+
+@t.overload
+def edit(
+ text: bytes | bytearray,
+ editor: str | None = None,
+ env: cabc.Mapping[str, str] | None = None,
+ require_save: bool = False,
+ extension: str = ".txt",
+) -> bytes | None: ...
+
+
+@t.overload
+def edit(
+ text: str,
+ editor: str | None = None,
+ env: cabc.Mapping[str, str] | None = None,
+ require_save: bool = True,
+ extension: str = ".txt",
+) -> str | None: ...
+
+
+@t.overload
+def edit(
+ text: None = None,
+ editor: str | None = None,
+ env: cabc.Mapping[str, str] | None = None,
+ require_save: bool = True,
+ extension: str = ".txt",
+ filename: str | cabc.Iterable[str] | None = None,
+) -> None: ...
+
+
+def edit(
+ text: str | bytes | bytearray | None = None,
+ editor: str | None = None,
+ env: cabc.Mapping[str, str] | None = None,
+ require_save: bool = True,
+ extension: str = ".txt",
+ filename: str | cabc.Iterable[str] | None = None,
+) -> str | bytes | bytearray | None:
+ r"""Edits the given text in the defined editor. If an editor is given
+ (should be the full path to the executable but the regular operating
+ system search path is used for finding the executable) it overrides
+ the detected editor. Optionally, some environment variables can be
+ used. If the editor is closed without changes, `None` is returned. In
+ case a file is edited directly the return value is always `None` and
+ `require_save` and `extension` are ignored.
+
+ If the editor cannot be opened a :exc:`UsageError` is raised.
+
+ Note for Windows: to simplify cross-platform usage, the newlines are
+ automatically converted from POSIX to Windows and vice versa. As such,
+ the message here will have ``\n`` as newline markers.
+
+ :param text: the text to edit.
+ :param editor: optionally the editor to use. Defaults to automatic
+ detection.
+ :param env: environment variables to forward to the editor.
+ :param require_save: if this is true, then not saving in the editor
+ will make the return value become `None`.
+ :param extension: the extension to tell the editor about. This defaults
+ to `.txt` but changing this might change syntax
+ highlighting.
+ :param filename: if provided it will edit this file instead of the
+ provided text contents. It will not use a temporary
+ file as an indirection in that case. If the editor supports
+ editing multiple files at once, a sequence of files may be
+        passed as well. Invoke `click.edit` once per file instead
+ if multiple files cannot be managed at once or editing the
+ files serially is desired.
+
+ .. versionchanged:: 8.2.0
+ ``filename`` now accepts any ``Iterable[str]`` in addition to a ``str``
+ if the ``editor`` supports editing multiple files at once.
+
+ """
+ from ._termui_impl import Editor
+
+ ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)
+
+ if filename is None:
+ return ed.edit(text)
+
+ if isinstance(filename, str):
+ filename = (filename,)
+
+ ed.edit_files(filenames=filename)
+ return None
+
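+
+# Editorial sketch: round-tripping text through the user's editor. Wrapped
+# in a function because it launches an external program and blocks.
+def _example_edit() -> None:
+    message = edit("Write your message here\n")
+    if message is None:
+        echo("No changes were saved.")
+    else:
+        echo(message)
+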
+
+def launch(url: str, wait: bool = False, locate: bool = False) -> int:
+ """This function launches the given URL (or filename) in the default
+ viewer application for this file type. If this is an executable, it
+ might launch the executable in a new session. The return value is
+ the exit code of the launched application. Usually, ``0`` indicates
+ success.
+
+ Examples::
+
+ click.launch('https://click.palletsprojects.com/')
+ click.launch('/my/downloaded/file', locate=True)
+
+ .. versionadded:: 2.0
+
+ :param url: URL or filename of the thing to launch.
+ :param wait: Wait for the program to exit before returning. This
+ only works if the launched program blocks. In particular,
+ ``xdg-open`` on Linux does not block.
+ :param locate: if this is set to `True` then instead of launching the
+ application associated with the URL it will attempt to
+ launch a file manager with the file located. This
+ might have weird effects if the URL does not point to
+ the filesystem.
+ """
+ from ._termui_impl import open_url
+
+ return open_url(url, wait=wait, locate=locate)
+
+
+# If this is provided, getchar() calls into this instead. This is used
+# for unittesting purposes.
+_getchar: t.Callable[[bool], str] | None = None
+
+
+def getchar(echo: bool = False) -> str:
+ """Fetches a single character from the terminal and returns it. This
+ will always return a unicode character and under certain rare
+    circumstances this might return more than one character. More than
+    one character is returned when, for whatever reason, multiple
+    characters end up in the terminal buffer or standard input is not
+    actually a terminal.
+
+ Note that this will always read from the terminal, even if something
+ is piped into the standard input.
+
+ Note for Windows: in rare cases when typing non-ASCII characters, this
+ function might wait for a second character and then return both at once.
+ This is because certain Unicode characters look like special-key markers.
+
+ .. versionadded:: 2.0
+
+ :param echo: if set to `True`, the character read will also show up on
+ the terminal. The default is to not show it.
+ """
+ global _getchar
+
+ if _getchar is None:
+ from ._termui_impl import getchar as f
+
+ _getchar = f
+
+ return _getchar(echo)
+
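+
+# Editorial sketch: a minimal key loop built on getchar().
+def _example_key_loop() -> None:
+    echo("Press 'q' to quit.")
+    while True:
+        ch = getchar()
+        if ch == "q":
+            break
+        echo(f"you pressed {ch!r}")
+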
+
+def raw_terminal() -> AbstractContextManager[int]:
+ from ._termui_impl import raw_terminal as f
+
+ return f()
+
+
+def pause(info: str | None = None, err: bool = False) -> None:
+ """This command stops execution and waits for the user to press any
+ key to continue. This is similar to the Windows batch "pause"
+ command. If the program is not run through a terminal, this command
+ will instead do nothing.
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param info: The message to print before pausing. Defaults to
+ ``"Press any key to continue..."``.
+    :param err: if set to true the message goes to ``stderr`` instead of
+        ``stdout``, the same as with echo.
+ """
+ if not isatty(sys.stdin) or not isatty(sys.stdout):
+ return
+
+ if info is None:
+ info = _("Press any key to continue...")
+
+ try:
+ if info:
+ echo(info, nl=False, err=err)
+ try:
+ getchar()
+ except (KeyboardInterrupt, EOFError):
+ pass
+ finally:
+ if info:
+ echo(err=err)
diff --git a/tapdown/lib/python3.11/site-packages/click/testing.py b/tapdown/lib/python3.11/site-packages/click/testing.py
new file mode 100644
index 0000000..f6f60b8
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/testing.py
@@ -0,0 +1,577 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import contextlib
+import io
+import os
+import shlex
+import sys
+import tempfile
+import typing as t
+from types import TracebackType
+
+from . import _compat
+from . import formatting
+from . import termui
+from . import utils
+from ._compat import _find_binary_reader
+
+if t.TYPE_CHECKING:
+ from _typeshed import ReadableBuffer
+
+ from .core import Command
+
+
+class EchoingStdin:
+ def __init__(self, input: t.BinaryIO, output: t.BinaryIO) -> None:
+ self._input = input
+ self._output = output
+ self._paused = False
+
+ def __getattr__(self, x: str) -> t.Any:
+ return getattr(self._input, x)
+
+ def _echo(self, rv: bytes) -> bytes:
+ if not self._paused:
+ self._output.write(rv)
+
+ return rv
+
+ def read(self, n: int = -1) -> bytes:
+ return self._echo(self._input.read(n))
+
+ def read1(self, n: int = -1) -> bytes:
+ return self._echo(self._input.read1(n)) # type: ignore
+
+ def readline(self, n: int = -1) -> bytes:
+ return self._echo(self._input.readline(n))
+
+ def readlines(self) -> list[bytes]:
+ return [self._echo(x) for x in self._input.readlines()]
+
+ def __iter__(self) -> cabc.Iterator[bytes]:
+ return iter(self._echo(x) for x in self._input)
+
+ def __repr__(self) -> str:
+ return repr(self._input)
+
+
+@contextlib.contextmanager
+def _pause_echo(stream: EchoingStdin | None) -> cabc.Iterator[None]:
+ if stream is None:
+ yield
+ else:
+ stream._paused = True
+ yield
+ stream._paused = False
+
+
+class BytesIOCopy(io.BytesIO):
+ """Patch ``io.BytesIO`` to let the written stream be copied to another.
+
+ .. versionadded:: 8.2
+ """
+
+ def __init__(self, copy_to: io.BytesIO) -> None:
+ super().__init__()
+ self.copy_to = copy_to
+
+ def flush(self) -> None:
+ super().flush()
+ self.copy_to.flush()
+
+ def write(self, b: ReadableBuffer) -> int:
+ self.copy_to.write(b)
+ return super().write(b)
+
+
+class StreamMixer:
+ """Mixes `` and `` streams.
+
+ The result is available in the ``output`` attribute.
+
+ .. versionadded:: 8.2
+ """
+
+ def __init__(self) -> None:
+ self.output: io.BytesIO = io.BytesIO()
+ self.stdout: io.BytesIO = BytesIOCopy(copy_to=self.output)
+ self.stderr: io.BytesIO = BytesIOCopy(copy_to=self.output)
+
+ def __del__(self) -> None:
+ """
+ Guarantee that embedded file-like objects are closed in a
+ predictable order, protecting against races between
+ self.output being closed and other streams being flushed on close
+
+ .. versionadded:: 8.2.2
+ """
+ self.stderr.close()
+ self.stdout.close()
+ self.output.close()
+
+
+class _NamedTextIOWrapper(io.TextIOWrapper):
+ def __init__(
+ self, buffer: t.BinaryIO, name: str, mode: str, **kwargs: t.Any
+ ) -> None:
+ super().__init__(buffer, **kwargs)
+ self._name = name
+ self._mode = mode
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ @property
+ def mode(self) -> str:
+ return self._mode
+
+
+def make_input_stream(
+ input: str | bytes | t.IO[t.Any] | None, charset: str
+) -> t.BinaryIO:
+ # Is already an input stream.
+ if hasattr(input, "read"):
+ rv = _find_binary_reader(t.cast("t.IO[t.Any]", input))
+
+ if rv is not None:
+ return rv
+
+ raise TypeError("Could not find binary reader for input stream.")
+
+ if input is None:
+ input = b""
+ elif isinstance(input, str):
+ input = input.encode(charset)
+
+ return io.BytesIO(input)
+
+
+class Result:
+ """Holds the captured result of an invoked CLI script.
+
+ :param runner: The runner that created the result
+ :param stdout_bytes: The standard output as bytes.
+ :param stderr_bytes: The standard error as bytes.
+ :param output_bytes: A mix of ``stdout_bytes`` and ``stderr_bytes``, as the
+        user would see it in their terminal.
+ :param return_value: The value returned from the invoked command.
+ :param exit_code: The exit code as integer.
+ :param exception: The exception that happened if one did.
+ :param exc_info: Exception information (exception type, exception instance,
+ traceback type).
+
+ .. versionchanged:: 8.2
+ ``stderr_bytes`` no longer optional, ``output_bytes`` introduced and
+ ``mix_stderr`` has been removed.
+
+ .. versionadded:: 8.0
+ Added ``return_value``.
+ """
+
+ def __init__(
+ self,
+ runner: CliRunner,
+ stdout_bytes: bytes,
+ stderr_bytes: bytes,
+ output_bytes: bytes,
+ return_value: t.Any,
+ exit_code: int,
+ exception: BaseException | None,
+ exc_info: tuple[type[BaseException], BaseException, TracebackType]
+ | None = None,
+ ):
+ self.runner = runner
+ self.stdout_bytes = stdout_bytes
+ self.stderr_bytes = stderr_bytes
+ self.output_bytes = output_bytes
+ self.return_value = return_value
+ self.exit_code = exit_code
+ self.exception = exception
+ self.exc_info = exc_info
+
+ @property
+ def output(self) -> str:
+ """The terminal output as unicode string, as the user would see it.
+
+ .. versionchanged:: 8.2
+ No longer a proxy for ``self.stdout``. Now has its own independent stream
+            that is mixing `<stdout>` and `<stderr>`, in the order they were written.
+ """
+ return self.output_bytes.decode(self.runner.charset, "replace").replace(
+ "\r\n", "\n"
+ )
+
+ @property
+ def stdout(self) -> str:
+ """The standard output as unicode string."""
+ return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
+ "\r\n", "\n"
+ )
+
+ @property
+ def stderr(self) -> str:
+ """The standard error as unicode string.
+
+ .. versionchanged:: 8.2
+            No longer raise an exception, always returns the `<stderr>` string.
+ """
+ return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
+ "\r\n", "\n"
+ )
+
+ def __repr__(self) -> str:
+ exc_str = repr(self.exception) if self.exception else "okay"
+ return f"<{type(self).__name__} {exc_str}>"
+
+
+class CliRunner:
+ """The CLI runner provides functionality to invoke a Click command line
+    script for unittesting purposes in an isolated environment. This only
+ works in single-threaded systems without any concurrency as it changes the
+ global interpreter state.
+
+ :param charset: the character set for the input and output data.
+ :param env: a dictionary with environment variables for overriding.
+    :param echo_stdin: if this is set to `True`, then reading from `<stdin>`
+        writes to `<stdout>`. This is useful for showing examples in
+ some circumstances. Note that regular prompts
+ will automatically echo the input.
+ :param catch_exceptions: Whether to catch any exceptions other than
+ ``SystemExit`` when running :meth:`~CliRunner.invoke`.
+
+ .. versionchanged:: 8.2
+ Added the ``catch_exceptions`` parameter.
+
+ .. versionchanged:: 8.2
+ ``mix_stderr`` parameter has been removed.
+ """
+
+ def __init__(
+ self,
+ charset: str = "utf-8",
+ env: cabc.Mapping[str, str | None] | None = None,
+ echo_stdin: bool = False,
+ catch_exceptions: bool = True,
+ ) -> None:
+ self.charset = charset
+ self.env: cabc.Mapping[str, str | None] = env or {}
+ self.echo_stdin = echo_stdin
+ self.catch_exceptions = catch_exceptions
+
+ def get_default_prog_name(self, cli: Command) -> str:
+ """Given a command object it will return the default program name
+ for it. The default is the `name` attribute or ``"root"`` if not
+ set.
+ """
+ return cli.name or "root"
+
+ def make_env(
+ self, overrides: cabc.Mapping[str, str | None] | None = None
+ ) -> cabc.Mapping[str, str | None]:
+ """Returns the environment overrides for invoking a script."""
+ rv = dict(self.env)
+ if overrides:
+ rv.update(overrides)
+ return rv
+
+ @contextlib.contextmanager
+ def isolation(
+ self,
+ input: str | bytes | t.IO[t.Any] | None = None,
+ env: cabc.Mapping[str, str | None] | None = None,
+ color: bool = False,
+ ) -> cabc.Iterator[tuple[io.BytesIO, io.BytesIO, io.BytesIO]]:
+ """A context manager that sets up the isolation for invoking of a
+ command line tool. This sets up `` with the given input data
+ and `os.environ` with the overrides from the given dictionary.
+ This also rebinds some internals in Click to be mocked (like the
+ prompt functionality).
+
+ This is automatically done in the :meth:`invoke` method.
+
+ :param input: the input stream to put into `sys.stdin`.
+ :param env: the environment overrides as dictionary.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+
+ .. versionadded:: 8.2
+            An additional output stream is returned, which is a mix of
+            `<stdout>` and `<stderr>` streams.
+
+ .. versionchanged:: 8.2
+            Always returns the `<stderr>` stream.
+
+ .. versionchanged:: 8.0
+            `<stderr>` is opened with ``errors="backslashreplace"``
+ instead of the default ``"strict"``.
+
+ .. versionchanged:: 4.0
+ Added the ``color`` parameter.
+ """
+ bytes_input = make_input_stream(input, self.charset)
+ echo_input = None
+
+ old_stdin = sys.stdin
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ old_forced_width = formatting.FORCED_WIDTH
+ formatting.FORCED_WIDTH = 80
+
+ env = self.make_env(env)
+
+ stream_mixer = StreamMixer()
+
+ if self.echo_stdin:
+ bytes_input = echo_input = t.cast(
+ t.BinaryIO, EchoingStdin(bytes_input, stream_mixer.stdout)
+ )
+
+ sys.stdin = text_input = _NamedTextIOWrapper(
+            bytes_input, encoding=self.charset, name="<stdin>", mode="r"
+ )
+
+ if self.echo_stdin:
+ # Force unbuffered reads, otherwise TextIOWrapper reads a
+ # large chunk which is echoed early.
+ text_input._CHUNK_SIZE = 1 # type: ignore
+
+ sys.stdout = _NamedTextIOWrapper(
+            stream_mixer.stdout, encoding=self.charset, name="<stdout>", mode="w"
+ )
+
+ sys.stderr = _NamedTextIOWrapper(
+ stream_mixer.stderr,
+ encoding=self.charset,
+ name="",
+ mode="w",
+ errors="backslashreplace",
+ )
+
+ @_pause_echo(echo_input) # type: ignore
+ def visible_input(prompt: str | None = None) -> str:
+ sys.stdout.write(prompt or "")
+ try:
+ val = next(text_input).rstrip("\r\n")
+ except StopIteration as e:
+ raise EOFError() from e
+ sys.stdout.write(f"{val}\n")
+ sys.stdout.flush()
+ return val
+
+ @_pause_echo(echo_input) # type: ignore
+ def hidden_input(prompt: str | None = None) -> str:
+ sys.stdout.write(f"{prompt or ''}\n")
+ sys.stdout.flush()
+ try:
+ return next(text_input).rstrip("\r\n")
+ except StopIteration as e:
+ raise EOFError() from e
+
+ @_pause_echo(echo_input) # type: ignore
+ def _getchar(echo: bool) -> str:
+ char = sys.stdin.read(1)
+
+ if echo:
+ sys.stdout.write(char)
+
+ sys.stdout.flush()
+ return char
+
+ default_color = color
+
+ def should_strip_ansi(
+ stream: t.IO[t.Any] | None = None, color: bool | None = None
+ ) -> bool:
+ if color is None:
+ return not default_color
+ return not color
+
+ old_visible_prompt_func = termui.visible_prompt_func
+ old_hidden_prompt_func = termui.hidden_prompt_func
+ old__getchar_func = termui._getchar
+ old_should_strip_ansi = utils.should_strip_ansi # type: ignore
+ old__compat_should_strip_ansi = _compat.should_strip_ansi
+ termui.visible_prompt_func = visible_input
+ termui.hidden_prompt_func = hidden_input
+ termui._getchar = _getchar
+ utils.should_strip_ansi = should_strip_ansi # type: ignore
+ _compat.should_strip_ansi = should_strip_ansi
+
+ old_env = {}
+ try:
+ for key, value in env.items():
+ old_env[key] = os.environ.get(key)
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ yield (stream_mixer.stdout, stream_mixer.stderr, stream_mixer.output)
+ finally:
+ for key, value in old_env.items():
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+ sys.stdin = old_stdin
+ termui.visible_prompt_func = old_visible_prompt_func
+ termui.hidden_prompt_func = old_hidden_prompt_func
+ termui._getchar = old__getchar_func
+ utils.should_strip_ansi = old_should_strip_ansi # type: ignore
+ _compat.should_strip_ansi = old__compat_should_strip_ansi
+ formatting.FORCED_WIDTH = old_forced_width
+
+ def invoke(
+ self,
+ cli: Command,
+ args: str | cabc.Sequence[str] | None = None,
+ input: str | bytes | t.IO[t.Any] | None = None,
+ env: cabc.Mapping[str, str | None] | None = None,
+ catch_exceptions: bool | None = None,
+ color: bool = False,
+ **extra: t.Any,
+ ) -> Result:
+ """Invokes a command in an isolated environment. The arguments are
+ forwarded directly to the command line script, the `extra` keyword
+ arguments are passed to the :meth:`~clickpkg.Command.main` function of
+ the command.
+
+ This returns a :class:`Result` object.
+
+ :param cli: the command to invoke
+ :param args: the arguments to invoke. It may be given as an iterable
+ or a string. When given as string it will be interpreted
+ as a Unix shell command. More details at
+ :func:`shlex.split`.
+ :param input: the input data for `sys.stdin`.
+ :param env: the environment overrides.
+ :param catch_exceptions: Whether to catch any other exceptions than
+ ``SystemExit``. If :data:`None`, the value
+ from :class:`CliRunner` is used.
+ :param extra: the keyword arguments to pass to :meth:`main`.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+
+ .. versionadded:: 8.2
+ The result object has the ``output_bytes`` attribute with
+ the mix of ``stdout_bytes`` and ``stderr_bytes``, as the user would
+            see it in their terminal.
+
+ .. versionchanged:: 8.2
+ The result object always returns the ``stderr_bytes`` stream.
+
+ .. versionchanged:: 8.0
+ The result object has the ``return_value`` attribute with
+ the value returned from the invoked command.
+
+ .. versionchanged:: 4.0
+ Added the ``color`` parameter.
+
+ .. versionchanged:: 3.0
+ Added the ``catch_exceptions`` parameter.
+
+ .. versionchanged:: 3.0
+ The result object has the ``exc_info`` attribute with the
+ traceback if available.
+ """
+ exc_info = None
+ if catch_exceptions is None:
+ catch_exceptions = self.catch_exceptions
+
+ with self.isolation(input=input, env=env, color=color) as outstreams:
+ return_value = None
+ exception: BaseException | None = None
+ exit_code = 0
+
+ if isinstance(args, str):
+ args = shlex.split(args)
+
+ try:
+ prog_name = extra.pop("prog_name")
+ except KeyError:
+ prog_name = self.get_default_prog_name(cli)
+
+ try:
+ return_value = cli.main(args=args or (), prog_name=prog_name, **extra)
+ except SystemExit as e:
+ exc_info = sys.exc_info()
+ e_code = t.cast("int | t.Any | None", e.code)
+
+ if e_code is None:
+ e_code = 0
+
+ if e_code != 0:
+ exception = e
+
+ if not isinstance(e_code, int):
+ sys.stdout.write(str(e_code))
+ sys.stdout.write("\n")
+ e_code = 1
+
+ exit_code = e_code
+
+ except Exception as e:
+ if not catch_exceptions:
+ raise
+ exception = e
+ exit_code = 1
+ exc_info = sys.exc_info()
+ finally:
+ sys.stdout.flush()
+ sys.stderr.flush()
+ stdout = outstreams[0].getvalue()
+ stderr = outstreams[1].getvalue()
+ output = outstreams[2].getvalue()
+
+ return Result(
+ runner=self,
+ stdout_bytes=stdout,
+ stderr_bytes=stderr,
+ output_bytes=output,
+ return_value=return_value,
+ exit_code=exit_code,
+ exception=exception,
+ exc_info=exc_info, # type: ignore
+ )
+
+ @contextlib.contextmanager
+ def isolated_filesystem(
+ self, temp_dir: str | os.PathLike[str] | None = None
+ ) -> cabc.Iterator[str]:
+ """A context manager that creates a temporary directory and
+ changes the current working directory to it. This isolates tests
+ that affect the contents of the CWD to prevent them from
+ interfering with each other.
+
+ :param temp_dir: Create the temporary directory under this
+ directory. If given, the created directory is not removed
+ when exiting.
+
+ .. versionchanged:: 8.0
+ Added the ``temp_dir`` parameter.
+ """
+ cwd = os.getcwd()
+ dt = tempfile.mkdtemp(dir=temp_dir)
+ os.chdir(dt)
+
+ try:
+ yield dt
+ finally:
+ os.chdir(cwd)
+
+ if temp_dir is None:
+ import shutil
+
+ try:
+ shutil.rmtree(dt)
+ except OSError:
+ pass
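+
+
+# Editor's sketch (not part of upstream Click): a minimal illustration of
+# the two helpers above. The ``greet`` command and filenames are
+# hypothetical.
+def _example_runner_usage() -> None:
+    import click
+
+    @click.command()
+    @click.argument("name")
+    def greet(name: str) -> None:
+        click.echo(f"Hello, {name}!")
+
+    runner = CliRunner()
+    result = runner.invoke(greet, ["World"])
+    assert result.exit_code == 0
+    assert result.output == "Hello, World!\n"
+
+    # isolated_filesystem keeps file-writing tests out of the real CWD.
+    with runner.isolated_filesystem():
+        with open("scratch.txt", "w") as f:
+            f.write("temporary")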
diff --git a/tapdown/lib/python3.11/site-packages/click/types.py b/tapdown/lib/python3.11/site-packages/click/types.py
new file mode 100644
index 0000000..e71c1c2
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/types.py
@@ -0,0 +1,1209 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import enum
+import os
+import stat
+import sys
+import typing as t
+from datetime import datetime
+from gettext import gettext as _
+from gettext import ngettext
+
+from ._compat import _get_argv_encoding
+from ._compat import open_stream
+from .exceptions import BadParameter
+from .utils import format_filename
+from .utils import LazyFile
+from .utils import safecall
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ from .core import Context
+ from .core import Parameter
+ from .shell_completion import CompletionItem
+
+ParamTypeValue = t.TypeVar("ParamTypeValue")
+
+
+class ParamType:
+ """Represents the type of a parameter. Validates and converts values
+ from the command line or Python into the correct type.
+
+ To implement a custom type, subclass and implement at least the
+ following:
+
+ - The :attr:`name` class attribute must be set.
+ - Calling an instance of the type with ``None`` must return
+ ``None``. This is already implemented by default.
+ - :meth:`convert` must convert string values to the correct type.
+ - :meth:`convert` must accept values that are already the correct
+ type.
+ - It must be able to convert a value if the ``ctx`` and ``param``
+ arguments are ``None``. This can occur when converting prompt
+ input.
+ """
+
+ is_composite: t.ClassVar[bool] = False
+ arity: t.ClassVar[int] = 1
+
+ #: the descriptive name of this type
+ name: str
+
+ #: if a list of this type is expected and the value is pulled from a
+ #: string environment variable, this is what splits it up. `None`
+ #: means any whitespace. For all parameters the general rule is that
+ #: whitespace splits them up. The exception are paths and files which
+ #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
+ #: Windows).
+ envvar_list_splitter: t.ClassVar[str | None] = None
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ """Gather information that could be useful for a tool generating
+ user-facing documentation.
+
+ Use :meth:`click.Context.to_info_dict` to traverse the entire
+ CLI structure.
+
+ .. versionadded:: 8.0
+ """
+ # The class name without the "ParamType" suffix.
+ param_type = type(self).__name__.partition("ParamType")[0]
+ param_type = param_type.partition("ParameterType")[0]
+
+ # Custom subclasses might not remember to set a name.
+ if hasattr(self, "name"):
+ name = self.name
+ else:
+ name = param_type
+
+ return {"param_type": param_type, "name": name}
+
+ def __call__(
+ self,
+ value: t.Any,
+ param: Parameter | None = None,
+ ctx: Context | None = None,
+ ) -> t.Any:
+ if value is not None:
+ return self.convert(value, param, ctx)
+
+ def get_metavar(self, param: Parameter, ctx: Context) -> str | None:
+ """Returns the metavar default for this param if it provides one."""
+
+ def get_missing_message(self, param: Parameter, ctx: Context | None) -> str | None:
+        """May optionally return extra information about a missing
+        parameter.
+
+ .. versionadded:: 2.0
+ """
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ """Convert the value to the correct type. This is not called if
+ the value is ``None`` (the missing value).
+
+ This must accept string values from the command line, as well as
+ values that are already the correct type. It may also convert
+ other compatible types.
+
+ The ``param`` and ``ctx`` arguments may be ``None`` in certain
+ situations, such as when converting prompt input.
+
+ If the value cannot be converted, call :meth:`fail` with a
+ descriptive message.
+
+ :param value: The value to convert.
+ :param param: The parameter that is using this type to convert
+ its value. May be ``None``.
+ :param ctx: The current context that arrived at this value. May
+ be ``None``.
+ """
+ return value
+
+ def split_envvar_value(self, rv: str) -> cabc.Sequence[str]:
+ """Given a value from an environment variable this splits it up
+ into small chunks depending on the defined envvar list splitter.
+
+ If the splitter is set to `None`, which means that whitespace splits,
+ then leading and trailing whitespace is ignored. Otherwise, leading
+ and trailing splitters usually lead to empty items being included.
+ """
+ return (rv or "").split(self.envvar_list_splitter)
+
+ def fail(
+ self,
+ message: str,
+ param: Parameter | None = None,
+ ctx: Context | None = None,
+ ) -> t.NoReturn:
+ """Helper method to fail with an invalid value message."""
+ raise BadParameter(message, ctx=ctx, param=param)
+
+ def shell_complete(
+ self, ctx: Context, param: Parameter, incomplete: str
+ ) -> list[CompletionItem]:
+ """Return a list of
+ :class:`~click.shell_completion.CompletionItem` objects for the
+ incomplete value. Most types do not provide completions, but
+ some do, and this allows custom types to provide custom
+ completions as well.
+
+ :param ctx: Invocation context for this command.
+ :param param: The parameter that is requesting completion.
+ :param incomplete: Value being completed. May be empty.
+
+ .. versionadded:: 8.0
+ """
+ return []
+
+
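+# Editor's sketch (not part of upstream Click): a custom type following the
+# subclassing contract documented on :class:`ParamType` above. The class and
+# its ``name`` are illustrative only.
+class _CommaSeparatedExample(ParamType):
+    name = "comma_separated"
+
+    def convert(
+        self, value: t.Any, param: Parameter | None, ctx: Context | None
+    ) -> t.Any:
+        if isinstance(value, list):
+            # convert() must accept values that are already converted.
+            return value
+        return [item.strip() for item in value.split(",")]
+
+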
+class CompositeParamType(ParamType):
+ is_composite = True
+
+ @property
+ def arity(self) -> int: # type: ignore
+ raise NotImplementedError()
+
+
+class FuncParamType(ParamType):
+ def __init__(self, func: t.Callable[[t.Any], t.Any]) -> None:
+ self.name: str = func.__name__
+ self.func = func
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict()
+ info_dict["func"] = self.func
+ return info_dict
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ try:
+ return self.func(value)
+ except ValueError:
+ try:
+ value = str(value)
+ except UnicodeError:
+ value = value.decode("utf-8", "replace")
+
+ self.fail(value, param, ctx)
+
+
+class UnprocessedParamType(ParamType):
+ name = "text"
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ return value
+
+ def __repr__(self) -> str:
+ return "UNPROCESSED"
+
+
+class StringParamType(ParamType):
+ name = "text"
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ if isinstance(value, bytes):
+ enc = _get_argv_encoding()
+ try:
+ value = value.decode(enc)
+ except UnicodeError:
+ fs_enc = sys.getfilesystemencoding()
+ if fs_enc != enc:
+ try:
+ value = value.decode(fs_enc)
+ except UnicodeError:
+ value = value.decode("utf-8", "replace")
+ else:
+ value = value.decode("utf-8", "replace")
+ return value
+ return str(value)
+
+ def __repr__(self) -> str:
+ return "STRING"
+
+
+class Choice(ParamType, t.Generic[ParamTypeValue]):
+ """The choice type allows a value to be checked against a fixed set
+ of supported values.
+
+ You may pass any iterable value which will be converted to a tuple
+ and thus will only be iterated once.
+
+ The resulting value will always be one of the originally passed choices.
+ See :meth:`normalize_choice` for more info on the mapping of strings
+ to choices. See :ref:`choice-opts` for an example.
+
+ :param case_sensitive: Set to false to make choices case
+ insensitive. Defaults to true.
+
+    .. versionchanged:: 8.2.0
+        Non-``str`` ``choices`` are now supported, and ``choices`` may be any
+        iterable. Previously, passing anything but a list or tuple was not
+        recommended.
+
+ .. versionadded:: 8.2.0
+ Choice normalization can be overridden via :meth:`normalize_choice`.
+ """
+
+ name = "choice"
+
+ def __init__(
+ self, choices: cabc.Iterable[ParamTypeValue], case_sensitive: bool = True
+ ) -> None:
+ self.choices: cabc.Sequence[ParamTypeValue] = tuple(choices)
+ self.case_sensitive = case_sensitive
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict()
+ info_dict["choices"] = self.choices
+ info_dict["case_sensitive"] = self.case_sensitive
+ return info_dict
+
+ def _normalized_mapping(
+ self, ctx: Context | None = None
+ ) -> cabc.Mapping[ParamTypeValue, str]:
+ """
+ Returns mapping where keys are the original choices and the values are
+ the normalized values that are accepted via the command line.
+
+        This is a simple wrapper around :meth:`normalize_choice`; use that
+        supported method instead.
+ """
+ return {
+ choice: self.normalize_choice(
+ choice=choice,
+ ctx=ctx,
+ )
+ for choice in self.choices
+ }
+
+ def normalize_choice(self, choice: ParamTypeValue, ctx: Context | None) -> str:
+ """
+ Normalize a choice value, used to map a passed string to a choice.
+ Each choice must have a unique normalized value.
+
+        By default this uses :meth:`Context.token_normalize_func` and, if the
+        type is not case sensitive, casefolds the value.
+
+ .. versionadded:: 8.2.0
+ """
+ normed_value = choice.name if isinstance(choice, enum.Enum) else str(choice)
+
+ if ctx is not None and ctx.token_normalize_func is not None:
+ normed_value = ctx.token_normalize_func(normed_value)
+
+ if not self.case_sensitive:
+ normed_value = normed_value.casefold()
+
+ return normed_value
+
+ def get_metavar(self, param: Parameter, ctx: Context) -> str | None:
+ if param.param_type_name == "option" and not param.show_choices: # type: ignore
+ choice_metavars = [
+ convert_type(type(choice)).name.upper() for choice in self.choices
+ ]
+ choices_str = "|".join([*dict.fromkeys(choice_metavars)])
+ else:
+ choices_str = "|".join(
+ [str(i) for i in self._normalized_mapping(ctx=ctx).values()]
+ )
+
+ # Use curly braces to indicate a required argument.
+ if param.required and param.param_type_name == "argument":
+ return f"{{{choices_str}}}"
+
+        # Use square brackets to indicate an option or optional argument.
+ return f"[{choices_str}]"
+
+ def get_missing_message(self, param: Parameter, ctx: Context | None) -> str:
+ """
+ Message shown when no choice is passed.
+
+ .. versionchanged:: 8.2.0 Added ``ctx`` argument.
+ """
+ return _("Choose from:\n\t{choices}").format(
+ choices=",\n\t".join(self._normalized_mapping(ctx=ctx).values())
+ )
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> ParamTypeValue:
+ """
+ For a given value from the parser, normalize it and find its
+ matching normalized value in the list of choices. Then return the
+ matched "original" choice.
+ """
+ normed_value = self.normalize_choice(choice=value, ctx=ctx)
+ normalized_mapping = self._normalized_mapping(ctx=ctx)
+
+ try:
+ return next(
+ original
+ for original, normalized in normalized_mapping.items()
+ if normalized == normed_value
+ )
+ except StopIteration:
+ self.fail(
+ self.get_invalid_choice_message(value=value, ctx=ctx),
+ param=param,
+ ctx=ctx,
+ )
+
+ def get_invalid_choice_message(self, value: t.Any, ctx: Context | None) -> str:
+ """Get the error message when the given choice is invalid.
+
+ :param value: The invalid value.
+
+ .. versionadded:: 8.2
+ """
+ choices_str = ", ".join(map(repr, self._normalized_mapping(ctx=ctx).values()))
+ return ngettext(
+ "{value!r} is not {choice}.",
+ "{value!r} is not one of {choices}.",
+ len(self.choices),
+ ).format(value=value, choice=choices_str, choices=choices_str)
+
+ def __repr__(self) -> str:
+ return f"Choice({list(self.choices)})"
+
+ def shell_complete(
+ self, ctx: Context, param: Parameter, incomplete: str
+ ) -> list[CompletionItem]:
+ """Complete choices that start with the incomplete value.
+
+ :param ctx: Invocation context for this command.
+ :param param: The parameter that is requesting completion.
+ :param incomplete: Value being completed. May be empty.
+
+ .. versionadded:: 8.0
+ """
+ from click.shell_completion import CompletionItem
+
+ str_choices = map(str, self.choices)
+
+ if self.case_sensitive:
+ matched = (c for c in str_choices if c.startswith(incomplete))
+ else:
+ incomplete = incomplete.lower()
+ matched = (c for c in str_choices if c.lower().startswith(incomplete))
+
+ return [CompletionItem(c) for c in matched]
+
+
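+# Editor's sketch (not part of upstream Click): convert() returns the
+# original choice object, not the normalized token. Values are illustrative.
+def _example_choice() -> None:
+    color = Choice(["red", "green", "blue"], case_sensitive=False)
+    assert color.convert("RED", None, None) == "red"
+
+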
+class DateTime(ParamType):
+ """The DateTime type converts date strings into `datetime` objects.
+
+ The format strings which are checked are configurable, but default to some
+ common (non-timezone aware) ISO 8601 formats.
+
+ When specifying *DateTime* formats, you should only pass a list or a tuple.
+ Other iterables, like generators, may lead to surprising results.
+
+ The format strings are processed using ``datetime.strptime``, and this
+ consequently defines the format strings which are allowed.
+
+ Parsing is tried using each format, in order, and the first format which
+ parses successfully is used.
+
+ :param formats: A list or tuple of date format strings, in the order in
+ which they should be tried. Defaults to
+ ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
+ ``'%Y-%m-%d %H:%M:%S'``.
+ """
+
+ name = "datetime"
+
+ def __init__(self, formats: cabc.Sequence[str] | None = None):
+ self.formats: cabc.Sequence[str] = formats or [
+ "%Y-%m-%d",
+ "%Y-%m-%dT%H:%M:%S",
+ "%Y-%m-%d %H:%M:%S",
+ ]
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict()
+ info_dict["formats"] = self.formats
+ return info_dict
+
+ def get_metavar(self, param: Parameter, ctx: Context) -> str | None:
+ return f"[{'|'.join(self.formats)}]"
+
+ def _try_to_convert_date(self, value: t.Any, format: str) -> datetime | None:
+ try:
+ return datetime.strptime(value, format)
+ except ValueError:
+ return None
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ if isinstance(value, datetime):
+ return value
+
+ for format in self.formats:
+ converted = self._try_to_convert_date(value, format)
+
+ if converted is not None:
+ return converted
+
+ formats_str = ", ".join(map(repr, self.formats))
+ self.fail(
+ ngettext(
+ "{value!r} does not match the format {format}.",
+ "{value!r} does not match the formats {formats}.",
+ len(self.formats),
+ ).format(value=value, format=formats_str, formats=formats_str),
+ param,
+ ctx,
+ )
+
+ def __repr__(self) -> str:
+ return "DateTime"
+
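+# Editor's sketch (not part of upstream Click): the first matching format
+# wins; values that are already ``datetime`` pass through unchanged.
+def _example_datetime() -> None:
+    dt_type = DateTime(formats=["%Y-%m-%d", "%Y-%m-%d %H:%M:%S"])
+    assert dt_type.convert("2024-01-31", None, None) == datetime(2024, 1, 31)
+
+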
+
+class _NumberParamTypeBase(ParamType):
+ _number_class: t.ClassVar[type[t.Any]]
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ try:
+ return self._number_class(value)
+ except ValueError:
+ self.fail(
+ _("{value!r} is not a valid {number_type}.").format(
+ value=value, number_type=self.name
+ ),
+ param,
+ ctx,
+ )
+
+
+class _NumberRangeBase(_NumberParamTypeBase):
+ def __init__(
+ self,
+ min: float | None = None,
+ max: float | None = None,
+ min_open: bool = False,
+ max_open: bool = False,
+ clamp: bool = False,
+ ) -> None:
+ self.min = min
+ self.max = max
+ self.min_open = min_open
+ self.max_open = max_open
+ self.clamp = clamp
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict()
+ info_dict.update(
+ min=self.min,
+ max=self.max,
+ min_open=self.min_open,
+ max_open=self.max_open,
+ clamp=self.clamp,
+ )
+ return info_dict
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ import operator
+
+ rv = super().convert(value, param, ctx)
+ lt_min: bool = self.min is not None and (
+ operator.le if self.min_open else operator.lt
+ )(rv, self.min)
+ gt_max: bool = self.max is not None and (
+ operator.ge if self.max_open else operator.gt
+ )(rv, self.max)
+
+ if self.clamp:
+ if lt_min:
+ return self._clamp(self.min, 1, self.min_open) # type: ignore
+
+ if gt_max:
+ return self._clamp(self.max, -1, self.max_open) # type: ignore
+
+ if lt_min or gt_max:
+ self.fail(
+ _("{value} is not in the range {range}.").format(
+ value=rv, range=self._describe_range()
+ ),
+ param,
+ ctx,
+ )
+
+ return rv
+
+ def _clamp(self, bound: float, dir: t.Literal[1, -1], open: bool) -> float:
+ """Find the valid value to clamp to bound in the given
+ direction.
+
+ :param bound: The boundary value.
+ :param dir: 1 or -1 indicating the direction to move.
+ :param open: If true, the range does not include the bound.
+ """
+ raise NotImplementedError
+
+ def _describe_range(self) -> str:
+ """Describe the range for use in help text."""
+ if self.min is None:
+ op = "<" if self.max_open else "<="
+ return f"x{op}{self.max}"
+
+ if self.max is None:
+ op = ">" if self.min_open else ">="
+ return f"x{op}{self.min}"
+
+ lop = "<" if self.min_open else "<="
+ rop = "<" if self.max_open else "<="
+ return f"{self.min}{lop}x{rop}{self.max}"
+
+ def __repr__(self) -> str:
+ clamp = " clamped" if self.clamp else ""
+ return f"<{type(self).__name__} {self._describe_range()}{clamp}>"
+
+
+class IntParamType(_NumberParamTypeBase):
+ name = "integer"
+ _number_class = int
+
+ def __repr__(self) -> str:
+ return "INT"
+
+
+class IntRange(_NumberRangeBase, IntParamType):
+ """Restrict an :data:`click.INT` value to a range of accepted
+ values. See :ref:`ranges`.
+
+ If ``min`` or ``max`` are not passed, any value is accepted in that
+ direction. If ``min_open`` or ``max_open`` are enabled, the
+ corresponding boundary is not included in the range.
+
+ If ``clamp`` is enabled, a value outside the range is clamped to the
+ boundary instead of failing.
+
+ .. versionchanged:: 8.0
+ Added the ``min_open`` and ``max_open`` parameters.
+ """
+
+ name = "integer range"
+
+ def _clamp( # type: ignore
+ self, bound: int, dir: t.Literal[1, -1], open: bool
+ ) -> int:
+ if not open:
+ return bound
+
+ return bound + dir
+
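+# Editor's sketch (not part of upstream Click): clamping versus failing.
+# The bounds are illustrative.
+def _example_int_range() -> None:
+    assert IntRange(0, 10).convert("7", None, None) == 7
+    assert IntRange(0, 10, clamp=True).convert("42", None, None) == 10
+
+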
+
+class FloatParamType(_NumberParamTypeBase):
+ name = "float"
+ _number_class = float
+
+ def __repr__(self) -> str:
+ return "FLOAT"
+
+
+class FloatRange(_NumberRangeBase, FloatParamType):
+ """Restrict a :data:`click.FLOAT` value to a range of accepted
+ values. See :ref:`ranges`.
+
+ If ``min`` or ``max`` are not passed, any value is accepted in that
+ direction. If ``min_open`` or ``max_open`` are enabled, the
+ corresponding boundary is not included in the range.
+
+ If ``clamp`` is enabled, a value outside the range is clamped to the
+ boundary instead of failing. This is not supported if either
+ boundary is marked ``open``.
+
+ .. versionchanged:: 8.0
+ Added the ``min_open`` and ``max_open`` parameters.
+ """
+
+ name = "float range"
+
+ def __init__(
+ self,
+ min: float | None = None,
+ max: float | None = None,
+ min_open: bool = False,
+ max_open: bool = False,
+ clamp: bool = False,
+ ) -> None:
+ super().__init__(
+ min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp
+ )
+
+ if (min_open or max_open) and clamp:
+ raise TypeError("Clamping is not supported for open bounds.")
+
+ def _clamp(self, bound: float, dir: t.Literal[1, -1], open: bool) -> float:
+ if not open:
+ return bound
+
+ # Could use math.nextafter here, but clamping an
+ # open float range doesn't seem to be particularly useful. It's
+ # left up to the user to write a callback to do it if needed.
+ raise RuntimeError("Clamping is not supported for open bounds.")
+
+
+class BoolParamType(ParamType):
+ name = "boolean"
+
+ bool_states: dict[str, bool] = {
+ "1": True,
+ "0": False,
+ "yes": True,
+ "no": False,
+ "true": True,
+ "false": False,
+ "on": True,
+ "off": False,
+ "t": True,
+ "f": False,
+ "y": True,
+ "n": False,
+ # Absence of value is considered False.
+ "": False,
+ }
+ """A mapping of string values to boolean states.
+
+    The mapping is inspired by
+    :py:attr:`configparser.ConfigParser.BOOLEAN_STATES` and extends it.
+
+ .. caution::
+ String values are lower-cased, as the ``str_to_bool`` comparison function
+ below is case-insensitive.
+
+    .. warning::
+        The mapping is not exhaustive, and does not cover all possible string
+        representations of booleans. It will remain as it is to avoid endless
+        bikeshedding.
+
+    Future work may be considered to make this mapping user-configurable via
+    a public API.
+ """
+
+ @staticmethod
+ def str_to_bool(value: str | bool) -> bool | None:
+ """Convert a string to a boolean value.
+
+ If the value is already a boolean, it is returned as-is. If the value is a
+        string, it is stripped of whitespace and lower-cased, then checked against
+ the known boolean states pre-defined in the `BoolParamType.bool_states` mapping
+ above.
+
+ Returns `None` if the value does not match any known boolean state.
+ """
+ if isinstance(value, bool):
+ return value
+ return BoolParamType.bool_states.get(value.strip().lower())
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> bool:
+ normalized = self.str_to_bool(value)
+ if normalized is None:
+ self.fail(
+ _(
+ "{value!r} is not a valid boolean. Recognized values: {states}"
+ ).format(value=value, states=", ".join(sorted(self.bool_states))),
+ param,
+ ctx,
+ )
+ return normalized
+
+ def __repr__(self) -> str:
+ return "BOOL"
+
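+# Editor's sketch (not part of upstream Click): str_to_bool() trims and
+# lower-cases before looking up ``bool_states``.
+def _example_bool() -> None:
+    assert BoolParamType.str_to_bool(" Yes ") is True
+    assert BoolParamType.str_to_bool("OFF") is False
+    assert BoolParamType.str_to_bool("maybe") is None
+
+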
+
+class UUIDParameterType(ParamType):
+ name = "uuid"
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ import uuid
+
+ if isinstance(value, uuid.UUID):
+ return value
+
+ value = value.strip()
+
+ try:
+ return uuid.UUID(value)
+ except ValueError:
+ self.fail(
+ _("{value!r} is not a valid UUID.").format(value=value), param, ctx
+ )
+
+ def __repr__(self) -> str:
+ return "UUID"
+
+
+class File(ParamType):
+ """Declares a parameter to be a file for reading or writing. The file
+ is automatically closed once the context tears down (after the command
+ finished working).
+
+ Files can be opened for reading or writing. The special value ``-``
+ indicates stdin or stdout depending on the mode.
+
+ By default, the file is opened for reading text data, but it can also be
+ opened in binary mode or for writing. The encoding parameter can be used
+ to force a specific encoding.
+
+    The `lazy` flag controls whether the file should be opened immediately or
+    upon first IO. The default is to be non-lazy for standard input and output
+    streams as well as files opened for reading, and lazy otherwise. When
+    opening a file lazily for reading, it is still opened temporarily for
+    validation, but will not be held open until first IO. Laziness is mainly
+    useful when opening for writing, to avoid creating the file until it is
+    needed.
+
+    Files can also be opened atomically, in which case all writes go into a
+    separate file in the same folder, and upon completion the file is moved
+    to the original location. This is useful if a file that is regularly read
+    by other users is modified.
+
+ See :ref:`file-args` for more information.
+
+ .. versionchanged:: 2.0
+ Added the ``atomic`` parameter.
+ """
+
+ name = "filename"
+ envvar_list_splitter: t.ClassVar[str] = os.path.pathsep
+
+ def __init__(
+ self,
+ mode: str = "r",
+ encoding: str | None = None,
+ errors: str | None = "strict",
+ lazy: bool | None = None,
+ atomic: bool = False,
+ ) -> None:
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.lazy = lazy
+ self.atomic = atomic
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict()
+ info_dict.update(mode=self.mode, encoding=self.encoding)
+ return info_dict
+
+ def resolve_lazy_flag(self, value: str | os.PathLike[str]) -> bool:
+ if self.lazy is not None:
+ return self.lazy
+ if os.fspath(value) == "-":
+ return False
+ elif "w" in self.mode:
+ return True
+ return False
+
+ def convert(
+ self,
+ value: str | os.PathLike[str] | t.IO[t.Any],
+ param: Parameter | None,
+ ctx: Context | None,
+ ) -> t.IO[t.Any]:
+ if _is_file_like(value):
+ return value
+
+ value = t.cast("str | os.PathLike[str]", value)
+
+ try:
+ lazy = self.resolve_lazy_flag(value)
+
+ if lazy:
+ lf = LazyFile(
+ value, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+
+ if ctx is not None:
+ ctx.call_on_close(lf.close_intelligently)
+
+ return t.cast("t.IO[t.Any]", lf)
+
+ f, should_close = open_stream(
+ value, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+
+ # If a context is provided, we automatically close the file
+ # at the end of the context execution (or flush out). If a
+ # context does not exist, it's the caller's responsibility to
+ # properly close the file. This for instance happens when the
+ # type is used with prompts.
+ if ctx is not None:
+ if should_close:
+ ctx.call_on_close(safecall(f.close))
+ else:
+ ctx.call_on_close(safecall(f.flush))
+
+ return f
+ except OSError as e:
+ self.fail(f"'{format_filename(value)}': {e.strerror}", param, ctx)
+
+ def shell_complete(
+ self, ctx: Context, param: Parameter, incomplete: str
+ ) -> list[CompletionItem]:
+ """Return a special completion marker that tells the completion
+ system to use the shell to provide file path completions.
+
+ :param ctx: Invocation context for this command.
+ :param param: The parameter that is requesting completion.
+ :param incomplete: Value being completed. May be empty.
+
+ .. versionadded:: 8.0
+ """
+ from click.shell_completion import CompletionItem
+
+ return [CompletionItem(incomplete, type="file")]
+
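+# Editor's sketch (not part of upstream Click): how the lazy default
+# described in the docstring above plays out. The filename is illustrative.
+def _example_file_lazy_flag() -> None:
+    assert File("w").resolve_lazy_flag("out.txt") is True  # lazy for writes
+    assert File("r").resolve_lazy_flag("out.txt") is False  # eager for reads
+    assert File("w").resolve_lazy_flag("-") is False  # stdout is never lazy
+
+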
+
+def _is_file_like(value: t.Any) -> te.TypeGuard[t.IO[t.Any]]:
+ return hasattr(value, "read") or hasattr(value, "write")
+
+
+class Path(ParamType):
+ """The ``Path`` type is similar to the :class:`File` type, but
+ returns the filename instead of an open file. Various checks can be
+ enabled to validate the type of file and permissions.
+
+ :param exists: The file or directory needs to exist for the value to
+ be valid. If this is not set to ``True``, and the file does not
+ exist, then all further checks are silently skipped.
+ :param file_okay: Allow a file as a value.
+ :param dir_okay: Allow a directory as a value.
+ :param readable: if true, a readable check is performed.
+ :param writable: if true, a writable check is performed.
+ :param executable: if true, an executable check is performed.
+ :param resolve_path: Make the value absolute and resolve any
+ symlinks. A ``~`` is not expanded, as this is supposed to be
+ done by the shell only.
+ :param allow_dash: Allow a single dash as a value, which indicates
+ a standard stream (but does not open it). Use
+ :func:`~click.open_file` to handle opening this value.
+ :param path_type: Convert the incoming path value to this type. If
+ ``None``, keep Python's default, which is ``str``. Useful to
+ convert to :class:`pathlib.Path`.
+
+ .. versionchanged:: 8.1
+ Added the ``executable`` parameter.
+
+ .. versionchanged:: 8.0
+ Allow passing ``path_type=pathlib.Path``.
+
+ .. versionchanged:: 6.0
+ Added the ``allow_dash`` parameter.
+ """
+
+ envvar_list_splitter: t.ClassVar[str] = os.path.pathsep
+
+ def __init__(
+ self,
+ exists: bool = False,
+ file_okay: bool = True,
+ dir_okay: bool = True,
+ writable: bool = False,
+ readable: bool = True,
+ resolve_path: bool = False,
+ allow_dash: bool = False,
+ path_type: type[t.Any] | None = None,
+ executable: bool = False,
+ ):
+ self.exists = exists
+ self.file_okay = file_okay
+ self.dir_okay = dir_okay
+ self.readable = readable
+ self.writable = writable
+ self.executable = executable
+ self.resolve_path = resolve_path
+ self.allow_dash = allow_dash
+ self.type = path_type
+
+ if self.file_okay and not self.dir_okay:
+ self.name: str = _("file")
+ elif self.dir_okay and not self.file_okay:
+ self.name = _("directory")
+ else:
+ self.name = _("path")
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict()
+ info_dict.update(
+ exists=self.exists,
+ file_okay=self.file_okay,
+ dir_okay=self.dir_okay,
+ writable=self.writable,
+ readable=self.readable,
+ allow_dash=self.allow_dash,
+ )
+ return info_dict
+
+ def coerce_path_result(
+ self, value: str | os.PathLike[str]
+ ) -> str | bytes | os.PathLike[str]:
+ if self.type is not None and not isinstance(value, self.type):
+ if self.type is str:
+ return os.fsdecode(value)
+ elif self.type is bytes:
+ return os.fsencode(value)
+ else:
+ return t.cast("os.PathLike[str]", self.type(value))
+
+ return value
+
+ def convert(
+ self,
+ value: str | os.PathLike[str],
+ param: Parameter | None,
+ ctx: Context | None,
+ ) -> str | bytes | os.PathLike[str]:
+ rv = value
+
+ is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
+
+ if not is_dash:
+ if self.resolve_path:
+ rv = os.path.realpath(rv)
+
+ try:
+ st = os.stat(rv)
+ except OSError:
+ if not self.exists:
+ return self.coerce_path_result(rv)
+ self.fail(
+ _("{name} {filename!r} does not exist.").format(
+ name=self.name.title(), filename=format_filename(value)
+ ),
+ param,
+ ctx,
+ )
+
+ if not self.file_okay and stat.S_ISREG(st.st_mode):
+ self.fail(
+ _("{name} {filename!r} is a file.").format(
+ name=self.name.title(), filename=format_filename(value)
+ ),
+ param,
+ ctx,
+ )
+ if not self.dir_okay and stat.S_ISDIR(st.st_mode):
+ self.fail(
+ _("{name} {filename!r} is a directory.").format(
+ name=self.name.title(), filename=format_filename(value)
+ ),
+ param,
+ ctx,
+ )
+
+ if self.readable and not os.access(rv, os.R_OK):
+ self.fail(
+ _("{name} {filename!r} is not readable.").format(
+ name=self.name.title(), filename=format_filename(value)
+ ),
+ param,
+ ctx,
+ )
+
+ if self.writable and not os.access(rv, os.W_OK):
+ self.fail(
+ _("{name} {filename!r} is not writable.").format(
+ name=self.name.title(), filename=format_filename(value)
+ ),
+ param,
+ ctx,
+ )
+
+ if self.executable and not os.access(value, os.X_OK):
+ self.fail(
+ _("{name} {filename!r} is not executable.").format(
+ name=self.name.title(), filename=format_filename(value)
+ ),
+ param,
+ ctx,
+ )
+
+ return self.coerce_path_result(rv)
+
+ def shell_complete(
+ self, ctx: Context, param: Parameter, incomplete: str
+ ) -> list[CompletionItem]:
+ """Return a special completion marker that tells the completion
+ system to use the shell to provide path completions for only
+ directories or any paths.
+
+ :param ctx: Invocation context for this command.
+ :param param: The parameter that is requesting completion.
+ :param incomplete: Value being completed. May be empty.
+
+ .. versionadded:: 8.0
+ """
+ from click.shell_completion import CompletionItem
+
+ type = "dir" if self.dir_okay and not self.file_okay else "file"
+ return [CompletionItem(incomplete, type=type)]
+
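+# Editor's sketch (not part of upstream Click): with ``exists`` unset, a
+# missing path passes through, coerced to ``path_type``. Path illustrative.
+def _example_path() -> None:
+    import pathlib
+
+    path_type = Path(dir_okay=False, path_type=pathlib.Path)
+    value = path_type.convert("notes.txt", None, None)
+    assert isinstance(value, pathlib.Path)
+
+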
+
+class Tuple(CompositeParamType):
+ """The default behavior of Click is to apply a type on a value directly.
+ This works well in most cases, except for when `nargs` is set to a fixed
+ count and different types should be used for different items. In this
+ case the :class:`Tuple` type can be used. This type can only be used
+ if `nargs` is set to a fixed number.
+
+ For more information see :ref:`tuple-type`.
+
+ This can be selected by using a Python tuple literal as a type.
+
+ :param types: a list of types that should be used for the tuple items.
+ """
+
+ def __init__(self, types: cabc.Sequence[type[t.Any] | ParamType]) -> None:
+ self.types: cabc.Sequence[ParamType] = [convert_type(ty) for ty in types]
+
+ def to_info_dict(self) -> dict[str, t.Any]:
+ info_dict = super().to_info_dict()
+ info_dict["types"] = [t.to_info_dict() for t in self.types]
+ return info_dict
+
+ @property
+ def name(self) -> str: # type: ignore
+ return f"<{' '.join(ty.name for ty in self.types)}>"
+
+ @property
+ def arity(self) -> int: # type: ignore
+ return len(self.types)
+
+ def convert(
+ self, value: t.Any, param: Parameter | None, ctx: Context | None
+ ) -> t.Any:
+ len_type = len(self.types)
+ len_value = len(value)
+
+ if len_value != len_type:
+ self.fail(
+ ngettext(
+ "{len_type} values are required, but {len_value} was given.",
+ "{len_type} values are required, but {len_value} were given.",
+ len_value,
+ ).format(len_type=len_type, len_value=len_value),
+ param=param,
+ ctx=ctx,
+ )
+
+ return tuple(
+ ty(x, param, ctx) for ty, x in zip(self.types, value, strict=False)
+ )
+
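+# Editor's sketch (not part of upstream Click): each position gets its own
+# type. In user code this is selected with a literal tuple, e.g.
+# ``type=(str, int)`` together with ``nargs=2``.
+def _example_tuple() -> None:
+    pair = Tuple([str, int])
+    assert pair.arity == 2
+    assert pair.convert(("answer", "42"), None, None) == ("answer", 42)
+
+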
+
+def convert_type(ty: t.Any | None, default: t.Any | None = None) -> ParamType:
+ """Find the most appropriate :class:`ParamType` for the given Python
+ type. If the type isn't provided, it can be inferred from a default
+ value.
+ """
+ guessed_type = False
+
+ if ty is None and default is not None:
+ if isinstance(default, (tuple, list)):
+ # If the default is empty, ty will remain None and will
+ # return STRING.
+ if default:
+ item = default[0]
+
+ # A tuple of tuples needs to detect the inner types.
+ # Can't call convert recursively because that would
+ # incorrectly unwind the tuple to a single type.
+ if isinstance(item, (tuple, list)):
+ ty = tuple(map(type, item))
+ else:
+ ty = type(item)
+ else:
+ ty = type(default)
+
+ guessed_type = True
+
+ if isinstance(ty, tuple):
+ return Tuple(ty)
+
+ if isinstance(ty, ParamType):
+ return ty
+
+ if ty is str or ty is None:
+ return STRING
+
+ if ty is int:
+ return INT
+
+ if ty is float:
+ return FLOAT
+
+ if ty is bool:
+ return BOOL
+
+ if guessed_type:
+ return STRING
+
+ if __debug__:
+ try:
+ if issubclass(ty, ParamType):
+ raise AssertionError(
+ f"Attempted to use an uninstantiated parameter type ({ty})."
+ )
+ except TypeError:
+ # ty is an instance (correct), so issubclass fails.
+ pass
+
+ return FuncParamType(ty)
+
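+# Editor's sketch (not part of upstream Click): how Python types map to the
+# ParamType singletons defined below.
+def _example_convert_type() -> None:
+    assert convert_type(int) is INT
+    assert convert_type(None, default=1.5) is FLOAT  # inferred from default
+    assert isinstance(convert_type((str, int)), Tuple)
+
+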
+
+#: A dummy parameter type that just does nothing. From a user's
+#: perspective this appears to just be the same as `STRING` but
+#: internally no string conversion takes place if the input was bytes.
+#: This is usually useful when working with file paths as they can
+#: appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful which is why
+#: it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default. This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter. This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter. This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter. This is the default for boolean flags. This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType()
+
+
+class OptionHelpExtra(t.TypedDict, total=False):
+ envvars: tuple[str, ...]
+ default: str
+ range: str
+ required: str
diff --git a/tapdown/lib/python3.11/site-packages/click/utils.py b/tapdown/lib/python3.11/site-packages/click/utils.py
new file mode 100644
index 0000000..beae26f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/click/utils.py
@@ -0,0 +1,627 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import os
+import re
+import sys
+import typing as t
+from functools import update_wrapper
+from types import ModuleType
+from types import TracebackType
+
+from ._compat import _default_text_stderr
+from ._compat import _default_text_stdout
+from ._compat import _find_binary_writer
+from ._compat import auto_wrap_for_ansi
+from ._compat import binary_streams
+from ._compat import open_stream
+from ._compat import should_strip_ansi
+from ._compat import strip_ansi
+from ._compat import text_streams
+from ._compat import WIN
+from .globals import resolve_color_default
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ P = te.ParamSpec("P")
+
+R = t.TypeVar("R")
+
+
+def _posixify(name: str) -> str:
+ return "-".join(name.split()).lower()
+
+
+def safecall(func: t.Callable[P, R]) -> t.Callable[P, R | None]:
+ """Wraps a function so that it swallows exceptions."""
+
+ def wrapper(*args: P.args, **kwargs: P.kwargs) -> R | None:
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ pass
+ return None
+
+ return update_wrapper(wrapper, func)
+
+
+def make_str(value: t.Any) -> str:
+ """Converts a value into a valid string."""
+ if isinstance(value, bytes):
+ try:
+ return value.decode(sys.getfilesystemencoding())
+ except UnicodeError:
+ return value.decode("utf-8", "replace")
+ return str(value)
+
+
+def make_default_short_help(help: str, max_length: int = 45) -> str:
+    """Returns a condensed version of the help string."""
+ # Consider only the first paragraph.
+ paragraph_end = help.find("\n\n")
+
+ if paragraph_end != -1:
+ help = help[:paragraph_end]
+
+ # Collapse newlines, tabs, and spaces.
+ words = help.split()
+
+ if not words:
+ return ""
+
+ # The first paragraph started with a "no rewrap" marker, ignore it.
+ if words[0] == "\b":
+ words = words[1:]
+
+ total_length = 0
+ last_index = len(words) - 1
+
+ for i, word in enumerate(words):
+ total_length += len(word) + (i > 0)
+
+ if total_length > max_length: # too long, truncate
+ break
+
+ if word[-1] == ".": # sentence end, truncate without "..."
+ return " ".join(words[: i + 1])
+
+ if total_length == max_length and i != last_index:
+ break # not at sentence end, truncate with "..."
+ else:
+ return " ".join(words) # no truncation needed
+
+ # Account for the length of the suffix.
+ total_length += len("...")
+
+ # remove words until the length is short enough
+ while i > 0:
+ total_length -= len(words[i]) + (i > 0)
+
+ if total_length <= max_length:
+ break
+
+ i -= 1
+
+ return " ".join(words[:i]) + "..."
+
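+# Editor's sketch (not part of upstream Click): truncation stops at the end
+# of the first sentence when one fits within ``max_length``.
+def _example_short_help() -> None:
+    text = "Sync files. A much longer description follows here."
+    assert make_default_short_help(text) == "Sync files."
+
+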
+
+class LazyFile:
+    """A lazy file works like a regular file, but it does not fully open
+    the file; it only performs some basic checks early to see if the
+    filename parameter makes sense. This is useful for safely opening
+    files for writing.
+    """
+
+ def __init__(
+ self,
+ filename: str | os.PathLike[str],
+ mode: str = "r",
+ encoding: str | None = None,
+ errors: str | None = "strict",
+ atomic: bool = False,
+ ):
+ self.name: str = os.fspath(filename)
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.atomic = atomic
+ self._f: t.IO[t.Any] | None
+ self.should_close: bool
+
+ if self.name == "-":
+ self._f, self.should_close = open_stream(filename, mode, encoding, errors)
+ else:
+ if "r" in mode:
+ # Open and close the file in case we're opening it for
+ # reading so that we can catch at least some errors in
+ # some cases early.
+ open(filename, mode).close()
+ self._f = None
+ self.should_close = True
+
+ def __getattr__(self, name: str) -> t.Any:
+ return getattr(self.open(), name)
+
+ def __repr__(self) -> str:
+ if self._f is not None:
+ return repr(self._f)
+        return f"<unopened file '{format_filename(self.name)}' {self.mode}>"
+
+ def open(self) -> t.IO[t.Any]:
+ """Opens the file if it's not yet open. This call might fail with
+ a :exc:`FileError`. Not handling this error will produce an error
+ that Click shows.
+ """
+ if self._f is not None:
+ return self._f
+ try:
+ rv, self.should_close = open_stream(
+ self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+ except OSError as e:
+ from .exceptions import FileError
+
+ raise FileError(self.name, hint=e.strerror) from e
+ self._f = rv
+ return rv
+
+ def close(self) -> None:
+ """Closes the underlying file, no matter what."""
+ if self._f is not None:
+ self._f.close()
+
+ def close_intelligently(self) -> None:
+ """This function only closes the file if it was opened by the lazy
+ file wrapper. For instance this will never close stdin.
+ """
+ if self.should_close:
+ self.close()
+
+ def __enter__(self) -> LazyFile:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> None:
+ self.close_intelligently()
+
+ def __iter__(self) -> cabc.Iterator[t.AnyStr]:
+ self.open()
+ return iter(self._f) # type: ignore
+
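+# Editor's sketch (not part of upstream Click): the file opens on first IO,
+# so constructing a LazyFile for writing has no filesystem effect by itself.
+def _example_lazy_file() -> None:
+    lf = LazyFile(os.devnull, "w")
+    lf.write("hello\n")  # __getattr__ triggers open() here
+    lf.close_intelligently()
+
+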
+
+class KeepOpenFile:
+ def __init__(self, file: t.IO[t.Any]) -> None:
+ self._file: t.IO[t.Any] = file
+
+ def __getattr__(self, name: str) -> t.Any:
+ return getattr(self._file, name)
+
+ def __enter__(self) -> KeepOpenFile:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> None:
+ pass
+
+ def __repr__(self) -> str:
+ return repr(self._file)
+
+ def __iter__(self) -> cabc.Iterator[t.AnyStr]:
+ return iter(self._file)
+
+
+def echo(
+ message: t.Any | None = None,
+ file: t.IO[t.Any] | None = None,
+ nl: bool = True,
+ err: bool = False,
+ color: bool | None = None,
+) -> None:
+ """Print a message and newline to stdout or a file. This should be
+ used instead of :func:`print` because it provides better support
+ for different data, files, and environments.
+
+ Compared to :func:`print`, this does the following:
+
+ - Ensures that the output encoding is not misconfigured on Linux.
+ - Supports Unicode in the Windows console.
+ - Supports writing to binary outputs, and supports writing bytes
+ to text outputs.
+ - Supports colors and styles on Windows.
+ - Removes ANSI color and style codes if the output does not look
+ like an interactive terminal.
+ - Always flushes the output.
+
+ :param message: The string or bytes to output. Other objects are
+ converted to strings.
+ :param file: The file to write to. Defaults to ``stdout``.
+ :param err: Write to ``stderr`` instead of ``stdout``.
+ :param nl: Print a newline after the message. Enabled by default.
+ :param color: Force showing or hiding colors and other styles. By
+ default Click will remove color if the output does not look like
+ an interactive terminal.
+
+ .. versionchanged:: 6.0
+ Support Unicode output on the Windows console. Click does not
+ modify ``sys.stdout``, so ``sys.stdout.write()`` and ``print()``
+ will still not support Unicode.
+
+ .. versionchanged:: 4.0
+ Added the ``color`` parameter.
+
+ .. versionadded:: 3.0
+ Added the ``err`` parameter.
+
+ .. versionchanged:: 2.0
+ Support colors on Windows if colorama is installed.
+ """
+ if file is None:
+ if err:
+ file = _default_text_stderr()
+ else:
+ file = _default_text_stdout()
+
+ # There are no standard streams attached to write to. For example,
+ # pythonw on Windows.
+ if file is None:
+ return
+
+ # Convert non bytes/text into the native string type.
+ if message is not None and not isinstance(message, (str, bytes, bytearray)):
+ out: str | bytes | bytearray | None = str(message)
+ else:
+ out = message
+
+ if nl:
+ out = out or ""
+ if isinstance(out, str):
+ out += "\n"
+ else:
+ out += b"\n"
+
+ if not out:
+ file.flush()
+ return
+
+ # If there is a message and the value looks like bytes, we manually
+ # need to find the binary stream and write the message in there.
+ # This is done separately so that most stream types will work as you
+ # would expect. Eg: you can write to StringIO for other cases.
+ if isinstance(out, (bytes, bytearray)):
+ binary_file = _find_binary_writer(file)
+
+ if binary_file is not None:
+ file.flush()
+ binary_file.write(out)
+ binary_file.flush()
+ return
+
+ # ANSI style code support. For no message or bytes, nothing happens.
+ # When outputting to a file instead of a terminal, strip codes.
+ else:
+ color = resolve_color_default(color)
+
+ if should_strip_ansi(file, color):
+ out = strip_ansi(out)
+ elif WIN:
+ if auto_wrap_for_ansi is not None:
+ file = auto_wrap_for_ansi(file, color) # type: ignore
+ elif not color:
+ out = strip_ansi(out)
+
+ file.write(out) # type: ignore
+ file.flush()
+
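+# Editor's sketch (not part of upstream Click): echo() routes text, bytes,
+# and stderr output through the appropriate streams.
+def _example_echo() -> None:
+    echo("plain text")
+    echo("something went wrong", err=True)  # written to stderr
+    echo(b"\xffraw bytes")  # finds the underlying binary stream
+
+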
+
+def get_binary_stream(name: t.Literal["stdin", "stdout", "stderr"]) -> t.BinaryIO:
+ """Returns a system stream for byte processing.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ """
+ opener = binary_streams.get(name)
+ if opener is None:
+ raise TypeError(f"Unknown standard stream '{name}'")
+ return opener()
+
+
+def get_text_stream(
+ name: t.Literal["stdin", "stdout", "stderr"],
+ encoding: str | None = None,
+ errors: str | None = "strict",
+) -> t.TextIO:
+ """Returns a system stream for text processing. This usually returns
+ a wrapped stream around a binary stream returned from
+ :func:`get_binary_stream` but it also can take shortcuts for already
+ correctly configured streams.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ :param encoding: overrides the detected default encoding.
+ :param errors: overrides the default error mode.
+ """
+ opener = text_streams.get(name)
+ if opener is None:
+ raise TypeError(f"Unknown standard stream '{name}'")
+ return opener(encoding, errors)
+
+
+def open_file(
+ filename: str | os.PathLike[str],
+ mode: str = "r",
+ encoding: str | None = None,
+ errors: str | None = "strict",
+ lazy: bool = False,
+ atomic: bool = False,
+) -> t.IO[t.Any]:
+ """Open a file, with extra behavior to handle ``'-'`` to indicate
+ a standard stream, lazy open on write, and atomic write. Similar to
+ the behavior of the :class:`~click.File` param type.
+
+ If ``'-'`` is given to open ``stdout`` or ``stdin``, the stream is
+ wrapped so that using it in a context manager will not close it.
+ This makes it possible to use the function without accidentally
+ closing a standard stream:
+
+ .. code-block:: python
+
+ with open_file(filename) as f:
+ ...
+
+ :param filename: The name or Path of the file to open, or ``'-'`` for
+ ``stdin``/``stdout``.
+ :param mode: The mode in which to open the file.
+ :param encoding: The encoding to decode or encode a file opened in
+ text mode.
+ :param errors: The error handling mode.
+ :param lazy: Wait to open the file until it is accessed. For read
+ mode, the file is temporarily opened to raise access errors
+ early, then closed until it is read again.
+ :param atomic: Write to a temporary file and replace the given file
+ on close.
+
+ .. versionadded:: 3.0
+ """
+ if lazy:
+ return t.cast(
+ "t.IO[t.Any]", LazyFile(filename, mode, encoding, errors, atomic=atomic)
+ )
+
+ f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
+
+ if not should_close:
+ f = t.cast("t.IO[t.Any]", KeepOpenFile(f))
+
+ return f
+
+
+def format_filename(
+ filename: str | bytes | os.PathLike[str] | os.PathLike[bytes],
+ shorten: bool = False,
+) -> str:
+ """Format a filename as a string for display. Ensures the filename can be
+ displayed by replacing any invalid bytes or surrogate escapes in the name
+ with the replacement character ``�``.
+
+ Invalid bytes or surrogate escapes will raise an error when written to a
+ stream with ``errors="strict"``. This will typically happen with ``stdout``
+ when the locale is something like ``en_GB.UTF-8``.
+
+ Many scenarios *are* safe to write surrogates though, due to PEP 538 and
+ PEP 540, including:
+
+ - Writing to ``stderr``, which uses ``errors="backslashreplace"``.
+ - The system has ``LANG=C.UTF-8``, ``C``, or ``POSIX``. Python opens
+ stdout and stderr with ``errors="surrogateescape"``.
+ - None of ``LANG/LC_*`` are set. Python assumes ``LANG=C.UTF-8``.
+ - Python is started in UTF-8 mode with ``PYTHONUTF8=1`` or ``-X utf8``.
+ Python opens stdout and stderr with ``errors="surrogateescape"``.
+
+ :param filename: formats a filename for UI display. This will also convert
+ the filename into unicode without failing.
+    :param shorten: this optionally shortens the filename to strip off the
+        path that leads up to it.
+ """
+ if shorten:
+ filename = os.path.basename(filename)
+ else:
+ filename = os.fspath(filename)
+
+ if isinstance(filename, bytes):
+ filename = filename.decode(sys.getfilesystemencoding(), "replace")
+ else:
+ filename = filename.encode("utf-8", "surrogateescape").decode(
+ "utf-8", "replace"
+ )
+
+ return filename
+
+
+def get_app_dir(app_name: str, roaming: bool = True, force_posix: bool = False) -> str:
+ r"""Returns the config folder for the application. The default behavior
+ is to return whatever is most appropriate for the operating system.
+
+ To give you an idea, for an app called ``"Foo Bar"``, something like
+ the following folders could be returned:
+
+ Mac OS X:
+ ``~/Library/Application Support/Foo Bar``
+ Mac OS X (POSIX):
+ ``~/.foo-bar``
+ Unix:
+ ``~/.config/foo-bar``
+ Unix (POSIX):
+ ``~/.foo-bar``
+ Windows (roaming):
+        ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+    Windows (not roaming):
+        ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+ .. versionadded:: 2.0
+
+ :param app_name: the application name. This should be properly capitalized
+ and can contain whitespace.
+ :param roaming: controls if the folder should be roaming or not on Windows.
+ Has no effect otherwise.
+ :param force_posix: if this is set to `True` then on any POSIX system the
+ folder will be stored in the home folder with a leading
+ dot instead of the XDG config home or darwin's
+ application support folder.
+ """
+ if WIN:
+ key = "APPDATA" if roaming else "LOCALAPPDATA"
+ folder = os.environ.get(key)
+ if folder is None:
+ folder = os.path.expanduser("~")
+ return os.path.join(folder, app_name)
+ if force_posix:
+ return os.path.join(os.path.expanduser(f"~/.{_posixify(app_name)}"))
+ if sys.platform == "darwin":
+ return os.path.join(
+ os.path.expanduser("~/Library/Application Support"), app_name
+ )
+ return os.path.join(
+ os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
+ _posixify(app_name),
+ )
+
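+# Editor's sketch (not part of upstream Click): a typical config-path
+# construction; the application name matches the docstring example.
+def _example_app_dir() -> None:
+    config_path = os.path.join(get_app_dir("Foo Bar"), "settings.toml")
+    # e.g. ~/.config/foo-bar/settings.toml on Linux.
+    assert config_path
+
+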
+
+class PacifyFlushWrapper:
+ """This wrapper is used to catch and suppress BrokenPipeErrors resulting
+ from ``.flush()`` being called on broken pipe during the shutdown/final-GC
+ of the Python interpreter. Notably ``.flush()`` is always called on
+ ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any
+ other cleanup code, and the case where the underlying file is not a broken
+ pipe, all calls and attributes are proxied.
+ """
+
+ def __init__(self, wrapped: t.IO[t.Any]) -> None:
+ self.wrapped = wrapped
+
+ def flush(self) -> None:
+ try:
+ self.wrapped.flush()
+ except OSError as e:
+ import errno
+
+ if e.errno != errno.EPIPE:
+ raise
+
+ def __getattr__(self, attr: str) -> t.Any:
+ return getattr(self.wrapped, attr)
+
+
+def _detect_program_name(
+ path: str | None = None, _main: ModuleType | None = None
+) -> str:
+ """Determine the command used to run the program, for use in help
+ text. If a file or entry point was executed, the file name is
+ returned. If ``python -m`` was used to execute a module or package,
+ ``python -m name`` is returned.
+
+    This doesn't try to be too precise; the goal is to give a concise
+    name for help text. Files are only shown as their name without the
+ path. ``python`` is only shown for modules, and the full path to
+ ``sys.executable`` is not shown.
+
+ :param path: The Python file being executed. Python puts this in
+ ``sys.argv[0]``, which is used by default.
+ :param _main: The ``__main__`` module. This should only be passed
+ during internal testing.
+
+ .. versionadded:: 8.0
+ Based on command args detection in the Werkzeug reloader.
+
+ :meta private:
+ """
+ if _main is None:
+ _main = sys.modules["__main__"]
+
+ if not path:
+ path = sys.argv[0]
+
+ # The value of __package__ indicates how Python was called. It may
+ # not exist if a setuptools script is installed as an egg. It may be
+ # set incorrectly for entry points created with pip on Windows.
+ # It is set to "" inside a Shiv or PEX zipapp.
+ if getattr(_main, "__package__", None) in {None, ""} or (
+ os.name == "nt"
+ and _main.__package__ == ""
+ and not os.path.exists(path)
+ and os.path.exists(f"{path}.exe")
+ ):
+ # Executed a file, like "python app.py".
+ return os.path.basename(path)
+
+ # Executed a module, like "python -m example".
+ # Rewritten by Python from "-m script" to "/path/to/script.py".
+ # Need to look at main module to determine how it was executed.
+ py_module = t.cast(str, _main.__package__)
+ name = os.path.splitext(os.path.basename(path))[0]
+
+ # A submodule like "example.cli".
+ if name != "__main__":
+ py_module = f"{py_module}.{name}"
+
+ return f"python -m {py_module.lstrip('.')}"
+
+
+def _expand_args(
+ args: cabc.Iterable[str],
+ *,
+ user: bool = True,
+ env: bool = True,
+ glob_recursive: bool = True,
+) -> list[str]:
+ """Simulate Unix shell expansion with Python functions.
+
+ See :func:`glob.glob`, :func:`os.path.expanduser`, and
+ :func:`os.path.expandvars`.
+
+ This is intended for use on Windows, where the shell does not do any
+ expansion. It may not exactly match what a Unix shell would do.
+
+ :param args: List of command line arguments to expand.
+ :param user: Expand user home directory.
+ :param env: Expand environment variables.
+ :param glob_recursive: ``**`` matches directories recursively.
+
+ .. versionchanged:: 8.1
+ Invalid glob patterns are treated as empty expansions rather
+ than raising an error.
+
+ .. versionadded:: 8.0
+
+ :meta private:
+ """
+ from glob import glob
+
+ out = []
+
+ for arg in args:
+ if user:
+ arg = os.path.expanduser(arg)
+
+ if env:
+ arg = os.path.expandvars(arg)
+
+ try:
+ matches = glob(arg, recursive=glob_recursive)
+ except re.error:
+ matches = []
+
+ if not matches:
+ out.append(arg)
+ else:
+ out.extend(matches)
+
+ return out
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/__init__.py b/tapdown/lib/python3.11/site-packages/dateutil/__init__.py
new file mode 100644
index 0000000..a2c19c0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/__init__.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+import sys
+
+try:
+ from ._version import version as __version__
+except ImportError:
+ __version__ = 'unknown'
+
+__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz',
+ 'utils', 'zoneinfo']
+
+def __getattr__(name):
+ import importlib
+
+ if name in __all__:
+ return importlib.import_module("." + name, __name__)
+ raise AttributeError(
+        "module {!r} has no attribute {!r}".format(__name__, name)
+ )
+
+
+def __dir__():
+ # __dir__ should include all the lazy-importable modules as well.
+ return [x for x in globals() if x not in sys.modules] + __all__
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/_common.py b/tapdown/lib/python3.11/site-packages/dateutil/_common.py
new file mode 100644
index 0000000..4eb2659
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/_common.py
@@ -0,0 +1,43 @@
+"""
+Common code used in multiple modules.
+"""
+
+
+class weekday(object):
+ __slots__ = ["weekday", "n"]
+
+ def __init__(self, weekday, n=None):
+ self.weekday = weekday
+ self.n = n
+
+ def __call__(self, n):
+ if n == self.n:
+ return self
+ else:
+ return self.__class__(self.weekday, n)
+
+ def __eq__(self, other):
+ try:
+ if self.weekday != other.weekday or self.n != other.n:
+ return False
+ except AttributeError:
+ return False
+ return True
+
+ def __hash__(self):
+ return hash((
+ self.weekday,
+ self.n,
+ ))
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
+ if not self.n:
+ return s
+ else:
+ return "%s(%+d)" % (s, self.n)
+
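+# Editor's sketch (not part of upstream dateutil): calling an instance
+# attaches an ordinal, as in relativedelta's MO(+2).
+def _example_weekday():
+    MO = weekday(0)
+    assert MO(+2) == weekday(0, 2)
+    assert repr(MO(+2)) == "MO(+2)"
+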
+# vim:ts=4:sw=4:et
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/_version.py b/tapdown/lib/python3.11/site-packages/dateutil/_version.py
new file mode 100644
index 0000000..ddda980
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/_version.py
@@ -0,0 +1,4 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = '2.9.0.post0'
+__version_tuple__ = version_tuple = (2, 9, 0)
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/easter.py b/tapdown/lib/python3.11/site-packages/dateutil/easter.py
new file mode 100644
index 0000000..f74d1f7
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/easter.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a generic Easter computing method for any given year, using
+Western, Orthodox or Julian algorithms.
+"""
+
+import datetime
+
+__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
+
+EASTER_JULIAN = 1
+EASTER_ORTHODOX = 2
+EASTER_WESTERN = 3
+
+
+def easter(year, method=EASTER_WESTERN):
+ """
+ This method was ported from the work done by GM Arts,
+ on top of the algorithm by Claus Tondering, which was
+    based in part on the algorithm of Oudin (1940), as
+ quoted in "Explanatory Supplement to the Astronomical
+ Almanac", P. Kenneth Seidelmann, editor.
+
+ This algorithm implements three different Easter
+ calculation methods:
+
+    1. Original calculation in Julian calendar, valid for
+       dates after 326 AD
+ 2. Original method, with date converted to Gregorian
+ calendar, valid in years 1583 to 4099
+ 3. Revised method, in Gregorian calendar, valid in
+ years 1583 to 4099 as well
+
+ These methods are represented by the constants:
+
+ * ``EASTER_JULIAN = 1``
+ * ``EASTER_ORTHODOX = 2``
+ * ``EASTER_WESTERN = 3``
+
+ The default method is method 3.
+
+ More about the algorithm may be found at:
+
+    `GM Arts: Easter Algorithms <http://www.gmarts.org/index.php?go=415>`_
+
+ and
+
+    `The Calendar FAQ: Easter <https://www.tondering.dk/claus/cal/easter.php>`_
+
+ """
+
+ if not (1 <= method <= 3):
+ raise ValueError("invalid method")
+
+ # g - Golden year - 1
+ # c - Century
+ # h - (23 - Epact) mod 30
+ # i - Number of days from March 21 to Paschal Full Moon
+ # j - Weekday for PFM (0=Sunday, etc)
+ # p - Number of days from March 21 to Sunday on or before PFM
+    #     (-6 to 28 for methods 1 & 3, up to 56 for method 2)
+ # e - Extra days to add for method 2 (converting Julian
+ # date to Gregorian date)
+
+ y = year
+ g = y % 19
+ e = 0
+ if method < 3:
+ # Old method
+ i = (19*g + 15) % 30
+ j = (y + y//4 + i) % 7
+ if method == 2:
+            # Extra days to convert Julian to Gregorian date
+ e = 10
+ if y > 1600:
+ e = e + y//100 - 16 - (y//100 - 16)//4
+ else:
+ # New method
+ c = y//100
+ h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
+ i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
+ j = (y + y//4 + i + 2 - c + c//4) % 7
+
+ # p can be from -6 to 56 corresponding to dates 22 March to 23 May
+ # (later dates apply to method 2, although 23 May never actually occurs)
+ p = i - j + e
+ d = 1 + (p + 27 + (p + 6)//40) % 31
+ m = 3 + (p + 26)//30
+ return datetime.date(int(y), int(m), int(d))
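+
+
+# Illustrative check (Western method): Easter 2024 fell on 31 March.
+#
+#     >>> easter(2024)
+#     datetime.date(2024, 3, 31)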
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/parser/__init__.py b/tapdown/lib/python3.11/site-packages/dateutil/parser/__init__.py
new file mode 100644
index 0000000..d174b0e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/parser/__init__.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+from ._parser import parse, parser, parserinfo, ParserError
+from ._parser import DEFAULTPARSER, DEFAULTTZPARSER
+from ._parser import UnknownTimezoneWarning
+
+from ._parser import __doc__
+
+from .isoparser import isoparser, isoparse
+
+__all__ = ['parse', 'parser', 'parserinfo',
+ 'isoparse', 'isoparser',
+ 'ParserError',
+ 'UnknownTimezoneWarning']
+
+
+###
+# Deprecate portions of the private interface so that downstream code that
+# is improperly relying on it is given *some* notice.
+
+
+def __deprecated_private_func(f):
+ from functools import wraps
+ import warnings
+
+ msg = ('{name} is a private function and may break without warning, '
+           'it will be moved and/or renamed in future versions.')
+ msg = msg.format(name=f.__name__)
+
+ @wraps(f)
+ def deprecated_func(*args, **kwargs):
+ warnings.warn(msg, DeprecationWarning)
+ return f(*args, **kwargs)
+
+ return deprecated_func
+
+def __deprecate_private_class(c):
+ import warnings
+
+ msg = ('{name} is a private class and may break without warning, '
+           'it will be moved and/or renamed in future versions.')
+ msg = msg.format(name=c.__name__)
+
+ class private_class(c):
+ __doc__ = c.__doc__
+
+ def __init__(self, *args, **kwargs):
+ warnings.warn(msg, DeprecationWarning)
+ super(private_class, self).__init__(*args, **kwargs)
+
+ private_class.__name__ = c.__name__
+
+ return private_class
+
+
+from ._parser import _timelex, _resultbase
+from ._parser import _tzparser, _parsetz
+
+_timelex = __deprecate_private_class(_timelex)
+_tzparser = __deprecate_private_class(_tzparser)
+_resultbase = __deprecate_private_class(_resultbase)
+_parsetz = __deprecated_private_func(_parsetz)
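+
+# Illustrative effect: calling one of these wrapped private names, e.g.
+# _timelex("Jan 1"), still works as before but now emits a DeprecationWarning.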
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/parser/_parser.py b/tapdown/lib/python3.11/site-packages/dateutil/parser/_parser.py
new file mode 100644
index 0000000..37d1663
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/parser/_parser.py
@@ -0,0 +1,1613 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a generic date/time string parser which is able to parse
+most known formats to represent a date and/or time.
+
+This module attempts to be forgiving with regards to unlikely input formats,
+returning a datetime object even for dates which are ambiguous. If an element
+of a date/time stamp is omitted, the following rules are applied:
+
+- If AM or PM is left unspecified, a 24-hour clock is assumed; however, an hour
+ on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
+ specified.
+- If a time zone is omitted, a timezone-naive datetime is returned.
+
+If any other elements are missing, they are taken from the
+:class:`datetime.datetime` object passed to the parameter ``default``. If this
+results in a day number exceeding the valid number of days per month, the
+value falls back to the end of the month.
+
+Additional resources about date/time string formats can be found below:
+
+- `A summary of the international standard date and time notation
+  <https://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
+- `W3C Date and Time Formats <https://www.w3.org/TR/NOTE-datetime>`_
+- `Time Formats (Planetary Rings Node)
+  <https://pds-rings.seti.org/tools/time_formats.html>`_
+- `CPAN ParseDate module
+  <https://metacpan.org/pod/release/MUIR/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
+- `Java SimpleDateFormat Class
+  <https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
+"""
+from __future__ import unicode_literals
+
+import datetime
+import re
+import string
+import time
+import warnings
+
+from calendar import monthrange
+from io import StringIO
+
+import six
+from six import integer_types, text_type
+
+from decimal import Decimal
+
+from warnings import warn
+
+from .. import relativedelta
+from .. import tz
+
+__all__ = ["parse", "parserinfo", "ParserError"]
+
+
+# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
+# making public and/or figuring out if there is something we can
+# take off their plate.
+class _timelex(object):
+ # Fractional seconds are sometimes split by a comma
+ _split_decimal = re.compile("([.,])")
+
+ def __init__(self, instream):
+ if isinstance(instream, (bytes, bytearray)):
+ instream = instream.decode()
+
+ if isinstance(instream, text_type):
+ instream = StringIO(instream)
+ elif getattr(instream, 'read', None) is None:
+ raise TypeError('Parser must be a string or character stream, not '
+ '{itype}'.format(itype=instream.__class__.__name__))
+
+ self.instream = instream
+ self.charstack = []
+ self.tokenstack = []
+ self.eof = False
+
+ def get_token(self):
+ """
+        This function breaks the time string into lexical units (tokens), which
+        can be parsed by the parser. Lexical units are demarcated by changes in
+        the character set, so any continuous string of letters is considered
+        one unit, as is any continuous string of numbers.
+
+        The main complication arises from the fact that dots ('.') can be used
+        both as separators (e.g. "Sep.20.2009") and as decimal points (e.g.
+        "4:30:21.447"). It is therefore necessary to read the full context of
+        any dot-separated strings before breaking them into tokens; to that
+        end, this function maintains a "token stack" for when the ambiguous
+        context demands that multiple tokens be parsed at once.
+ """
+ if self.tokenstack:
+ return self.tokenstack.pop(0)
+
+ seenletters = False
+ token = None
+ state = None
+
+ while not self.eof:
+ # We only realize that we've reached the end of a token when we
+ # find a character that's not part of the current token - since
+ # that character may be part of the next token, it's stored in the
+ # charstack.
+ if self.charstack:
+ nextchar = self.charstack.pop(0)
+ else:
+ nextchar = self.instream.read(1)
+ while nextchar == '\x00':
+ nextchar = self.instream.read(1)
+
+ if not nextchar:
+ self.eof = True
+ break
+ elif not state:
+ # First character of the token - determines if we're starting
+ # to parse a word, a number or something else.
+ token = nextchar
+ if self.isword(nextchar):
+ state = 'a'
+ elif self.isnum(nextchar):
+ state = '0'
+ elif self.isspace(nextchar):
+ token = ' '
+ break # emit token
+ else:
+ break # emit token
+ elif state == 'a':
+ # If we've already started reading a word, we keep reading
+ # letters until we find something that's not part of a word.
+ seenletters = True
+ if self.isword(nextchar):
+ token += nextchar
+ elif nextchar == '.':
+ token += nextchar
+ state = 'a.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == '0':
+ # If we've already started reading a number, we keep reading
+ # numbers until we find something that doesn't fit.
+ if self.isnum(nextchar):
+ token += nextchar
+ elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
+ token += nextchar
+ state = '0.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == 'a.':
+ # If we've seen some letters and a dot separator, continue
+ # parsing, and the tokens will be broken up later.
+ seenletters = True
+ if nextchar == '.' or self.isword(nextchar):
+ token += nextchar
+ elif self.isnum(nextchar) and token[-1] == '.':
+ token += nextchar
+ state = '0.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+ elif state == '0.':
+ # If we've seen at least one dot separator, keep going, we'll
+ # break up the tokens later.
+ if nextchar == '.' or self.isnum(nextchar):
+ token += nextchar
+ elif self.isword(nextchar) and token[-1] == '.':
+ token += nextchar
+ state = 'a.'
+ else:
+ self.charstack.append(nextchar)
+ break # emit token
+
+ if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
+ token[-1] in '.,')):
+ l = self._split_decimal.split(token)
+ token = l[0]
+ for tok in l[1:]:
+ if tok:
+ self.tokenstack.append(tok)
+
+ if state == '0.' and token.count('.') == 0:
+ token = token.replace(',', '.')
+
+ return token
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ token = self.get_token()
+ if token is None:
+ raise StopIteration
+
+ return token
+
+ def next(self):
+ return self.__next__() # Python 2.x support
+
+ @classmethod
+ def split(cls, s):
+ return list(cls(s))
+
+ @classmethod
+ def isword(cls, nextchar):
+ """ Whether or not the next character is part of a word """
+ return nextchar.isalpha()
+
+ @classmethod
+ def isnum(cls, nextchar):
+ """ Whether the next character is part of a number """
+ return nextchar.isdigit()
+
+ @classmethod
+ def isspace(cls, nextchar):
+ """ Whether the next character is whitespace """
+ return nextchar.isspace()
+
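+
+# Tokenization sketch (illustrative), showing how the state machine above
+# splits on character-class changes while keeping decimal fractions intact:
+#
+#     >>> _timelex.split("Sep.20.2009 4:30:21.447")
+#     ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']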
+
+class _resultbase(object):
+
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+
+ def _repr(self, classname):
+ l = []
+ for attr in self.__slots__:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("%s=%s" % (attr, repr(value)))
+ return "%s(%s)" % (classname, ", ".join(l))
+
+ def __len__(self):
+ return (sum(getattr(self, attr) is not None
+ for attr in self.__slots__))
+
+ def __repr__(self):
+ return self._repr(self.__class__.__name__)
+
+
+class parserinfo(object):
+ """
+ Class which handles what inputs are accepted. Subclass this to customize
+ the language and acceptable values for each parameter.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM
+ and YMD. Default is ``False``.
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+ to be the year, otherwise the last number is taken to be the year.
+ Default is ``False``.
+ """
+
+ # m from a.m/p.m, t from ISO T separator
+ JUMP = [" ", ".", ",", ";", "-", "/", "'",
+ "at", "on", "and", "ad", "m", "t", "of",
+ "st", "nd", "rd", "th"]
+
+ WEEKDAYS = [("Mon", "Monday"),
+ ("Tue", "Tuesday"), # TODO: "Tues"
+ ("Wed", "Wednesday"),
+ ("Thu", "Thursday"), # TODO: "Thurs"
+ ("Fri", "Friday"),
+ ("Sat", "Saturday"),
+ ("Sun", "Sunday")]
+ MONTHS = [("Jan", "January"),
+ ("Feb", "February"), # TODO: "Febr"
+ ("Mar", "March"),
+ ("Apr", "April"),
+ ("May", "May"),
+ ("Jun", "June"),
+ ("Jul", "July"),
+ ("Aug", "August"),
+ ("Sep", "Sept", "September"),
+ ("Oct", "October"),
+ ("Nov", "November"),
+ ("Dec", "December")]
+ HMS = [("h", "hour", "hours"),
+ ("m", "minute", "minutes"),
+ ("s", "second", "seconds")]
+ AMPM = [("am", "a"),
+ ("pm", "p")]
+ UTCZONE = ["UTC", "GMT", "Z", "z"]
+ PERTAIN = ["of"]
+ TZOFFSET = {}
+ # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
+ # "Anno Domini", "Year of Our Lord"]
+
+ def __init__(self, dayfirst=False, yearfirst=False):
+ self._jump = self._convert(self.JUMP)
+ self._weekdays = self._convert(self.WEEKDAYS)
+ self._months = self._convert(self.MONTHS)
+ self._hms = self._convert(self.HMS)
+ self._ampm = self._convert(self.AMPM)
+ self._utczone = self._convert(self.UTCZONE)
+ self._pertain = self._convert(self.PERTAIN)
+
+ self.dayfirst = dayfirst
+ self.yearfirst = yearfirst
+
+ self._year = time.localtime().tm_year
+ self._century = self._year // 100 * 100
+
+ def _convert(self, lst):
+ dct = {}
+ for i, v in enumerate(lst):
+ if isinstance(v, tuple):
+ for v in v:
+ dct[v.lower()] = i
+ else:
+ dct[v.lower()] = i
+ return dct
+
+ def jump(self, name):
+ return name.lower() in self._jump
+
+ def weekday(self, name):
+ try:
+ return self._weekdays[name.lower()]
+ except KeyError:
+ pass
+ return None
+
+ def month(self, name):
+ try:
+ return self._months[name.lower()] + 1
+ except KeyError:
+ pass
+ return None
+
+ def hms(self, name):
+ try:
+ return self._hms[name.lower()]
+ except KeyError:
+ return None
+
+ def ampm(self, name):
+ try:
+ return self._ampm[name.lower()]
+ except KeyError:
+ return None
+
+ def pertain(self, name):
+ return name.lower() in self._pertain
+
+ def utczone(self, name):
+ return name.lower() in self._utczone
+
+ def tzoffset(self, name):
+ if name in self._utczone:
+ return 0
+
+ return self.TZOFFSET.get(name)
+
+ def convertyear(self, year, century_specified=False):
+ """
+        Converts two-digit years to a year within the [-50, 49]
+        range around self._year (the current local year)
+ """
+
+ # Function contract is that the year is always positive
+ assert year >= 0
+
+ if year < 100 and not century_specified:
+ # assume current century to start
+ year += self._century
+
+ if year >= self._year + 50: # if too far in future
+ year -= 100
+ elif year < self._year - 50: # if too far in past
+ year += 100
+
+ return year
+
+ def validate(self, res):
+ # move to info
+ if res.year is not None:
+ res.year = self.convertyear(res.year, res.century_specified)
+
+ if ((res.tzoffset == 0 and not res.tzname) or
+ (res.tzname == 'Z' or res.tzname == 'z')):
+ res.tzname = "UTC"
+ res.tzoffset = 0
+ elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
+ res.tzoffset = 0
+ return True
+
+
+class _ymd(list):
+ def __init__(self, *args, **kwargs):
+ super(self.__class__, self).__init__(*args, **kwargs)
+ self.century_specified = False
+ self.dstridx = None
+ self.mstridx = None
+ self.ystridx = None
+
+ @property
+ def has_year(self):
+ return self.ystridx is not None
+
+ @property
+ def has_month(self):
+ return self.mstridx is not None
+
+ @property
+ def has_day(self):
+ return self.dstridx is not None
+
+ def could_be_day(self, value):
+ if self.has_day:
+ return False
+ elif not self.has_month:
+ return 1 <= value <= 31
+ elif not self.has_year:
+ # Be permissive, assume leap year
+ month = self[self.mstridx]
+ return 1 <= value <= monthrange(2000, month)[1]
+ else:
+ month = self[self.mstridx]
+ year = self[self.ystridx]
+ return 1 <= value <= monthrange(year, month)[1]
+
+ def append(self, val, label=None):
+ if hasattr(val, '__len__'):
+ if val.isdigit() and len(val) > 2:
+ self.century_specified = True
+ if label not in [None, 'Y']: # pragma: no cover
+ raise ValueError(label)
+ label = 'Y'
+ elif val > 100:
+ self.century_specified = True
+ if label not in [None, 'Y']: # pragma: no cover
+ raise ValueError(label)
+ label = 'Y'
+
+ super(self.__class__, self).append(int(val))
+
+ if label == 'M':
+ if self.has_month:
+ raise ValueError('Month is already set')
+ self.mstridx = len(self) - 1
+ elif label == 'D':
+ if self.has_day:
+ raise ValueError('Day is already set')
+ self.dstridx = len(self) - 1
+ elif label == 'Y':
+ if self.has_year:
+ raise ValueError('Year is already set')
+ self.ystridx = len(self) - 1
+
+ def _resolve_from_stridxs(self, strids):
+ """
+ Try to resolve the identities of year/month/day elements using
+ ystridx, mstridx, and dstridx, if enough of these are specified.
+ """
+ if len(self) == 3 and len(strids) == 2:
+ # we can back out the remaining stridx value
+ missing = [x for x in range(3) if x not in strids.values()]
+ key = [x for x in ['y', 'm', 'd'] if x not in strids]
+ assert len(missing) == len(key) == 1
+ key = key[0]
+ val = missing[0]
+ strids[key] = val
+
+ assert len(self) == len(strids) # otherwise this should not be called
+ out = {key: self[strids[key]] for key in strids}
+ return (out.get('y'), out.get('m'), out.get('d'))
+
+ def resolve_ymd(self, yearfirst, dayfirst):
+ len_ymd = len(self)
+ year, month, day = (None, None, None)
+
+ strids = (('y', self.ystridx),
+ ('m', self.mstridx),
+ ('d', self.dstridx))
+
+ strids = {key: val for key, val in strids if val is not None}
+ if (len(self) == len(strids) > 0 or
+ (len(self) == 3 and len(strids) == 2)):
+ return self._resolve_from_stridxs(strids)
+
+ mstridx = self.mstridx
+
+ if len_ymd > 3:
+ raise ValueError("More than three YMD values")
+ elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
+ # One member, or two members with a month string
+ if mstridx is not None:
+ month = self[mstridx]
+ # since mstridx is 0 or 1, self[mstridx-1] always
+ # looks up the other element
+ other = self[mstridx - 1]
+ else:
+ other = self[0]
+
+ if len_ymd > 1 or mstridx is None:
+ if other > 31:
+ year = other
+ else:
+ day = other
+
+ elif len_ymd == 2:
+ # Two members with numbers
+ if self[0] > 31:
+ # 99-01
+ year, month = self
+ elif self[1] > 31:
+ # 01-99
+ month, year = self
+ elif dayfirst and self[1] <= 12:
+ # 13-01
+ day, month = self
+ else:
+ # 01-13
+ month, day = self
+
+ elif len_ymd == 3:
+ # Three members
+ if mstridx == 0:
+ if self[1] > 31:
+ # Apr-2003-25
+ month, year, day = self
+ else:
+ month, day, year = self
+ elif mstridx == 1:
+ if self[0] > 31 or (yearfirst and self[2] <= 31):
+ # 99-Jan-01
+ year, month, day = self
+ else:
+ # 01-Jan-01
+ # Give precedence to day-first, since
+                    # two-digit years are usually hand-written.
+ day, month, year = self
+
+ elif mstridx == 2:
+                # Month in the last position is an unusual ordering.
+ if self[1] > 31:
+ # 01-99-Jan
+ day, year, month = self
+ else:
+ # 99-01-Jan
+ year, day, month = self
+
+ else:
+ if (self[0] > 31 or
+ self.ystridx == 0 or
+ (yearfirst and self[1] <= 12 and self[2] <= 31)):
+ # 99-01-01
+ if dayfirst and self[2] <= 12:
+ year, day, month = self
+ else:
+ year, month, day = self
+ elif self[0] > 12 or (dayfirst and self[1] <= 12):
+ # 13-01-01
+ day, month, year = self
+ else:
+ # 01-13-01
+ month, day, year = self
+
+ return year, month, day
+
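+# Illustrative resolution: "01/05/09" arrives here as [1, 5, 9]; with the
+# defaults (dayfirst=False, yearfirst=False) resolve_ymd returns (9, 1, 5) as
+# (year, month, day), and parserinfo.convertyear later maps 9 to 2009.
+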
+
+class parser(object):
+ def __init__(self, info=None):
+ self.info = info or parserinfo()
+
+ def parse(self, timestr, default=None,
+ ignoretz=False, tzinfos=None, **kwargs):
+ """
+ Parse the date/time string into a :class:`datetime.datetime` object.
+
+ :param timestr:
+ Any date/time string using the supported formats.
+
+ :param default:
+ The default datetime object, if this is a datetime object and not
+ ``None``, elements specified in ``timestr`` replace elements in the
+ default object.
+
+ :param ignoretz:
+ If set ``True``, time zones in parsed strings are ignored and a
+ naive :class:`datetime.datetime` object is returned.
+
+ :param tzinfos:
+ Additional time zone names / aliases which may be present in the
+ string. This argument maps time zone names (and optionally offsets
+ from those time zones) to time zones. This parameter can be a
+ dictionary with timezone aliases mapping time zone names to time
+ zones or a function taking two parameters (``tzname`` and
+ ``tzoffset``) and returning a time zone.
+
+ The timezones to which the names are mapped can be an integer
+ offset from UTC in seconds or a :class:`tzinfo` object.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> from dateutil.parser import parse
+ >>> from dateutil.tz import gettz
+ >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
+ >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
+ >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21,
+ tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+ This parameter is ignored if ``ignoretz`` is set.
+
+ :param \\*\\*kwargs:
+ Keyword arguments as passed to ``_parse()``.
+
+ :return:
+ Returns a :class:`datetime.datetime` object or, if the
+ ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+ first element being a :class:`datetime.datetime` object, the second
+ a tuple containing the fuzzy tokens.
+
+ :raises ParserError:
+ Raised for invalid or unknown string format, if the provided
+ :class:`tzinfo` is not in a valid format, or if an invalid date
+ would be created.
+
+ :raises TypeError:
+ Raised for non-string or character stream input.
+
+ :raises OverflowError:
+ Raised if the parsed date exceeds the largest valid C integer on
+ your system.
+ """
+
+ if default is None:
+ default = datetime.datetime.now().replace(hour=0, minute=0,
+ second=0, microsecond=0)
+
+ res, skipped_tokens = self._parse(timestr, **kwargs)
+
+ if res is None:
+ raise ParserError("Unknown string format: %s", timestr)
+
+ if len(res) == 0:
+ raise ParserError("String does not contain a date: %s", timestr)
+
+ try:
+ ret = self._build_naive(res, default)
+ except ValueError as e:
+ six.raise_from(ParserError(str(e) + ": %s", timestr), e)
+
+ if not ignoretz:
+ ret = self._build_tzaware(ret, res, tzinfos)
+
+ if kwargs.get('fuzzy_with_tokens', False):
+ return ret, skipped_tokens
+ else:
+ return ret
+
+ class _result(_resultbase):
+ __slots__ = ["year", "month", "day", "weekday",
+ "hour", "minute", "second", "microsecond",
+ "tzname", "tzoffset", "ampm","any_unused_tokens"]
+
+ def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
+ fuzzy_with_tokens=False):
+ """
+ Private method which performs the heavy lifting of parsing, called from
+ ``parse()``, which passes on its ``kwargs`` to this function.
+
+ :param timestr:
+ The string to parse.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM
+ and YMD. If set to ``None``, this value is retrieved from the
+ current :class:`parserinfo` object (which itself defaults to
+ ``False``).
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken
+ to be the year, otherwise the last number is taken to be the year.
+ If this is set to ``None``, the value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param fuzzy:
+            Whether to allow fuzzy parsing, allowing for strings like "Today is
+ January 1, 2047 at 8:21:00AM".
+
+ :param fuzzy_with_tokens:
+ If ``True``, ``fuzzy`` is automatically set to True, and the parser
+ will return a tuple where the first element is the parsed
+            :class:`datetime.datetime` timestamp and the second element is
+ a tuple containing the portions of the string which were ignored:
+
+ .. doctest::
+
+ >>> from dateutil.parser import parse
+ >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+ (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+ """
+ if fuzzy_with_tokens:
+ fuzzy = True
+
+ info = self.info
+
+ if dayfirst is None:
+ dayfirst = info.dayfirst
+
+ if yearfirst is None:
+ yearfirst = info.yearfirst
+
+ res = self._result()
+ l = _timelex.split(timestr) # Splits the timestr into tokens
+
+ skipped_idxs = []
+
+ # year/month/day list
+ ymd = _ymd()
+
+ len_l = len(l)
+ i = 0
+ try:
+ while i < len_l:
+
+ # Check if it's a number
+ value_repr = l[i]
+ try:
+ value = float(value_repr)
+ except ValueError:
+ value = None
+
+ if value is not None:
+ # Numeric token
+ i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)
+
+ # Check weekday
+ elif info.weekday(l[i]) is not None:
+ value = info.weekday(l[i])
+ res.weekday = value
+
+ # Check month name
+ elif info.month(l[i]) is not None:
+ value = info.month(l[i])
+ ymd.append(value, 'M')
+
+ if i + 1 < len_l:
+ if l[i + 1] in ('-', '/'):
+ # Jan-01[-99]
+ sep = l[i + 1]
+ ymd.append(l[i + 2])
+
+ if i + 3 < len_l and l[i + 3] == sep:
+ # Jan-01-99
+ ymd.append(l[i + 4])
+ i += 2
+
+ i += 2
+
+ elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
+ info.pertain(l[i + 2])):
+ # Jan of 01
+ # In this case, 01 is clearly year
+ if l[i + 4].isdigit():
+ # Convert it here to become unambiguous
+ value = int(l[i + 4])
+ year = str(info.convertyear(value))
+ ymd.append(year, 'Y')
+ else:
+ # Wrong guess
+ pass
+ # TODO: not hit in tests
+ i += 4
+
+ # Check am/pm
+ elif info.ampm(l[i]) is not None:
+ value = info.ampm(l[i])
+ val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)
+
+ if val_is_ampm:
+ res.hour = self._adjust_ampm(res.hour, value)
+ res.ampm = value
+
+ elif fuzzy:
+ skipped_idxs.append(i)
+
+ # Check for a timezone name
+ elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
+ res.tzname = l[i]
+ res.tzoffset = info.tzoffset(res.tzname)
+
+ # Check for something like GMT+3, or BRST+3. Notice
+ # that it doesn't mean "I am 3 hours after GMT", but
+ # "my time +3 is GMT". If found, we reverse the
+ # logic so that timezone parsing code will get it
+ # right.
+ if i + 1 < len_l and l[i + 1] in ('+', '-'):
+ l[i + 1] = ('+', '-')[l[i + 1] == '+']
+ res.tzoffset = None
+ if info.utczone(res.tzname):
+ # With something like GMT+3, the timezone
+ # is *not* GMT.
+ res.tzname = None
+
+ # Check for a numbered timezone
+ elif res.hour is not None and l[i] in ('+', '-'):
+ signal = (-1, 1)[l[i] == '+']
+ len_li = len(l[i + 1])
+
+ # TODO: check that l[i + 1] is integer?
+ if len_li == 4:
+ # -0300
+ hour_offset = int(l[i + 1][:2])
+ min_offset = int(l[i + 1][2:])
+ elif i + 2 < len_l and l[i + 2] == ':':
+ # -03:00
+ hour_offset = int(l[i + 1])
+ min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like?
+ i += 2
+ elif len_li <= 2:
+ # -[0]3
+ hour_offset = int(l[i + 1][:2])
+ min_offset = 0
+ else:
+ raise ValueError(timestr)
+
+ res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)
+
+ # Look for a timezone name between parenthesis
+ if (i + 5 < len_l and
+ info.jump(l[i + 2]) and l[i + 3] == '(' and
+ l[i + 5] == ')' and
+ 3 <= len(l[i + 4]) and
+ self._could_be_tzname(res.hour, res.tzname,
+ None, l[i + 4])):
+ # -0300 (BRST)
+ res.tzname = l[i + 4]
+ i += 4
+
+ i += 1
+
+ # Check jumps
+ elif not (info.jump(l[i]) or fuzzy):
+ raise ValueError(timestr)
+
+ else:
+ skipped_idxs.append(i)
+ i += 1
+
+ # Process year/month/day
+ year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)
+
+ res.century_specified = ymd.century_specified
+ res.year = year
+ res.month = month
+ res.day = day
+
+ except (IndexError, ValueError):
+ return None, None
+
+ if not info.validate(res):
+ return None, None
+
+ if fuzzy_with_tokens:
+ skipped_tokens = self._recombine_skipped(l, skipped_idxs)
+ return res, tuple(skipped_tokens)
+ else:
+ return res, None
+
+ def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
+ # Token is a number
+ value_repr = tokens[idx]
+ try:
+ value = self._to_decimal(value_repr)
+ except Exception as e:
+ six.raise_from(ValueError('Unknown numeric token'), e)
+
+ len_li = len(value_repr)
+
+ len_l = len(tokens)
+
+ if (len(ymd) == 3 and len_li in (2, 4) and
+ res.hour is None and
+ (idx + 1 >= len_l or
+ (tokens[idx + 1] != ':' and
+ info.hms(tokens[idx + 1]) is None))):
+ # 19990101T23[59]
+ s = tokens[idx]
+ res.hour = int(s[:2])
+
+ if len_li == 4:
+ res.minute = int(s[2:])
+
+ elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
+ # YYMMDD or HHMMSS[.ss]
+ s = tokens[idx]
+
+ if not ymd and '.' not in tokens[idx]:
+ ymd.append(s[:2])
+ ymd.append(s[2:4])
+ ymd.append(s[4:])
+ else:
+ # 19990101T235959[.59]
+
+ # TODO: Check if res attributes already set.
+ res.hour = int(s[:2])
+ res.minute = int(s[2:4])
+ res.second, res.microsecond = self._parsems(s[4:])
+
+ elif len_li in (8, 12, 14):
+ # YYYYMMDD
+ s = tokens[idx]
+ ymd.append(s[:4], 'Y')
+ ymd.append(s[4:6])
+ ymd.append(s[6:8])
+
+ if len_li > 8:
+ res.hour = int(s[8:10])
+ res.minute = int(s[10:12])
+
+ if len_li > 12:
+ res.second = int(s[12:])
+
+ elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
+ # HH[ ]h or MM[ ]m or SS[.ss][ ]s
+ hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
+ (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
+ if hms is not None:
+ # TODO: checking that hour/minute/second are not
+ # already set?
+ self._assign_hms(res, value_repr, hms)
+
+ elif idx + 2 < len_l and tokens[idx + 1] == ':':
+ # HH:MM[:SS[.ss]]
+ res.hour = int(value)
+ value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this?
+ (res.minute, res.second) = self._parse_min_sec(value)
+
+ if idx + 4 < len_l and tokens[idx + 3] == ':':
+ res.second, res.microsecond = self._parsems(tokens[idx + 4])
+
+ idx += 2
+
+ idx += 2
+
+ elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
+ sep = tokens[idx + 1]
+ ymd.append(value_repr)
+
+ if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
+ if tokens[idx + 2].isdigit():
+ # 01-01[-01]
+ ymd.append(tokens[idx + 2])
+ else:
+ # 01-Jan[-01]
+ value = info.month(tokens[idx + 2])
+
+ if value is not None:
+ ymd.append(value, 'M')
+ else:
+ raise ValueError()
+
+ if idx + 3 < len_l and tokens[idx + 3] == sep:
+ # We have three members
+ value = info.month(tokens[idx + 4])
+
+ if value is not None:
+ ymd.append(value, 'M')
+ else:
+ ymd.append(tokens[idx + 4])
+ idx += 2
+
+ idx += 1
+ idx += 1
+
+ elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
+ if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
+ # 12 am
+ hour = int(value)
+ res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
+ idx += 1
+ else:
+ # Year, month or day
+ ymd.append(value)
+ idx += 1
+
+ elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
+ # 12am
+ hour = int(value)
+ res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
+ idx += 1
+
+ elif ymd.could_be_day(value):
+ ymd.append(value)
+
+ elif not fuzzy:
+ raise ValueError()
+
+ return idx
+
+ def _find_hms_idx(self, idx, tokens, info, allow_jump):
+ len_l = len(tokens)
+
+ if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
+ # There is an "h", "m", or "s" label following this token. We take
+ # assign the upcoming label to the current token.
+ # e.g. the "12" in 12h"
+ hms_idx = idx + 1
+
+ elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
+ info.hms(tokens[idx+2]) is not None):
+ # There is a space and then an "h", "m", or "s" label.
+ # e.g. the "12" in "12 h"
+ hms_idx = idx + 2
+
+ elif idx > 0 and info.hms(tokens[idx-1]) is not None:
+ # There is a "h", "m", or "s" preceding this token. Since neither
+ # of the previous cases was hit, there is no label following this
+ # token, so we use the previous label.
+ # e.g. the "04" in "12h04"
+ hms_idx = idx-1
+
+ elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
+ info.hms(tokens[idx-2]) is not None):
+ # If we are looking at the final token, we allow for a
+ # backward-looking check to skip over a space.
+ # TODO: Are we sure this is the right condition here?
+ hms_idx = idx - 2
+
+ else:
+ hms_idx = None
+
+ return hms_idx
+
+ def _assign_hms(self, res, value_repr, hms):
+ # See GH issue #427, fixing float rounding
+ value = self._to_decimal(value_repr)
+
+ if hms == 0:
+ # Hour
+ res.hour = int(value)
+ if value % 1:
+ res.minute = int(60*(value % 1))
+
+ elif hms == 1:
+ (res.minute, res.second) = self._parse_min_sec(value)
+
+ elif hms == 2:
+ (res.second, res.microsecond) = self._parsems(value_repr)
+
+ def _could_be_tzname(self, hour, tzname, tzoffset, token):
+ return (hour is not None and
+ tzname is None and
+ tzoffset is None and
+ len(token) <= 5 and
+ (all(x in string.ascii_uppercase for x in token)
+ or token in self.info.UTCZONE))
+
+ def _ampm_valid(self, hour, ampm, fuzzy):
+ """
+ For fuzzy parsing, 'a' or 'am' (both valid English words)
+ may erroneously trigger the AM/PM flag. Deal with that
+ here.
+ """
+ val_is_ampm = True
+
+ # If there's already an AM/PM flag, this one isn't one.
+ if fuzzy and ampm is not None:
+ val_is_ampm = False
+
+ # If AM/PM is found and hour is not, raise a ValueError
+ if hour is None:
+ if fuzzy:
+ val_is_ampm = False
+ else:
+ raise ValueError('No hour specified with AM or PM flag.')
+ elif not 0 <= hour <= 12:
+ # If AM/PM is found, it's a 12 hour clock, so raise
+ # an error for invalid range
+ if fuzzy:
+ val_is_ampm = False
+ else:
+ raise ValueError('Invalid hour specified for 12-hour clock.')
+
+ return val_is_ampm
+
+ def _adjust_ampm(self, hour, ampm):
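+        # Maps a 12-hour reading onto the 24-hour clock, e.g. (8, pm=1) -> 20
+        # and (12, am=0) -> 0; noon given as (12, pm=1) is left as 12.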
+ if hour < 12 and ampm == 1:
+ hour += 12
+ elif hour == 12 and ampm == 0:
+ hour = 0
+ return hour
+
+ def _parse_min_sec(self, value):
+ # TODO: Every usage of this function sets res.second to the return
+ # value. Are there any cases where second will be returned as None and
+ # we *don't* want to set res.second = None?
+ minute = int(value)
+ second = None
+
+ sec_remainder = value % 1
+ if sec_remainder:
+ second = int(60 * sec_remainder)
+ return (minute, second)
+
+ def _parse_hms(self, idx, tokens, info, hms_idx):
+ # TODO: Is this going to admit a lot of false-positives for when we
+ # just happen to have digits and "h", "m" or "s" characters in non-date
+ # text? I guess hex hashes won't have that problem, but there's plenty
+ # of random junk out there.
+ if hms_idx is None:
+ hms = None
+ new_idx = idx
+ elif hms_idx > idx:
+ hms = info.hms(tokens[hms_idx])
+ new_idx = hms_idx
+ else:
+ # Looking backwards, increment one.
+ hms = info.hms(tokens[hms_idx]) + 1
+ new_idx = idx
+
+ return (new_idx, hms)
+
+ # ------------------------------------------------------------------
+ # Handling for individual tokens. These are kept as methods instead
+ # of functions for the sake of customizability via subclassing.
+
+ def _parsems(self, value):
+ """Parse a I[.F] seconds value into (seconds, microseconds)."""
+ if "." not in value:
+ return int(value), 0
+ else:
+ i, f = value.split(".")
+ return int(i), int(f.ljust(6, "0")[:6])
+
+ def _to_decimal(self, val):
+ try:
+ decimal_value = Decimal(val)
+ # See GH 662, edge case, infinite value should not be converted
+ # via `_to_decimal`
+ if not decimal_value.is_finite():
+ raise ValueError("Converted decimal value is infinite or NaN")
+ except Exception as e:
+ msg = "Could not convert %s to decimal" % val
+ six.raise_from(ValueError(msg), e)
+ else:
+ return decimal_value
+
+ # ------------------------------------------------------------------
+ # Post-Parsing construction of datetime output. These are kept as
+ # methods instead of functions for the sake of customizability via
+ # subclassing.
+
+ def _build_tzinfo(self, tzinfos, tzname, tzoffset):
+ if callable(tzinfos):
+ tzdata = tzinfos(tzname, tzoffset)
+ else:
+ tzdata = tzinfos.get(tzname)
+            # handle the case where tzinfos maps a name to None,
+            # e.g. tzinfos = {'BRST': None}
+ if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
+ tzinfo = tzdata
+ elif isinstance(tzdata, text_type):
+ tzinfo = tz.tzstr(tzdata)
+ elif isinstance(tzdata, integer_types):
+ tzinfo = tz.tzoffset(tzname, tzdata)
+ else:
+ raise TypeError("Offset must be tzinfo subclass, tz string, "
+ "or int offset.")
+ return tzinfo
+
+ def _build_tzaware(self, naive, res, tzinfos):
+ if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
+ tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
+ aware = naive.replace(tzinfo=tzinfo)
+ aware = self._assign_tzname(aware, res.tzname)
+
+ elif res.tzname and res.tzname in time.tzname:
+ aware = naive.replace(tzinfo=tz.tzlocal())
+
+ # Handle ambiguous local datetime
+ aware = self._assign_tzname(aware, res.tzname)
+
+ # This is mostly relevant for winter GMT zones parsed in the UK
+ if (aware.tzname() != res.tzname and
+ res.tzname in self.info.UTCZONE):
+ aware = aware.replace(tzinfo=tz.UTC)
+
+ elif res.tzoffset == 0:
+ aware = naive.replace(tzinfo=tz.UTC)
+
+ elif res.tzoffset:
+ aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
+
+ elif not res.tzname and not res.tzoffset:
+ # i.e. no timezone information was found.
+ aware = naive
+
+ elif res.tzname:
+ # tz-like string was parsed but we don't know what to do
+ # with it
+ warnings.warn("tzname {tzname} identified but not understood. "
+ "Pass `tzinfos` argument in order to correctly "
+ "return a timezone-aware datetime. In a future "
+ "version, this will raise an "
+ "exception.".format(tzname=res.tzname),
+ category=UnknownTimezoneWarning)
+ aware = naive
+
+ return aware
+
+ def _build_naive(self, res, default):
+ repl = {}
+ for attr in ("year", "month", "day", "hour",
+ "minute", "second", "microsecond"):
+ value = getattr(res, attr)
+ if value is not None:
+ repl[attr] = value
+
+ if 'day' not in repl:
+ # If the default day exceeds the last day of the month, fall back
+ # to the end of the month.
+ cyear = default.year if res.year is None else res.year
+ cmonth = default.month if res.month is None else res.month
+ cday = default.day if res.day is None else res.day
+
+ if cday > monthrange(cyear, cmonth)[1]:
+ repl['day'] = monthrange(cyear, cmonth)[1]
+
+ naive = default.replace(**repl)
+
+ if res.weekday is not None and not res.day:
+ naive = naive + relativedelta.relativedelta(weekday=res.weekday)
+
+ return naive
+
+ def _assign_tzname(self, dt, tzname):
+ if dt.tzname() != tzname:
+ new_dt = tz.enfold(dt, fold=1)
+ if new_dt.tzname() == tzname:
+ return new_dt
+
+ return dt
+
+ def _recombine_skipped(self, tokens, skipped_idxs):
+ """
+ >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
+ >>> skipped_idxs = [0, 1, 2, 5]
+ >>> _recombine_skipped(tokens, skipped_idxs)
+ ["foo bar", "baz"]
+ """
+ skipped_tokens = []
+ for i, idx in enumerate(sorted(skipped_idxs)):
+ if i > 0 and idx - 1 == skipped_idxs[i - 1]:
+ skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
+ else:
+ skipped_tokens.append(tokens[idx])
+
+ return skipped_tokens
+
+
+DEFAULTPARSER = parser()
+
+
+def parse(timestr, parserinfo=None, **kwargs):
+ """
+
+ Parse a string in one of the supported formats, using the
+ ``parserinfo`` parameters.
+
+ :param timestr:
+ A string containing a date/time stamp.
+
+ :param parserinfo:
+ A :class:`parserinfo` object containing parameters for the parser.
+ If ``None``, the default arguments to the :class:`parserinfo`
+ constructor are used.
+
+ The ``**kwargs`` parameter takes the following keyword arguments:
+
+ :param default:
+ The default datetime object, if this is a datetime object and not
+ ``None``, elements specified in ``timestr`` replace elements in the
+ default object.
+
+ :param ignoretz:
+ If set ``True``, time zones in parsed strings are ignored and a naive
+ :class:`datetime` object is returned.
+
+ :param tzinfos:
+ Additional time zone names / aliases which may be present in the
+ string. This argument maps time zone names (and optionally offsets
+ from those time zones) to time zones. This parameter can be a
+ dictionary with timezone aliases mapping time zone names to time
+ zones or a function taking two parameters (``tzname`` and
+ ``tzoffset``) and returning a time zone.
+
+ The timezones to which the names are mapped can be an integer
+ offset from UTC in seconds or a :class:`tzinfo` object.
+
+ .. doctest::
+ :options: +NORMALIZE_WHITESPACE
+
+ >>> from dateutil.parser import parse
+ >>> from dateutil.tz import gettz
+ >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
+ >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
+ >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
+ datetime.datetime(2012, 1, 19, 17, 21,
+ tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
+
+ This parameter is ignored if ``ignoretz`` is set.
+
+ :param dayfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the day (``True``) or month (``False``). If
+ ``yearfirst`` is set to ``True``, this distinguishes between YDM and
+ YMD. If set to ``None``, this value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param yearfirst:
+ Whether to interpret the first value in an ambiguous 3-integer date
+ (e.g. 01/05/09) as the year. If ``True``, the first number is taken to
+ be the year, otherwise the last number is taken to be the year. If
+ this is set to ``None``, the value is retrieved from the current
+ :class:`parserinfo` object (which itself defaults to ``False``).
+
+ :param fuzzy:
+        Whether to allow fuzzy parsing, allowing for strings like "Today is
+ January 1, 2047 at 8:21:00AM".
+
+ :param fuzzy_with_tokens:
+ If ``True``, ``fuzzy`` is automatically set to True, and the parser
+ will return a tuple where the first element is the parsed
+        :class:`datetime.datetime` timestamp and the second element is
+ a tuple containing the portions of the string which were ignored:
+
+ .. doctest::
+
+ >>> from dateutil.parser import parse
+ >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
+ (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
+
+ :return:
+ Returns a :class:`datetime.datetime` object or, if the
+ ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
+ first element being a :class:`datetime.datetime` object, the second
+ a tuple containing the fuzzy tokens.
+
+ :raises ParserError:
+ Raised for invalid or unknown string formats, if the provided
+ :class:`tzinfo` is not in a valid format, or if an invalid date would
+ be created.
+
+ :raises OverflowError:
+ Raised if the parsed date exceeds the largest valid C integer on
+ your system.
+ """
+ if parserinfo:
+ return parser(parserinfo).parse(timestr, **kwargs)
+ else:
+ return DEFAULTPARSER.parse(timestr, **kwargs)
+
+
+class _tzparser(object):
+
+ class _result(_resultbase):
+
+ __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
+ "start", "end"]
+
+ class _attr(_resultbase):
+ __slots__ = ["month", "week", "weekday",
+ "yday", "jyday", "day", "time"]
+
+ def __repr__(self):
+ return self._repr("")
+
+ def __init__(self):
+ _resultbase.__init__(self)
+ self.start = self._attr()
+ self.end = self._attr()
+
+ def parse(self, tzstr):
+ res = self._result()
+        l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)', tzstr) if x]
+ used_idxs = list()
+ try:
+
+ len_l = len(l)
+
+ i = 0
+ while i < len_l:
+ # BRST+3[BRDT[+2]]
+ j = i
+ while j < len_l and not [x for x in l[j]
+ if x in "0123456789:,-+"]:
+ j += 1
+ if j != i:
+ if not res.stdabbr:
+ offattr = "stdoffset"
+ res.stdabbr = "".join(l[i:j])
+ else:
+ offattr = "dstoffset"
+ res.dstabbr = "".join(l[i:j])
+
+ for ii in range(j):
+ used_idxs.append(ii)
+ i = j
+ if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
+ "0123456789")):
+ if l[i] in ('+', '-'):
+ # Yes, that's right. See the TZ variable
+ # documentation.
+ signal = (1, -1)[l[i] == '+']
+ used_idxs.append(i)
+ i += 1
+ else:
+ signal = -1
+ len_li = len(l[i])
+ if len_li == 4:
+ # -0300
+ setattr(res, offattr, (int(l[i][:2]) * 3600 +
+ int(l[i][2:]) * 60) * signal)
+ elif i + 1 < len_l and l[i + 1] == ':':
+ # -03:00
+ setattr(res, offattr,
+ (int(l[i]) * 3600 +
+ int(l[i + 2]) * 60) * signal)
+ used_idxs.append(i)
+ i += 2
+ elif len_li <= 2:
+ # -[0]3
+ setattr(res, offattr,
+ int(l[i][:2]) * 3600 * signal)
+ else:
+ return None
+ used_idxs.append(i)
+ i += 1
+ if res.dstabbr:
+ break
+ else:
+ break
+
+
+ if i < len_l:
+ for j in range(i, len_l):
+ if l[j] == ';':
+ l[j] = ','
+
+ assert l[i] == ','
+
+ i += 1
+
+ if i >= len_l:
+ pass
+ elif (8 <= l.count(',') <= 9 and
+ not [y for x in l[i:] if x != ','
+ for y in x if y not in "0123456789+-"]):
+ # GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
+ for x in (res.start, res.end):
+ x.month = int(l[i])
+ used_idxs.append(i)
+ i += 2
+ if l[i] == '-':
+ value = int(l[i + 1]) * -1
+ used_idxs.append(i)
+ i += 1
+ else:
+ value = int(l[i])
+ used_idxs.append(i)
+ i += 2
+ if value:
+ x.week = value
+ x.weekday = (int(l[i]) - 1) % 7
+ else:
+ x.day = int(l[i])
+ used_idxs.append(i)
+ i += 2
+ x.time = int(l[i])
+ used_idxs.append(i)
+ i += 2
+ if i < len_l:
+ if l[i] in ('-', '+'):
+ signal = (-1, 1)[l[i] == "+"]
+ used_idxs.append(i)
+ i += 1
+ else:
+ signal = 1
+ used_idxs.append(i)
+ res.dstoffset = (res.stdoffset + int(l[i]) * signal)
+
+ # This was a made-up format that is not in normal use
+ warn(('Parsed time zone "%s"' % tzstr) +
+                         ' is in a non-standard dateutil-specific format, which ' +
+ 'is now deprecated; support for parsing this format ' +
+ 'will be removed in future versions. It is recommended ' +
+ 'that you switch to a standard format like the GNU ' +
+ 'TZ variable format.', tz.DeprecatedTzFormatWarning)
+ elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
+ not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
+ '.', '-', ':')
+ for y in x if y not in "0123456789"]):
+ for x in (res.start, res.end):
+ if l[i] == 'J':
+ # non-leap year day (1 based)
+ used_idxs.append(i)
+ i += 1
+ x.jyday = int(l[i])
+ elif l[i] == 'M':
+ # month[-.]week[-.]weekday
+ used_idxs.append(i)
+ i += 1
+ x.month = int(l[i])
+ used_idxs.append(i)
+ i += 1
+ assert l[i] in ('-', '.')
+ used_idxs.append(i)
+ i += 1
+ x.week = int(l[i])
+ if x.week == 5:
+ x.week = -1
+ used_idxs.append(i)
+ i += 1
+ assert l[i] in ('-', '.')
+ used_idxs.append(i)
+ i += 1
+ x.weekday = (int(l[i]) - 1) % 7
+ else:
+ # year day (zero based)
+ x.yday = int(l[i]) + 1
+
+ used_idxs.append(i)
+ i += 1
+
+ if i < len_l and l[i] == '/':
+ used_idxs.append(i)
+ i += 1
+ # start time
+ len_li = len(l[i])
+ if len_li == 4:
+ # -0300
+ x.time = (int(l[i][:2]) * 3600 +
+ int(l[i][2:]) * 60)
+ elif i + 1 < len_l and l[i + 1] == ':':
+ # -03:00
+ x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
+ used_idxs.append(i)
+ i += 2
+ if i + 1 < len_l and l[i + 1] == ':':
+ used_idxs.append(i)
+ i += 2
+ x.time += int(l[i])
+ elif len_li <= 2:
+ # -[0]3
+ x.time = (int(l[i][:2]) * 3600)
+ else:
+ return None
+ used_idxs.append(i)
+ i += 1
+
+ assert i == len_l or l[i] == ','
+
+ i += 1
+
+ assert i >= len_l
+
+ except (IndexError, ValueError, AssertionError):
+ return None
+
+ unused_idxs = set(range(len_l)).difference(used_idxs)
+        res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",", ":"})
+ return res
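+
+    # Sketch: a GNU-style TZ string such as "EST5EDT,M3.2.0,M11.1.0" yields
+    # stdabbr='EST' with stdoffset=-18000 (note the inverted sign convention
+    # of the TZ format), dstabbr='EDT', and start/end rules taken from the
+    # M<month>.<week>.<weekday> fields.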
+
+
+DEFAULTTZPARSER = _tzparser()
+
+
+def _parsetz(tzstr):
+ return DEFAULTTZPARSER.parse(tzstr)
+
+
+class ParserError(ValueError):
+ """Exception subclass used for any failure to parse a datetime string.
+
+ This is a subclass of :py:exc:`ValueError`, and should be raised any time
+ earlier versions of ``dateutil`` would have raised ``ValueError``.
+
+ .. versionadded:: 2.8.1
+ """
+ def __str__(self):
+ try:
+ return self.args[0] % self.args[1:]
+ except (TypeError, IndexError):
+ return super(ParserError, self).__str__()
+
+ def __repr__(self):
+ args = ", ".join("'%s'" % arg for arg in self.args)
+ return "%s(%s)" % (self.__class__.__name__, args)
+
+
+class UnknownTimezoneWarning(RuntimeWarning):
+ """Raised when the parser finds a timezone it cannot parse into a tzinfo.
+
+ .. versionadded:: 2.7.0
+ """
+# vim:ts=4:sw=4:et
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/parser/isoparser.py b/tapdown/lib/python3.11/site-packages/dateutil/parser/isoparser.py
new file mode 100644
index 0000000..7060087
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/parser/isoparser.py
@@ -0,0 +1,416 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers a parser for ISO-8601 strings
+
+It is intended to support all valid date, time and datetime formats per the
+ISO-8601 specification.
+
+.. versionadded:: 2.7.0
+"""
+from datetime import datetime, timedelta, time, date
+import calendar
+from dateutil import tz
+
+from functools import wraps
+
+import re
+import six
+
+__all__ = ["isoparse", "isoparser"]
+
+
+def _takes_ascii(f):
+ @wraps(f)
+ def func(self, str_in, *args, **kwargs):
+ # If it's a stream, read the whole thing
+ str_in = getattr(str_in, 'read', lambda: str_in)()
+
+ # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
+ if isinstance(str_in, six.text_type):
+ # ASCII is the same in UTF-8
+ try:
+ str_in = str_in.encode('ascii')
+ except UnicodeEncodeError as e:
+ msg = 'ISO-8601 strings should contain only ASCII characters'
+ six.raise_from(ValueError(msg), e)
+
+ return f(self, str_in, *args, **kwargs)
+
+ return func
+
+
+class isoparser(object):
+ def __init__(self, sep=None):
+ """
+ :param sep:
+ A single character that separates date and time portions. If
+ ``None``, the parser will accept any single character.
+ For strict ISO-8601 adherence, pass ``'T'``.
+ """
+ if sep is not None:
+ if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
+ raise ValueError('Separator must be a single, non-numeric ' +
+ 'ASCII character')
+
+ sep = sep.encode('ascii')
+
+ self._sep = sep
+
+ @_takes_ascii
+ def isoparse(self, dt_str):
+ """
+ Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
+
+ An ISO-8601 datetime string consists of a date portion, followed
+ optionally by a time portion - the date and time portions are separated
+ by a single character separator, which is ``T`` in the official
+ standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
+ combined with a time portion.
+
+ Supported date formats are:
+
+ Common:
+
+ - ``YYYY``
+ - ``YYYY-MM``
+ - ``YYYY-MM-DD`` or ``YYYYMMDD``
+
+ Uncommon:
+
+ - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
+ - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
+
+ The ISO week and day numbering follows the same logic as
+ :func:`datetime.date.isocalendar`.
+
+ Supported time formats are:
+
+ - ``hh``
+ - ``hh:mm`` or ``hhmm``
+ - ``hh:mm:ss`` or ``hhmmss``
+ - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits)
+
+ Midnight is a special case for `hh`, as the standard supports both
+ 00:00 and 24:00 as a representation. The decimal separator can be
+ either a dot or a comma.
+
+
+ .. caution::
+
+ Support for fractional components other than seconds is part of the
+ ISO-8601 standard, but is not currently implemented in this parser.
+
+ Supported time zone offset formats are:
+
+ - `Z` (UTC)
+ - `±HH:MM`
+ - `±HHMM`
+ - `±HH`
+
+ Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
+ with the exception of UTC, which will be represented as
+ :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
+ as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.
+
+ :param dt_str:
+ A string or stream containing only an ISO-8601 datetime string
+
+ :return:
+ Returns a :class:`datetime.datetime` representing the string.
+ Unspecified components default to their lowest value.
+
+ .. warning::
+
+ As of version 2.7.0, the strictness of the parser should not be
+ considered a stable part of the contract. Any valid ISO-8601 string
+ that parses correctly with the default settings will continue to
+ parse correctly in future versions, but invalid strings that
+ currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
+ guaranteed to continue failing in future versions if they encode
+ a valid date.
+
+ .. versionadded:: 2.7.0
+ """
+ components, pos = self._parse_isodate(dt_str)
+
+ if len(dt_str) > pos:
+ if self._sep is None or dt_str[pos:pos + 1] == self._sep:
+ components += self._parse_isotime(dt_str[pos + 1:])
+ else:
+ raise ValueError('String contains unknown ISO components')
+
+ if len(components) > 3 and components[3] == 24:
+ components[3] = 0
+ return datetime(*components) + timedelta(days=1)
+
+ return datetime(*components)
+
+ @_takes_ascii
+ def parse_isodate(self, datestr):
+ """
+ Parse the date portion of an ISO string.
+
+ :param datestr:
+ The string portion of an ISO string, without a separator
+
+ :return:
+ Returns a :class:`datetime.date` object
+ """
+ components, pos = self._parse_isodate(datestr)
+ if pos < len(datestr):
+ raise ValueError('String contains unknown ISO ' +
+ 'components: {!r}'.format(datestr.decode('ascii')))
+ return date(*components)
+
+ @_takes_ascii
+ def parse_isotime(self, timestr):
+ """
+ Parse the time portion of an ISO string.
+
+ :param timestr:
+ The time portion of an ISO string, without a separator
+
+ :return:
+ Returns a :class:`datetime.time` object
+ """
+ components = self._parse_isotime(timestr)
+ if components[0] == 24:
+ components[0] = 0
+ return time(*components)
+
+ @_takes_ascii
+ def parse_tzstr(self, tzstr, zero_as_utc=True):
+ """
+ Parse a valid ISO time zone string.
+
+ See :func:`isoparser.isoparse` for details on supported formats.
+
+ :param tzstr:
+ A string representing an ISO time zone offset
+
+ :param zero_as_utc:
+ Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones
+
+ :return:
+ Returns :class:`dateutil.tz.tzoffset` for offsets and
+ :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
+ specified) offsets equivalent to UTC.
+ """
+ return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
+
+ # Constants
+ _DATE_SEP = b'-'
+ _TIME_SEP = b':'
+ _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)')
+
+ def _parse_isodate(self, dt_str):
+ try:
+ return self._parse_isodate_common(dt_str)
+ except ValueError:
+ return self._parse_isodate_uncommon(dt_str)
+
+ def _parse_isodate_common(self, dt_str):
+ len_str = len(dt_str)
+ components = [1, 1, 1]
+
+ if len_str < 4:
+ raise ValueError('ISO string too short')
+
+ # Year
+ components[0] = int(dt_str[0:4])
+ pos = 4
+ if pos >= len_str:
+ return components, pos
+
+ has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
+ if has_sep:
+ pos += 1
+
+ # Month
+ if len_str - pos < 2:
+ raise ValueError('Invalid common month')
+
+ components[1] = int(dt_str[pos:pos + 2])
+ pos += 2
+
+ if pos >= len_str:
+ if has_sep:
+ return components, pos
+ else:
+ raise ValueError('Invalid ISO format')
+
+ if has_sep:
+ if dt_str[pos:pos + 1] != self._DATE_SEP:
+ raise ValueError('Invalid separator in ISO string')
+ pos += 1
+
+ # Day
+ if len_str - pos < 2:
+ raise ValueError('Invalid common day')
+ components[2] = int(dt_str[pos:pos + 2])
+ return components, pos + 2
+
+ def _parse_isodate_uncommon(self, dt_str):
+ if len(dt_str) < 4:
+ raise ValueError('ISO string too short')
+
+ # All ISO formats start with the year
+ year = int(dt_str[0:4])
+
+ has_sep = dt_str[4:5] == self._DATE_SEP
+
+ pos = 4 + has_sep # Skip '-' if it's there
+ if dt_str[pos:pos + 1] == b'W':
+ # YYYY-?Www-?D?
+ pos += 1
+ weekno = int(dt_str[pos:pos + 2])
+ pos += 2
+
+ dayno = 1
+ if len(dt_str) > pos:
+ if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
+ raise ValueError('Inconsistent use of dash separator')
+
+ pos += has_sep
+
+ dayno = int(dt_str[pos:pos + 1])
+ pos += 1
+
+ base_date = self._calculate_weekdate(year, weekno, dayno)
+ else:
+ # YYYYDDD or YYYY-DDD
+ if len(dt_str) - pos < 3:
+ raise ValueError('Invalid ordinal day')
+
+ ordinal_day = int(dt_str[pos:pos + 3])
+ pos += 3
+
+ if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
+ raise ValueError('Invalid ordinal day' +
+ ' {} for year {}'.format(ordinal_day, year))
+
+ base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
+
+ components = [base_date.year, base_date.month, base_date.day]
+ return components, pos
+
+ def _calculate_weekdate(self, year, week, day):
+ """
+        Calculate the day corresponding to the ISO year-week-day calendar.
+
+ This function is effectively the inverse of
+ :func:`datetime.date.isocalendar`.
+
+ :param year:
+ The year in the ISO calendar
+
+ :param week:
+ The week in the ISO calendar - range is [1, 53]
+
+ :param day:
+ The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
+
+ :return:
+ Returns a :class:`datetime.date`
+ """
+ if not 0 < week < 54:
+ raise ValueError('Invalid week: {}'.format(week))
+
+ if not 0 < day < 8: # Range is 1-7
+ raise ValueError('Invalid weekday: {}'.format(day))
+
+ # Get week 1 for the specific year:
+ jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it
+ week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
+
+ # Now add the specific number of weeks and days to get what we want
+ week_offset = (week - 1) * 7 + (day - 1)
+ return week_1 + timedelta(days=week_offset)
+
+ def _parse_isotime(self, timestr):
+ len_str = len(timestr)
+ components = [0, 0, 0, 0, None]
+ pos = 0
+ comp = -1
+
+ if len_str < 2:
+ raise ValueError('ISO time too short')
+
+ has_sep = False
+
+ while pos < len_str and comp < 5:
+ comp += 1
+
+ if timestr[pos:pos + 1] in b'-+Zz':
+ # Detect time zone boundary
+ components[-1] = self._parse_tzstr(timestr[pos:])
+ pos = len_str
+ break
+
+ if comp == 1 and timestr[pos:pos+1] == self._TIME_SEP:
+ has_sep = True
+ pos += 1
+ elif comp == 2 and has_sep:
+ if timestr[pos:pos+1] != self._TIME_SEP:
+ raise ValueError('Inconsistent use of colon separator')
+ pos += 1
+
+ if comp < 3:
+ # Hour, minute, second
+ components[comp] = int(timestr[pos:pos + 2])
+ pos += 2
+
+ if comp == 3:
+ # Fraction of a second
+ frac = self._FRACTION_REGEX.match(timestr[pos:])
+ if not frac:
+ continue
+
+ us_str = frac.group(1)[:6] # Truncate to microseconds
+ components[comp] = int(us_str) * 10**(6 - len(us_str))
+ pos += len(frac.group())
+
+ if pos < len_str:
+ raise ValueError('Unused components in ISO string')
+
+ if components[0] == 24:
+ # Standard supports 00:00 and 24:00 as representations of midnight
+ if any(component != 0 for component in components[1:4]):
+ raise ValueError('Hour may only be 24 at 24:00:00.000')
+
+ return components
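+
+    # Example (informal): _parse_isotime(b'14:30:15.123Z') returns
+    # [14, 30, 15, 123000, tzutc()].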
+
+ def _parse_tzstr(self, tzstr, zero_as_utc=True):
+ if tzstr == b'Z' or tzstr == b'z':
+ return tz.UTC
+
+ if len(tzstr) not in {3, 5, 6}:
+ raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')
+
+ if tzstr[0:1] == b'-':
+ mult = -1
+ elif tzstr[0:1] == b'+':
+ mult = 1
+ else:
+ raise ValueError('Time zone offset requires sign')
+
+ hours = int(tzstr[1:3])
+ if len(tzstr) == 3:
+ minutes = 0
+ else:
+ minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
+
+ if zero_as_utc and hours == 0 and minutes == 0:
+ return tz.UTC
+ else:
+ if minutes > 59:
+ raise ValueError('Invalid minutes in time zone offset')
+
+ if hours > 23:
+ raise ValueError('Invalid hours in time zone offset')
+
+ return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
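+
+    # Example (informal): _parse_tzstr(b'-05:00') -> tzoffset(None, -18000);
+    # b'Z' and b'+00:00' both normalize to tz.UTC.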
+
+
+DEFAULT_ISOPARSER = isoparser()
+isoparse = DEFAULT_ISOPARSER.isoparse
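+
+# Example usage (informal sketch):
+#
+#   >>> isoparse('2018-04-09T13:37:00Z')
+#   datetime.datetime(2018, 4, 9, 13, 37, tzinfo=tzutc())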
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/relativedelta.py b/tapdown/lib/python3.11/site-packages/dateutil/relativedelta.py
new file mode 100644
index 0000000..cd323a5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/relativedelta.py
@@ -0,0 +1,599 @@
+# -*- coding: utf-8 -*-
+import datetime
+import calendar
+
+import operator
+from math import copysign
+
+from six import integer_types
+from warnings import warn
+
+from ._common import weekday
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
+
+__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+
+class relativedelta(object):
+ """
+ The relativedelta type is designed to be applied to an existing datetime and
+    can replace specific components of that datetime, or represent an interval
+ of time.
+
+ It is based on the specification of the excellent work done by M.-A. Lemburg
+ in his
+    `mx.DateTime <https://www.egenix.com/products/python/mxBase/mxDateTime/>`_ extension.
+ However, notice that this type does *NOT* implement the same algorithm as
+ his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
+
+ There are two different ways to build a relativedelta instance. The
+ first one is passing it two date/datetime classes::
+
+ relativedelta(datetime1, datetime2)
+
+ The second one is passing it any number of the following keyword arguments::
+
+ relativedelta(arg1=x,arg2=y,arg3=z...)
+
+ year, month, day, hour, minute, second, microsecond:
+ Absolute information (argument is singular); adding or subtracting a
+ relativedelta with absolute information does not perform an arithmetic
+ operation, but rather REPLACES the corresponding value in the
+ original datetime with the value(s) in relativedelta.
+
+ years, months, weeks, days, hours, minutes, seconds, microseconds:
+ Relative information, may be negative (argument is plural); adding
+ or subtracting a relativedelta with relative information performs
+ the corresponding arithmetic operation on the original datetime value
+ with the information in the relativedelta.
+
+ weekday:
+ One of the weekday instances (MO, TU, etc) available in the
+ relativedelta module. These instances may receive a parameter N,
+ specifying the Nth weekday, which could be positive or negative
+ (like MO(+1) or MO(-2)). Not specifying it is the same as specifying
+ +1. You can also use an integer, where 0=MO. This argument is always
+ relative e.g. if the calculated date is already Monday, using MO(1)
+ or MO(-1) won't change the day. To effectively make it absolute, use
+ it in combination with the day argument (e.g. day=1, MO(1) for first
+ Monday of the month).
+
+ leapdays:
+        Will add given days to the date found, if the year is a leap
+        year, and the date found is after February 28th.
+
+ yearday, nlyearday:
+ Set the yearday or the non-leap year day (jump leap days).
+ These are converted to day/month/leapdays information.
+
+ There are relative and absolute forms of the keyword
+ arguments. The plural is relative, and the singular is
+ absolute. For each argument in the order below, the absolute form
+ is applied first (by setting each attribute to that value) and
+ then the relative form (by adding the value to the attribute).
+
+ The order of attributes considered when this relativedelta is
+ added to a datetime is:
+
+ 1. Year
+ 2. Month
+ 3. Day
+ 4. Hours
+ 5. Minutes
+ 6. Seconds
+ 7. Microseconds
+
+ Finally, weekday is applied, using the rule described above.
+
+ For example
+
+ >>> from datetime import datetime
+ >>> from dateutil.relativedelta import relativedelta, MO
+ >>> dt = datetime(2018, 4, 9, 13, 37, 0)
+ >>> delta = relativedelta(hours=25, day=1, weekday=MO(1))
+ >>> dt + delta
+ datetime.datetime(2018, 4, 2, 14, 37)
+
+ First, the day is set to 1 (the first of the month), then 25 hours
+ are added, to get to the 2nd day and 14th hour, finally the
+ weekday is applied, but since the 2nd is already a Monday there is
+ no effect.
+
+ """
+
+ def __init__(self, dt1=None, dt2=None,
+ years=0, months=0, days=0, leapdays=0, weeks=0,
+ hours=0, minutes=0, seconds=0, microseconds=0,
+ year=None, month=None, day=None, weekday=None,
+ yearday=None, nlyearday=None,
+ hour=None, minute=None, second=None, microsecond=None):
+
+ if dt1 and dt2:
+ # datetime is a subclass of date. So both must be date
+ if not (isinstance(dt1, datetime.date) and
+ isinstance(dt2, datetime.date)):
+ raise TypeError("relativedelta only diffs datetime/date")
+
+ # We allow two dates, or two datetimes, so we coerce them to be
+ # of the same type
+ if (isinstance(dt1, datetime.datetime) !=
+ isinstance(dt2, datetime.datetime)):
+ if not isinstance(dt1, datetime.datetime):
+ dt1 = datetime.datetime.fromordinal(dt1.toordinal())
+ elif not isinstance(dt2, datetime.datetime):
+ dt2 = datetime.datetime.fromordinal(dt2.toordinal())
+
+ self.years = 0
+ self.months = 0
+ self.days = 0
+ self.leapdays = 0
+ self.hours = 0
+ self.minutes = 0
+ self.seconds = 0
+ self.microseconds = 0
+ self.year = None
+ self.month = None
+ self.day = None
+ self.weekday = None
+ self.hour = None
+ self.minute = None
+ self.second = None
+ self.microsecond = None
+ self._has_time = 0
+
+ # Get year / month delta between the two
+ months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month)
+ self._set_months(months)
+
+ # Remove the year/month delta so the timedelta is just well-defined
+ # time units (seconds, days and microseconds)
+ dtm = self.__radd__(dt2)
+
+ # If we've overshot our target, make an adjustment
+ if dt1 < dt2:
+ compare = operator.gt
+ increment = 1
+ else:
+ compare = operator.lt
+ increment = -1
+
+ while compare(dt1, dtm):
+ months += increment
+ self._set_months(months)
+ dtm = self.__radd__(dt2)
+
+ # Get the timedelta between the "months-adjusted" date and dt1
+ delta = dt1 - dtm
+ self.seconds = delta.seconds + delta.days * 86400
+ self.microseconds = delta.microseconds
+ else:
+ # Check for non-integer values in integer-only quantities
+ if any(x is not None and x != int(x) for x in (years, months)):
+ raise ValueError("Non-integer years and months are "
+ "ambiguous and not currently supported.")
+
+ # Relative information
+ self.years = int(years)
+ self.months = int(months)
+ self.days = days + weeks * 7
+ self.leapdays = leapdays
+ self.hours = hours
+ self.minutes = minutes
+ self.seconds = seconds
+ self.microseconds = microseconds
+
+ # Absolute information
+ self.year = year
+ self.month = month
+ self.day = day
+ self.hour = hour
+ self.minute = minute
+ self.second = second
+ self.microsecond = microsecond
+
+ if any(x is not None and int(x) != x
+ for x in (year, month, day, hour,
+ minute, second, microsecond)):
+ # For now we'll deprecate floats - later it'll be an error.
+ warn("Non-integer value passed as absolute information. " +
+ "This is not a well-defined condition and will raise " +
+ "errors in future versions.", DeprecationWarning)
+
+ if isinstance(weekday, integer_types):
+ self.weekday = weekdays[weekday]
+ else:
+ self.weekday = weekday
+
+ yday = 0
+ if nlyearday:
+ yday = nlyearday
+ elif yearday:
+ yday = yearday
+ if yearday > 59:
+ self.leapdays = -1
+ if yday:
+ ydayidx = [31, 59, 90, 120, 151, 181, 212,
+ 243, 273, 304, 334, 366]
+ for idx, ydays in enumerate(ydayidx):
+ if yday <= ydays:
+ self.month = idx+1
+ if idx == 0:
+ self.day = yday
+ else:
+ self.day = yday-ydayidx[idx-1]
+ break
+ else:
+ raise ValueError("invalid year day (%d)" % yday)
+
+ self._fix()
+
+ def _fix(self):
+ if abs(self.microseconds) > 999999:
+ s = _sign(self.microseconds)
+ div, mod = divmod(self.microseconds * s, 1000000)
+ self.microseconds = mod * s
+ self.seconds += div * s
+ if abs(self.seconds) > 59:
+ s = _sign(self.seconds)
+ div, mod = divmod(self.seconds * s, 60)
+ self.seconds = mod * s
+ self.minutes += div * s
+ if abs(self.minutes) > 59:
+ s = _sign(self.minutes)
+ div, mod = divmod(self.minutes * s, 60)
+ self.minutes = mod * s
+ self.hours += div * s
+ if abs(self.hours) > 23:
+ s = _sign(self.hours)
+ div, mod = divmod(self.hours * s, 24)
+ self.hours = mod * s
+ self.days += div * s
+ if abs(self.months) > 11:
+ s = _sign(self.months)
+ div, mod = divmod(self.months * s, 12)
+ self.months = mod * s
+ self.years += div * s
+ if (self.hours or self.minutes or self.seconds or self.microseconds
+ or self.hour is not None or self.minute is not None or
+ self.second is not None or self.microsecond is not None):
+ self._has_time = 1
+ else:
+ self._has_time = 0
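+
+    # Example (informal): relativedelta(minutes=90) is normalized by _fix()
+    # into hours=+1, minutes=+30.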
+
+ @property
+ def weeks(self):
+ return int(self.days / 7.0)
+
+ @weeks.setter
+ def weeks(self, value):
+ self.days = self.days - (self.weeks * 7) + value * 7
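+
+    # Example (informal): relativedelta(days=17).weeks == 2; assigning
+    # .weeks = 3 rewrites days to 17 - 2*7 + 3*7 == 24.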
+
+ def _set_months(self, months):
+ self.months = months
+ if abs(self.months) > 11:
+ s = _sign(self.months)
+ div, mod = divmod(self.months * s, 12)
+ self.months = mod * s
+ self.years = div * s
+ else:
+ self.years = 0
+
+ def normalized(self):
+ """
+ Return a version of this object represented entirely using integer
+ values for the relative attributes.
+
+ >>> relativedelta(days=1.5, hours=2).normalized()
+ relativedelta(days=+1, hours=+14)
+
+ :return:
+ Returns a :class:`dateutil.relativedelta.relativedelta` object.
+ """
+ # Cascade remainders down (rounding each to roughly nearest microsecond)
+ days = int(self.days)
+
+ hours_f = round(self.hours + 24 * (self.days - days), 11)
+ hours = int(hours_f)
+
+ minutes_f = round(self.minutes + 60 * (hours_f - hours), 10)
+ minutes = int(minutes_f)
+
+ seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8)
+ seconds = int(seconds_f)
+
+ microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds))
+
+ # Constructor carries overflow back up with call to _fix()
+ return self.__class__(years=self.years, months=self.months,
+ days=days, hours=hours, minutes=minutes,
+ seconds=seconds, microseconds=microseconds,
+ leapdays=self.leapdays, year=self.year,
+ month=self.month, day=self.day,
+ weekday=self.weekday, hour=self.hour,
+ minute=self.minute, second=self.second,
+ microsecond=self.microsecond)
+
+ def __add__(self, other):
+ if isinstance(other, relativedelta):
+ return self.__class__(years=other.years + self.years,
+ months=other.months + self.months,
+ days=other.days + self.days,
+ hours=other.hours + self.hours,
+ minutes=other.minutes + self.minutes,
+ seconds=other.seconds + self.seconds,
+ microseconds=(other.microseconds +
+ self.microseconds),
+ leapdays=other.leapdays or self.leapdays,
+ year=(other.year if other.year is not None
+ else self.year),
+ month=(other.month if other.month is not None
+ else self.month),
+ day=(other.day if other.day is not None
+ else self.day),
+ weekday=(other.weekday if other.weekday is not None
+ else self.weekday),
+ hour=(other.hour if other.hour is not None
+ else self.hour),
+ minute=(other.minute if other.minute is not None
+ else self.minute),
+ second=(other.second if other.second is not None
+ else self.second),
+ microsecond=(other.microsecond if other.microsecond
+ is not None else
+ self.microsecond))
+ if isinstance(other, datetime.timedelta):
+ return self.__class__(years=self.years,
+ months=self.months,
+ days=self.days + other.days,
+ hours=self.hours,
+ minutes=self.minutes,
+ seconds=self.seconds + other.seconds,
+ microseconds=self.microseconds + other.microseconds,
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+ if not isinstance(other, datetime.date):
+ return NotImplemented
+ elif self._has_time and not isinstance(other, datetime.datetime):
+ other = datetime.datetime.fromordinal(other.toordinal())
+ year = (self.year or other.year)+self.years
+ month = self.month or other.month
+ if self.months:
+ assert 1 <= abs(self.months) <= 12
+ month += self.months
+ if month > 12:
+ year += 1
+ month -= 12
+ elif month < 1:
+ year -= 1
+ month += 12
+ day = min(calendar.monthrange(year, month)[1],
+ self.day or other.day)
+ repl = {"year": year, "month": month, "day": day}
+ for attr in ["hour", "minute", "second", "microsecond"]:
+ value = getattr(self, attr)
+ if value is not None:
+ repl[attr] = value
+ days = self.days
+ if self.leapdays and month > 2 and calendar.isleap(year):
+ days += self.leapdays
+ ret = (other.replace(**repl)
+ + datetime.timedelta(days=days,
+ hours=self.hours,
+ minutes=self.minutes,
+ seconds=self.seconds,
+ microseconds=self.microseconds))
+ if self.weekday:
+ weekday, nth = self.weekday.weekday, self.weekday.n or 1
+ jumpdays = (abs(nth) - 1) * 7
+ if nth > 0:
+ jumpdays += (7 - ret.weekday() + weekday) % 7
+ else:
+ jumpdays += (ret.weekday() - weekday) % 7
+ jumpdays *= -1
+ ret += datetime.timedelta(days=jumpdays)
+ return ret
+
+ def __radd__(self, other):
+ return self.__add__(other)
+
+ def __rsub__(self, other):
+ return self.__neg__().__radd__(other)
+
+ def __sub__(self, other):
+ if not isinstance(other, relativedelta):
+ return NotImplemented # In case the other object defines __rsub__
+ return self.__class__(years=self.years - other.years,
+ months=self.months - other.months,
+ days=self.days - other.days,
+ hours=self.hours - other.hours,
+ minutes=self.minutes - other.minutes,
+ seconds=self.seconds - other.seconds,
+ microseconds=self.microseconds - other.microseconds,
+ leapdays=self.leapdays or other.leapdays,
+ year=(self.year if self.year is not None
+ else other.year),
+ month=(self.month if self.month is not None else
+ other.month),
+ day=(self.day if self.day is not None else
+ other.day),
+ weekday=(self.weekday if self.weekday is not None else
+ other.weekday),
+ hour=(self.hour if self.hour is not None else
+ other.hour),
+ minute=(self.minute if self.minute is not None else
+ other.minute),
+ second=(self.second if self.second is not None else
+ other.second),
+ microsecond=(self.microsecond if self.microsecond
+ is not None else
+ other.microsecond))
+
+ def __abs__(self):
+ return self.__class__(years=abs(self.years),
+ months=abs(self.months),
+ days=abs(self.days),
+ hours=abs(self.hours),
+ minutes=abs(self.minutes),
+ seconds=abs(self.seconds),
+ microseconds=abs(self.microseconds),
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+
+ def __neg__(self):
+ return self.__class__(years=-self.years,
+ months=-self.months,
+ days=-self.days,
+ hours=-self.hours,
+ minutes=-self.minutes,
+ seconds=-self.seconds,
+ microseconds=-self.microseconds,
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+
+ def __bool__(self):
+ return not (not self.years and
+ not self.months and
+ not self.days and
+ not self.hours and
+ not self.minutes and
+ not self.seconds and
+ not self.microseconds and
+ not self.leapdays and
+ self.year is None and
+ self.month is None and
+ self.day is None and
+ self.weekday is None and
+ self.hour is None and
+ self.minute is None and
+ self.second is None and
+ self.microsecond is None)
+ # Compatibility with Python 2.x
+ __nonzero__ = __bool__
+
+ def __mul__(self, other):
+ try:
+ f = float(other)
+ except TypeError:
+ return NotImplemented
+
+ return self.__class__(years=int(self.years * f),
+ months=int(self.months * f),
+ days=int(self.days * f),
+ hours=int(self.hours * f),
+ minutes=int(self.minutes * f),
+ seconds=int(self.seconds * f),
+ microseconds=int(self.microseconds * f),
+ leapdays=self.leapdays,
+ year=self.year,
+ month=self.month,
+ day=self.day,
+ weekday=self.weekday,
+ hour=self.hour,
+ minute=self.minute,
+ second=self.second,
+ microsecond=self.microsecond)
+
+ __rmul__ = __mul__
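+
+    # Example (informal): each relative component is scaled and truncated,
+    # then re-normalized by _fix(), so
+    # relativedelta(months=6) * 2 == relativedelta(years=+1).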
+
+ def __eq__(self, other):
+ if not isinstance(other, relativedelta):
+ return NotImplemented
+ if self.weekday or other.weekday:
+ if not self.weekday or not other.weekday:
+ return False
+ if self.weekday.weekday != other.weekday.weekday:
+ return False
+ n1, n2 = self.weekday.n, other.weekday.n
+ if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
+ return False
+ return (self.years == other.years and
+ self.months == other.months and
+ self.days == other.days and
+ self.hours == other.hours and
+ self.minutes == other.minutes and
+ self.seconds == other.seconds and
+ self.microseconds == other.microseconds and
+ self.leapdays == other.leapdays and
+ self.year == other.year and
+ self.month == other.month and
+ self.day == other.day and
+ self.hour == other.hour and
+ self.minute == other.minute and
+ self.second == other.second and
+ self.microsecond == other.microsecond)
+
+ def __hash__(self):
+ return hash((
+ self.weekday,
+ self.years,
+ self.months,
+ self.days,
+ self.hours,
+ self.minutes,
+ self.seconds,
+ self.microseconds,
+ self.leapdays,
+ self.year,
+ self.month,
+ self.day,
+ self.hour,
+ self.minute,
+ self.second,
+ self.microsecond,
+ ))
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __div__(self, other):
+ try:
+ reciprocal = 1 / float(other)
+ except TypeError:
+ return NotImplemented
+
+ return self.__mul__(reciprocal)
+
+ __truediv__ = __div__
+
+ def __repr__(self):
+ l = []
+ for attr in ["years", "months", "days", "leapdays",
+ "hours", "minutes", "seconds", "microseconds"]:
+ value = getattr(self, attr)
+ if value:
+ l.append("{attr}={value:+g}".format(attr=attr, value=value))
+ for attr in ["year", "month", "day", "weekday",
+ "hour", "minute", "second", "microsecond"]:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("{attr}={value}".format(attr=attr, value=repr(value)))
+ return "{classname}({attrs})".format(classname=self.__class__.__name__,
+ attrs=", ".join(l))
+
+
+def _sign(x):
+ return int(copysign(1, x))
+
+# vim:ts=4:sw=4:et
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/rrule.py b/tapdown/lib/python3.11/site-packages/dateutil/rrule.py
new file mode 100644
index 0000000..571a0d2
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/rrule.py
@@ -0,0 +1,1737 @@
+# -*- coding: utf-8 -*-
+"""
+The rrule module offers a small, complete, and very fast, implementation of
+the recurrence rules documented in the
+`iCalendar RFC <https://tools.ietf.org/html/rfc5545>`_,
+including support for caching of results.
+"""
+import calendar
+import datetime
+import heapq
+import itertools
+import re
+import sys
+from functools import wraps
+# For warning about deprecation of until and count
+from warnings import warn
+
+from six import advance_iterator, integer_types
+
+from six.moves import _thread, range
+
+from ._common import weekday as weekdaybase
+
+try:
+ from math import gcd
+except ImportError:
+ from fractions import gcd
+
+__all__ = ["rrule", "rruleset", "rrulestr",
+ "YEARLY", "MONTHLY", "WEEKLY", "DAILY",
+ "HOURLY", "MINUTELY", "SECONDLY",
+ "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
+
+# Every mask is 7 days longer to handle cross-year weekly periods.
+M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 +
+ [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
+M365MASK = list(M366MASK)
+M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32))
+MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+MDAY365MASK = list(MDAY366MASK)
+M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0))
+NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
+NMDAY365MASK = list(NMDAY366MASK)
+M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366)
+M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365)
+WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55
+del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
+MDAY365MASK = tuple(MDAY365MASK)
+M365MASK = tuple(M365MASK)
+
+FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY']
+
+(YEARLY,
+ MONTHLY,
+ WEEKLY,
+ DAILY,
+ HOURLY,
+ MINUTELY,
+ SECONDLY) = list(range(7))
+
+# Imported on demand.
+easter = None
+parser = None
+
+
+class weekday(weekdaybase):
+ """
+ This version of weekday does not allow n = 0.
+ """
+ def __init__(self, wkday, n=None):
+ if n == 0:
+ raise ValueError("Can't create weekday with n==0")
+
+ super(weekday, self).__init__(wkday, n)
+
+
+MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7))
+
+
+def _invalidates_cache(f):
+ """
+ Decorator for rruleset methods which may invalidate the
+ cached length.
+ """
+ @wraps(f)
+ def inner_func(self, *args, **kwargs):
+ rv = f(self, *args, **kwargs)
+ self._invalidate_cache()
+ return rv
+
+ return inner_func
+
+
+class rrulebase(object):
+ def __init__(self, cache=False):
+ if cache:
+ self._cache = []
+ self._cache_lock = _thread.allocate_lock()
+ self._invalidate_cache()
+ else:
+ self._cache = None
+ self._cache_complete = False
+ self._len = None
+
+ def __iter__(self):
+ if self._cache_complete:
+ return iter(self._cache)
+ elif self._cache is None:
+ return self._iter()
+ else:
+ return self._iter_cached()
+
+ def _invalidate_cache(self):
+ if self._cache is not None:
+ self._cache = []
+ self._cache_complete = False
+ self._cache_gen = self._iter()
+
+ if self._cache_lock.locked():
+ self._cache_lock.release()
+
+ self._len = None
+
+ def _iter_cached(self):
+ i = 0
+ gen = self._cache_gen
+ cache = self._cache
+ acquire = self._cache_lock.acquire
+ release = self._cache_lock.release
+ while gen:
+ if i == len(cache):
+ acquire()
+ if self._cache_complete:
+ break
+ try:
+ for j in range(10):
+ cache.append(advance_iterator(gen))
+ except StopIteration:
+ self._cache_gen = gen = None
+ self._cache_complete = True
+ break
+ release()
+ yield cache[i]
+ i += 1
+ while i < self._len:
+ yield cache[i]
+ i += 1
+
+ def __getitem__(self, item):
+ if self._cache_complete:
+ return self._cache[item]
+ elif isinstance(item, slice):
+ if item.step and item.step < 0:
+ return list(iter(self))[item]
+ else:
+ return list(itertools.islice(self,
+ item.start or 0,
+ item.stop or sys.maxsize,
+ item.step or 1))
+ elif item >= 0:
+ gen = iter(self)
+ try:
+ for i in range(item+1):
+ res = advance_iterator(gen)
+ except StopIteration:
+ raise IndexError
+ return res
+ else:
+ return list(iter(self))[item]
+
+ def __contains__(self, item):
+ if self._cache_complete:
+ return item in self._cache
+ else:
+ for i in self:
+ if i == item:
+ return True
+ elif i > item:
+ return False
+ return False
+
+ # __len__() introduces a large performance penalty.
+ def count(self):
+ """ Returns the number of recurrences in this set. It will have go
+ through the whole recurrence, if this hasn't been done before. """
+ if self._len is None:
+ for x in self:
+ pass
+ return self._len
+
+ def before(self, dt, inc=False):
+ """ Returns the last recurrence before the given datetime instance. The
+ inc keyword defines what happens if dt is an occurrence. With
+ inc=True, if dt itself is an occurrence, it will be returned. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ last = None
+ if inc:
+ for i in gen:
+ if i > dt:
+ break
+ last = i
+ else:
+ for i in gen:
+ if i >= dt:
+ break
+ last = i
+ return last
+
+ def after(self, dt, inc=False):
+ """ Returns the first recurrence after the given datetime instance. The
+ inc keyword defines what happens if dt is an occurrence. With
+ inc=True, if dt itself is an occurrence, it will be returned. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ if inc:
+ for i in gen:
+ if i >= dt:
+ return i
+ else:
+ for i in gen:
+ if i > dt:
+ return i
+ return None
+
+ def xafter(self, dt, count=None, inc=False):
+ """
+ Generator which yields up to `count` recurrences after the given
+ datetime instance, equivalent to `after`.
+
+ :param dt:
+ The datetime at which to start generating recurrences.
+
+ :param count:
+ The maximum number of recurrences to generate. If `None` (default),
+ dates are generated until the recurrence rule is exhausted.
+
+ :param inc:
+ If `dt` is an instance of the rule and `inc` is `True`, it is
+ included in the output.
+
+ :yields: Yields a sequence of `datetime` objects.
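+
+        Example (illustrative sketch):
+
+        >>> from datetime import datetime
+        >>> rule = rrule(DAILY, dtstart=datetime(2020, 1, 1))
+        >>> list(rule.xafter(datetime(2020, 1, 1), count=2, inc=True))
+        [datetime.datetime(2020, 1, 1, 0, 0), datetime.datetime(2020, 1, 2, 0, 0)]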
+ """
+
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+
+ # Select the comparison function
+ if inc:
+ comp = lambda dc, dtc: dc >= dtc
+ else:
+ comp = lambda dc, dtc: dc > dtc
+
+ # Generate dates
+ n = 0
+ for d in gen:
+ if comp(d, dt):
+ if count is not None:
+ n += 1
+ if n > count:
+ break
+
+ yield d
+
+ def between(self, after, before, inc=False, count=1):
+ """ Returns all the occurrences of the rrule between after and before.
+ The inc keyword defines what happens if after and/or before are
+ themselves occurrences. With inc=True, they will be included in the
+ list, if they are found in the recurrence set. """
+ if self._cache_complete:
+ gen = self._cache
+ else:
+ gen = self
+ started = False
+ l = []
+ if inc:
+ for i in gen:
+ if i > before:
+ break
+ elif not started:
+ if i >= after:
+ started = True
+ l.append(i)
+ else:
+ l.append(i)
+ else:
+ for i in gen:
+ if i >= before:
+ break
+ elif not started:
+ if i > after:
+ started = True
+ l.append(i)
+ else:
+ l.append(i)
+ return l
+
+
+class rrule(rrulebase):
+ """
+ That's the base of the rrule operation. It accepts all the keywords
+ defined in the RFC as its constructor parameters (except byday,
+ which was renamed to byweekday) and more. The constructor prototype is::
+
+ rrule(freq)
+
+ Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
+ or SECONDLY.
+
+ .. note::
+ Per RFC section 3.3.10, recurrence instances falling on invalid dates
+ and times are ignored rather than coerced:
+
+ Recurrence rules may generate recurrence instances with an invalid
+ date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM
+ on a day where the local time is moved forward by an hour at 1:00
+ AM). Such recurrence instances MUST be ignored and MUST NOT be
+ counted as part of the recurrence set.
+
+ This can lead to possibly surprising behavior when, for example, the
+ start date occurs at the end of the month:
+
+ >>> from dateutil.rrule import rrule, MONTHLY
+ >>> from datetime import datetime
+ >>> start_date = datetime(2014, 12, 31)
+ >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date))
+ ... # doctest: +NORMALIZE_WHITESPACE
+ [datetime.datetime(2014, 12, 31, 0, 0),
+ datetime.datetime(2015, 1, 31, 0, 0),
+ datetime.datetime(2015, 3, 31, 0, 0),
+ datetime.datetime(2015, 5, 31, 0, 0)]
+
+ Additionally, it supports the following keyword arguments:
+
+ :param dtstart:
+ The recurrence start. Besides being the base for the recurrence,
+ missing parameters in the final recurrence instances will also be
+ extracted from this date. If not given, datetime.now() will be used
+ instead.
+ :param interval:
+ The interval between each freq iteration. For example, when using
+ YEARLY, an interval of 2 means once every two years, but with HOURLY,
+ it means once every two hours. The default interval is 1.
+ :param wkst:
+ The week start day. Must be one of the MO, TU, WE constants, or an
+ integer, specifying the first day of the week. This will affect
+        recurrences based on weekly periods. The default week start is
+        retrieved from calendar.firstweekday(), and may be modified by
+ calendar.setfirstweekday().
+ :param count:
+ If given, this determines how many occurrences will be generated.
+
+ .. note::
+ As of version 2.5.0, the use of the keyword ``until`` in conjunction
+ with ``count`` is deprecated, to make sure ``dateutil`` is fully
+            compliant with `RFC-5545 Sec. 3.3.10
+            <https://tools.ietf.org/html/rfc5545#section-3.3.10>`_.
+            Therefore, ``until`` and ``count``
+ **must not** occur in the same call to ``rrule``.
+ :param until:
+ If given, this must be a datetime instance specifying the upper-bound
+ limit of the recurrence. The last recurrence in the rule is the greatest
+ datetime that is less than or equal to the value specified in the
+ ``until`` parameter.
+
+ .. note::
+ As of version 2.5.0, the use of the keyword ``until`` in conjunction
+ with ``count`` is deprecated, to make sure ``dateutil`` is fully
+            compliant with `RFC-5545 Sec. 3.3.10
+            <https://tools.ietf.org/html/rfc5545#section-3.3.10>`_.
+            Therefore, ``until`` and ``count``
+ **must not** occur in the same call to ``rrule``.
+ :param bysetpos:
+ If given, it must be either an integer, or a sequence of integers,
+ positive or negative. Each given integer will specify an occurrence
+ number, corresponding to the nth occurrence of the rule inside the
+ frequency period. For example, a bysetpos of -1 if combined with a
+ MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will
+ result in the last work day of every month.
+ :param bymonth:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the months to apply the recurrence to.
+ :param bymonthday:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the month days to apply the recurrence to.
+ :param byyearday:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the year days to apply the recurrence to.
+ :param byeaster:
+ If given, it must be either an integer, or a sequence of integers,
+ positive or negative. Each integer will define an offset from the
+ Easter Sunday. Passing the offset 0 to byeaster will yield the Easter
+ Sunday itself. This is an extension to the RFC specification.
+ :param byweekno:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the week numbers to apply the recurrence to. Week numbers
+ have the meaning described in ISO8601, that is, the first week of
+ the year is that containing at least four days of the new year.
+ :param byweekday:
+ If given, it must be either an integer (0 == MO), a sequence of
+ integers, one of the weekday constants (MO, TU, etc), or a sequence
+ of these constants. When given, these variables will define the
+ weekdays where the recurrence will be applied. It's also possible to
+ use an argument n for the weekday instances, which will mean the nth
+ occurrence of this weekday in the period. For example, with MONTHLY,
+ or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the
+ first friday of the month where the recurrence happens. Notice that in
+ the RFC documentation, this is specified as BYDAY, but was renamed to
+ avoid the ambiguity of that keyword.
+ :param byhour:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the hours to apply the recurrence to.
+ :param byminute:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the minutes to apply the recurrence to.
+ :param bysecond:
+ If given, it must be either an integer, or a sequence of integers,
+ meaning the seconds to apply the recurrence to.
+ :param cache:
+ If given, it must be a boolean value specifying to enable or disable
+ caching of results. If you will use the same rrule instance multiple
+ times, enabling caching will improve the performance considerably.
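+
+    Example (illustrative): combining ``bysetpos=-1`` with a MONTHLY
+    frequency and weekday filters yields the last work day of each month:
+
+    >>> from datetime import datetime
+    >>> list(rrule(MONTHLY, count=2, bysetpos=-1,
+    ...            byweekday=(MO, TU, WE, TH, FR),
+    ...            dtstart=datetime(2015, 1, 1)))
+    ... # doctest: +NORMALIZE_WHITESPACE
+    [datetime.datetime(2015, 1, 30, 0, 0),
+     datetime.datetime(2015, 2, 27, 0, 0)]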
+ """
+ def __init__(self, freq, dtstart=None,
+ interval=1, wkst=None, count=None, until=None, bysetpos=None,
+ bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
+ byweekno=None, byweekday=None,
+ byhour=None, byminute=None, bysecond=None,
+ cache=False):
+ super(rrule, self).__init__(cache)
+ global easter
+ if not dtstart:
+ if until and until.tzinfo:
+ dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0)
+ else:
+ dtstart = datetime.datetime.now().replace(microsecond=0)
+ elif not isinstance(dtstart, datetime.datetime):
+ dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
+ else:
+ dtstart = dtstart.replace(microsecond=0)
+ self._dtstart = dtstart
+ self._tzinfo = dtstart.tzinfo
+ self._freq = freq
+ self._interval = interval
+ self._count = count
+
+ # Cache the original byxxx rules, if they are provided, as the _byxxx
+ # attributes do not necessarily map to the inputs, and this can be
+ # a problem in generating the strings. Only store things if they've
+ # been supplied (the string retrieval will just use .get())
+ self._original_rule = {}
+
+ if until and not isinstance(until, datetime.datetime):
+ until = datetime.datetime.fromordinal(until.toordinal())
+ self._until = until
+
+ if self._dtstart and self._until:
+ if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None):
+ # According to RFC5545 Section 3.3.10:
+ # https://tools.ietf.org/html/rfc5545#section-3.3.10
+ #
+ # > If the "DTSTART" property is specified as a date with UTC
+ # > time or a date with local time and time zone reference,
+ # > then the UNTIL rule part MUST be specified as a date with
+ # > UTC time.
+ raise ValueError(
+ 'RRULE UNTIL values must be specified in UTC when DTSTART '
+ 'is timezone-aware'
+ )
+
+ if count is not None and until:
+ warn("Using both 'count' and 'until' is inconsistent with RFC 5545"
+ " and has been deprecated in dateutil. Future versions will "
+ "raise an error.", DeprecationWarning)
+
+ if wkst is None:
+ self._wkst = calendar.firstweekday()
+ elif isinstance(wkst, integer_types):
+ self._wkst = wkst
+ else:
+ self._wkst = wkst.weekday
+
+ if bysetpos is None:
+ self._bysetpos = None
+ elif isinstance(bysetpos, integer_types):
+ if bysetpos == 0 or not (-366 <= bysetpos <= 366):
+ raise ValueError("bysetpos must be between 1 and 366, "
+ "or between -366 and -1")
+ self._bysetpos = (bysetpos,)
+ else:
+ self._bysetpos = tuple(bysetpos)
+ for pos in self._bysetpos:
+ if pos == 0 or not (-366 <= pos <= 366):
+ raise ValueError("bysetpos must be between 1 and 366, "
+ "or between -366 and -1")
+
+ if self._bysetpos:
+ self._original_rule['bysetpos'] = self._bysetpos
+
+ if (byweekno is None and byyearday is None and bymonthday is None and
+ byweekday is None and byeaster is None):
+ if freq == YEARLY:
+ if bymonth is None:
+ bymonth = dtstart.month
+ self._original_rule['bymonth'] = None
+ bymonthday = dtstart.day
+ self._original_rule['bymonthday'] = None
+ elif freq == MONTHLY:
+ bymonthday = dtstart.day
+ self._original_rule['bymonthday'] = None
+ elif freq == WEEKLY:
+ byweekday = dtstart.weekday()
+ self._original_rule['byweekday'] = None
+
+ # bymonth
+ if bymonth is None:
+ self._bymonth = None
+ else:
+ if isinstance(bymonth, integer_types):
+ bymonth = (bymonth,)
+
+ self._bymonth = tuple(sorted(set(bymonth)))
+
+ if 'bymonth' not in self._original_rule:
+ self._original_rule['bymonth'] = self._bymonth
+
+ # byyearday
+ if byyearday is None:
+ self._byyearday = None
+ else:
+ if isinstance(byyearday, integer_types):
+ byyearday = (byyearday,)
+
+ self._byyearday = tuple(sorted(set(byyearday)))
+ self._original_rule['byyearday'] = self._byyearday
+
+ # byeaster
+ if byeaster is not None:
+ if not easter:
+ from dateutil import easter
+ if isinstance(byeaster, integer_types):
+ self._byeaster = (byeaster,)
+ else:
+ self._byeaster = tuple(sorted(byeaster))
+
+ self._original_rule['byeaster'] = self._byeaster
+ else:
+ self._byeaster = None
+
+ # bymonthday
+ if bymonthday is None:
+ self._bymonthday = ()
+ self._bynmonthday = ()
+ else:
+ if isinstance(bymonthday, integer_types):
+ bymonthday = (bymonthday,)
+
+ bymonthday = set(bymonthday) # Ensure it's unique
+
+ self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0))
+ self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0))
+
+ # Storing positive numbers first, then negative numbers
+ if 'bymonthday' not in self._original_rule:
+ self._original_rule['bymonthday'] = tuple(
+ itertools.chain(self._bymonthday, self._bynmonthday))
+
+ # byweekno
+ if byweekno is None:
+ self._byweekno = None
+ else:
+ if isinstance(byweekno, integer_types):
+ byweekno = (byweekno,)
+
+ self._byweekno = tuple(sorted(set(byweekno)))
+
+ self._original_rule['byweekno'] = self._byweekno
+
+ # byweekday / bynweekday
+ if byweekday is None:
+ self._byweekday = None
+ self._bynweekday = None
+ else:
+ # If it's one of the valid non-sequence types, convert to a
+ # single-element sequence before the iterator that builds the
+ # byweekday set.
+ if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"):
+ byweekday = (byweekday,)
+
+ self._byweekday = set()
+ self._bynweekday = set()
+ for wday in byweekday:
+ if isinstance(wday, integer_types):
+ self._byweekday.add(wday)
+ elif not wday.n or freq > MONTHLY:
+ self._byweekday.add(wday.weekday)
+ else:
+ self._bynweekday.add((wday.weekday, wday.n))
+
+ if not self._byweekday:
+ self._byweekday = None
+ elif not self._bynweekday:
+ self._bynweekday = None
+
+ if self._byweekday is not None:
+ self._byweekday = tuple(sorted(self._byweekday))
+ orig_byweekday = [weekday(x) for x in self._byweekday]
+ else:
+ orig_byweekday = ()
+
+ if self._bynweekday is not None:
+ self._bynweekday = tuple(sorted(self._bynweekday))
+ orig_bynweekday = [weekday(*x) for x in self._bynweekday]
+ else:
+ orig_bynweekday = ()
+
+ if 'byweekday' not in self._original_rule:
+ self._original_rule['byweekday'] = tuple(itertools.chain(
+ orig_byweekday, orig_bynweekday))
+
+ # byhour
+ if byhour is None:
+ if freq < HOURLY:
+ self._byhour = {dtstart.hour}
+ else:
+ self._byhour = None
+ else:
+ if isinstance(byhour, integer_types):
+ byhour = (byhour,)
+
+ if freq == HOURLY:
+ self._byhour = self.__construct_byset(start=dtstart.hour,
+ byxxx=byhour,
+ base=24)
+ else:
+ self._byhour = set(byhour)
+
+ self._byhour = tuple(sorted(self._byhour))
+ self._original_rule['byhour'] = self._byhour
+
+ # byminute
+ if byminute is None:
+ if freq < MINUTELY:
+ self._byminute = {dtstart.minute}
+ else:
+ self._byminute = None
+ else:
+ if isinstance(byminute, integer_types):
+ byminute = (byminute,)
+
+ if freq == MINUTELY:
+ self._byminute = self.__construct_byset(start=dtstart.minute,
+ byxxx=byminute,
+ base=60)
+ else:
+ self._byminute = set(byminute)
+
+ self._byminute = tuple(sorted(self._byminute))
+ self._original_rule['byminute'] = self._byminute
+
+ # bysecond
+ if bysecond is None:
+ if freq < SECONDLY:
+ self._bysecond = ((dtstart.second,))
+ else:
+ self._bysecond = None
+ else:
+ if isinstance(bysecond, integer_types):
+ bysecond = (bysecond,)
+
+ self._bysecond = set(bysecond)
+
+ if freq == SECONDLY:
+ self._bysecond = self.__construct_byset(start=dtstart.second,
+ byxxx=bysecond,
+ base=60)
+ else:
+ self._bysecond = set(bysecond)
+
+ self._bysecond = tuple(sorted(self._bysecond))
+ self._original_rule['bysecond'] = self._bysecond
+
+ if self._freq >= HOURLY:
+ self._timeset = None
+ else:
+ self._timeset = []
+ for hour in self._byhour:
+ for minute in self._byminute:
+ for second in self._bysecond:
+ self._timeset.append(
+ datetime.time(hour, minute, second,
+ tzinfo=self._tzinfo))
+ self._timeset.sort()
+ self._timeset = tuple(self._timeset)
+
+ def __str__(self):
+ """
+ Output a string that would generate this RRULE if passed to rrulestr.
+ This is mostly compatible with RFC5545, except for the
+ dateutil-specific extension BYEASTER.
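+
+        Example (illustrative; shows the general output shape):
+
+        >>> import datetime
+        >>> print(rrule(DAILY, count=3,
+        ...             dtstart=datetime.datetime(2020, 1, 1)))
+        DTSTART:20200101T000000
+        RRULE:FREQ=DAILY;COUNT=3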
+ """
+
+ output = []
+ h, m, s = [None] * 3
+ if self._dtstart:
+ output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S'))
+ h, m, s = self._dtstart.timetuple()[3:6]
+
+ parts = ['FREQ=' + FREQNAMES[self._freq]]
+ if self._interval != 1:
+ parts.append('INTERVAL=' + str(self._interval))
+
+ if self._wkst:
+ parts.append('WKST=' + repr(weekday(self._wkst))[0:2])
+
+ if self._count is not None:
+ parts.append('COUNT=' + str(self._count))
+
+ if self._until:
+ parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S'))
+
+ if self._original_rule.get('byweekday') is not None:
+ # The str() method on weekday objects doesn't generate
+ # RFC5545-compliant strings, so we should modify that.
+ original_rule = dict(self._original_rule)
+ wday_strings = []
+ for wday in original_rule['byweekday']:
+ if wday.n:
+ wday_strings.append('{n:+d}{wday}'.format(
+ n=wday.n,
+ wday=repr(wday)[0:2]))
+ else:
+ wday_strings.append(repr(wday))
+
+ original_rule['byweekday'] = wday_strings
+ else:
+ original_rule = self._original_rule
+
+ partfmt = '{name}={vals}'
+ for name, key in [('BYSETPOS', 'bysetpos'),
+ ('BYMONTH', 'bymonth'),
+ ('BYMONTHDAY', 'bymonthday'),
+ ('BYYEARDAY', 'byyearday'),
+ ('BYWEEKNO', 'byweekno'),
+ ('BYDAY', 'byweekday'),
+ ('BYHOUR', 'byhour'),
+ ('BYMINUTE', 'byminute'),
+ ('BYSECOND', 'bysecond'),
+ ('BYEASTER', 'byeaster')]:
+ value = original_rule.get(key)
+ if value:
+ parts.append(partfmt.format(name=name, vals=(','.join(str(v)
+ for v in value))))
+
+ output.append('RRULE:' + ';'.join(parts))
+ return '\n'.join(output)
+
+ def replace(self, **kwargs):
+ """Return new rrule with same attributes except for those attributes given new
+ values by whichever keyword arguments are specified."""
+ new_kwargs = {"interval": self._interval,
+ "count": self._count,
+ "dtstart": self._dtstart,
+ "freq": self._freq,
+ "until": self._until,
+ "wkst": self._wkst,
+ "cache": False if self._cache is None else True }
+ new_kwargs.update(self._original_rule)
+ new_kwargs.update(kwargs)
+ return rrule(**new_kwargs)
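+
+    # Example (informal): rule.replace(interval=2) returns a new rrule equal
+    # to ``rule`` except that it recurs every other period.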
+
+ def _iter(self):
+ year, month, day, hour, minute, second, weekday, yearday, _ = \
+ self._dtstart.timetuple()
+
+ # Some local variables to speed things up a bit
+ freq = self._freq
+ interval = self._interval
+ wkst = self._wkst
+ until = self._until
+ bymonth = self._bymonth
+ byweekno = self._byweekno
+ byyearday = self._byyearday
+ byweekday = self._byweekday
+ byeaster = self._byeaster
+ bymonthday = self._bymonthday
+ bynmonthday = self._bynmonthday
+ bysetpos = self._bysetpos
+ byhour = self._byhour
+ byminute = self._byminute
+ bysecond = self._bysecond
+
+ ii = _iterinfo(self)
+ ii.rebuild(year, month)
+
+ getdayset = {YEARLY: ii.ydayset,
+ MONTHLY: ii.mdayset,
+ WEEKLY: ii.wdayset,
+ DAILY: ii.ddayset,
+ HOURLY: ii.ddayset,
+ MINUTELY: ii.ddayset,
+ SECONDLY: ii.ddayset}[freq]
+
+ if freq < HOURLY:
+ timeset = self._timeset
+ else:
+ gettimeset = {HOURLY: ii.htimeset,
+ MINUTELY: ii.mtimeset,
+ SECONDLY: ii.stimeset}[freq]
+ if ((freq >= HOURLY and
+ self._byhour and hour not in self._byhour) or
+ (freq >= MINUTELY and
+ self._byminute and minute not in self._byminute) or
+ (freq >= SECONDLY and
+ self._bysecond and second not in self._bysecond)):
+ timeset = ()
+ else:
+ timeset = gettimeset(hour, minute, second)
+
+ total = 0
+ count = self._count
+ while True:
+ # Get dayset with the right frequency
+ dayset, start, end = getdayset(year, month, day)
+
+ # Do the "hard" work ;-)
+ filtered = False
+ for i in dayset[start:end]:
+ if ((bymonth and ii.mmask[i] not in bymonth) or
+ (byweekno and not ii.wnomask[i]) or
+ (byweekday and ii.wdaymask[i] not in byweekday) or
+ (ii.nwdaymask and not ii.nwdaymask[i]) or
+ (byeaster and not ii.eastermask[i]) or
+ ((bymonthday or bynmonthday) and
+ ii.mdaymask[i] not in bymonthday and
+ ii.nmdaymask[i] not in bynmonthday) or
+ (byyearday and
+ ((i < ii.yearlen and i+1 not in byyearday and
+ -ii.yearlen+i not in byyearday) or
+ (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and
+ -ii.nextyearlen+i-ii.yearlen not in byyearday)))):
+ dayset[i] = None
+ filtered = True
+
+ # Output results
+ if bysetpos and timeset:
+ poslist = []
+ for pos in bysetpos:
+ if pos < 0:
+ daypos, timepos = divmod(pos, len(timeset))
+ else:
+ daypos, timepos = divmod(pos-1, len(timeset))
+ try:
+ i = [x for x in dayset[start:end]
+ if x is not None][daypos]
+ time = timeset[timepos]
+ except IndexError:
+ pass
+ else:
+ date = datetime.date.fromordinal(ii.yearordinal+i)
+ res = datetime.datetime.combine(date, time)
+ if res not in poslist:
+ poslist.append(res)
+ poslist.sort()
+ for res in poslist:
+ if until and res > until:
+ self._len = total
+ return
+ elif res >= self._dtstart:
+ if count is not None:
+ count -= 1
+ if count < 0:
+ self._len = total
+ return
+ total += 1
+ yield res
+ else:
+ for i in dayset[start:end]:
+ if i is not None:
+ date = datetime.date.fromordinal(ii.yearordinal + i)
+ for time in timeset:
+ res = datetime.datetime.combine(date, time)
+ if until and res > until:
+ self._len = total
+ return
+ elif res >= self._dtstart:
+ if count is not None:
+ count -= 1
+ if count < 0:
+ self._len = total
+ return
+
+ total += 1
+ yield res
+
+ # Handle frequency and interval
+ fixday = False
+ if freq == YEARLY:
+ year += interval
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ ii.rebuild(year, month)
+ elif freq == MONTHLY:
+ month += interval
+ if month > 12:
+ div, mod = divmod(month, 12)
+ month = mod
+ year += div
+ if month == 0:
+ month = 12
+ year -= 1
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ ii.rebuild(year, month)
+ elif freq == WEEKLY:
+ if wkst > weekday:
+ day += -(weekday+1+(6-wkst))+self._interval*7
+ else:
+ day += -(weekday-wkst)+self._interval*7
+ weekday = wkst
+ fixday = True
+ elif freq == DAILY:
+ day += interval
+ fixday = True
+ elif freq == HOURLY:
+ if filtered:
+ # Jump to one iteration before next day
+ hour += ((23-hour)//interval)*interval
+
+ if byhour:
+ ndays, hour = self.__mod_distance(value=hour,
+ byxxx=self._byhour,
+ base=24)
+ else:
+ ndays, hour = divmod(hour+interval, 24)
+
+ if ndays:
+ day += ndays
+ fixday = True
+
+ timeset = gettimeset(hour, minute, second)
+ elif freq == MINUTELY:
+ if filtered:
+ # Jump to one iteration before next day
+ minute += ((1439-(hour*60+minute))//interval)*interval
+
+ valid = False
+ rep_rate = (24*60)
+ for j in range(rep_rate // gcd(interval, rep_rate)):
+ if byminute:
+ nhours, minute = \
+ self.__mod_distance(value=minute,
+ byxxx=self._byminute,
+ base=60)
+ else:
+ nhours, minute = divmod(minute+interval, 60)
+
+ div, hour = divmod(hour+nhours, 24)
+ if div:
+ day += div
+ fixday = True
+ filtered = False
+
+ if not byhour or hour in byhour:
+ valid = True
+ break
+
+ if not valid:
+ raise ValueError('Invalid combination of interval and ' +
+ 'byhour resulting in empty rule.')
+
+ timeset = gettimeset(hour, minute, second)
+ elif freq == SECONDLY:
+ if filtered:
+ # Jump to one iteration before next day
+ second += (((86399 - (hour * 3600 + minute * 60 + second))
+ // interval) * interval)
+
+ rep_rate = (24 * 3600)
+ valid = False
+ for j in range(0, rep_rate // gcd(interval, rep_rate)):
+ if bysecond:
+ nminutes, second = \
+ self.__mod_distance(value=second,
+ byxxx=self._bysecond,
+ base=60)
+ else:
+ nminutes, second = divmod(second+interval, 60)
+
+ div, minute = divmod(minute+nminutes, 60)
+ if div:
+ hour += div
+ div, hour = divmod(hour, 24)
+ if div:
+ day += div
+ fixday = True
+
+ if ((not byhour or hour in byhour) and
+ (not byminute or minute in byminute) and
+ (not bysecond or second in bysecond)):
+ valid = True
+ break
+
+ if not valid:
+ raise ValueError('Invalid combination of interval, ' +
+ 'byhour and byminute resulting in empty' +
+ ' rule.')
+
+ timeset = gettimeset(hour, minute, second)
+
+ if fixday and day > 28:
+ daysinmonth = calendar.monthrange(year, month)[1]
+ if day > daysinmonth:
+ while day > daysinmonth:
+ day -= daysinmonth
+ month += 1
+ if month == 13:
+ month = 1
+ year += 1
+ if year > datetime.MAXYEAR:
+ self._len = total
+ return
+ daysinmonth = calendar.monthrange(year, month)[1]
+ ii.rebuild(year, month)
+
+ def __construct_byset(self, start, byxxx, base):
+ """
+ If a `BYXXX` sequence is passed to the constructor at the same level as
+ `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some
+ specifications which cannot be reached given some starting conditions.
+
+ This occurs whenever the interval is not coprime with the base of a
+ given unit and the difference between the starting position and the
+ ending position is not coprime with the greatest common denominator
+ between the interval and the base. For example, with a FREQ of hourly
+ starting at 17:00 and an interval of 4, the only valid values for
+ BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not
+ coprime.
+
+ :param start:
+ Specifies the starting position.
+ :param byxxx:
+ An iterable containing the list of allowed values.
+ :param base:
+ The largest allowable value for the specified frequency (e.g.
+ 24 hours, 60 minutes).
+
+        This does not preserve the type of the iterable, returning a set,
+        since the values should be unique and the order is irrelevant; this
+        will also speed up later lookups.
+
+ In the event of an empty set, raises a :exception:`ValueError`, as this
+ results in an empty rrule.
+ """
+
+ cset = set()
+
+ # Support a single byxxx value.
+ if isinstance(byxxx, integer_types):
+ byxxx = (byxxx, )
+
+ for num in byxxx:
+ i_gcd = gcd(self._interval, base)
+ # Use divmod rather than % because we need to wrap negative nums.
+ if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0:
+ cset.add(num)
+
+ if len(cset) == 0:
+ raise ValueError("Invalid rrule byxxx generates an empty set.")
+
+ return cset
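+
+    # Informal check of the docstring example above: with interval=4 and
+    # base=24, gcd(4, 24) == 4, so from start=17 only hours h with
+    # (h - 17) % 4 == 0 survive: {1, 5, 9, 13, 17, 21}.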
+
+ def __mod_distance(self, value, byxxx, base):
+ """
+ Calculates the next value in a sequence where the `FREQ` parameter is
+ specified along with a `BYXXX` parameter at the same "level"
+ (e.g. `HOURLY` specified with `BYHOUR`).
+
+ :param value:
+ The old value of the component.
+ :param byxxx:
+ The `BYXXX` set, which should have been generated by
+ `rrule._construct_byset`, or something else which checks that a
+ valid rule is present.
+ :param base:
+ The largest allowable value for the specified frequency (e.g.
+ 24 hours, 60 minutes).
+
+ If a valid value is not found after `base` iterations (the maximum
+ number before the sequence would start to repeat), this raises a
+ :exception:`ValueError`, as no valid values were found.
+
+ This returns a tuple of `divmod(n*interval, base)`, where `n` is the
+ smallest number of `interval` repetitions until the next specified
+ value in `byxxx` is found.
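+
+        Example (informal): with interval=2, base=60, value=56 and
+        byxxx={0, 30}, the values visited are 58 and then 0 (wrapping once),
+        so this returns (1, 0).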
+ """
+ accumulator = 0
+ for ii in range(1, base + 1):
+ # Using divmod() over % to account for negative intervals
+ div, value = divmod(value + self._interval, base)
+ accumulator += div
+ if value in byxxx:
+ return (accumulator, value)
+
+
+class _iterinfo(object):
+ __slots__ = ["rrule", "lastyear", "lastmonth",
+ "yearlen", "nextyearlen", "yearordinal", "yearweekday",
+ "mmask", "mrange", "mdaymask", "nmdaymask",
+ "wdaymask", "wnomask", "nwdaymask", "eastermask"]
+
+ def __init__(self, rrule):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+ self.rrule = rrule
+
+ def rebuild(self, year, month):
+ # Every mask is 7 days longer to handle cross-year weekly periods.
+ rr = self.rrule
+ if year != self.lastyear:
+ self.yearlen = 365 + calendar.isleap(year)
+ self.nextyearlen = 365 + calendar.isleap(year + 1)
+ firstyday = datetime.date(year, 1, 1)
+ self.yearordinal = firstyday.toordinal()
+ self.yearweekday = firstyday.weekday()
+
+ wday = datetime.date(year, 1, 1).weekday()
+ if self.yearlen == 365:
+ self.mmask = M365MASK
+ self.mdaymask = MDAY365MASK
+ self.nmdaymask = NMDAY365MASK
+ self.wdaymask = WDAYMASK[wday:]
+ self.mrange = M365RANGE
+ else:
+ self.mmask = M366MASK
+ self.mdaymask = MDAY366MASK
+ self.nmdaymask = NMDAY366MASK
+ self.wdaymask = WDAYMASK[wday:]
+ self.mrange = M366RANGE
+
+ if not rr._byweekno:
+ self.wnomask = None
+ else:
+ self.wnomask = [0]*(self.yearlen+7)
+ # no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
+ no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7
+ if no1wkst >= 4:
+ no1wkst = 0
+ # Number of days in the year, plus the days we got
+ # from last year.
+ wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7
+ else:
+ # Number of days in the year, minus the days we
+ # left in last year.
+ wyearlen = self.yearlen-no1wkst
+ div, mod = divmod(wyearlen, 7)
+ numweeks = div+mod//4
+ for n in rr._byweekno:
+ if n < 0:
+ n += numweeks+1
+ if not (0 < n <= numweeks):
+ continue
+ if n > 1:
+ i = no1wkst+(n-1)*7
+ if no1wkst != firstwkst:
+ i -= 7-firstwkst
+ else:
+ i = no1wkst
+ for j in range(7):
+ self.wnomask[i] = 1
+ i += 1
+ if self.wdaymask[i] == rr._wkst:
+ break
+ if 1 in rr._byweekno:
+ # Check week number 1 of next year as well
+ # TODO: Check -numweeks for next year.
+ i = no1wkst+numweeks*7
+ if no1wkst != firstwkst:
+ i -= 7-firstwkst
+ if i < self.yearlen:
+ # If week starts in next year, we
+ # don't care about it.
+ for j in range(7):
+ self.wnomask[i] = 1
+ i += 1
+ if self.wdaymask[i] == rr._wkst:
+ break
+ if no1wkst:
+ # Check last week number of last year as
+ # well. If no1wkst is 0, either the year
+ # started on week start, or week number 1
+ # got days from last year, so there are no
+ # days from last year's last week number in
+ # this year.
+ if -1 not in rr._byweekno:
+ lyearweekday = datetime.date(year-1, 1, 1).weekday()
+ lno1wkst = (7-lyearweekday+rr._wkst) % 7
+ lyearlen = 365+calendar.isleap(year-1)
+ if lno1wkst >= 4:
+ lno1wkst = 0
+ lnumweeks = 52+(lyearlen +
+ (lyearweekday-rr._wkst) % 7) % 7//4
+ else:
+ lnumweeks = 52+(self.yearlen-no1wkst) % 7//4
+ else:
+ lnumweeks = -1
+ if lnumweeks in rr._byweekno:
+ for i in range(no1wkst):
+ self.wnomask[i] = 1
+
+ if (rr._bynweekday and (month != self.lastmonth or
+ year != self.lastyear)):
+ ranges = []
+ if rr._freq == YEARLY:
+ if rr._bymonth:
+ for month in rr._bymonth:
+ ranges.append(self.mrange[month-1:month+1])
+ else:
+ ranges = [(0, self.yearlen)]
+ elif rr._freq == MONTHLY:
+ ranges = [self.mrange[month-1:month+1]]
+ if ranges:
+ # Weekly frequency won't get here, so we may not
+ # care about cross-year weekly periods.
+ self.nwdaymask = [0]*self.yearlen
+ for first, last in ranges:
+ last -= 1
+ for wday, n in rr._bynweekday:
+ if n < 0:
+ i = last+(n+1)*7
+ i -= (self.wdaymask[i]-wday) % 7
+ else:
+ i = first+(n-1)*7
+ i += (7-self.wdaymask[i]+wday) % 7
+ if first <= i <= last:
+ self.nwdaymask[i] = 1
+
+ if rr._byeaster:
+ self.eastermask = [0]*(self.yearlen+7)
+ eyday = easter.easter(year).toordinal()-self.yearordinal
+ for offset in rr._byeaster:
+ self.eastermask[eyday+offset] = 1
+
+ self.lastyear = year
+ self.lastmonth = month
+
+ def ydayset(self, year, month, day):
+ return list(range(self.yearlen)), 0, self.yearlen
+
+ def mdayset(self, year, month, day):
+ dset = [None]*self.yearlen
+ start, end = self.mrange[month-1:month+1]
+ for i in range(start, end):
+ dset[i] = i
+ return dset, start, end
+
+ def wdayset(self, year, month, day):
+ # We need to handle cross-year weeks here.
+ dset = [None]*(self.yearlen+7)
+ i = datetime.date(year, month, day).toordinal()-self.yearordinal
+ start = i
+ for j in range(7):
+ dset[i] = i
+ i += 1
+ # if (not (0 <= i < self.yearlen) or
+ # self.wdaymask[i] == self.rrule._wkst):
+ # This will cross the year boundary, if necessary.
+ if self.wdaymask[i] == self.rrule._wkst:
+ break
+ return dset, start, i
+
+ def ddayset(self, year, month, day):
+ dset = [None] * self.yearlen
+ i = datetime.date(year, month, day).toordinal() - self.yearordinal
+ dset[i] = i
+ return dset, i, i + 1
+
+ def htimeset(self, hour, minute, second):
+ tset = []
+ rr = self.rrule
+ for minute in rr._byminute:
+ for second in rr._bysecond:
+ tset.append(datetime.time(hour, minute, second,
+ tzinfo=rr._tzinfo))
+ tset.sort()
+ return tset
+
+ def mtimeset(self, hour, minute, second):
+ tset = []
+ rr = self.rrule
+ for second in rr._bysecond:
+ tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
+ tset.sort()
+ return tset
+
+ def stimeset(self, hour, minute, second):
+ return (datetime.time(hour, minute, second,
+ tzinfo=self.rrule._tzinfo),)
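+
+    # Editor's note: the three builders above produce the candidate time set
+    # for one iteration slot: htimeset expands an hour over BYMINUTE x
+    # BYSECOND (HOURLY frequency), mtimeset a minute over BYSECOND
+    # (MINUTELY), and stimeset pins a single time (SECONDLY).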
+
+
+class rruleset(rrulebase):
+ """ The rruleset type allows more complex recurrence setups, mixing
+ multiple rules, dates, exclusion rules, and exclusion dates. The type
+ constructor takes the following keyword arguments:
+
+ :param cache: If True, caching of results will be enabled, improving
+ performance of multiple queries considerably. """
+
+ class _genitem(object):
+ def __init__(self, genlist, gen):
+ try:
+ self.dt = advance_iterator(gen)
+ genlist.append(self)
+ except StopIteration:
+ pass
+ self.genlist = genlist
+ self.gen = gen
+
+ def __next__(self):
+ try:
+ self.dt = advance_iterator(self.gen)
+ except StopIteration:
+ if self.genlist[0] is self:
+ heapq.heappop(self.genlist)
+ else:
+ self.genlist.remove(self)
+ heapq.heapify(self.genlist)
+
+ next = __next__
+
+ def __lt__(self, other):
+ return self.dt < other.dt
+
+ def __gt__(self, other):
+ return self.dt > other.dt
+
+ def __eq__(self, other):
+ return self.dt == other.dt
+
+ def __ne__(self, other):
+ return self.dt != other.dt
+
+ def __init__(self, cache=False):
+ super(rruleset, self).__init__(cache)
+ self._rrule = []
+ self._rdate = []
+ self._exrule = []
+ self._exdate = []
+
+ @_invalidates_cache
+ def rrule(self, rrule):
+ """ Include the given :py:class:`rrule` instance in the recurrence set
+ generation. """
+ self._rrule.append(rrule)
+
+ @_invalidates_cache
+ def rdate(self, rdate):
+ """ Include the given :py:class:`datetime` instance in the recurrence
+ set generation. """
+ self._rdate.append(rdate)
+
+ @_invalidates_cache
+ def exrule(self, exrule):
+ """ Include the given rrule instance in the recurrence set exclusion
+ list. Dates which are part of the given recurrence rules will not
+ be generated, even if some inclusive rrule or rdate matches them.
+ """
+ self._exrule.append(exrule)
+
+ @_invalidates_cache
+ def exdate(self, exdate):
+ """ Include the given datetime instance in the recurrence set
+ exclusion list. Dates included that way will not be generated,
+ even if some inclusive rrule or rdate matches them. """
+ self._exdate.append(exdate)
+
+ def _iter(self):
+ rlist = []
+ self._rdate.sort()
+ self._genitem(rlist, iter(self._rdate))
+ for gen in [iter(x) for x in self._rrule]:
+ self._genitem(rlist, gen)
+ exlist = []
+ self._exdate.sort()
+ self._genitem(exlist, iter(self._exdate))
+ for gen in [iter(x) for x in self._exrule]:
+ self._genitem(exlist, gen)
+ lastdt = None
+ total = 0
+ heapq.heapify(rlist)
+ heapq.heapify(exlist)
+ while rlist:
+ ritem = rlist[0]
+ if not lastdt or lastdt != ritem.dt:
+ while exlist and exlist[0] < ritem:
+ exitem = exlist[0]
+ advance_iterator(exitem)
+ if exlist and exlist[0] is exitem:
+ heapq.heapreplace(exlist, exitem)
+ if not exlist or ritem != exlist[0]:
+ total += 1
+ yield ritem.dt
+ lastdt = ritem.dt
+ advance_iterator(ritem)
+ if rlist and rlist[0] is ritem:
+ heapq.heapreplace(rlist, ritem)
+ self._len = total
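+
+# Usage sketch (editor's addition, not upstream documentation): an rruleset
+# merges its rules and dates with the heap logic above, dropping excluded
+# datetimes.
+#
+#   >>> from datetime import datetime
+#   >>> from dateutil.rrule import rrule, rruleset, DAILY
+#   >>> rset = rruleset()
+#   >>> rset.rrule(rrule(DAILY, count=3, dtstart=datetime(2000, 1, 1)))
+#   >>> rset.exdate(datetime(2000, 1, 2))
+#   >>> list(rset)
+#   [datetime.datetime(2000, 1, 1, 0, 0), datetime.datetime(2000, 1, 3, 0, 0)]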
+
+
+class _rrulestr(object):
+ """ Parses a string representation of a recurrence rule or set of
+ recurrence rules.
+
+ :param s:
+ Required, a string defining one or more recurrence rules.
+
+ :param dtstart:
+ If given, used as the default recurrence start if not specified in the
+ rule string.
+
+ :param cache:
+ If set ``True`` caching of results will be enabled, improving
+ performance of multiple queries considerably.
+
+ :param unfold:
+ If set ``True`` indicates that a rule string is split over more
+ than one line and should be joined before processing.
+
+ :param forceset:
+ If set ``True`` forces a :class:`dateutil.rrule.rruleset` to
+ be returned.
+
+ :param compatible:
+ If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``.
+
+ :param ignoretz:
+ If set ``True``, time zones in parsed strings are ignored and a naive
+ :class:`datetime.datetime` object is returned.
+
+ :param tzids:
+ If given, a callable or mapping used to retrieve a
+ :class:`datetime.tzinfo` from a string representation.
+ Defaults to :func:`dateutil.tz.gettz`.
+
+ :param tzinfos:
+ Additional time zone names / aliases which may be present in a string
+ representation. See :func:`dateutil.parser.parse` for more
+ information.
+
+ :return:
+ Returns a :class:`dateutil.rrule.rruleset` or
+ :class:`dateutil.rrule.rrule`
+ """
+
+ _freq_map = {"YEARLY": YEARLY,
+ "MONTHLY": MONTHLY,
+ "WEEKLY": WEEKLY,
+ "DAILY": DAILY,
+ "HOURLY": HOURLY,
+ "MINUTELY": MINUTELY,
+ "SECONDLY": SECONDLY}
+
+ _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3,
+ "FR": 4, "SA": 5, "SU": 6}
+
+ def _handle_int(self, rrkwargs, name, value, **kwargs):
+ rrkwargs[name.lower()] = int(value)
+
+ def _handle_int_list(self, rrkwargs, name, value, **kwargs):
+ rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
+
+ _handle_INTERVAL = _handle_int
+ _handle_COUNT = _handle_int
+ _handle_BYSETPOS = _handle_int_list
+ _handle_BYMONTH = _handle_int_list
+ _handle_BYMONTHDAY = _handle_int_list
+ _handle_BYYEARDAY = _handle_int_list
+ _handle_BYEASTER = _handle_int_list
+ _handle_BYWEEKNO = _handle_int_list
+ _handle_BYHOUR = _handle_int_list
+ _handle_BYMINUTE = _handle_int_list
+ _handle_BYSECOND = _handle_int_list
+
+ def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
+ rrkwargs["freq"] = self._freq_map[value]
+
+ def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
+ global parser
+ if not parser:
+ from dateutil import parser
+ try:
+ rrkwargs["until"] = parser.parse(value,
+ ignoretz=kwargs.get("ignoretz"),
+ tzinfos=kwargs.get("tzinfos"))
+ except ValueError:
+ raise ValueError("invalid until date")
+
+ def _handle_WKST(self, rrkwargs, name, value, **kwargs):
+ rrkwargs["wkst"] = self._weekday_map[value]
+
+ def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
+ """
+ Two ways to specify this: +1MO or MO(+1)
+ """
+ l = []
+ for wday in value.split(','):
+ if '(' in wday:
+ # If it's of the form TH(+1), etc.
+ splt = wday.split('(')
+ w = splt[0]
+ n = int(splt[1][:-1])
+ elif len(wday):
+ # If it's of the form +1MO
+ for i in range(len(wday)):
+ if wday[i] not in '+-0123456789':
+ break
+ n = wday[:i] or None
+ w = wday[i:]
+ if n:
+ n = int(n)
+ else:
+ raise ValueError("Invalid (empty) BYDAY specification.")
+
+ l.append(weekdays[self._weekday_map[w]](n))
+ rrkwargs["byweekday"] = l
+
+ _handle_BYDAY = _handle_BYWEEKDAY
+
+ def _parse_rfc_rrule(self, line,
+ dtstart=None,
+ cache=False,
+ ignoretz=False,
+ tzinfos=None):
+ if line.find(':') != -1:
+ name, value = line.split(':')
+ if name != "RRULE":
+ raise ValueError("unknown parameter name")
+ else:
+ value = line
+ rrkwargs = {}
+ for pair in value.split(';'):
+ name, value = pair.split('=')
+ name = name.upper()
+ value = value.upper()
+ try:
+ getattr(self, "_handle_"+name)(rrkwargs, name, value,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ except AttributeError:
+ raise ValueError("unknown parameter '%s'" % name)
+ except (KeyError, ValueError):
+ raise ValueError("invalid '%s': %s" % (name, value))
+ return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
+
+ def _parse_date_value(self, date_value, parms, rule_tzids,
+ ignoretz, tzids, tzinfos):
+ global parser
+ if not parser:
+ from dateutil import parser
+
+ datevals = []
+ value_found = False
+ TZID = None
+
+ for parm in parms:
+ if parm.startswith("TZID="):
+ try:
+ tzkey = rule_tzids[parm.split('TZID=')[-1]]
+ except KeyError:
+ continue
+ if tzids is None:
+ from . import tz
+ tzlookup = tz.gettz
+ elif callable(tzids):
+ tzlookup = tzids
+ else:
+ tzlookup = getattr(tzids, 'get', None)
+ if tzlookup is None:
+ msg = ('tzids must be a callable, mapping, or None, '
+ 'not %s' % tzids)
+ raise ValueError(msg)
+
+ TZID = tzlookup(tzkey)
+ continue
+
+            # RFC 5545 3.8.2.4: The VALUE parameter is optional, but may be
+            # found only once.
+ if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}:
+ raise ValueError("unsupported parm: " + parm)
+ else:
+ if value_found:
+ msg = ("Duplicate value parameter found in: " + parm)
+ raise ValueError(msg)
+ value_found = True
+
+ for datestr in date_value.split(','):
+ date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos)
+ if TZID is not None:
+ if date.tzinfo is None:
+ date = date.replace(tzinfo=TZID)
+ else:
+                    raise ValueError('DTSTART/EXDATE specifies multiple timezones')
+ datevals.append(date)
+
+ return datevals
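+
+    # Editor's illustration (hypothetical input): for the content line
+    # "DTSTART;TZID=America/New_York:19970902T090000", parms would be
+    # ['TZID=America/New_York'] and the parsed datetime gets that zone
+    # attached via the tzids lookup (dateutil.tz.gettz by default).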
+
+ def _parse_rfc(self, s,
+ dtstart=None,
+ cache=False,
+ unfold=False,
+ forceset=False,
+ compatible=False,
+ ignoretz=False,
+ tzids=None,
+ tzinfos=None):
+ global parser
+ if compatible:
+ forceset = True
+ unfold = True
+
+ TZID_NAMES = dict(map(
+ lambda x: (x.upper(), x),
+        re.findall('TZID=(?P<name>[^:]+):', s)
+ ))
+ s = s.upper()
+ if not s.strip():
+ raise ValueError("empty string")
+ if unfold:
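+            # RFC 5545 section 3.1 line folding: a line break followed by one
+            # space marks a continuation, e.g. "RRULE:FREQ=DA\n ILY" unfolds
+            # to "RRULE:FREQ=DAILY" (editor's illustration).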
+ lines = s.splitlines()
+ i = 0
+ while i < len(lines):
+ line = lines[i].rstrip()
+ if not line:
+ del lines[i]
+ elif i > 0 and line[0] == " ":
+ lines[i-1] += line[1:]
+ del lines[i]
+ else:
+ i += 1
+ else:
+ lines = s.split()
+ if (not forceset and len(lines) == 1 and (s.find(':') == -1 or
+ s.startswith('RRULE:'))):
+ return self._parse_rfc_rrule(lines[0], cache=cache,
+ dtstart=dtstart, ignoretz=ignoretz,
+ tzinfos=tzinfos)
+ else:
+ rrulevals = []
+ rdatevals = []
+ exrulevals = []
+ exdatevals = []
+ for line in lines:
+ if not line:
+ continue
+ if line.find(':') == -1:
+ name = "RRULE"
+ value = line
+ else:
+ name, value = line.split(':', 1)
+ parms = name.split(';')
+ if not parms:
+ raise ValueError("empty property name")
+ name = parms[0]
+ parms = parms[1:]
+ if name == "RRULE":
+ for parm in parms:
+ raise ValueError("unsupported RRULE parm: "+parm)
+ rrulevals.append(value)
+ elif name == "RDATE":
+ for parm in parms:
+ if parm != "VALUE=DATE-TIME":
+ raise ValueError("unsupported RDATE parm: "+parm)
+ rdatevals.append(value)
+ elif name == "EXRULE":
+ for parm in parms:
+ raise ValueError("unsupported EXRULE parm: "+parm)
+ exrulevals.append(value)
+ elif name == "EXDATE":
+ exdatevals.extend(
+ self._parse_date_value(value, parms,
+ TZID_NAMES, ignoretz,
+ tzids, tzinfos)
+ )
+ elif name == "DTSTART":
+ dtvals = self._parse_date_value(value, parms, TZID_NAMES,
+ ignoretz, tzids, tzinfos)
+ if len(dtvals) != 1:
+ raise ValueError("Multiple DTSTART values specified:" +
+ value)
+ dtstart = dtvals[0]
+ else:
+ raise ValueError("unsupported property: "+name)
+ if (forceset or len(rrulevals) > 1 or rdatevals
+ or exrulevals or exdatevals):
+ if not parser and (rdatevals or exdatevals):
+ from dateutil import parser
+ rset = rruleset(cache=cache)
+ for value in rrulevals:
+ rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in rdatevals:
+ for datestr in value.split(','):
+ rset.rdate(parser.parse(datestr,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in exrulevals:
+ rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos))
+ for value in exdatevals:
+ rset.exdate(value)
+ if compatible and dtstart:
+ rset.rdate(dtstart)
+ return rset
+ else:
+ return self._parse_rfc_rrule(rrulevals[0],
+ dtstart=dtstart,
+ cache=cache,
+ ignoretz=ignoretz,
+ tzinfos=tzinfos)
+
+ def __call__(self, s, **kwargs):
+ return self._parse_rfc(s, **kwargs)
+
+
+rrulestr = _rrulestr()
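+
+# Usage sketch (editor's addition): a bare RRULE content line yields an
+# rrule, while multi-line input with DTSTART/EXDATE (or forceset=True)
+# yields an rruleset.
+#
+#   >>> from datetime import datetime
+#   >>> from dateutil.rrule import rrulestr
+#   >>> rule = rrulestr("FREQ=DAILY;COUNT=2", dtstart=datetime(2000, 1, 1))
+#   >>> list(rule)
+#   [datetime.datetime(2000, 1, 1, 0, 0), datetime.datetime(2000, 1, 2, 0, 0)]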
+
+# vim:ts=4:sw=4:et
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/tz/__init__.py b/tapdown/lib/python3.11/site-packages/dateutil/tz/__init__.py
new file mode 100644
index 0000000..af1352c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/tz/__init__.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+from .tz import *
+from .tz import __doc__
+
+__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
+ "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz",
+ "enfold", "datetime_ambiguous", "datetime_exists",
+ "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"]
+
+
+class DeprecatedTzFormatWarning(Warning):
+ """Warning raised when time zones are parsed from deprecated formats."""
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/tz/_common.py b/tapdown/lib/python3.11/site-packages/dateutil/tz/_common.py
new file mode 100644
index 0000000..e6ac118
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/tz/_common.py
@@ -0,0 +1,419 @@
+from six import PY2
+
+from functools import wraps
+
+from datetime import datetime, timedelta, tzinfo
+
+
+ZERO = timedelta(0)
+
+__all__ = ['tzname_in_python2', 'enfold']
+
+
+def tzname_in_python2(namefunc):
+ """Change unicode output into bytestrings in Python 2
+
+ tzname() API changed in Python 3. It used to return bytes, but was changed
+ to unicode strings
+ """
+ if PY2:
+ @wraps(namefunc)
+ def adjust_encoding(*args, **kwargs):
+ name = namefunc(*args, **kwargs)
+ if name is not None:
+ name = name.encode()
+
+ return name
+
+ return adjust_encoding
+ else:
+ return namefunc
+
+
+# The following is adapted from Alexander Belopolsky's tz library
+# https://github.com/abalkin/tz
+if hasattr(datetime, 'fold'):
+    # Python 3.6+ (PEP 495): datetime.datetime natively supports ``fold``
+ def enfold(dt, fold=1):
+ """
+ Provides a unified interface for assigning the ``fold`` attribute to
+ datetimes both before and after the implementation of PEP-495.
+
+ :param fold:
+ The value for the ``fold`` attribute in the returned datetime. This
+ should be either 0 or 1.
+
+ :return:
+ Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+ ``fold`` for all versions of Python. In versions prior to
+ Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+ subclass of :py:class:`datetime.datetime` with the ``fold``
+ attribute added, if ``fold`` is 1.
+
+ .. versionadded:: 2.6.0
+ """
+ return dt.replace(fold=fold)
+
+else:
+ class _DatetimeWithFold(datetime):
+ """
+ This is a class designed to provide a PEP 495-compliant interface for
+ Python versions before 3.6. It is used only for dates in a fold, so
+ the ``fold`` attribute is fixed at ``1``.
+
+ .. versionadded:: 2.6.0
+ """
+ __slots__ = ()
+
+ def replace(self, *args, **kwargs):
+ """
+ Return a datetime with the same attributes, except for those
+ attributes given new values by whichever keyword arguments are
+ specified. Note that tzinfo=None can be specified to create a naive
+ datetime from an aware datetime with no conversion of date and time
+ data.
+
+ This is reimplemented in ``_DatetimeWithFold`` because pypy3 will
+ return a ``datetime.datetime`` even if ``fold`` is unchanged.
+ """
+ argnames = (
+ 'year', 'month', 'day', 'hour', 'minute', 'second',
+ 'microsecond', 'tzinfo'
+ )
+
+ for arg, argname in zip(args, argnames):
+ if argname in kwargs:
+ raise TypeError('Duplicate argument: {}'.format(argname))
+
+ kwargs[argname] = arg
+
+ for argname in argnames:
+ if argname not in kwargs:
+ kwargs[argname] = getattr(self, argname)
+
+ dt_class = self.__class__ if kwargs.get('fold', 1) else datetime
+
+ return dt_class(**kwargs)
+
+ @property
+ def fold(self):
+ return 1
+
+ def enfold(dt, fold=1):
+ """
+ Provides a unified interface for assigning the ``fold`` attribute to
+ datetimes both before and after the implementation of PEP-495.
+
+ :param fold:
+ The value for the ``fold`` attribute in the returned datetime. This
+ should be either 0 or 1.
+
+ :return:
+ Returns an object for which ``getattr(dt, 'fold', 0)`` returns
+ ``fold`` for all versions of Python. In versions prior to
+ Python 3.6, this is a ``_DatetimeWithFold`` object, which is a
+ subclass of :py:class:`datetime.datetime` with the ``fold``
+ attribute added, if ``fold`` is 1.
+
+ .. versionadded:: 2.6.0
+ """
+ if getattr(dt, 'fold', 0) == fold:
+ return dt
+
+ args = dt.timetuple()[:6]
+ args += (dt.microsecond, dt.tzinfo)
+
+ if fold:
+ return _DatetimeWithFold(*args)
+ else:
+ return datetime(*args)
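+
+# Editor's sketch: whichever branch above defined enfold(), the fold value it
+# sets reads back via getattr, which is how downstream code consumes it.
+#
+#   >>> from datetime import datetime
+#   >>> from dateutil.tz import enfold
+#   >>> getattr(enfold(datetime(2020, 11, 1, 1, 30), fold=1), 'fold', 0)
+#   1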
+
+
+def _validate_fromutc_inputs(f):
+ """
+ The CPython version of ``fromutc`` checks that the input is a ``datetime``
+ object and that ``self`` is attached as its ``tzinfo``.
+ """
+ @wraps(f)
+ def fromutc(self, dt):
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ return f(self, dt)
+
+ return fromutc
+
+
+class _tzinfo(tzinfo):
+ """
+ Base class for all ``dateutil`` ``tzinfo`` objects.
+ """
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+
+ dt = dt.replace(tzinfo=self)
+
+ wall_0 = enfold(dt, fold=0)
+ wall_1 = enfold(dt, fold=1)
+
+ same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+ same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None)
+
+ return same_dt and not same_offset
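+
+    # Editor's illustration (assumes IANA zone data via dateutil.tz.gettz):
+    # 2020-11-01 01:30 occurs twice in America/New_York, so
+    # gettz('America/New_York').is_ambiguous(datetime(2020, 11, 1, 1, 30))
+    # returns True; an ordinary springtime datetime returns False.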
+
+ def _fold_status(self, dt_utc, dt_wall):
+ """
+ Determine the fold status of a "wall" datetime, given a representation
+ of the same datetime as a (naive) UTC datetime. This is calculated based
+ on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all
+ datetimes, and that this offset is the actual number of hours separating
+ ``dt_utc`` and ``dt_wall``.
+
+ :param dt_utc:
+ Representation of the datetime as UTC
+
+ :param dt_wall:
+ Representation of the datetime as "wall time". This parameter must
+ either have a `fold` attribute or have a fold-naive
+ :class:`datetime.tzinfo` attached, otherwise the calculation may
+ fail.
+ """
+ if self.is_ambiguous(dt_wall):
+ delta_wall = dt_wall - dt_utc
+ _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst()))
+ else:
+ _fold = 0
+
+ return _fold
+
+ def _fold(self, dt):
+ return getattr(dt, 'fold', 0)
+
+ def _fromutc(self, dt):
+ """
+ Given a timezone-aware datetime in a given timezone, calculates a
+ timezone-aware datetime in a new timezone.
+
+ Since this is the one time that we *know* we have an unambiguous
+ datetime object, we take this opportunity to determine whether the
+ datetime is ambiguous and in a "fold" state (e.g. if it's the first
+ occurrence, chronologically, of the ambiguous datetime).
+
+ :param dt:
+ A timezone-aware :class:`datetime.datetime` object.
+ """
+
+ # Re-implement the algorithm from Python's datetime.py
+ dtoff = dt.utcoffset()
+ if dtoff is None:
+ raise ValueError("fromutc() requires a non-None utcoffset() "
+ "result")
+
+ # The original datetime.py code assumes that `dst()` defaults to
+ # zero during ambiguous times. PEP 495 inverts this presumption, so
+ # for pre-PEP 495 versions of python, we need to tweak the algorithm.
+ dtdst = dt.dst()
+ if dtdst is None:
+ raise ValueError("fromutc() requires a non-None dst() result")
+ delta = dtoff - dtdst
+
+ dt += delta
+ # Set fold=1 so we can default to being in the fold for
+ # ambiguous dates.
+ dtdst = enfold(dt, fold=1).dst()
+ if dtdst is None:
+ raise ValueError("fromutc(): dt.dst gave inconsistent "
+ "results; cannot convert")
+ return dt + dtdst
+
+ @_validate_fromutc_inputs
+ def fromutc(self, dt):
+ """
+ Given a timezone-aware datetime in a given timezone, calculates a
+ timezone-aware datetime in a new timezone.
+
+ Since this is the one time that we *know* we have an unambiguous
+ datetime object, we take this opportunity to determine whether the
+ datetime is ambiguous and in a "fold" state (e.g. if it's the first
+ occurrence, chronologically, of the ambiguous datetime).
+
+ :param dt:
+ A timezone-aware :class:`datetime.datetime` object.
+ """
+ dt_wall = self._fromutc(dt)
+
+ # Calculate the fold status given the two datetimes.
+ _fold = self._fold_status(dt, dt_wall)
+
+ # Set the default fold value for ambiguous dates
+ return enfold(dt_wall, fold=_fold)
+
+
+class tzrangebase(_tzinfo):
+ """
+ This is an abstract base class for time zones represented by an annual
+ transition into and out of DST. Child classes should implement the following
+ methods:
+
+ * ``__init__(self, *args, **kwargs)``
+ * ``transitions(self, year)`` - this is expected to return a tuple of
+ datetimes representing the DST on and off transitions in standard
+ time.
+
+ A fully initialized ``tzrangebase`` subclass should also provide the
+ following attributes:
+    * ``hasdst``: Boolean indicating whether or not the zone uses DST.
+ * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects
+ representing the respective UTC offsets.
+ * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short
+ abbreviations in DST and STD, respectively.
+ * ``_hasdst``: Whether or not the zone has DST.
+
+ .. versionadded:: 2.6.0
+ """
+ def __init__(self):
+ raise NotImplementedError('tzrangebase is an abstract base class')
+
+ def utcoffset(self, dt):
+ isdst = self._isdst(dt)
+
+ if isdst is None:
+ return None
+ elif isdst:
+ return self._dst_offset
+ else:
+ return self._std_offset
+
+ def dst(self, dt):
+ isdst = self._isdst(dt)
+
+ if isdst is None:
+ return None
+ elif isdst:
+ return self._dst_base_offset
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ if self._isdst(dt):
+ return self._dst_abbr
+ else:
+ return self._std_abbr
+
+ def fromutc(self, dt):
+ """ Given a datetime in UTC, return local time """
+ if not isinstance(dt, datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ # Get transitions - if there are none, fixed offset
+ transitions = self.transitions(dt.year)
+ if transitions is None:
+ return dt + self.utcoffset(dt)
+
+ # Get the transition times in UTC
+ dston, dstoff = transitions
+
+ dston -= self._std_offset
+ dstoff -= self._std_offset
+
+ utc_transitions = (dston, dstoff)
+ dt_utc = dt.replace(tzinfo=None)
+
+ isdst = self._naive_isdst(dt_utc, utc_transitions)
+
+ if isdst:
+ dt_wall = dt + self._dst_offset
+ else:
+ dt_wall = dt + self._std_offset
+
+ _fold = int(not isdst and self.is_ambiguous(dt_wall))
+
+ return enfold(dt_wall, fold=_fold)
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ if not self.hasdst:
+ return False
+
+ start, end = self.transitions(dt.year)
+
+ dt = dt.replace(tzinfo=None)
+ return (end <= dt < end + self._dst_base_offset)
+
+ def _isdst(self, dt):
+ if not self.hasdst:
+ return False
+ elif dt is None:
+ return None
+
+ transitions = self.transitions(dt.year)
+
+ if transitions is None:
+ return False
+
+ dt = dt.replace(tzinfo=None)
+
+ isdst = self._naive_isdst(dt, transitions)
+
+ # Handle ambiguous dates
+ if not isdst and self.is_ambiguous(dt):
+ return not self._fold(dt)
+ else:
+ return isdst
+
+ def _naive_isdst(self, dt, transitions):
+ dston, dstoff = transitions
+
+ dt = dt.replace(tzinfo=None)
+
+ if dston < dstoff:
+ isdst = dston <= dt < dstoff
+ else:
+ isdst = not dstoff <= dt < dston
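+            # (dston >= dstoff: the DST interval wraps the year boundary, as
+            # in southern-hemisphere zones where DST runs roughly from
+            # October through March - editor's note.)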
+
+ return isdst
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_offset - self._std_offset
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(...)" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/tz/_factories.py b/tapdown/lib/python3.11/site-packages/dateutil/tz/_factories.py
new file mode 100644
index 0000000..f8a6589
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/tz/_factories.py
@@ -0,0 +1,80 @@
+from datetime import timedelta
+import weakref
+from collections import OrderedDict
+
+from six.moves import _thread
+
+
+class _TzSingleton(type):
+ def __init__(cls, *args, **kwargs):
+ cls.__instance = None
+ super(_TzSingleton, cls).__init__(*args, **kwargs)
+
+ def __call__(cls):
+ if cls.__instance is None:
+ cls.__instance = super(_TzSingleton, cls).__call__()
+ return cls.__instance
+
+
+class _TzFactory(type):
+ def instance(cls, *args, **kwargs):
+ """Alternate constructor that returns a fresh instance"""
+ return type.__call__(cls, *args, **kwargs)
+
+
+class _TzOffsetFactory(_TzFactory):
+ def __init__(cls, *args, **kwargs):
+ cls.__instances = weakref.WeakValueDictionary()
+ cls.__strong_cache = OrderedDict()
+ cls.__strong_cache_size = 8
+
+ cls._cache_lock = _thread.allocate_lock()
+
+ def __call__(cls, name, offset):
+ if isinstance(offset, timedelta):
+ key = (name, offset.total_seconds())
+ else:
+ key = (name, offset)
+
+ instance = cls.__instances.get(key, None)
+ if instance is None:
+ instance = cls.__instances.setdefault(key,
+ cls.instance(name, offset))
+
+ # This lock may not be necessary in Python 3. See GH issue #901
+ with cls._cache_lock:
+ cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
+
+ # Remove an item if the strong cache is overpopulated
+ if len(cls.__strong_cache) > cls.__strong_cache_size:
+ cls.__strong_cache.popitem(last=False)
+
+ return instance
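+
+    # Editor's design note: the WeakValueDictionary deduplicates identical
+    # (name, offset) instances while they are alive, and the small
+    # OrderedDict keeps strong references to the 8 most recently used zones
+    # so hot entries survive garbage collection between calls.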
+
+
+class _TzStrFactory(_TzFactory):
+ def __init__(cls, *args, **kwargs):
+ cls.__instances = weakref.WeakValueDictionary()
+ cls.__strong_cache = OrderedDict()
+ cls.__strong_cache_size = 8
+
+ cls.__cache_lock = _thread.allocate_lock()
+
+ def __call__(cls, s, posix_offset=False):
+ key = (s, posix_offset)
+ instance = cls.__instances.get(key, None)
+
+ if instance is None:
+ instance = cls.__instances.setdefault(key,
+ cls.instance(s, posix_offset))
+
+ # This lock may not be necessary in Python 3. See GH issue #901
+ with cls.__cache_lock:
+ cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance)
+
+ # Remove an item if the strong cache is overpopulated
+ if len(cls.__strong_cache) > cls.__strong_cache_size:
+ cls.__strong_cache.popitem(last=False)
+
+ return instance
+
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/tz/tz.py b/tapdown/lib/python3.11/site-packages/dateutil/tz/tz.py
new file mode 100644
index 0000000..6175914
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/tz/tz.py
@@ -0,0 +1,1849 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers timezone implementations subclassing the abstract
+:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format
+files (usually found in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`,
+etc), TZ environment string (in all known formats), given ranges (with help
+from relative deltas), local machine timezone, fixed offset timezone, and UTC
+timezone.
+"""
+import datetime
+import struct
+import time
+import sys
+import os
+import bisect
+import weakref
+from collections import OrderedDict
+
+import six
+from six import string_types
+from six.moves import _thread
+from ._common import tzname_in_python2, _tzinfo
+from ._common import tzrangebase, enfold
+from ._common import _validate_fromutc_inputs
+
+from ._factories import _TzSingleton, _TzOffsetFactory
+from ._factories import _TzStrFactory
+try:
+ from .win import tzwin, tzwinlocal
+except ImportError:
+ tzwin = tzwinlocal = None
+
+# For warning about rounding tzinfo
+from warnings import warn
+
+ZERO = datetime.timedelta(0)
+EPOCH = datetime.datetime(1970, 1, 1, 0, 0)
+EPOCHORDINAL = EPOCH.toordinal()
+
+
+@six.add_metaclass(_TzSingleton)
+class tzutc(datetime.tzinfo):
+ """
+ This is a tzinfo object that represents the UTC time zone.
+
+ **Examples:**
+
+ .. doctest::
+
+ >>> from datetime import *
+ >>> from dateutil.tz import *
+
+ >>> datetime.now()
+ datetime.datetime(2003, 9, 27, 9, 40, 1, 521290)
+
+ >>> datetime.now(tzutc())
+ datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc())
+
+ >>> datetime.now(tzutc()).tzname()
+ 'UTC'
+
+ .. versionchanged:: 2.7.0
+ ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will
+ always return the same object.
+
+ .. doctest::
+
+ >>> from dateutil.tz import tzutc, UTC
+ >>> tzutc() is tzutc()
+ True
+ >>> tzutc() is UTC
+ True
+ """
+ def utcoffset(self, dt):
+ return ZERO
+
+ def dst(self, dt):
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return "UTC"
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ return False
+
+ @_validate_fromutc_inputs
+ def fromutc(self, dt):
+ """
+ Fast track version of fromutc() returns the original ``dt`` object for
+ any valid :py:class:`datetime.datetime` object.
+ """
+ return dt
+
+ def __eq__(self, other):
+ if not isinstance(other, (tzutc, tzoffset)):
+ return NotImplemented
+
+ return (isinstance(other, tzutc) or
+ (isinstance(other, tzoffset) and other._offset == ZERO))
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
+
+
+#: Convenience constant providing a :class:`tzutc()` instance
+#:
+#: .. versionadded:: 2.7.0
+UTC = tzutc()
+
+
+@six.add_metaclass(_TzOffsetFactory)
+class tzoffset(datetime.tzinfo):
+ """
+ A simple class for representing a fixed offset from UTC.
+
+ :param name:
+ The timezone name, to be returned when ``tzname()`` is called.
+ :param offset:
+        The time zone offset in seconds, or (since version 2.6.0) a
+        :py:class:`datetime.timedelta` object.
+ """
+ def __init__(self, name, offset):
+ self._name = name
+
+ try:
+ # Allow a timedelta
+ offset = offset.total_seconds()
+ except (TypeError, AttributeError):
+ pass
+
+ self._offset = datetime.timedelta(seconds=_get_supported_offset(offset))
+
+ def utcoffset(self, dt):
+ return self._offset
+
+ def dst(self, dt):
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return self._name
+
+ @_validate_fromutc_inputs
+ def fromutc(self, dt):
+ return dt + self._offset
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ return False
+
+ def __eq__(self, other):
+ if not isinstance(other, tzoffset):
+ return NotImplemented
+
+ return self._offset == other._offset
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(%s, %s)" % (self.__class__.__name__,
+ repr(self._name),
+ int(self._offset.total_seconds()))
+
+ __reduce__ = object.__reduce__
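+
+# Usage sketch (editor's addition): a fixed +05:30 zone. The timedelta repr
+# is shown as printed on Python 3.7+.
+#
+#   >>> from datetime import datetime, timedelta
+#   >>> IST = tzoffset("IST", timedelta(hours=5, minutes=30))
+#   >>> datetime(2021, 1, 1, 12, 0, tzinfo=IST).utcoffset()
+#   datetime.timedelta(seconds=19800)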
+
+
+class tzlocal(_tzinfo):
+ """
+ A :class:`tzinfo` subclass built around the ``time`` timezone functions.
+ """
+ def __init__(self):
+ super(tzlocal, self).__init__()
+
+ self._std_offset = datetime.timedelta(seconds=-time.timezone)
+ if time.daylight:
+ self._dst_offset = datetime.timedelta(seconds=-time.altzone)
+ else:
+ self._dst_offset = self._std_offset
+
+ self._dst_saved = self._dst_offset - self._std_offset
+ self._hasdst = bool(self._dst_saved)
+ self._tznames = tuple(time.tzname)
+
+ def utcoffset(self, dt):
+ if dt is None and self._hasdst:
+ return None
+
+ if self._isdst(dt):
+ return self._dst_offset
+ else:
+ return self._std_offset
+
+ def dst(self, dt):
+ if dt is None and self._hasdst:
+ return None
+
+ if self._isdst(dt):
+ return self._dst_offset - self._std_offset
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return self._tznames[self._isdst(dt)]
+
+ def is_ambiguous(self, dt):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ naive_dst = self._naive_is_dst(dt)
+ return (not naive_dst and
+ (naive_dst != self._naive_is_dst(dt - self._dst_saved)))
+
+ def _naive_is_dst(self, dt):
+ timestamp = _datetime_to_timestamp(dt)
+ return time.localtime(timestamp + time.timezone).tm_isdst
+
+ def _isdst(self, dt, fold_naive=True):
+ # We can't use mktime here. It is unstable when deciding if
+ # the hour near to a change is DST or not.
+ #
+ # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
+ # dt.minute, dt.second, dt.weekday(), 0, -1))
+ # return time.localtime(timestamp).tm_isdst
+ #
+ # The code above yields the following result:
+ #
+ # >>> import tz, datetime
+ # >>> t = tz.tzlocal()
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRDT'
+ # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
+ # 'BRST'
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRST'
+ # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
+ # 'BRDT'
+ # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
+ # 'BRDT'
+ #
+ # Here is a more stable implementation:
+ #
+ if not self._hasdst:
+ return False
+
+ # Check for ambiguous times:
+ dstval = self._naive_is_dst(dt)
+ fold = getattr(dt, 'fold', None)
+
+ if self.is_ambiguous(dt):
+ if fold is not None:
+ return not self._fold(dt)
+ else:
+ return True
+
+ return dstval
+
+ def __eq__(self, other):
+ if isinstance(other, tzlocal):
+ return (self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset)
+ elif isinstance(other, tzutc):
+ return (not self._hasdst and
+ self._tznames[0] in {'UTC', 'GMT'} and
+ self._std_offset == ZERO)
+ elif isinstance(other, tzoffset):
+ return (not self._hasdst and
+ self._tznames[0] == other._name and
+ self._std_offset == other._offset)
+ else:
+ return NotImplemented
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s()" % self.__class__.__name__
+
+ __reduce__ = object.__reduce__
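+
+# Editor's note: tzlocal output is machine dependent; on a UTC-configured
+# host, datetime.now(tzlocal()).tzname() returns 'UTC'.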
+
+
+class _ttinfo(object):
+ __slots__ = ["offset", "delta", "isdst", "abbr",
+ "isstd", "isgmt", "dstoffset"]
+
+ def __init__(self):
+ for attr in self.__slots__:
+ setattr(self, attr, None)
+
+ def __repr__(self):
+ l = []
+ for attr in self.__slots__:
+ value = getattr(self, attr)
+ if value is not None:
+ l.append("%s=%s" % (attr, repr(value)))
+ return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
+
+ def __eq__(self, other):
+ if not isinstance(other, _ttinfo):
+ return NotImplemented
+
+ return (self.offset == other.offset and
+ self.delta == other.delta and
+ self.isdst == other.isdst and
+ self.abbr == other.abbr and
+ self.isstd == other.isstd and
+ self.isgmt == other.isgmt and
+ self.dstoffset == other.dstoffset)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __getstate__(self):
+ state = {}
+ for name in self.__slots__:
+ state[name] = getattr(self, name, None)
+ return state
+
+ def __setstate__(self, state):
+ for name in self.__slots__:
+ if name in state:
+ setattr(self, name, state[name])
+
+
+class _tzfile(object):
+ """
+ Lightweight class for holding the relevant transition and time zone
+ information read from binary tzfiles.
+ """
+ attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list',
+ 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first']
+
+ def __init__(self, **kwargs):
+ for attr in self.attrs:
+ setattr(self, attr, kwargs.get(attr, None))
+
+
+class tzfile(_tzinfo):
+ """
+ This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)``
+ format timezone files to extract current and historical zone information.
+
+ :param fileobj:
+ This can be an opened file stream or a file name that the time zone
+ information can be read from.
+
+ :param filename:
+ This is an optional parameter specifying the source of the time zone
+ information in the event that ``fileobj`` is a file object. If omitted
+ and ``fileobj`` is a file stream, this parameter will be set either to
+ ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``.
+
+ See `Sources for Time Zone and Daylight Saving Time Data
+    <https://data.iana.org/time-zones/tz-link.html>`_ for more information.
+    Time zone files can be compiled from the `IANA Time Zone database files
+    <https://www.iana.org/time-zones>`_ with the `zic time zone compiler
+    <https://www.freebsd.org/cgi/man.cgi?query=zic&sektion=8>`_
+
+ .. note::
+
+ Only construct a ``tzfile`` directly if you have a specific timezone
+ file on disk that you want to read into a Python ``tzinfo`` object.
+ If you want to get a ``tzfile`` representing a specific IANA zone,
+ (e.g. ``'America/New_York'``), you should call
+ :func:`dateutil.tz.gettz` with the zone identifier.
+
+
+ **Examples:**
+
+ Using the US Eastern time zone as an example, we can see that a ``tzfile``
+ provides time zone information for the standard Daylight Saving offsets:
+
+ .. testsetup:: tzfile
+
+ from dateutil.tz import gettz
+ from datetime import datetime
+
+ .. doctest:: tzfile
+
+ >>> NYC = gettz('America/New_York')
+ >>> NYC
+ tzfile('/usr/share/zoneinfo/America/New_York')
+
+ >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST
+ 2016-01-03 00:00:00-05:00
+
+ >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT
+ 2016-07-07 00:00:00-04:00
+
+
+    The ``tzfile`` structure contains a full history of the time zone,
+ so historical dates will also have the right offsets. For example, before
+ the adoption of the UTC standards, New York used local solar mean time:
+
+ .. doctest:: tzfile
+
+ >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT
+ 1901-04-12 00:00:00-04:56
+
+ And during World War II, New York was on "Eastern War Time", which was a
+ state of permanent daylight saving time:
+
+ .. doctest:: tzfile
+
+ >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT
+ 1944-02-07 00:00:00-04:00
+
+ """
+
+ def __init__(self, fileobj, filename=None):
+ super(tzfile, self).__init__()
+
+ file_opened_here = False
+ if isinstance(fileobj, string_types):
+ self._filename = fileobj
+ fileobj = open(fileobj, 'rb')
+ file_opened_here = True
+ elif filename is not None:
+ self._filename = filename
+ elif hasattr(fileobj, "name"):
+ self._filename = fileobj.name
+ else:
+ self._filename = repr(fileobj)
+
+ if fileobj is not None:
+ if not file_opened_here:
+ fileobj = _nullcontext(fileobj)
+
+ with fileobj as file_stream:
+ tzobj = self._read_tzfile(file_stream)
+
+ self._set_tzdata(tzobj)
+
+ def _set_tzdata(self, tzobj):
+ """ Set the time zone data of this object from a _tzfile object """
+ # Copy the relevant attributes over as private attributes
+ for attr in _tzfile.attrs:
+ setattr(self, '_' + attr, getattr(tzobj, attr))
+
+ def _read_tzfile(self, fileobj):
+ out = _tzfile()
+
+ # From tzfile(5):
+ #
+ # The time zone information files used by tzset(3)
+ # begin with the magic characters "TZif" to identify
+ # them as time zone information files, followed by
+ # sixteen bytes reserved for future use, followed by
+ # six four-byte values of type long, written in a
+ # ``standard'' byte order (the high-order byte
+ # of the value is written first).
+ if fileobj.read(4).decode() != "TZif":
+ raise ValueError("magic not found")
+
+ fileobj.read(16)
+
+ (
+ # The number of UTC/local indicators stored in the file.
+ ttisgmtcnt,
+
+ # The number of standard/wall indicators stored in the file.
+ ttisstdcnt,
+
+ # The number of leap seconds for which data is
+ # stored in the file.
+ leapcnt,
+
+ # The number of "transition times" for which data
+ # is stored in the file.
+ timecnt,
+
+ # The number of "local time types" for which data
+ # is stored in the file (must not be zero).
+ typecnt,
+
+ # The number of characters of "time zone
+ # abbreviation strings" stored in the file.
+ charcnt,
+
+ ) = struct.unpack(">6l", fileobj.read(24))
+
+ # The above header is followed by tzh_timecnt four-byte
+ # values of type long, sorted in ascending order.
+ # These values are written in ``standard'' byte order.
+ # Each is used as a transition time (as returned by
+ # time(2)) at which the rules for computing local time
+ # change.
+
+ if timecnt:
+ out.trans_list_utc = list(struct.unpack(">%dl" % timecnt,
+ fileobj.read(timecnt*4)))
+ else:
+ out.trans_list_utc = []
+
+ # Next come tzh_timecnt one-byte values of type unsigned
+ # char; each one tells which of the different types of
+ # ``local time'' types described in the file is associated
+ # with the same-indexed transition time. These values
+ # serve as indices into an array of ttinfo structures that
+ # appears next in the file.
+
+ if timecnt:
+ out.trans_idx = struct.unpack(">%dB" % timecnt,
+ fileobj.read(timecnt))
+ else:
+ out.trans_idx = []
+
+ # Each ttinfo structure is written as a four-byte value
+ # for tt_gmtoff of type long, in a standard byte
+ # order, followed by a one-byte value for tt_isdst
+ # and a one-byte value for tt_abbrind. In each
+ # structure, tt_gmtoff gives the number of
+ # seconds to be added to UTC, tt_isdst tells whether
+ # tm_isdst should be set by localtime(3), and
+ # tt_abbrind serves as an index into the array of
+ # time zone abbreviation characters that follow the
+ # ttinfo structure(s) in the file.
+
+ ttinfo = []
+
+ for i in range(typecnt):
+ ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
+
+ abbr = fileobj.read(charcnt).decode()
+
+ # Then there are tzh_leapcnt pairs of four-byte
+ # values, written in standard byte order; the
+ # first value of each pair gives the time (as
+ # returned by time(2)) at which a leap second
+ # occurs; the second gives the total number of
+ # leap seconds to be applied after the given time.
+ # The pairs of values are sorted in ascending order
+ # by time.
+
+ # Not used, for now (but seek for correct file position)
+ if leapcnt:
+ fileobj.seek(leapcnt * 8, os.SEEK_CUR)
+
+ # Then there are tzh_ttisstdcnt standard/wall
+ # indicators, each stored as a one-byte value;
+ # they tell whether the transition times associated
+ # with local time types were specified as standard
+ # time or wall clock time, and are used when
+ # a time zone file is used in handling POSIX-style
+ # time zone environment variables.
+
+ if ttisstdcnt:
+ isstd = struct.unpack(">%db" % ttisstdcnt,
+ fileobj.read(ttisstdcnt))
+
+ # Finally, there are tzh_ttisgmtcnt UTC/local
+ # indicators, each stored as a one-byte value;
+ # they tell whether the transition times associated
+ # with local time types were specified as UTC or
+ # local time, and are used when a time zone file
+ # is used in handling POSIX-style time zone envi-
+ # ronment variables.
+
+ if ttisgmtcnt:
+ isgmt = struct.unpack(">%db" % ttisgmtcnt,
+ fileobj.read(ttisgmtcnt))
+
+ # Build ttinfo list
+ out.ttinfo_list = []
+ for i in range(typecnt):
+ gmtoff, isdst, abbrind = ttinfo[i]
+ gmtoff = _get_supported_offset(gmtoff)
+ tti = _ttinfo()
+ tti.offset = gmtoff
+ tti.dstoffset = datetime.timedelta(0)
+ tti.delta = datetime.timedelta(seconds=gmtoff)
+ tti.isdst = isdst
+ tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
+ tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
+ tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
+ out.ttinfo_list.append(tti)
+
+ # Replace ttinfo indexes for ttinfo objects.
+ out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx]
+
+ # Set standard, dst, and before ttinfos. before will be
+ # used when a given time is before any transitions,
+ # and will be set to the first non-dst ttinfo, or to
+ # the first dst, if all of them are dst.
+ out.ttinfo_std = None
+ out.ttinfo_dst = None
+ out.ttinfo_before = None
+ if out.ttinfo_list:
+ if not out.trans_list_utc:
+ out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0]
+ else:
+ for i in range(timecnt-1, -1, -1):
+ tti = out.trans_idx[i]
+ if not out.ttinfo_std and not tti.isdst:
+ out.ttinfo_std = tti
+ elif not out.ttinfo_dst and tti.isdst:
+ out.ttinfo_dst = tti
+
+ if out.ttinfo_std and out.ttinfo_dst:
+ break
+ else:
+ if out.ttinfo_dst and not out.ttinfo_std:
+ out.ttinfo_std = out.ttinfo_dst
+
+ for tti in out.ttinfo_list:
+ if not tti.isdst:
+ out.ttinfo_before = tti
+ break
+ else:
+ out.ttinfo_before = out.ttinfo_list[0]
+
+ # Now fix transition times to become relative to wall time.
+ #
+ # I'm not sure about this. In my tests, the tz source file
+        # is set up to wall time, and in the binary file isstd and
+ # isgmt are off, so it should be in wall time. OTOH, it's
+ # always in gmt time. Let me know if you have comments
+ # about this.
+ lastdst = None
+ lastoffset = None
+ lastdstoffset = None
+ lastbaseoffset = None
+ out.trans_list = []
+
+ for i, tti in enumerate(out.trans_idx):
+ offset = tti.offset
+ dstoffset = 0
+
+ if lastdst is not None:
+ if tti.isdst:
+ if not lastdst:
+ dstoffset = offset - lastoffset
+
+ if not dstoffset and lastdstoffset:
+ dstoffset = lastdstoffset
+
+ tti.dstoffset = datetime.timedelta(seconds=dstoffset)
+ lastdstoffset = dstoffset
+
+ # If a time zone changes its base offset during a DST transition,
+ # then you need to adjust by the previous base offset to get the
+ # transition time in local time. Otherwise you use the current
+ # base offset. Ideally, I would have some mathematical proof of
+ # why this is true, but I haven't really thought about it enough.
+ baseoffset = offset - dstoffset
+ adjustment = baseoffset
+ if (lastbaseoffset is not None and baseoffset != lastbaseoffset
+ and tti.isdst != lastdst):
+ # The base DST has changed
+ adjustment = lastbaseoffset
+
+ lastdst = tti.isdst
+ lastoffset = offset
+ lastbaseoffset = baseoffset
+
+ out.trans_list.append(out.trans_list_utc[i] + adjustment)
+
+ out.trans_idx = tuple(out.trans_idx)
+ out.trans_list = tuple(out.trans_list)
+ out.trans_list_utc = tuple(out.trans_list_utc)
+
+ return out
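+
+    # Editor's summary of the structure built above: trans_list_utc holds the
+    # raw UTC transition timestamps, trans_list the same instants shifted to
+    # local "wall" seconds, and trans_idx maps each transition to its _ttinfo.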
+
+ def _find_last_transition(self, dt, in_utc=False):
+ # If there's no list, there are no transitions to find
+ if not self._trans_list:
+ return None
+
+ timestamp = _datetime_to_timestamp(dt)
+
+ # Find where the timestamp fits in the transition list - if the
+ # timestamp is a transition time, it's part of the "after" period.
+ trans_list = self._trans_list_utc if in_utc else self._trans_list
+ idx = bisect.bisect_right(trans_list, timestamp)
+
+ # We want to know when the previous transition was, so subtract off 1
+ return idx - 1
+
+ def _get_ttinfo(self, idx):
+ # For no list or after the last transition, default to _ttinfo_std
+ if idx is None or (idx + 1) >= len(self._trans_list):
+ return self._ttinfo_std
+
+ # If there is a list and the time is before it, return _ttinfo_before
+ if idx < 0:
+ return self._ttinfo_before
+
+ return self._trans_idx[idx]
+
+ def _find_ttinfo(self, dt):
+ idx = self._resolve_ambiguous_time(dt)
+
+ return self._get_ttinfo(idx)
+
+ def fromutc(self, dt):
+ """
+ The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`.
+
+ :param dt:
+ A :py:class:`datetime.datetime` object.
+
+ :raises TypeError:
+ Raised if ``dt`` is not a :py:class:`datetime.datetime` object.
+
+ :raises ValueError:
+ Raised if this is called with a ``dt`` which does not have this
+ ``tzinfo`` attached.
+
+ :return:
+ Returns a :py:class:`datetime.datetime` object representing the
+ wall time in ``self``'s time zone.
+ """
+ # These isinstance checks are in datetime.tzinfo, so we'll preserve
+ # them, even if we don't care about duck typing.
+ if not isinstance(dt, datetime.datetime):
+ raise TypeError("fromutc() requires a datetime argument")
+
+ if dt.tzinfo is not self:
+ raise ValueError("dt.tzinfo is not self")
+
+ # First treat UTC as wall time and get the transition we're in.
+ idx = self._find_last_transition(dt, in_utc=True)
+ tti = self._get_ttinfo(idx)
+
+ dt_out = dt + datetime.timedelta(seconds=tti.offset)
+
+ fold = self.is_ambiguous(dt_out, idx=idx)
+
+ return enfold(dt_out, fold=int(fold))
+
+ def is_ambiguous(self, dt, idx=None):
+ """
+ Whether or not the "wall time" of a given datetime is ambiguous in this
+ zone.
+
+ :param dt:
+ A :py:class:`datetime.datetime`, naive or time zone aware.
+
+
+ :return:
+ Returns ``True`` if ambiguous, ``False`` otherwise.
+
+ .. versionadded:: 2.6.0
+ """
+ if idx is None:
+ idx = self._find_last_transition(dt)
+
+ # Calculate the difference in offsets from current to previous
+ timestamp = _datetime_to_timestamp(dt)
+ tti = self._get_ttinfo(idx)
+
+ if idx is None or idx <= 0:
+ return False
+
+ od = self._get_ttinfo(idx - 1).offset - tti.offset
+ tt = self._trans_list[idx] # Transition time
+
+ return timestamp < tt + od
+
+ def _resolve_ambiguous_time(self, dt):
+ idx = self._find_last_transition(dt)
+
+ # If we have no transitions, return the index
+ _fold = self._fold(dt)
+ if idx is None or idx == 0:
+ return idx
+
+ # If it's ambiguous and we're in a fold, shift to a different index.
+ idx_offset = int(not _fold and self.is_ambiguous(dt, idx))
+
+ return idx - idx_offset
+
+ def utcoffset(self, dt):
+ if dt is None:
+ return None
+
+ if not self._ttinfo_std:
+ return ZERO
+
+ return self._find_ttinfo(dt).delta
+
+ def dst(self, dt):
+ if dt is None:
+ return None
+
+ if not self._ttinfo_dst:
+ return ZERO
+
+ tti = self._find_ttinfo(dt)
+
+ if not tti.isdst:
+ return ZERO
+
+ # The documentation says that utcoffset()-dst() must
+ # be constant for every dt.
+ return tti.dstoffset
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ if not self._ttinfo_std or dt is None:
+ return None
+ return self._find_ttinfo(dt).abbr
+
+ def __eq__(self, other):
+ if not isinstance(other, tzfile):
+ return NotImplemented
+ return (self._trans_list == other._trans_list and
+ self._trans_idx == other._trans_idx and
+ self._ttinfo_list == other._ttinfo_list)
+
+ __hash__ = None
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._filename))
+
+ def __reduce__(self):
+ return self.__reduce_ex__(None)
+
+ def __reduce_ex__(self, protocol):
+ return (self.__class__, (None, self._filename), self.__dict__)
+
+
+class tzrange(tzrangebase):
+ """
+ The ``tzrange`` object is a time zone specified by a set of offsets and
+ abbreviations, equivalent to the way the ``TZ`` variable can be specified
+ in POSIX-like systems, but using Python delta objects to specify DST
+ start, end and offsets.
+
+ :param stdabbr:
+ The abbreviation for standard time (e.g. ``'EST'``).
+
+ :param stdoffset:
+ An integer or :class:`datetime.timedelta` object or equivalent
+ specifying the base offset from UTC.
+
+ If unspecified, +00:00 is used.
+
+ :param dstabbr:
+ The abbreviation for DST / "Summer" time (e.g. ``'EDT'``).
+
+ If specified, with no other DST information, DST is assumed to occur
+ and the default behavior or ``dstoffset``, ``start`` and ``end`` is
+ used. If unspecified and no other DST information is specified, it
+ is assumed that this zone has no DST.
+
+        If this is unspecified and other DST information *is* specified,
+        DST occurs in the zone but the time zone abbreviation is left
+        unchanged.
+
+ :param dstoffset:
+        An integer or :class:`datetime.timedelta` object or equivalent
+ specifying the UTC offset during DST. If unspecified and any other DST
+ information is specified, it is assumed to be the STD offset +1 hour.
+
+ :param start:
+ A :class:`relativedelta.relativedelta` object or equivalent specifying
+ the time and time of year that daylight savings time starts. To
+ specify, for example, that DST starts at 2AM on the 2nd Sunday in
+ March, pass:
+
+ ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))``
+
+ If unspecified and any other DST information is specified, the default
+ value is 2 AM on the first Sunday in April.
+
+ :param end:
+ A :class:`relativedelta.relativedelta` object or equivalent
+ representing the time and time of year that daylight savings time
+ ends, with the same specification method as in ``start``. One note is
+ that this should point to the first time in the *standard* zone, so if
+ a transition occurs at 2AM in the DST zone and the clocks are set back
+ 1 hour to 1AM, set the ``hours`` parameter to +1.
+
+
+ **Examples:**
+
+ .. testsetup:: tzrange
+
+ from dateutil.tz import tzrange, tzstr
+
+ .. doctest:: tzrange
+
+ >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT")
+ True
+
+ >>> from dateutil.relativedelta import *
+ >>> range1 = tzrange("EST", -18000, "EDT")
+ >>> range2 = tzrange("EST", -18000, "EDT", -14400,
+ ... relativedelta(hours=+2, month=4, day=1,
+ ... weekday=SU(+1)),
+ ... relativedelta(hours=+1, month=10, day=31,
+ ... weekday=SU(-1)))
+ >>> tzstr('EST5EDT') == range1 == range2
+ True
+
+ """
+ def __init__(self, stdabbr, stdoffset=None,
+ dstabbr=None, dstoffset=None,
+ start=None, end=None):
+
+ global relativedelta
+ from dateutil import relativedelta
+
+ self._std_abbr = stdabbr
+ self._dst_abbr = dstabbr
+
+ try:
+ stdoffset = stdoffset.total_seconds()
+ except (TypeError, AttributeError):
+ pass
+
+ try:
+ dstoffset = dstoffset.total_seconds()
+ except (TypeError, AttributeError):
+ pass
+
+ if stdoffset is not None:
+ self._std_offset = datetime.timedelta(seconds=stdoffset)
+ else:
+ self._std_offset = ZERO
+
+ if dstoffset is not None:
+ self._dst_offset = datetime.timedelta(seconds=dstoffset)
+ elif dstabbr and stdoffset is not None:
+ self._dst_offset = self._std_offset + datetime.timedelta(hours=+1)
+ else:
+ self._dst_offset = ZERO
+
+ if dstabbr and start is None:
+ self._start_delta = relativedelta.relativedelta(
+ hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
+ else:
+ self._start_delta = start
+
+ if dstabbr and end is None:
+ self._end_delta = relativedelta.relativedelta(
+ hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
+ else:
+ self._end_delta = end
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = bool(self._start_delta)
+
+ def transitions(self, year):
+ """
+ For a given year, get the DST on and off transition times, expressed
+ always on the standard time side. For zones with no transitions, this
+ function returns ``None``.
+
+ :param year:
+ The year whose transitions you would like to query.
+
+ :return:
+ Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+ ``(dston, dstoff)`` for zones with an annual DST transition, or
+ ``None`` for fixed offset zones.
+ """
+ if not self.hasdst:
+ return None
+
+ base_year = datetime.datetime(year, 1, 1)
+
+ start = base_year + self._start_delta
+ end = base_year + self._end_delta
+
+ return (start, end)
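+
+    # Editor's sketch: with the default US-style rules,
+    #
+    #   >>> tzrange("EST", -18000, "EDT").transitions(2003)
+    #   (datetime.datetime(2003, 4, 6, 2, 0), datetime.datetime(2003, 10, 26, 1, 0))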
+
+ def __eq__(self, other):
+ if not isinstance(other, tzrange):
+ return NotImplemented
+
+ return (self._std_abbr == other._std_abbr and
+ self._dst_abbr == other._dst_abbr and
+ self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset and
+ self._start_delta == other._start_delta and
+ self._end_delta == other._end_delta)
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_base_offset_
+
+
+@six.add_metaclass(_TzStrFactory)
+class tzstr(tzrange):
+ """
+ ``tzstr`` objects are time zone objects specified by a time-zone string as
+ it would be passed to a ``TZ`` variable on POSIX-style systems (see
+ the `GNU C Library: TZ Variable`_ for more details).
+
+ There is one notable exception, which is that POSIX-style time zones use an
+ inverted offset format, so normally ``GMT+3`` would be parsed as an offset
+ 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
+ offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
+ behavior, pass a ``True`` value to ``posix_offset``.
+
+ The :class:`tzrange` object provides the same functionality, but is
+    specified using :class:`relativedelta.relativedelta` objects rather than
+    strings.
+
+ :param s:
+ A time zone string in ``TZ`` variable format. This can be a
+ :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x:
+ :class:`unicode`) or a stream emitting unicode characters
+ (e.g. :class:`StringIO`).
+
+ :param posix_offset:
+ Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
+ ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
+ POSIX standard.
+
+ .. caution::
+
+ Prior to version 2.7.0, this function also supported time zones
+ in the format:
+
+ * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600``
+ * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600``
+
+ This format is non-standard and has been deprecated; this function
+        will raise a :class:`DeprecatedTzFormatWarning` until
+ support is removed in a future version.
+
+ .. _`GNU C Library: TZ Variable`:
+ https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
+ """
+ def __init__(self, s, posix_offset=False):
+ global parser
+ from dateutil.parser import _parser as parser
+
+ self._s = s
+
+ res = parser._parsetz(s)
+ if res is None or res.any_unused_tokens:
+ raise ValueError("unknown string format")
+
+ # Here we break the compatibility with the TZ variable handling.
+ # GMT-3 actually *means* the timezone -3.
+ if res.stdabbr in ("GMT", "UTC") and not posix_offset:
+ res.stdoffset *= -1
+
+ # We must initialize it first, since _delta() needs
+ # _std_offset and _dst_offset set. Use False in start/end
+ # to avoid building it two times.
+ tzrange.__init__(self, res.stdabbr, res.stdoffset,
+ res.dstabbr, res.dstoffset,
+ start=False, end=False)
+
+ if not res.dstabbr:
+ self._start_delta = None
+ self._end_delta = None
+ else:
+ self._start_delta = self._delta(res.start)
+ if self._start_delta:
+ self._end_delta = self._delta(res.end, isend=1)
+
+ self.hasdst = bool(self._start_delta)
+
+ def _delta(self, x, isend=0):
+ from dateutil import relativedelta
+ kwargs = {}
+ if x.month is not None:
+ kwargs["month"] = x.month
+ if x.weekday is not None:
+ kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
+ if x.week > 0:
+ kwargs["day"] = 1
+ else:
+ kwargs["day"] = 31
+ elif x.day:
+ kwargs["day"] = x.day
+ elif x.yday is not None:
+ kwargs["yearday"] = x.yday
+ elif x.jyday is not None:
+ kwargs["nlyearday"] = x.jyday
+ if not kwargs:
+ # Default is to start on first sunday of april, and end
+ # on last sunday of october.
+ if not isend:
+ kwargs["month"] = 4
+ kwargs["day"] = 1
+ kwargs["weekday"] = relativedelta.SU(+1)
+ else:
+ kwargs["month"] = 10
+ kwargs["day"] = 31
+ kwargs["weekday"] = relativedelta.SU(-1)
+ if x.time is not None:
+ kwargs["seconds"] = x.time
+ else:
+ # Default is 2AM.
+ kwargs["seconds"] = 7200
+ if isend:
+ # Convert to standard time, to follow the documented way
+ # of working with the extra hour. See the documentation
+ # of the tzinfo class.
+ delta = self._dst_offset - self._std_offset
+ kwargs["seconds"] -= delta.seconds + delta.days * 86400
+ return relativedelta.relativedelta(**kwargs)
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._s))
+
+
+class _tzicalvtzcomp(object):
+ def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
+ tzname=None, rrule=None):
+ self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
+ self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
+ self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
+ self.isdst = isdst
+ self.tzname = tzname
+ self.rrule = rrule
+
+
+class _tzicalvtz(_tzinfo):
+ def __init__(self, tzid, comps=[]):
+ super(_tzicalvtz, self).__init__()
+
+ self._tzid = tzid
+ self._comps = comps
+ self._cachedate = []
+ self._cachecomp = []
+ self._cache_lock = _thread.allocate_lock()
+
+ def _find_comp(self, dt):
+ if len(self._comps) == 1:
+ return self._comps[0]
+
+ dt = dt.replace(tzinfo=None)
+
+ try:
+ with self._cache_lock:
+ return self._cachecomp[self._cachedate.index(
+ (dt, self._fold(dt)))]
+ except ValueError:
+ pass
+
+ lastcompdt = None
+ lastcomp = None
+
+ for comp in self._comps:
+ compdt = self._find_compdt(comp, dt)
+
+ if compdt and (not lastcompdt or lastcompdt < compdt):
+ lastcompdt = compdt
+ lastcomp = comp
+
+ if not lastcomp:
+ # RFC says nothing about what to do when a given
+ # time is before the first onset date. We'll look for the
+ # first standard component, or the first component, if
+ # none is found.
+ for comp in self._comps:
+ if not comp.isdst:
+ lastcomp = comp
+ break
+ else:
+                lastcomp = self._comps[0]
+
+ with self._cache_lock:
+ self._cachedate.insert(0, (dt, self._fold(dt)))
+ self._cachecomp.insert(0, lastcomp)
+
+ if len(self._cachedate) > 10:
+ self._cachedate.pop()
+ self._cachecomp.pop()
+
+ return lastcomp
+
+ def _find_compdt(self, comp, dt):
+ if comp.tzoffsetdiff < ZERO and self._fold(dt):
+ dt -= comp.tzoffsetdiff
+
+ compdt = comp.rrule.before(dt, inc=True)
+
+ return compdt
+
+ def utcoffset(self, dt):
+ if dt is None:
+ return None
+
+ return self._find_comp(dt).tzoffsetto
+
+ def dst(self, dt):
+ comp = self._find_comp(dt)
+ if comp.isdst:
+ return comp.tzoffsetdiff
+ else:
+ return ZERO
+
+ @tzname_in_python2
+ def tzname(self, dt):
+ return self._find_comp(dt).tzname
+
+ def __repr__(self):
+        return "<tzicalvtz %s>" % repr(self._tzid)
+
+ __reduce__ = object.__reduce__
+
+
+class tzical(object):
+ """
+ This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
+ as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects.
+
+ :param `fileobj`:
+ A file or stream in iCalendar format, which should be UTF-8 encoded
+ with CRLF endings.
+
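+    A usage sketch (illustrative; ``zones.ics`` is a hypothetical file
+    name, not part of the original documentation):
+
+    .. code-block:: python3
+
+        >>> ical = tzical("zones.ics")           # doctest: +SKIP
+        >>> ical.keys()                          # doctest: +SKIP
+        ['US-Eastern']
+        >>> eastern = ical.get("US-Eastern")     # doctest: +SKIP
+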
+ .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545
+ """
+ def __init__(self, fileobj):
+ global rrule
+ from dateutil import rrule
+
+ if isinstance(fileobj, string_types):
+ self._s = fileobj
+ # ical should be encoded in UTF-8 with CRLF
+ fileobj = open(fileobj, 'r')
+ else:
+ self._s = getattr(fileobj, 'name', repr(fileobj))
+ fileobj = _nullcontext(fileobj)
+
+ self._vtz = {}
+
+ with fileobj as fobj:
+ self._parse_rfc(fobj.read())
+
+ def keys(self):
+ """
+ Retrieves the available time zones as a list.
+ """
+ return list(self._vtz.keys())
+
+ def get(self, tzid=None):
+ """
+ Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
+
+ :param tzid:
+ If there is exactly one time zone available, omitting ``tzid``
+            or passing a :py:const:`None` value returns it. Otherwise a valid
+ key (which can be retrieved from :func:`keys`) is required.
+
+ :raises ValueError:
+            Raised if ``tzid`` is not specified and there is not exactly
+            one zone defined.
+
+ :returns:
+ Returns either a :py:class:`datetime.tzinfo` object representing
+ the relevant time zone or :py:const:`None` if the ``tzid`` was
+ not found.
+ """
+ if tzid is None:
+ if len(self._vtz) == 0:
+ raise ValueError("no timezones defined")
+ elif len(self._vtz) > 1:
+ raise ValueError("more than one timezone available")
+ tzid = next(iter(self._vtz))
+
+ return self._vtz.get(tzid)
+
+ def _parse_offset(self, s):
+ s = s.strip()
+ if not s:
+ raise ValueError("empty offset")
+ if s[0] in ('+', '-'):
+            signal = +1 if s[0] == '+' else -1
+ s = s[1:]
+ else:
+ signal = +1
+ if len(s) == 4:
+ return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
+ elif len(s) == 6:
+ return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
+ else:
+ raise ValueError("invalid offset: " + s)
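+
+    # Illustrative examples of the accepted forms (not in the original
+    # source):
+    #   _parse_offset('-0500')   -> -18000   (HHMM)
+    #   _parse_offset('+053000') ->  19800   (HHMMSS)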
+
+ def _parse_rfc(self, s):
+ lines = s.splitlines()
+ if not lines:
+ raise ValueError("empty string")
+
+ # Unfold
+ i = 0
+ while i < len(lines):
+ line = lines[i].rstrip()
+ if not line:
+ del lines[i]
+ elif i > 0 and line[0] == " ":
+ lines[i-1] += line[1:]
+ del lines[i]
+ else:
+ i += 1
+
+ tzid = None
+ comps = []
+ invtz = False
+ comptype = None
+ for line in lines:
+ if not line:
+ continue
+ name, value = line.split(':', 1)
+ parms = name.split(';')
+ if not parms:
+ raise ValueError("empty property name")
+ name = parms[0].upper()
+ parms = parms[1:]
+ if invtz:
+ if name == "BEGIN":
+ if value in ("STANDARD", "DAYLIGHT"):
+ # Process component
+ pass
+ else:
+ raise ValueError("unknown component: "+value)
+ comptype = value
+ founddtstart = False
+ tzoffsetfrom = None
+ tzoffsetto = None
+ rrulelines = []
+ tzname = None
+ elif name == "END":
+ if value == "VTIMEZONE":
+ if comptype:
+ raise ValueError("component not closed: "+comptype)
+ if not tzid:
+ raise ValueError("mandatory TZID not found")
+ if not comps:
+ raise ValueError(
+ "at least one component is needed")
+ # Process vtimezone
+ self._vtz[tzid] = _tzicalvtz(tzid, comps)
+ invtz = False
+ elif value == comptype:
+ if not founddtstart:
+ raise ValueError("mandatory DTSTART not found")
+ if tzoffsetfrom is None:
+ raise ValueError(
+ "mandatory TZOFFSETFROM not found")
+ if tzoffsetto is None:
+ raise ValueError(
+                            "mandatory TZOFFSETTO not found")
+ # Process component
+ rr = None
+ if rrulelines:
+ rr = rrule.rrulestr("\n".join(rrulelines),
+ compatible=True,
+ ignoretz=True,
+ cache=True)
+ comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
+ (comptype == "DAYLIGHT"),
+ tzname, rr)
+ comps.append(comp)
+ comptype = None
+ else:
+ raise ValueError("invalid component end: "+value)
+ elif comptype:
+ if name == "DTSTART":
+ # DTSTART in VTIMEZONE takes a subset of valid RRULE
+ # values under RFC 5545.
+ for parm in parms:
+ if parm != 'VALUE=DATE-TIME':
+ msg = ('Unsupported DTSTART param in ' +
+ 'VTIMEZONE: ' + parm)
+ raise ValueError(msg)
+ rrulelines.append(line)
+ founddtstart = True
+ elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
+ rrulelines.append(line)
+ elif name == "TZOFFSETFROM":
+ if parms:
+ raise ValueError(
+ "unsupported %s parm: %s " % (name, parms[0]))
+ tzoffsetfrom = self._parse_offset(value)
+ elif name == "TZOFFSETTO":
+ if parms:
+ raise ValueError(
+ "unsupported TZOFFSETTO parm: "+parms[0])
+ tzoffsetto = self._parse_offset(value)
+ elif name == "TZNAME":
+ if parms:
+ raise ValueError(
+ "unsupported TZNAME parm: "+parms[0])
+ tzname = value
+ elif name == "COMMENT":
+ pass
+ else:
+ raise ValueError("unsupported property: "+name)
+ else:
+ if name == "TZID":
+ if parms:
+ raise ValueError(
+ "unsupported TZID parm: "+parms[0])
+ tzid = value
+ elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
+ pass
+ else:
+ raise ValueError("unsupported property: "+name)
+ elif name == "BEGIN" and value == "VTIMEZONE":
+ tzid = None
+ comps = []
+ invtz = True
+
+ def __repr__(self):
+ return "%s(%s)" % (self.__class__.__name__, repr(self._s))
+
+
+if sys.platform != "win32":
+ TZFILES = ["/etc/localtime", "localtime"]
+ TZPATHS = ["/usr/share/zoneinfo",
+ "/usr/lib/zoneinfo",
+ "/usr/share/lib/zoneinfo",
+ "/etc/zoneinfo"]
+else:
+ TZFILES = []
+ TZPATHS = []
+
+
+def __get_gettz():
+ tzlocal_classes = (tzlocal,)
+ if tzwinlocal is not None:
+ tzlocal_classes += (tzwinlocal,)
+
+ class GettzFunc(object):
+ """
+ Retrieve a time zone object from a string representation
+
+ This function is intended to retrieve the :py:class:`tzinfo` subclass
+ that best represents the time zone that would be used if a POSIX
+ `TZ variable`_ were set to the same value.
+
+ If no argument or an empty string is passed to ``gettz``, local time
+ is returned:
+
+ .. code-block:: python3
+
+ >>> gettz()
+ tzfile('/etc/localtime')
+
+ This function is also the preferred way to map IANA tz database keys
+ to :class:`tzfile` objects:
+
+ .. code-block:: python3
+
+ >>> gettz('Pacific/Kiritimati')
+ tzfile('/usr/share/zoneinfo/Pacific/Kiritimati')
+
+ On Windows, the standard is extended to include the Windows-specific
+ zone names provided by the operating system:
+
+ .. code-block:: python3
+
+ >>> gettz('Egypt Standard Time')
+ tzwin('Egypt Standard Time')
+
+ Passing a GNU ``TZ`` style string time zone specification returns a
+ :class:`tzstr` object:
+
+ .. code-block:: python3
+
+ >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
+ tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3')
+
+ :param name:
+ A time zone name (IANA, or, on Windows, Windows keys), location of
+ a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone
+ specifier. An empty string, no argument or ``None`` is interpreted
+ as local time.
+
+ :return:
+ Returns an instance of one of ``dateutil``'s :py:class:`tzinfo`
+ subclasses.
+
+ .. versionchanged:: 2.7.0
+
+ After version 2.7.0, any two calls to ``gettz`` using the same
+ input strings will return the same object:
+
+ .. code-block:: python3
+
+ >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago')
+ True
+
+ In addition to improving performance, this ensures that
+ `"same zone" semantics`_ are used for datetimes in the same zone.
+
+
+ .. _`TZ variable`:
+ https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
+
+ .. _`"same zone" semantics`:
+ https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html
+ """
+ def __init__(self):
+
+ self.__instances = weakref.WeakValueDictionary()
+ self.__strong_cache_size = 8
+ self.__strong_cache = OrderedDict()
+ self._cache_lock = _thread.allocate_lock()
+
+ def __call__(self, name=None):
+ with self._cache_lock:
+ rv = self.__instances.get(name, None)
+
+ if rv is None:
+ rv = self.nocache(name=name)
+ if not (name is None
+ or isinstance(rv, tzlocal_classes)
+ or rv is None):
+ # tzlocal is slightly more complicated than the other
+ # time zone providers because it depends on environment
+ # at construction time, so don't cache that.
+ #
+ # We also cannot store weak references to None, so we
+ # will also not store that.
+ self.__instances[name] = rv
+ else:
+ # No need for strong caching, return immediately
+ return rv
+
+ self.__strong_cache[name] = self.__strong_cache.pop(name, rv)
+
+ if len(self.__strong_cache) > self.__strong_cache_size:
+ self.__strong_cache.popitem(last=False)
+
+ return rv
+
+ def set_cache_size(self, size):
+ with self._cache_lock:
+ self.__strong_cache_size = size
+ while len(self.__strong_cache) > size:
+ self.__strong_cache.popitem(last=False)
+
+ def cache_clear(self):
+ with self._cache_lock:
+ self.__instances = weakref.WeakValueDictionary()
+ self.__strong_cache.clear()
+
+ @staticmethod
+ def nocache(name=None):
+ """A non-cached version of gettz"""
+ tz = None
+ if not name:
+ try:
+ name = os.environ["TZ"]
+ except KeyError:
+ pass
+ if name is None or name in ("", ":"):
+ for filepath in TZFILES:
+ if not os.path.isabs(filepath):
+ filename = filepath
+ for path in TZPATHS:
+ filepath = os.path.join(path, filename)
+ if os.path.isfile(filepath):
+ break
+ else:
+ continue
+ if os.path.isfile(filepath):
+ try:
+ tz = tzfile(filepath)
+ break
+ except (IOError, OSError, ValueError):
+ pass
+ else:
+ tz = tzlocal()
+ else:
+ try:
+ if name.startswith(":"):
+ name = name[1:]
+ except TypeError as e:
+ if isinstance(name, bytes):
+ new_msg = "gettz argument should be str, not bytes"
+ six.raise_from(TypeError(new_msg), e)
+ else:
+ raise
+ if os.path.isabs(name):
+ if os.path.isfile(name):
+ tz = tzfile(name)
+ else:
+ tz = None
+ else:
+ for path in TZPATHS:
+ filepath = os.path.join(path, name)
+ if not os.path.isfile(filepath):
+ filepath = filepath.replace(' ', '_')
+ if not os.path.isfile(filepath):
+ continue
+ try:
+ tz = tzfile(filepath)
+ break
+ except (IOError, OSError, ValueError):
+ pass
+ else:
+ tz = None
+ if tzwin is not None:
+ try:
+ tz = tzwin(name)
+ except (WindowsError, UnicodeEncodeError):
+ # UnicodeEncodeError is for Python 2.7 compat
+ tz = None
+
+ if not tz:
+ from dateutil.zoneinfo import get_zonefile_instance
+ tz = get_zonefile_instance().get(name)
+
+ if not tz:
+ for c in name:
+ # name is not a tzstr unless it has at least
+ # one offset. For short values of "name", an
+ # explicit for loop seems to be the fastest way
+                        # to determine if a string contains a digit.
+ if c in "0123456789":
+ try:
+ tz = tzstr(name)
+ except ValueError:
+ pass
+ break
+ else:
+ if name in ("GMT", "UTC"):
+ tz = UTC
+ elif name in time.tzname:
+ tz = tzlocal()
+ return tz
+
+ return GettzFunc()
+
+
+gettz = __get_gettz()
+del __get_gettz
+
+
+def datetime_exists(dt, tz=None):
+ """
+    Given a datetime and a time zone, determine whether the datetime would
+    fall in a gap (i.e. a wall time skipped over by an offset transition).
+
+ :param dt:
+ A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+ is provided.)
+
+ :param tz:
+ A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+ ``None`` or not provided, the datetime's own time zone will be used.
+
+ :return:
+        Returns a boolean indicating whether or not the "wall time" exists
+        in ``tz``.
+
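+    For example (an illustrative sketch, not from the original docs), the
+    2017 US spring-forward gap in New York:
+
+    .. code-block:: python3
+
+        >>> from dateutil import tz
+        >>> from datetime import datetime
+        >>> NYC = tz.gettz('America/New_York')
+        >>> datetime_exists(datetime(2017, 3, 12, 2, 30), tz=NYC)
+        False
+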
+ .. versionadded:: 2.7.0
+ """
+ if tz is None:
+ if dt.tzinfo is None:
+ raise ValueError('Datetime is naive and no time zone provided.')
+ tz = dt.tzinfo
+
+ dt = dt.replace(tzinfo=None)
+
+ # This is essentially a test of whether or not the datetime can survive
+ # a round trip to UTC.
+ dt_rt = dt.replace(tzinfo=tz).astimezone(UTC).astimezone(tz)
+ dt_rt = dt_rt.replace(tzinfo=None)
+
+ return dt == dt_rt
+
+
+def datetime_ambiguous(dt, tz=None):
+ """
+    Given a datetime and a time zone, determine whether the datetime is
+    ambiguous (i.e. there are two wall times differentiated only by their
+    DST status).
+
+ :param dt:
+ A :class:`datetime.datetime` (whose time zone will be ignored if ``tz``
+ is provided.)
+
+ :param tz:
+ A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If
+ ``None`` or not provided, the datetime's own time zone will be used.
+
+ :return:
+        Returns a boolean indicating whether or not the "wall time" is
+        ambiguous in ``tz``.
+
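+    For example (an illustrative sketch, not from the original docs), the
+    2017 US fall-back repeated hour in New York:
+
+    .. code-block:: python3
+
+        >>> from dateutil import tz
+        >>> from datetime import datetime
+        >>> NYC = tz.gettz('America/New_York')
+        >>> datetime_ambiguous(datetime(2017, 11, 5, 1, 30), tz=NYC)
+        True
+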
+ .. versionadded:: 2.6.0
+ """
+ if tz is None:
+ if dt.tzinfo is None:
+ raise ValueError('Datetime is naive and no time zone provided.')
+
+ tz = dt.tzinfo
+
+ # If a time zone defines its own "is_ambiguous" function, we'll use that.
+ is_ambiguous_fn = getattr(tz, 'is_ambiguous', None)
+ if is_ambiguous_fn is not None:
+ try:
+ return tz.is_ambiguous(dt)
+ except Exception:
+ pass
+
+ # If it doesn't come out and tell us it's ambiguous, we'll just check if
+ # the fold attribute has any effect on this particular date and time.
+ dt = dt.replace(tzinfo=tz)
+ wall_0 = enfold(dt, fold=0)
+ wall_1 = enfold(dt, fold=1)
+
+ same_offset = wall_0.utcoffset() == wall_1.utcoffset()
+ same_dst = wall_0.dst() == wall_1.dst()
+
+ return not (same_offset and same_dst)
+
+
+def resolve_imaginary(dt):
+ """
+ Given a datetime that may be imaginary, return an existing datetime.
+
+ This function assumes that an imaginary datetime represents what the
+ wall time would be in a zone had the offset transition not occurred, so
+ it will always fall forward by the transition's change in offset.
+
+ .. doctest::
+
+ >>> from dateutil import tz
+ >>> from datetime import datetime
+ >>> NYC = tz.gettz('America/New_York')
+ >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC)))
+ 2017-03-12 03:30:00-04:00
+
+ >>> KIR = tz.gettz('Pacific/Kiritimati')
+ >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR)))
+ 1995-01-02 12:30:00+14:00
+
+    As a note, :func:`datetime.astimezone` is guaranteed to produce a valid,
+    existing datetime, so a round-trip to and from UTC is sufficient to get
+    an extant datetime; however, this generally "falls back" to an earlier
+    time rather than falling forward to the STD side (though no guarantees
+    are made about this behavior).
+
+ :param dt:
+ A :class:`datetime.datetime` which may or may not exist.
+
+ :return:
+ Returns an existing :class:`datetime.datetime`. If ``dt`` was not
+ imaginary, the datetime returned is guaranteed to be the same object
+ passed to the function.
+
+ .. versionadded:: 2.7.0
+ """
+ if dt.tzinfo is not None and not datetime_exists(dt):
+
+ curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset()
+ old_offset = (dt - datetime.timedelta(hours=24)).utcoffset()
+
+ dt += curr_offset - old_offset
+
+ return dt
+
+
+def _datetime_to_timestamp(dt):
+ """
+ Convert a :class:`datetime.datetime` object to an epoch timestamp in
+ seconds since January 1, 1970, ignoring the time zone.
+ """
+ return (dt.replace(tzinfo=None) - EPOCH).total_seconds()
+
+
+if sys.version_info >= (3, 6):
+ def _get_supported_offset(second_offset):
+ return second_offset
+else:
+ def _get_supported_offset(second_offset):
+        # For Python pre-3.6, round the offset to whole minutes, since
+        # Python's datetime doesn't accept sub-minute timezones. Check
+        # http://python.org/sf/1447945 or https://bugs.python.org/issue5288
+        # for some information.
+        return 60 * ((second_offset + 30) // 60)
+
+
+try:
+ # Python 3.7 feature
+ from contextlib import nullcontext as _nullcontext
+except ImportError:
+ class _nullcontext(object):
+ """
+ Class for wrapping contexts so that they are passed through in a
+ with statement.
+ """
+ def __init__(self, context):
+ self.context = context
+
+ def __enter__(self):
+ return self.context
+
+        def __exit__(self, *args, **kwargs):
+ pass
+
+# vim:ts=4:sw=4:et
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/tz/win.py b/tapdown/lib/python3.11/site-packages/dateutil/tz/win.py
new file mode 100644
index 0000000..cde07ba
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/tz/win.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+"""
+This module provides an interface to the native time zone data on Windows,
+including :py:class:`datetime.tzinfo` implementations.
+
+Attempting to import this module on a non-Windows platform will raise an
+:py:obj:`ImportError`.
+"""
+# This code was originally contributed by Jeffrey Harris.
+import datetime
+import struct
+
+from six.moves import winreg
+from six import text_type
+
+try:
+ import ctypes
+ from ctypes import wintypes
+except ValueError:
+ # ValueError is raised on non-Windows systems for some horrible reason.
+ raise ImportError("Running tzwin on non-Windows system")
+
+from ._common import tzrangebase
+
+__all__ = ["tzwin", "tzwinlocal", "tzres"]
+
+ONEWEEK = datetime.timedelta(7)
+
+TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
+TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
+TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
+
+
+def _settzkeyname():
+ handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+ try:
+ winreg.OpenKey(handle, TZKEYNAMENT).Close()
+ TZKEYNAME = TZKEYNAMENT
+ except WindowsError:
+ TZKEYNAME = TZKEYNAME9X
+ handle.Close()
+ return TZKEYNAME
+
+
+TZKEYNAME = _settzkeyname()
+
+
+class tzres(object):
+ """
+ Class for accessing ``tzres.dll``, which contains timezone name related
+ resources.
+
+ .. versionadded:: 2.5.0
+ """
+ p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char
+
+ def __init__(self, tzres_loc='tzres.dll'):
+ # Load the user32 DLL so we can load strings from tzres
+ user32 = ctypes.WinDLL('user32')
+
+ # Specify the LoadStringW function
+ user32.LoadStringW.argtypes = (wintypes.HINSTANCE,
+ wintypes.UINT,
+ wintypes.LPWSTR,
+ ctypes.c_int)
+
+ self.LoadStringW = user32.LoadStringW
+ self._tzres = ctypes.WinDLL(tzres_loc)
+ self.tzres_loc = tzres_loc
+
+ def load_name(self, offset):
+ """
+ Load a timezone name from a DLL offset (integer).
+
+ >>> from dateutil.tzwin import tzres
+ >>> tzr = tzres()
+ >>> print(tzr.load_name(112))
+ 'Eastern Standard Time'
+
+ :param offset:
+ A positive integer value referring to a string from the tzres dll.
+
+ .. note::
+
+ Offsets found in the registry are generally of the form
+ ``@tzres.dll,-114``. The offset in this case is 114, not -114.
+
+ """
+ resource = self.p_wchar()
+ lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR)
+ nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0)
+ return resource[:nchar]
+
+ def name_from_string(self, tzname_str):
+ """
+ Parse strings as returned from the Windows registry into the time zone
+ name as defined in the registry.
+
+ >>> from dateutil.tzwin import tzres
+ >>> tzr = tzres()
+ >>> print(tzr.name_from_string('@tzres.dll,-251'))
+ 'Dateline Daylight Time'
+ >>> print(tzr.name_from_string('Eastern Standard Time'))
+ 'Eastern Standard Time'
+
+ :param tzname_str:
+ A timezone name string as returned from a Windows registry key.
+
+ :return:
+ Returns the localized timezone string from tzres.dll if the string
+ is of the form `@tzres.dll,-offset`, else returns the input string.
+ """
+ if not tzname_str.startswith('@'):
+ return tzname_str
+
+ name_splt = tzname_str.split(',-')
+ try:
+ offset = int(name_splt[1])
+        except (IndexError, ValueError):
+ raise ValueError("Malformed timezone string.")
+
+ return self.load_name(offset)
+
+
+class tzwinbase(tzrangebase):
+ """tzinfo class based on win32's timezones available in the registry."""
+ def __init__(self):
+ raise NotImplementedError('tzwinbase is an abstract base class')
+
+ def __eq__(self, other):
+ # Compare on all relevant dimensions, including name.
+ if not isinstance(other, tzwinbase):
+ return NotImplemented
+
+ return (self._std_offset == other._std_offset and
+ self._dst_offset == other._dst_offset and
+ self._stddayofweek == other._stddayofweek and
+ self._dstdayofweek == other._dstdayofweek and
+ self._stdweeknumber == other._stdweeknumber and
+ self._dstweeknumber == other._dstweeknumber and
+ self._stdhour == other._stdhour and
+ self._dsthour == other._dsthour and
+ self._stdminute == other._stdminute and
+ self._dstminute == other._dstminute and
+ self._std_abbr == other._std_abbr and
+ self._dst_abbr == other._dst_abbr)
+
+ @staticmethod
+ def list():
+ """Return a list of all time zones known to the system."""
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ with winreg.OpenKey(handle, TZKEYNAME) as tzkey:
+ result = [winreg.EnumKey(tzkey, i)
+ for i in range(winreg.QueryInfoKey(tzkey)[0])]
+ return result
+
+ def display(self):
+ """
+ Return the display name of the time zone.
+ """
+ return self._display
+
+ def transitions(self, year):
+ """
+        For a given year, get the DST on and off transition times, always
+        expressed on the standard time side. For zones with no transitions,
+        this function returns ``None``.
+
+ :param year:
+ The year whose transitions you would like to query.
+
+ :return:
+ Returns a :class:`tuple` of :class:`datetime.datetime` objects,
+ ``(dston, dstoff)`` for zones with an annual DST transition, or
+ ``None`` for fixed offset zones.
+ """
+
+ if not self.hasdst:
+ return None
+
+ dston = picknthweekday(year, self._dstmonth, self._dstdayofweek,
+ self._dsthour, self._dstminute,
+ self._dstweeknumber)
+
+ dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek,
+ self._stdhour, self._stdminute,
+ self._stdweeknumber)
+
+ # Ambiguous dates default to the STD side
+ dstoff -= self._dst_base_offset
+
+ return dston, dstoff
+
+ def _get_hasdst(self):
+ return self._dstmonth != 0
+
+ @property
+ def _dst_base_offset(self):
+ return self._dst_base_offset_
+
+
+class tzwin(tzwinbase):
+ """
+ Time zone object created from the zone info in the Windows registry
+
+ These are similar to :py:class:`dateutil.tz.tzrange` objects in that
+ the time zone data is provided in the format of a single offset rule
+ for either 0 or 2 time zone transitions per year.
+
+    :param name:
+ The name of a Windows time zone key, e.g. "Eastern Standard Time".
+ The full list of keys can be retrieved with :func:`tzwin.list`.
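+
+    A usage sketch (illustrative; Windows only):
+
+    .. code-block:: python3
+
+        >>> from dateutil.tz import tzwin
+        >>> tzwin.list()                               # doctest: +SKIP
+        ['AUS Central Standard Time', ...]
+        >>> eastern = tzwin('Eastern Standard Time')   # doctest: +SKIP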
+ """
+
+ def __init__(self, name):
+ self._name = name
+
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name)
+ with winreg.OpenKey(handle, tzkeyname) as tzkey:
+ keydict = valuestodict(tzkey)
+
+ self._std_abbr = keydict["Std"]
+ self._dst_abbr = keydict["Dlt"]
+
+ self._display = keydict["Display"]
+
+        # See http://www.jsiinc.com/SUBA/tip0300/rh0398.htm
+ tup = struct.unpack("=3l16h", keydict["TZI"])
+ stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
+ dstoffset = stdoffset-tup[2] # + DaylightBias * -1
+ self._std_offset = datetime.timedelta(minutes=stdoffset)
+ self._dst_offset = datetime.timedelta(minutes=dstoffset)
+
+ # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs
+ # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx
+ (self._stdmonth,
+ self._stddayofweek, # Sunday = 0
+ self._stdweeknumber, # Last = 5
+ self._stdhour,
+ self._stdminute) = tup[4:9]
+
+ (self._dstmonth,
+ self._dstdayofweek, # Sunday = 0
+ self._dstweeknumber, # Last = 5
+ self._dsthour,
+ self._dstminute) = tup[12:17]
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = self._get_hasdst()
+
+ def __repr__(self):
+ return "tzwin(%s)" % repr(self._name)
+
+ def __reduce__(self):
+ return (self.__class__, (self._name,))
+
+
+class tzwinlocal(tzwinbase):
+ """
+ Class representing the local time zone information in the Windows registry
+
+ While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time`
+ module) to retrieve time zone information, ``tzwinlocal`` retrieves the
+ rules directly from the Windows registry and creates an object like
+ :class:`dateutil.tz.tzwin`.
+
+ Because Windows does not have an equivalent of :func:`time.tzset`, on
+ Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the
+ time zone settings *at the time that the process was started*, meaning
+ changes to the machine's time zone settings during the run of a program
+ on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`.
+ Because ``tzwinlocal`` reads the registry directly, it is unaffected by
+ this issue.
+ """
+ def __init__(self):
+ with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle:
+ with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey:
+ keydict = valuestodict(tzlocalkey)
+
+ self._std_abbr = keydict["StandardName"]
+ self._dst_abbr = keydict["DaylightName"]
+
+ try:
+ tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME,
+ sn=self._std_abbr)
+ with winreg.OpenKey(handle, tzkeyname) as tzkey:
+ _keydict = valuestodict(tzkey)
+ self._display = _keydict["Display"]
+ except OSError:
+ self._display = None
+
+ stdoffset = -keydict["Bias"]-keydict["StandardBias"]
+ dstoffset = stdoffset-keydict["DaylightBias"]
+
+ self._std_offset = datetime.timedelta(minutes=stdoffset)
+ self._dst_offset = datetime.timedelta(minutes=dstoffset)
+
+ # For reasons unclear, in this particular key, the day of week has been
+ # moved to the END of the SYSTEMTIME structure.
+ tup = struct.unpack("=8h", keydict["StandardStart"])
+
+ (self._stdmonth,
+ self._stdweeknumber, # Last = 5
+ self._stdhour,
+ self._stdminute) = tup[1:5]
+
+ self._stddayofweek = tup[7]
+
+ tup = struct.unpack("=8h", keydict["DaylightStart"])
+
+ (self._dstmonth,
+ self._dstweeknumber, # Last = 5
+ self._dsthour,
+ self._dstminute) = tup[1:5]
+
+ self._dstdayofweek = tup[7]
+
+ self._dst_base_offset_ = self._dst_offset - self._std_offset
+ self.hasdst = self._get_hasdst()
+
+ def __repr__(self):
+ return "tzwinlocal()"
+
+ def __str__(self):
+ # str will return the standard name, not the daylight name.
+ return "tzwinlocal(%s)" % repr(self._std_abbr)
+
+ def __reduce__(self):
+ return (self.__class__, ())
+
+
+def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
+ """ dayofweek == 0 means Sunday, whichweek 5 means last instance """
+ first = datetime.datetime(year, month, 1, hour, minute)
+
+    # This will work whether dayofweek is an ISO weekday (1-7) or
+    # Microsoft-style (0-6), because 7 % 7 == 0.
+ weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1)
+ wd = weekdayone + ((whichweek - 1) * ONEWEEK)
+ if (wd.month != month):
+ wd -= ONEWEEK
+
+ return wd
+
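+# For example (illustrative, not in the original source), the second Sunday
+# of March 2017 at 02:00, the US DST start, is:
+#
+#     picknthweekday(2017, 3, 0, 2, 0, 2)
+#     -> datetime.datetime(2017, 3, 12, 2, 0)
+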
+
+def valuestodict(key):
+ """Convert a registry key's values to a dictionary."""
+ dout = {}
+ size = winreg.QueryInfoKey(key)[1]
+ tz_res = None
+
+ for i in range(size):
+ key_name, value, dtype = winreg.EnumValue(key, i)
+ if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN:
+ # If it's a DWORD (32-bit integer), it's stored as unsigned - convert
+ # that to a proper signed integer
+ if value & (1 << 31):
+ value = value - (1 << 32)
+ elif dtype == winreg.REG_SZ:
+ # If it's a reference to the tzres DLL, load the actual string
+ if value.startswith('@tzres'):
+ tz_res = tz_res or tzres()
+ value = tz_res.name_from_string(value)
+
+ value = value.rstrip('\x00') # Remove trailing nulls
+
+ dout[key_name] = value
+
+ return dout
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/tzwin.py b/tapdown/lib/python3.11/site-packages/dateutil/tzwin.py
new file mode 100644
index 0000000..cebc673
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/tzwin.py
@@ -0,0 +1,2 @@
+# tzwin has moved to dateutil.tz.win
+from .tz.win import *
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/utils.py b/tapdown/lib/python3.11/site-packages/dateutil/utils.py
new file mode 100644
index 0000000..dd2d245
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/utils.py
@@ -0,0 +1,71 @@
+# -*- coding: utf-8 -*-
+"""
+This module offers general convenience and utility functions for dealing with
+datetimes.
+
+.. versionadded:: 2.7.0
+"""
+from __future__ import unicode_literals
+
+from datetime import datetime, time
+
+
+def today(tzinfo=None):
+ """
+ Returns a :py:class:`datetime` representing the current day at midnight
+
+ :param tzinfo:
+ The time zone to attach (also used to determine the current day).
+
+ :return:
+ A :py:class:`datetime.datetime` object representing the current day
+ at midnight.
+ """
+
+ dt = datetime.now(tzinfo)
+ return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))
+
+
+def default_tzinfo(dt, tzinfo):
+ """
+ Sets the ``tzinfo`` parameter on naive datetimes only
+
+ This is useful for example when you are provided a datetime that may have
+ either an implicit or explicit time zone, such as when parsing a time zone
+ string.
+
+ .. doctest::
+
+ >>> from dateutil.tz import tzoffset
+ >>> from dateutil.parser import parse
+ >>> from dateutil.utils import default_tzinfo
+ >>> dflt_tz = tzoffset("EST", -18000)
+ >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
+ 2014-01-01 12:30:00+00:00
+ >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
+ 2014-01-01 12:30:00-05:00
+
+ :param dt:
+ The datetime on which to replace the time zone
+
+ :param tzinfo:
+ The :py:class:`datetime.tzinfo` subclass instance to assign to
+ ``dt`` if (and only if) it is naive.
+
+ :return:
+ Returns an aware :py:class:`datetime.datetime`.
+ """
+ if dt.tzinfo is not None:
+ return dt
+ else:
+ return dt.replace(tzinfo=tzinfo)
+
+
+def within_delta(dt1, dt2, delta):
+ """
+    Useful for comparing two datetimes that differ by a negligible amount
+    and should be considered equal.
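+
+    An illustrative doctest (not part of the original docstring):
+
+    .. doctest::
+
+        >>> from datetime import datetime, timedelta
+        >>> d1 = datetime(2016, 1, 1)
+        >>> d2 = d1 + timedelta(seconds=1)
+        >>> within_delta(d1, d2, timedelta(seconds=1))
+        True
+        >>> within_delta(d1, d2, timedelta(seconds=0.5))
+        False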
+ """
+ delta = abs(delta)
+ difference = dt1 - dt2
+ return -delta <= difference <= delta
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/__init__.py b/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/__init__.py
new file mode 100644
index 0000000..34f11ad
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/__init__.py
@@ -0,0 +1,167 @@
+# -*- coding: utf-8 -*-
+import warnings
+import json
+
+from tarfile import TarFile
+from pkgutil import get_data
+from io import BytesIO
+
+from dateutil.tz import tzfile as _tzfile
+
+__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
+
+ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
+METADATA_FN = 'METADATA'
+
+
+class tzfile(_tzfile):
+ def __reduce__(self):
+ return (gettz, (self._filename,))
+
+
+def getzoneinfofile_stream():
+ try:
+ return BytesIO(get_data(__name__, ZONEFILENAME))
+ except IOError as e: # TODO switch to FileNotFoundError?
+ warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
+ return None
+
+
+class ZoneInfoFile(object):
+ def __init__(self, zonefile_stream=None):
+ if zonefile_stream is not None:
+ with TarFile.open(fileobj=zonefile_stream) as tf:
+ self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
+ for zf in tf.getmembers()
+ if zf.isfile() and zf.name != METADATA_FN}
+ # deal with links: They'll point to their parent object. Less
+ # waste of memory
+ links = {zl.name: self.zones[zl.linkname]
+ for zl in tf.getmembers() if
+ zl.islnk() or zl.issym()}
+ self.zones.update(links)
+ try:
+ metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
+ metadata_str = metadata_json.read().decode('UTF-8')
+ self.metadata = json.loads(metadata_str)
+ except KeyError:
+ # no metadata in tar file
+ self.metadata = None
+ else:
+ self.zones = {}
+ self.metadata = None
+
+ def get(self, name, default=None):
+ """
+ Wrapper for :func:`ZoneInfoFile.zones.get`. This is a convenience method
+ for retrieving zones from the zone dictionary.
+
+ :param name:
+ The name of the zone to retrieve. (Generally IANA zone names)
+
+ :param default:
+ The value to return in the event of a missing key.
+
+ .. versionadded:: 2.6.0
+
+ """
+ return self.zones.get(name, default)
+
+
+# The current API has gettz as a module function, although in fact it taps into
+# a stateful class. So as a workaround for now, without changing the API, we
+# will create a new "global" class instance the first time a user requests a
+# timezone. Ugly, but adheres to the api.
+#
+# TODO: Remove after deprecation period.
+_CLASS_ZONE_INSTANCE = []
+
+
+def get_zonefile_instance(new_instance=False):
+ """
+ This is a convenience function which provides a :class:`ZoneInfoFile`
+ instance using the data provided by the ``dateutil`` package. By default, it
+ caches a single instance of the ZoneInfoFile object and returns that.
+
+ :param new_instance:
+ If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and
+ used as the cached instance for the next call. Otherwise, new instances
+ are created only as necessary.
+
+ :return:
+ Returns a :class:`ZoneInfoFile` object.
+
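+    A usage sketch (illustrative, not from the original docstring):
+
+    .. code-block:: python3
+
+        >>> from dateutil.zoneinfo import get_zonefile_instance
+        >>> zoneinfo = get_zonefile_instance()
+        >>> nyc = zoneinfo.get('America/New_York')
+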
+ .. versionadded:: 2.6
+ """
+ if new_instance:
+ zif = None
+ else:
+ zif = getattr(get_zonefile_instance, '_cached_instance', None)
+
+ if zif is None:
+ zif = ZoneInfoFile(getzoneinfofile_stream())
+
+ get_zonefile_instance._cached_instance = zif
+
+ return zif
+
+
+def gettz(name):
+ """
+ This retrieves a time zone from the local zoneinfo tarball that is packaged
+ with dateutil.
+
+ :param name:
+ An IANA-style time zone name, as found in the zoneinfo file.
+
+ :return:
+ Returns a :class:`dateutil.tz.tzfile` time zone object.
+
+ .. warning::
+ It is generally inadvisable to use this function, and it is only
+ provided for API compatibility with earlier versions. This is *not*
+ equivalent to ``dateutil.tz.gettz()``, which selects an appropriate
+ time zone based on the inputs, favoring system zoneinfo. This is ONLY
+ for accessing the dateutil-specific zoneinfo (which may be out of
+ date compared to the system zoneinfo).
+
+ .. deprecated:: 2.6
+ If you need to use a specific zoneinfofile over the system zoneinfo,
+ instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call
+ :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead.
+
+ Use :func:`get_zonefile_instance` to retrieve an instance of the
+ dateutil-provided zoneinfo.
+ """
+    warnings.warn("zoneinfo.gettz() will be removed in future versions. "
+                  "To use the dateutil-provided zoneinfo files, instantiate "
+                  "a ZoneInfoFile object and use ZoneInfoFile.zones.get() "
+                  "instead. See the documentation for details.",
+ DeprecationWarning)
+
+ if len(_CLASS_ZONE_INSTANCE) == 0:
+ _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+ return _CLASS_ZONE_INSTANCE[0].zones.get(name)
+
+
+def gettz_db_metadata():
+ """ Get the zonefile metadata
+
+ See `zonefile_metadata`_
+
+ :returns:
+ A dictionary with the database metadata
+
+ .. deprecated:: 2.6
+ See deprecation warning in :func:`zoneinfo.gettz`. To get metadata,
+ query the attribute ``zoneinfo.ZoneInfoFile.metadata``.
+ """
+    warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future "
+                  "versions. To use the dateutil-provided zoneinfo files, "
+                  "instantiate a ZoneInfoFile object and query the "
+                  "'metadata' attribute instead. See the documentation "
+                  "for details.",
+ DeprecationWarning)
+
+ if len(_CLASS_ZONE_INSTANCE) == 0:
+ _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream()))
+ return _CLASS_ZONE_INSTANCE[0].metadata
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz
new file mode 100644
index 0000000..1461f8c
Binary files /dev/null and b/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz differ
diff --git a/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/rebuild.py b/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/rebuild.py
new file mode 100644
index 0000000..684c658
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dateutil/zoneinfo/rebuild.py
@@ -0,0 +1,75 @@
+import logging
+import os
+import tempfile
+import shutil
+import json
+from subprocess import check_call, check_output
+from tarfile import TarFile
+
+from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME
+
+
+def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None):
+ """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar*
+
+ filename is the timezone tarball from ``ftp.iana.org/tz``.
+
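+    A hypothetical invocation (illustrative; the tarball name, zone groups
+    and metadata are assumptions, not from the original docs)::
+
+        rebuild("tzdata-2023c.tar.gz",
+                zonegroups=["northamerica", "europe"],
+                metadata={"tzversion": "2023c"})
+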
+ """
+ tmpdir = tempfile.mkdtemp()
+ zonedir = os.path.join(tmpdir, "zoneinfo")
+ moduledir = os.path.dirname(__file__)
+ try:
+ with TarFile.open(filename) as tf:
+ for name in zonegroups:
+ tf.extract(name, tmpdir)
+ filepaths = [os.path.join(tmpdir, n) for n in zonegroups]
+
+ _run_zic(zonedir, filepaths)
+
+ # write metadata file
+ with open(os.path.join(zonedir, METADATA_FN), 'w') as f:
+ json.dump(metadata, f, indent=4, sort_keys=True)
+ target = os.path.join(moduledir, ZONEFILENAME)
+ with TarFile.open(target, "w:%s" % format) as tf:
+ for entry in os.listdir(zonedir):
+ entrypath = os.path.join(zonedir, entry)
+ tf.add(entrypath, entry)
+ finally:
+ shutil.rmtree(tmpdir)
+
+
+def _run_zic(zonedir, filepaths):
+ """Calls the ``zic`` compiler in a compatible way to get a "fat" binary.
+
+ Recent versions of ``zic`` default to ``-b slim``, while older versions
+ don't even have the ``-b`` option (but default to "fat" binaries). The
+ current version of dateutil does not support Version 2+ TZif files, which
+ causes problems when used in conjunction with "slim" binaries, so this
+ function is used to ensure that we always get a "fat" binary.
+ """
+
+ try:
+ help_text = check_output(["zic", "--help"])
+ except OSError as e:
+ _print_on_nosuchfile(e)
+ raise
+
+ if b"-b " in help_text:
+ bloat_args = ["-b", "fat"]
+ else:
+ bloat_args = []
+
+ check_call(["zic"] + bloat_args + ["-d", zonedir] + filepaths)
+
+
+def _print_on_nosuchfile(e):
+ """Print helpful troubleshooting message
+
+ e is an exception raised by subprocess.check_call()
+
+ """
+ if e.errno == 2:
+ logging.error(
+ "Could not find zic. Perhaps you need to install "
+ "libc-bin or some other package that provides it, "
+ "or it's not in your PATH?")
diff --git a/tapdown/lib/python3.11/site-packages/distutils-precedence.pth b/tapdown/lib/python3.11/site-packages/distutils-precedence.pth
new file mode 100644
index 0000000..7f009fe
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/distutils-precedence.pth
@@ -0,0 +1 @@
+import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'local') == 'local'; enabled and __import__('_distutils_hack').add_shim();
diff --git a/tapdown/lib/python3.11/site-packages/dns/__init__.py b/tapdown/lib/python3.11/site-packages/dns/__init__.py
new file mode 100644
index 0000000..d30fd74
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/__init__.py
@@ -0,0 +1,72 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython DNS toolkit"""
+
+__all__ = [
+ "asyncbackend",
+ "asyncquery",
+ "asyncresolver",
+ "btree",
+ "btreezone",
+ "dnssec",
+ "dnssecalgs",
+ "dnssectypes",
+ "e164",
+ "edns",
+ "entropy",
+ "exception",
+ "flags",
+ "immutable",
+ "inet",
+ "ipv4",
+ "ipv6",
+ "message",
+ "name",
+ "namedict",
+ "node",
+ "opcode",
+ "query",
+ "quic",
+ "rcode",
+ "rdata",
+ "rdataclass",
+ "rdataset",
+ "rdatatype",
+ "renderer",
+ "resolver",
+ "reversename",
+ "rrset",
+ "serial",
+ "set",
+ "tokenizer",
+ "transaction",
+ "tsig",
+ "tsigkeyring",
+ "ttl",
+ "rdtypes",
+ "update",
+ "version",
+ "versioned",
+ "wire",
+ "xfr",
+ "zone",
+ "zonetypes",
+ "zonefile",
+]
+
+from dns.version import version as __version__ # noqa
diff --git a/tapdown/lib/python3.11/site-packages/dns/_asyncbackend.py b/tapdown/lib/python3.11/site-packages/dns/_asyncbackend.py
new file mode 100644
index 0000000..23455db
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_asyncbackend.py
@@ -0,0 +1,100 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# This is a nullcontext for both sync and async. 3.7 has a nullcontext,
+# but it is only for sync use.
+
+
+class NullContext:
+ def __init__(self, enter_result=None):
+ self.enter_result = enter_result
+
+ def __enter__(self):
+ return self.enter_result
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+ async def __aenter__(self):
+ return self.enter_result
+
+ async def __aexit__(self, exc_type, exc_value, traceback):
+ pass
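+
+# A usage sketch (illustrative, not in the original source): NullContext
+# lets code "async with" a resource it does not own and must not close:
+#
+#     async with NullContext(existing_socket) as sock:
+#         ...  # sock is passed through unchanged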
+
+
+# These are declared here so backends can import them without creating
+# circular dependencies with dns.asyncbackend.
+
+
+class Socket: # pragma: no cover
+ def __init__(self, family: int, type: int):
+ self.family = family
+ self.type = type
+
+ async def close(self):
+ pass
+
+ async def getpeername(self):
+ raise NotImplementedError
+
+ async def getsockname(self):
+ raise NotImplementedError
+
+ async def getpeercert(self, timeout):
+ raise NotImplementedError
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_value, traceback):
+ await self.close()
+
+
+class DatagramSocket(Socket): # pragma: no cover
+ async def sendto(self, what, destination, timeout):
+ raise NotImplementedError
+
+ async def recvfrom(self, size, timeout):
+ raise NotImplementedError
+
+
+class StreamSocket(Socket): # pragma: no cover
+ async def sendall(self, what, timeout):
+ raise NotImplementedError
+
+ async def recv(self, size, timeout):
+ raise NotImplementedError
+
+
+class NullTransport:
+ async def connect_tcp(self, host, port, timeout, local_address):
+ raise NotImplementedError
+
+
+class Backend: # pragma: no cover
+ def name(self) -> str:
+ return "unknown"
+
+ async def make_socket(
+ self,
+ af,
+ socktype,
+ proto=0,
+ source=None,
+ destination=None,
+ timeout=None,
+ ssl_context=None,
+ server_hostname=None,
+ ):
+ raise NotImplementedError
+
+ def datagram_connection_required(self):
+ return False
+
+ async def sleep(self, interval):
+ raise NotImplementedError
+
+ def get_transport_class(self):
+ raise NotImplementedError
+
+ async def wait_for(self, awaitable, timeout):
+ raise NotImplementedError
diff --git a/tapdown/lib/python3.11/site-packages/dns/_asyncio_backend.py b/tapdown/lib/python3.11/site-packages/dns/_asyncio_backend.py
new file mode 100644
index 0000000..303908c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_asyncio_backend.py
@@ -0,0 +1,276 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+"""asyncio library query support"""
+
+import asyncio
+import socket
+import sys
+
+import dns._asyncbackend
+import dns._features
+import dns.exception
+import dns.inet
+
+_is_win32 = sys.platform == "win32"
+
+
+def _get_running_loop():
+ try:
+ return asyncio.get_running_loop()
+ except AttributeError: # pragma: no cover
+ return asyncio.get_event_loop()
+
+
+class _DatagramProtocol:
+ def __init__(self):
+ self.transport = None
+ self.recvfrom = None
+
+ def connection_made(self, transport):
+ self.transport = transport
+
+ def datagram_received(self, data, addr):
+ if self.recvfrom and not self.recvfrom.done():
+ self.recvfrom.set_result((data, addr))
+
+ def error_received(self, exc): # pragma: no cover
+ if self.recvfrom and not self.recvfrom.done():
+ self.recvfrom.set_exception(exc)
+
+ def connection_lost(self, exc):
+ if self.recvfrom and not self.recvfrom.done():
+ if exc is None:
+ # EOF we triggered. Is there a better way to do this?
+ try:
+ raise EOFError("EOF")
+ except EOFError as e:
+ self.recvfrom.set_exception(e)
+ else:
+ self.recvfrom.set_exception(exc)
+
+ def close(self):
+ if self.transport is not None:
+ self.transport.close()
+
+
+async def _maybe_wait_for(awaitable, timeout):
+ if timeout is not None:
+ try:
+ return await asyncio.wait_for(awaitable, timeout)
+ except asyncio.TimeoutError:
+ raise dns.exception.Timeout(timeout=timeout)
+ else:
+ return await awaitable
+
+
+class DatagramSocket(dns._asyncbackend.DatagramSocket):
+ def __init__(self, family, transport, protocol):
+ super().__init__(family, socket.SOCK_DGRAM)
+ self.transport = transport
+ self.protocol = protocol
+
+ async def sendto(self, what, destination, timeout): # pragma: no cover
+ # no timeout for asyncio sendto
+ self.transport.sendto(what, destination)
+ return len(what)
+
+ async def recvfrom(self, size, timeout):
+ # ignore size as there's no way I know to tell protocol about it
+ done = _get_running_loop().create_future()
+ try:
+ assert self.protocol.recvfrom is None
+ self.protocol.recvfrom = done
+ await _maybe_wait_for(done, timeout)
+ return done.result()
+ finally:
+ self.protocol.recvfrom = None
+
+ async def close(self):
+ self.protocol.close()
+
+ async def getpeername(self):
+ return self.transport.get_extra_info("peername")
+
+ async def getsockname(self):
+ return self.transport.get_extra_info("sockname")
+
+ async def getpeercert(self, timeout):
+ raise NotImplementedError
+
+
+class StreamSocket(dns._asyncbackend.StreamSocket):
+ def __init__(self, af, reader, writer):
+ super().__init__(af, socket.SOCK_STREAM)
+ self.reader = reader
+ self.writer = writer
+
+ async def sendall(self, what, timeout):
+ self.writer.write(what)
+ return await _maybe_wait_for(self.writer.drain(), timeout)
+
+ async def recv(self, size, timeout):
+ return await _maybe_wait_for(self.reader.read(size), timeout)
+
+ async def close(self):
+ self.writer.close()
+
+ async def getpeername(self):
+ return self.writer.get_extra_info("peername")
+
+ async def getsockname(self):
+ return self.writer.get_extra_info("sockname")
+
+ async def getpeercert(self, timeout):
+ return self.writer.get_extra_info("peercert")
+
+
+if dns._features.have("doh"):
+ import anyio
+ import httpcore
+ import httpcore._backends.anyio
+ import httpx
+
+ _CoreAsyncNetworkBackend = httpcore.AsyncNetworkBackend
+ _CoreAnyIOStream = httpcore._backends.anyio.AnyIOStream # pyright: ignore
+
+ from dns.query import _compute_times, _expiration_for_this_attempt, _remaining
+
+ class _NetworkBackend(_CoreAsyncNetworkBackend):
+ def __init__(self, resolver, local_port, bootstrap_address, family):
+ super().__init__()
+ self._local_port = local_port
+ self._resolver = resolver
+ self._bootstrap_address = bootstrap_address
+ self._family = family
+ if local_port != 0:
+ raise NotImplementedError(
+ "the asyncio transport for HTTPX cannot set the local port"
+ )
+
+ async def connect_tcp(
+ self, host, port, timeout=None, local_address=None, socket_options=None
+ ): # pylint: disable=signature-differs
+ addresses = []
+ _, expiration = _compute_times(timeout)
+ if dns.inet.is_address(host):
+ addresses.append(host)
+ elif self._bootstrap_address is not None:
+ addresses.append(self._bootstrap_address)
+ else:
+ timeout = _remaining(expiration)
+ family = self._family
+ if local_address:
+ family = dns.inet.af_for_address(local_address)
+ answers = await self._resolver.resolve_name(
+ host, family=family, lifetime=timeout
+ )
+ addresses = answers.addresses()
+ for address in addresses:
+ try:
+ attempt_expiration = _expiration_for_this_attempt(2.0, expiration)
+ timeout = _remaining(attempt_expiration)
+ with anyio.fail_after(timeout):
+ stream = await anyio.connect_tcp(
+ remote_host=address,
+ remote_port=port,
+ local_host=local_address,
+ )
+ return _CoreAnyIOStream(stream)
+ except Exception:
+ pass
+ raise httpcore.ConnectError
+
+ async def connect_unix_socket(
+ self, path, timeout=None, socket_options=None
+ ): # pylint: disable=signature-differs
+ raise NotImplementedError
+
+ async def sleep(self, seconds): # pylint: disable=signature-differs
+ await anyio.sleep(seconds)
+
+ class _HTTPTransport(httpx.AsyncHTTPTransport):
+ def __init__(
+ self,
+ *args,
+ local_port=0,
+ bootstrap_address=None,
+ resolver=None,
+ family=socket.AF_UNSPEC,
+ **kwargs,
+ ):
+ if resolver is None and bootstrap_address is None:
+ # pylint: disable=import-outside-toplevel,redefined-outer-name
+ import dns.asyncresolver
+
+ resolver = dns.asyncresolver.Resolver()
+ super().__init__(*args, **kwargs)
+ self._pool._network_backend = _NetworkBackend(
+ resolver, local_port, bootstrap_address, family
+ )
+
+else:
+ _HTTPTransport = dns._asyncbackend.NullTransport # type: ignore
+
+
+class Backend(dns._asyncbackend.Backend):
+ def name(self):
+ return "asyncio"
+
+ async def make_socket(
+ self,
+ af,
+ socktype,
+ proto=0,
+ source=None,
+ destination=None,
+ timeout=None,
+ ssl_context=None,
+ server_hostname=None,
+ ):
+ loop = _get_running_loop()
+ if socktype == socket.SOCK_DGRAM:
+ if _is_win32 and source is None:
+ # Win32 wants explicit binding before recvfrom(). This is the
+ # proper fix for [#637].
+ source = (dns.inet.any_for_af(af), 0)
+ transport, protocol = await loop.create_datagram_endpoint(
+ _DatagramProtocol, # pyright: ignore
+ source,
+ family=af,
+ proto=proto,
+ remote_addr=destination,
+ )
+ return DatagramSocket(af, transport, protocol)
+ elif socktype == socket.SOCK_STREAM:
+ if destination is None:
+ # This shouldn't happen, but we check to make code analysis software
+ # happier.
+ raise ValueError("destination required for stream sockets")
+ (r, w) = await _maybe_wait_for(
+ asyncio.open_connection(
+ destination[0],
+ destination[1],
+ ssl=ssl_context,
+ family=af,
+ proto=proto,
+ local_addr=source,
+ server_hostname=server_hostname,
+ ),
+ timeout,
+ )
+ return StreamSocket(af, r, w)
+        raise NotImplementedError(
+            f"unsupported socket type {socktype}"
+        )  # pragma: no cover
+
+ async def sleep(self, interval):
+ await asyncio.sleep(interval)
+
+ def datagram_connection_required(self):
+ return False
+
+ def get_transport_class(self):
+ return _HTTPTransport
+
+ async def wait_for(self, awaitable, timeout):
+ return await _maybe_wait_for(awaitable, timeout)
diff --git a/tapdown/lib/python3.11/site-packages/dns/_ddr.py b/tapdown/lib/python3.11/site-packages/dns/_ddr.py
new file mode 100644
index 0000000..bf5c11e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_ddr.py
@@ -0,0 +1,154 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+#
+# Support for Discovery of Designated Resolvers
+
+import socket
+import time
+from urllib.parse import urlparse
+
+import dns.asyncbackend
+import dns.inet
+import dns.name
+import dns.nameserver
+import dns.query
+import dns.rdtypes.svcbbase
+
+# The special name of the local resolver when using DDR
+_local_resolver_name = dns.name.from_text("_dns.resolver.arpa")
+
+
+#
+# Processing is split up into I/O independent and I/O dependent parts to
+# make supporting sync and async versions easy.
+#
+
+
+class _SVCBInfo:
+ def __init__(self, bootstrap_address, port, hostname, nameservers):
+ self.bootstrap_address = bootstrap_address
+ self.port = port
+ self.hostname = hostname
+ self.nameservers = nameservers
+
+ def ddr_check_certificate(self, cert):
+ """Verify that the _SVCBInfo's address is in the cert's subjectAltName (SAN)"""
+ for name, value in cert["subjectAltName"]:
+ if name == "IP Address" and value == self.bootstrap_address:
+ return True
+ return False
+
+ def make_tls_context(self):
+ ssl = dns.query.ssl
+ ctx = ssl.create_default_context()
+ ctx.minimum_version = ssl.TLSVersion.TLSv1_2
+ return ctx
+
+ def ddr_tls_check_sync(self, lifetime):
+ ctx = self.make_tls_context()
+ expiration = time.time() + lifetime
+ with socket.create_connection(
+ (self.bootstrap_address, self.port), lifetime
+ ) as s:
+ with ctx.wrap_socket(s, server_hostname=self.hostname) as ts:
+ ts.settimeout(dns.query._remaining(expiration))
+ ts.do_handshake()
+ cert = ts.getpeercert()
+ return self.ddr_check_certificate(cert)
+
+ async def ddr_tls_check_async(self, lifetime, backend=None):
+ if backend is None:
+ backend = dns.asyncbackend.get_default_backend()
+ ctx = self.make_tls_context()
+ expiration = time.time() + lifetime
+ async with await backend.make_socket(
+ dns.inet.af_for_address(self.bootstrap_address),
+ socket.SOCK_STREAM,
+ 0,
+ None,
+ (self.bootstrap_address, self.port),
+ lifetime,
+ ctx,
+ self.hostname,
+ ) as ts:
+ cert = await ts.getpeercert(dns.query._remaining(expiration))
+ return self.ddr_check_certificate(cert)
+
+
+def _extract_nameservers_from_svcb(answer):
+ bootstrap_address = answer.nameserver
+ if not dns.inet.is_address(bootstrap_address):
+ return []
+ infos = []
+ for rr in answer.rrset.processing_order():
+ nameservers = []
+ param = rr.params.get(dns.rdtypes.svcbbase.ParamKey.ALPN)
+ if param is None:
+ continue
+ alpns = set(param.ids)
+ host = rr.target.to_text(omit_final_dot=True)
+ port = None
+ param = rr.params.get(dns.rdtypes.svcbbase.ParamKey.PORT)
+ if param is not None:
+ port = param.port
+ # For now we ignore address hints and address resolution and always use the
+ # bootstrap address
+ if b"h2" in alpns:
+ param = rr.params.get(dns.rdtypes.svcbbase.ParamKey.DOHPATH)
+ if param is None or not param.value.endswith(b"{?dns}"):
+ continue
+ path = param.value[:-6].decode()
+ if not path.startswith("/"):
+ path = "/" + path
+ if port is None:
+ port = 443
+ url = f"https://{host}:{port}{path}"
+ # check the URL
+ try:
+ urlparse(url)
+ nameservers.append(dns.nameserver.DoHNameserver(url, bootstrap_address))
+ except Exception:
+ # continue processing other ALPN types
+ pass
+ if b"dot" in alpns:
+ if port is None:
+ port = 853
+ nameservers.append(
+ dns.nameserver.DoTNameserver(bootstrap_address, port, host)
+ )
+ if b"doq" in alpns:
+ if port is None:
+ port = 853
+ nameservers.append(
+ dns.nameserver.DoQNameserver(bootstrap_address, port, True, host)
+ )
+ if len(nameservers) > 0:
+ infos.append(_SVCBInfo(bootstrap_address, port, host, nameservers))
+ return infos
+
+
+def _get_nameservers_sync(answer, lifetime):
+ """Return a list of TLS-validated resolver nameservers extracted from an SVCB
+ answer."""
+ nameservers = []
+ infos = _extract_nameservers_from_svcb(answer)
+ for info in infos:
+ try:
+ if info.ddr_tls_check_sync(lifetime):
+ nameservers.extend(info.nameservers)
+ except Exception:
+ pass
+ return nameservers
+
+
+async def _get_nameservers_async(answer, lifetime):
+ """Return a list of TLS-validated resolver nameservers extracted from an SVCB
+ answer."""
+ nameservers = []
+ infos = _extract_nameservers_from_svcb(answer)
+ for info in infos:
+ try:
+ if await info.ddr_tls_check_async(lifetime):
+ nameservers.extend(info.nameservers)
+ except Exception:
+ pass
+ return nameservers
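
In practice this module is driven through the resolvers' `try_ddr()` methods
rather than called directly; a minimal async sketch, assuming the configured
nameserver actually advertises designated resolvers:

```python
import asyncio

import dns.asyncresolver


async def main():
    resolver = dns.asyncresolver.Resolver()
    await resolver.try_ddr(lifetime=5.0)
    # On success the nameserver list now holds DoH/DoT/DoQ nameserver objects;
    # on failure try_ddr() silently leaves the configuration unchanged.
    print(resolver.nameservers)


asyncio.run(main())
```
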
diff --git a/tapdown/lib/python3.11/site-packages/dns/_features.py b/tapdown/lib/python3.11/site-packages/dns/_features.py
new file mode 100644
index 0000000..65a9a2a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_features.py
@@ -0,0 +1,95 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import importlib.metadata
+import itertools
+import string
+from typing import Dict, List, Tuple
+
+
+def _tuple_from_text(version: str) -> Tuple:
+ text_parts = version.split(".")
+ int_parts = []
+ for text_part in text_parts:
+ digit_prefix = "".join(
+ itertools.takewhile(lambda x: x in string.digits, text_part)
+ )
+ try:
+ int_parts.append(int(digit_prefix))
+ except Exception:
+ break
+ return tuple(int_parts)
+
+
+def _version_check(
+ requirement: str,
+) -> bool:
+ """Is the requirement fulfilled?
+
+ The requirement must be of the form
+
+ package>=version
+ """
+ package, minimum = requirement.split(">=")
+ try:
+ version = importlib.metadata.version(package)
+ # This shouldn't happen, but it apparently can.
+ if version is None:
+ return False
+ except Exception:
+ return False
+ t_version = _tuple_from_text(version)
+ t_minimum = _tuple_from_text(minimum)
+ if t_version < t_minimum:
+ return False
+ return True
+
+
+_cache: Dict[str, bool] = {}
+
+
+def have(feature: str) -> bool:
+ """Is *feature* available?
+
+ This tests if all optional packages needed for the
+ feature are available and recent enough.
+
+ Returns ``True`` if the feature is available,
+ and ``False`` if it is not or if metadata is
+ missing.
+ """
+ value = _cache.get(feature)
+ if value is not None:
+ return value
+ requirements = _requirements.get(feature)
+ if requirements is None:
+ # we make a cache entry here for consistency, not performance
+ _cache[feature] = False
+ return False
+ ok = True
+ for requirement in requirements:
+ if not _version_check(requirement):
+ ok = False
+ break
+ _cache[feature] = ok
+ return ok
+
+
+def force(feature: str, enabled: bool) -> None:
+ """Force the status of *feature* to be *enabled*.
+
+ This method is provided as a workaround for any cases
+ where importlib.metadata is ineffective, or for testing.
+ """
+ _cache[feature] = enabled
+
+
+_requirements: Dict[str, List[str]] = {
+ ### BEGIN generated requirements
+ "dnssec": ["cryptography>=45"],
+ "doh": ["httpcore>=1.0.0", "httpx>=0.28.0", "h2>=4.2.0"],
+ "doq": ["aioquic>=1.2.0"],
+ "idna": ["idna>=3.10"],
+ "trio": ["trio>=0.30"],
+ "wmi": ["wmi>=1.5.1"],
+ ### END generated requirements
+}
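
A short sketch of the feature-gating API defined above:

```python
import dns._features

# have() is True only if every requirement for the feature is installed and
# recent enough; the answer is cached per feature name.
if dns._features.have("doh"):
    print("DNS-over-HTTPS dependencies are available")

# force() overrides the cache, e.g. to simulate a missing dependency in tests.
dns._features.force("doh", False)
assert not dns._features.have("doh")
```
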
diff --git a/tapdown/lib/python3.11/site-packages/dns/_immutable_ctx.py b/tapdown/lib/python3.11/site-packages/dns/_immutable_ctx.py
new file mode 100644
index 0000000..b3d72de
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_immutable_ctx.py
@@ -0,0 +1,76 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# This implementation of the immutable decorator requires python >=
+# 3.7, and is significantly more storage efficient when making classes
+# with slots immutable. It's also faster.
+
+import contextvars
+import inspect
+
+_in__init__ = contextvars.ContextVar("_immutable_in__init__", default=False)
+
+
+class _Immutable:
+ """Immutable mixin class"""
+
+ # We set __slots__ to the empty tuple to say "we don't have any attributes".
+ # We do this so that if we're mixed in with a class with __slots__, we
+ # don't cause a __dict__ to be added which would waste space.
+
+ __slots__ = ()
+
+ def __setattr__(self, name, value):
+ if _in__init__.get() is not self:
+ raise TypeError("object doesn't support attribute assignment")
+ else:
+ super().__setattr__(name, value)
+
+ def __delattr__(self, name):
+ if _in__init__.get() is not self:
+ raise TypeError("object doesn't support attribute assignment")
+ else:
+ super().__delattr__(name)
+
+
+def _immutable_init(f):
+ def nf(*args, **kwargs):
+ previous = _in__init__.set(args[0])
+ try:
+ # call the actual __init__
+ f(*args, **kwargs)
+ finally:
+ _in__init__.reset(previous)
+
+ nf.__signature__ = inspect.signature(f) # pyright: ignore
+ return nf
+
+
+def immutable(cls):
+ if _Immutable in cls.__mro__:
+ # Some ancestor already has the mixin, so just make sure we keep
+ # following the __init__ protocol.
+ cls.__init__ = _immutable_init(cls.__init__)
+ if hasattr(cls, "__setstate__"):
+ cls.__setstate__ = _immutable_init(cls.__setstate__)
+ ncls = cls
+ else:
+ # Mixin the Immutable class and follow the __init__ protocol.
+ class ncls(_Immutable, cls):
+ # We have to do the __slots__ declaration here too!
+ __slots__ = ()
+
+ @_immutable_init
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ if hasattr(cls, "__setstate__"):
+
+ @_immutable_init
+ def __setstate__(self, *args, **kwargs):
+ super().__setstate__(*args, **kwargs)
+
+ # make ncls have the same name and module as cls
+ ncls.__name__ = cls.__name__
+ ncls.__qualname__ = cls.__qualname__
+ ncls.__module__ = cls.__module__
+ return ncls
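
A minimal sketch of the decorator in use (the `Point` class is hypothetical):

```python
import dns._immutable_ctx


@dns._immutable_ctx.immutable
class Point:
    __slots__ = ("x", "y")

    def __init__(self, x, y):
        # Assignment is allowed here: the wrapped __init__ has set the
        # context variable to this instance.
        self.x = x
        self.y = y


p = Point(1, 2)
try:
    p.x = 10  # rejected once __init__ has returned
except TypeError as e:
    print(e)  # object doesn't support attribute assignment
```
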
diff --git a/tapdown/lib/python3.11/site-packages/dns/_no_ssl.py b/tapdown/lib/python3.11/site-packages/dns/_no_ssl.py
new file mode 100644
index 0000000..edb452d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_no_ssl.py
@@ -0,0 +1,61 @@
+import enum
+from typing import Any
+
+CERT_NONE = 0
+
+
+class TLSVersion(enum.IntEnum):
+ TLSv1_2 = 12
+
+
+class WantReadException(Exception):
+ pass
+
+
+class WantWriteException(Exception):
+ pass
+
+
+class SSLWantReadError(Exception):
+ pass
+
+
+class SSLWantWriteError(Exception):
+ pass
+
+
+class SSLContext:
+ def __init__(self) -> None:
+ self.minimum_version: Any = TLSVersion.TLSv1_2
+ self.check_hostname: bool = False
+ self.verify_mode: int = CERT_NONE
+
+ def wrap_socket(self, *args, **kwargs) -> "SSLSocket": # type: ignore
+ raise Exception("no ssl support") # pylint: disable=broad-exception-raised
+
+ def set_alpn_protocols(self, *args, **kwargs): # type: ignore
+ raise Exception("no ssl support") # pylint: disable=broad-exception-raised
+
+
+class SSLSocket:
+ def pending(self) -> bool:
+ raise Exception("no ssl support") # pylint: disable=broad-exception-raised
+
+ def do_handshake(self) -> None:
+ raise Exception("no ssl support") # pylint: disable=broad-exception-raised
+
+ def settimeout(self, value: Any) -> None:
+ pass
+
+ def getpeercert(self) -> Any:
+ raise Exception("no ssl support") # pylint: disable=broad-exception-raised
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ return False
+
+
+def create_default_context(*args, **kwargs) -> SSLContext: # type: ignore
+ raise Exception("no ssl support") # pylint: disable=broad-exception-raised
diff --git a/tapdown/lib/python3.11/site-packages/dns/_tls_util.py b/tapdown/lib/python3.11/site-packages/dns/_tls_util.py
new file mode 100644
index 0000000..10ddf72
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_tls_util.py
@@ -0,0 +1,19 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import os
+from typing import Tuple
+
+
+def convert_verify_to_cafile_and_capath(
+ verify: bool | str,
+) -> Tuple[str | None, str | None]:
+ cafile: str | None = None
+ capath: str | None = None
+ if isinstance(verify, str):
+ if os.path.isfile(verify):
+ cafile = verify
+ elif os.path.isdir(verify):
+ capath = verify
+ else:
+ raise ValueError("invalid verify string")
+ return cafile, capath
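
A quick sketch of the helper's behavior (the temporary file stands in for a
CA bundle path):

```python
import tempfile

import dns._tls_util

# Booleans pass through with no CA override at all.
assert dns._tls_util.convert_verify_to_cafile_and_capath(True) == (None, None)

# A string is classified as a file (cafile) or a directory (capath).
with tempfile.NamedTemporaryFile() as f:
    cafile, capath = dns._tls_util.convert_verify_to_cafile_and_capath(f.name)
    assert cafile == f.name and capath is None
```
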
diff --git a/tapdown/lib/python3.11/site-packages/dns/_trio_backend.py b/tapdown/lib/python3.11/site-packages/dns/_trio_backend.py
new file mode 100644
index 0000000..bde7e8b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/_trio_backend.py
@@ -0,0 +1,255 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+"""trio async I/O library query support"""
+
+import socket
+
+import trio
+import trio.socket # type: ignore
+
+import dns._asyncbackend
+import dns._features
+import dns.exception
+import dns.inet
+
+if not dns._features.have("trio"):
+ raise ImportError("trio not found or too old")
+
+
+def _maybe_timeout(timeout):
+ if timeout is not None:
+ return trio.move_on_after(timeout)
+ else:
+ return dns._asyncbackend.NullContext()
+
+
+# for brevity
+_lltuple = dns.inet.low_level_address_tuple
+
+# pylint: disable=redefined-outer-name
+
+
+class DatagramSocket(dns._asyncbackend.DatagramSocket):
+ def __init__(self, sock):
+ super().__init__(sock.family, socket.SOCK_DGRAM)
+ self.socket = sock
+
+ async def sendto(self, what, destination, timeout):
+ with _maybe_timeout(timeout):
+ if destination is None:
+ return await self.socket.send(what)
+ else:
+ return await self.socket.sendto(what, destination)
+ raise dns.exception.Timeout(
+ timeout=timeout
+ ) # pragma: no cover lgtm[py/unreachable-statement]
+
+ async def recvfrom(self, size, timeout):
+ with _maybe_timeout(timeout):
+ return await self.socket.recvfrom(size)
+ raise dns.exception.Timeout(timeout=timeout) # lgtm[py/unreachable-statement]
+
+ async def close(self):
+ self.socket.close()
+
+ async def getpeername(self):
+ return self.socket.getpeername()
+
+ async def getsockname(self):
+ return self.socket.getsockname()
+
+ async def getpeercert(self, timeout):
+ raise NotImplementedError
+
+
+class StreamSocket(dns._asyncbackend.StreamSocket):
+ def __init__(self, family, stream, tls=False):
+ super().__init__(family, socket.SOCK_STREAM)
+ self.stream = stream
+ self.tls = tls
+
+ async def sendall(self, what, timeout):
+ with _maybe_timeout(timeout):
+ return await self.stream.send_all(what)
+ raise dns.exception.Timeout(timeout=timeout) # lgtm[py/unreachable-statement]
+
+ async def recv(self, size, timeout):
+ with _maybe_timeout(timeout):
+ return await self.stream.receive_some(size)
+ raise dns.exception.Timeout(timeout=timeout) # lgtm[py/unreachable-statement]
+
+ async def close(self):
+ await self.stream.aclose()
+
+ async def getpeername(self):
+ if self.tls:
+ return self.stream.transport_stream.socket.getpeername()
+ else:
+ return self.stream.socket.getpeername()
+
+ async def getsockname(self):
+ if self.tls:
+ return self.stream.transport_stream.socket.getsockname()
+ else:
+ return self.stream.socket.getsockname()
+
+ async def getpeercert(self, timeout):
+ if self.tls:
+ with _maybe_timeout(timeout):
+ await self.stream.do_handshake()
+ return self.stream.getpeercert()
+ else:
+ raise NotImplementedError
+
+
+if dns._features.have("doh"):
+ import httpcore
+ import httpcore._backends.trio
+ import httpx
+
+ _CoreAsyncNetworkBackend = httpcore.AsyncNetworkBackend
+ _CoreTrioStream = httpcore._backends.trio.TrioStream
+
+ from dns.query import _compute_times, _expiration_for_this_attempt, _remaining
+
+ class _NetworkBackend(_CoreAsyncNetworkBackend):
+ def __init__(self, resolver, local_port, bootstrap_address, family):
+ super().__init__()
+ self._local_port = local_port
+ self._resolver = resolver
+ self._bootstrap_address = bootstrap_address
+ self._family = family
+
+ async def connect_tcp(
+ self, host, port, timeout=None, local_address=None, socket_options=None
+ ): # pylint: disable=signature-differs
+ addresses = []
+ _, expiration = _compute_times(timeout)
+ if dns.inet.is_address(host):
+ addresses.append(host)
+ elif self._bootstrap_address is not None:
+ addresses.append(self._bootstrap_address)
+ else:
+ timeout = _remaining(expiration)
+ family = self._family
+ if local_address:
+ family = dns.inet.af_for_address(local_address)
+ answers = await self._resolver.resolve_name(
+ host, family=family, lifetime=timeout
+ )
+ addresses = answers.addresses()
+ for address in addresses:
+ try:
+ af = dns.inet.af_for_address(address)
+ if local_address is not None or self._local_port != 0:
+ source = (local_address, self._local_port)
+ else:
+ source = None
+ destination = (address, port)
+ attempt_expiration = _expiration_for_this_attempt(2.0, expiration)
+ timeout = _remaining(attempt_expiration)
+ sock = await Backend().make_socket(
+ af, socket.SOCK_STREAM, 0, source, destination, timeout
+ )
+ assert isinstance(sock, StreamSocket)
+ return _CoreTrioStream(sock.stream)
+ except Exception:
+ continue
+ raise httpcore.ConnectError
+
+ async def connect_unix_socket(
+ self, path, timeout=None, socket_options=None
+ ): # pylint: disable=signature-differs
+ raise NotImplementedError
+
+ async def sleep(self, seconds): # pylint: disable=signature-differs
+ await trio.sleep(seconds)
+
+ class _HTTPTransport(httpx.AsyncHTTPTransport):
+ def __init__(
+ self,
+ *args,
+ local_port=0,
+ bootstrap_address=None,
+ resolver=None,
+ family=socket.AF_UNSPEC,
+ **kwargs,
+ ):
+ if resolver is None and bootstrap_address is None:
+ # pylint: disable=import-outside-toplevel,redefined-outer-name
+ import dns.asyncresolver
+
+ resolver = dns.asyncresolver.Resolver()
+ super().__init__(*args, **kwargs)
+ self._pool._network_backend = _NetworkBackend(
+ resolver, local_port, bootstrap_address, family
+ )
+
+else:
+ _HTTPTransport = dns._asyncbackend.NullTransport # type: ignore
+
+
+class Backend(dns._asyncbackend.Backend):
+ def name(self):
+ return "trio"
+
+ async def make_socket(
+ self,
+ af,
+ socktype,
+ proto=0,
+ source=None,
+ destination=None,
+ timeout=None,
+ ssl_context=None,
+ server_hostname=None,
+ ):
+ s = trio.socket.socket(af, socktype, proto)
+ stream = None
+ try:
+ if source:
+ await s.bind(_lltuple(source, af))
+ if socktype == socket.SOCK_STREAM or destination is not None:
+ connected = False
+ with _maybe_timeout(timeout):
+ assert destination is not None
+ await s.connect(_lltuple(destination, af))
+ connected = True
+ if not connected:
+ raise dns.exception.Timeout(
+ timeout=timeout
+ ) # lgtm[py/unreachable-statement]
+ except Exception: # pragma: no cover
+ s.close()
+ raise
+ if socktype == socket.SOCK_DGRAM:
+ return DatagramSocket(s)
+ elif socktype == socket.SOCK_STREAM:
+ stream = trio.SocketStream(s)
+ tls = False
+ if ssl_context:
+ tls = True
+ try:
+ stream = trio.SSLStream(
+ stream, ssl_context, server_hostname=server_hostname
+ )
+ except Exception: # pragma: no cover
+ await stream.aclose()
+ raise
+ return StreamSocket(af, stream, tls)
+ raise NotImplementedError(f"unsupported socket type {socktype}") # pragma: no cover
+
+ async def sleep(self, interval):
+ await trio.sleep(interval)
+
+ def get_transport_class(self):
+ return _HTTPTransport
+
+ async def wait_for(self, awaitable, timeout):
+ with _maybe_timeout(timeout):
+ return await awaitable
+ raise dns.exception.Timeout(
+ timeout=timeout
+ ) # pragma: no cover lgtm[py/unreachable-statement]
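
A minimal sketch of issuing a query on this backend under trio; `192.0.2.1`
is a placeholder resolver address:

```python
import trio

import dns.asyncbackend
import dns.asyncquery
import dns.message


async def main():
    backend = dns.asyncbackend.get_backend("trio")
    q = dns.message.make_query("example.com", "A")
    r = await dns.asyncquery.udp(q, "192.0.2.1", timeout=2.0, backend=backend)
    print(r)


trio.run(main)
```
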
diff --git a/tapdown/lib/python3.11/site-packages/dns/asyncbackend.py b/tapdown/lib/python3.11/site-packages/dns/asyncbackend.py
new file mode 100644
index 0000000..0ec58b0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/asyncbackend.py
@@ -0,0 +1,101 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+from typing import Dict
+
+import dns.exception
+
+# pylint: disable=unused-import
+from dns._asyncbackend import ( # noqa: F401 lgtm[py/unused-import]
+ Backend,
+ DatagramSocket,
+ Socket,
+ StreamSocket,
+)
+
+# pylint: enable=unused-import
+
+_default_backend = None
+
+_backends: Dict[str, Backend] = {}
+
+# Allow sniffio import to be disabled for testing purposes
+_no_sniffio = False
+
+
+class AsyncLibraryNotFoundError(dns.exception.DNSException):
+ pass
+
+
+def get_backend(name: str) -> Backend:
+ """Get the specified asynchronous backend.
+
+ *name*, a ``str``, the name of the backend. Currently the "trio"
+ and "asyncio" backends are available.
+
+ Raises NotImplementedError if an unknown backend name is specified.
+ """
+ # pylint: disable=import-outside-toplevel,redefined-outer-name
+ backend = _backends.get(name)
+ if backend:
+ return backend
+ if name == "trio":
+ import dns._trio_backend
+
+ backend = dns._trio_backend.Backend()
+ elif name == "asyncio":
+ import dns._asyncio_backend
+
+ backend = dns._asyncio_backend.Backend()
+ else:
+ raise NotImplementedError(f"unimplemented async backend {name}")
+ _backends[name] = backend
+ return backend
+
+
+def sniff() -> str:
+ """Attempt to determine the in-use asynchronous I/O library by using
+ the ``sniffio`` module if it is available.
+
+ Returns the name of the library, or raises AsyncLibraryNotFoundError
+ if the library cannot be determined.
+ """
+ # pylint: disable=import-outside-toplevel
+ try:
+ if _no_sniffio:
+ raise ImportError
+ import sniffio
+
+ try:
+ return sniffio.current_async_library()
+ except sniffio.AsyncLibraryNotFoundError:
+ raise AsyncLibraryNotFoundError("sniffio cannot determine async library")
+ except ImportError:
+ import asyncio
+
+ try:
+ asyncio.get_running_loop()
+ return "asyncio"
+ except RuntimeError:
+ raise AsyncLibraryNotFoundError("no async library detected")
+
+
+def get_default_backend() -> Backend:
+ """Get the default backend, initializing it if necessary."""
+ if _default_backend:
+ return _default_backend
+
+ return set_default_backend(sniff())
+
+
+def set_default_backend(name: str) -> Backend:
+ """Set the default backend.
+
+ It's not normally necessary to call this method, as
+ ``get_default_backend()`` will initialize the backend
+ appropriately in many cases. If ``sniffio`` is not installed, or
+ in testing situations, this function allows the backend to be set
+ explicitly.
+ """
+ global _default_backend
+ _default_backend = get_backend(name)
+ return _default_backend
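
A short sketch of backend selection with this module:

```python
import asyncio

import dns.asyncbackend


async def main():
    print(dns.asyncbackend.sniff())  # "asyncio" inside asyncio.run()
    print(dns.asyncbackend.get_default_backend().name())


asyncio.run(main())

# Outside any event loop sniff() raises AsyncLibraryNotFoundError, so the
# default can instead be pinned explicitly:
dns.asyncbackend.set_default_backend("asyncio")
```
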
diff --git a/tapdown/lib/python3.11/site-packages/dns/asyncquery.py b/tapdown/lib/python3.11/site-packages/dns/asyncquery.py
new file mode 100644
index 0000000..bb77045
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/asyncquery.py
@@ -0,0 +1,953 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Talk to a DNS server."""
+
+import base64
+import contextlib
+import random
+import socket
+import struct
+import time
+import urllib.parse
+from typing import Any, Dict, Optional, Tuple, cast
+
+import dns.asyncbackend
+import dns.exception
+import dns.inet
+import dns.message
+import dns.name
+import dns.quic
+import dns.rdatatype
+import dns.transaction
+import dns.tsig
+import dns.xfr
+from dns._asyncbackend import NullContext
+from dns.query import (
+ BadResponse,
+ HTTPVersion,
+ NoDOH,
+ NoDOQ,
+ UDPMode,
+ _check_status,
+ _compute_times,
+ _matches_destination,
+ _remaining,
+ have_doh,
+ make_ssl_context,
+)
+
+try:
+ import ssl
+except ImportError:
+ import dns._no_ssl as ssl # type: ignore
+
+if have_doh:
+ import httpx
+
+# for brevity
+_lltuple = dns.inet.low_level_address_tuple
+
+
+def _source_tuple(af, address, port):
+ # Make a high-level source tuple, or return None if address and port
+ # are both None.
+ if address or port:
+ if address is None:
+ if af == socket.AF_INET:
+ address = "0.0.0.0"
+ elif af == socket.AF_INET6:
+ address = "::"
+ else:
+ raise NotImplementedError(f"unknown address family {af}")
+ return (address, port)
+ else:
+ return None
+
+
+def _timeout(expiration, now=None):
+ if expiration is not None:
+ if not now:
+ now = time.time()
+ return max(expiration - now, 0)
+ else:
+ return None
+
+
+async def send_udp(
+ sock: dns.asyncbackend.DatagramSocket,
+ what: dns.message.Message | bytes,
+ destination: Any,
+ expiration: float | None = None,
+) -> Tuple[int, float]:
+ """Send a DNS message to the specified UDP socket.
+
+ *sock*, a ``dns.asyncbackend.DatagramSocket``.
+
+ *what*, a ``bytes`` or ``dns.message.Message``, the message to send.
+
+ *destination*, a destination tuple appropriate for the address family
+ of the socket, specifying where to send the query.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur. The expiration value is meaningless for the asyncio backend, as
+ asyncio's transport sendto() never blocks.
+
+ Returns an ``(int, float)`` tuple of bytes sent and the sent time.
+ """
+
+ if isinstance(what, dns.message.Message):
+ what = what.to_wire()
+ sent_time = time.time()
+ n = await sock.sendto(what, destination, _timeout(expiration, sent_time))
+ return (n, sent_time)
+
+
+async def receive_udp(
+ sock: dns.asyncbackend.DatagramSocket,
+ destination: Any | None = None,
+ expiration: float | None = None,
+ ignore_unexpected: bool = False,
+ one_rr_per_rrset: bool = False,
+ keyring: Dict[dns.name.Name, dns.tsig.Key] | None = None,
+ request_mac: bytes | None = b"",
+ ignore_trailing: bool = False,
+ raise_on_truncation: bool = False,
+ ignore_errors: bool = False,
+ query: dns.message.Message | None = None,
+) -> Any:
+ """Read a DNS message from a UDP socket.
+
+ *sock*, a ``dns.asyncbackend.DatagramSocket``.
+
+ See :py:func:`dns.query.receive_udp()` for the documentation of the other
+ parameters and exceptions.
+
+ Returns a ``(dns.message.Message, float, tuple)`` tuple of the received message, the
+ received time, and the address where the message arrived from.
+ """
+
+ wire = b""
+ while True:
+ (wire, from_address) = await sock.recvfrom(65535, _timeout(expiration))
+ if not _matches_destination(
+ sock.family, from_address, destination, ignore_unexpected
+ ):
+ continue
+ received_time = time.time()
+ try:
+ r = dns.message.from_wire(
+ wire,
+ keyring=keyring,
+ request_mac=request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ raise_on_truncation=raise_on_truncation,
+ )
+ except dns.message.Truncated as e:
+ # See the comment in query.py for details.
+ if (
+ ignore_errors
+ and query is not None
+ and not query.is_response(e.message())
+ ):
+ continue
+ else:
+ raise
+ except Exception:
+ if ignore_errors:
+ continue
+ else:
+ raise
+ if ignore_errors and query is not None and not query.is_response(r):
+ continue
+ return (r, received_time, from_address)
+
+
+async def udp(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 53,
+ source: str | None = None,
+ source_port: int = 0,
+ ignore_unexpected: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ raise_on_truncation: bool = False,
+ sock: dns.asyncbackend.DatagramSocket | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+ ignore_errors: bool = False,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via UDP.
+
+ *sock*, a ``dns.asyncbackend.DatagramSocket``, or ``None``,
+ the socket to use for the query. If ``None``, the default, a
+ socket is created. Note that if a socket is provided, the
+ *source*, *source_port*, and *backend* are ignored.
+
+ *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
+ the default, then dnspython will use the default backend.
+
+ See :py:func:`dns.query.udp()` for the documentation of the other
+ parameters, exceptions, and return type of this method.
+ """
+ wire = q.to_wire()
+ (begin_time, expiration) = _compute_times(timeout)
+ af = dns.inet.af_for_address(where)
+ destination = _lltuple((where, port), af)
+ if sock:
+ cm: contextlib.AbstractAsyncContextManager = NullContext(sock)
+ else:
+ if not backend:
+ backend = dns.asyncbackend.get_default_backend()
+ stuple = _source_tuple(af, source, source_port)
+ if backend.datagram_connection_required():
+ dtuple = (where, port)
+ else:
+ dtuple = None
+ cm = await backend.make_socket(af, socket.SOCK_DGRAM, 0, stuple, dtuple)
+ async with cm as s:
+ await send_udp(s, wire, destination, expiration) # pyright: ignore
+ (r, received_time, _) = await receive_udp(
+ s, # pyright: ignore
+ destination,
+ expiration,
+ ignore_unexpected,
+ one_rr_per_rrset,
+ q.keyring,
+ q.mac,
+ ignore_trailing,
+ raise_on_truncation,
+ ignore_errors,
+ q,
+ )
+ r.time = received_time - begin_time
+ # We don't need to check q.is_response() if we are in ignore_errors mode
+ # as receive_udp() will have checked it.
+ if not (ignore_errors or q.is_response(r)):
+ raise BadResponse
+ return r
+
+
+async def udp_with_fallback(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 53,
+ source: str | None = None,
+ source_port: int = 0,
+ ignore_unexpected: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ udp_sock: dns.asyncbackend.DatagramSocket | None = None,
+ tcp_sock: dns.asyncbackend.StreamSocket | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+ ignore_errors: bool = False,
+) -> Tuple[dns.message.Message, bool]:
+ """Return the response to the query, trying UDP first and falling back
+ to TCP if UDP results in a truncated response.
+
+ *udp_sock*, a ``dns.asyncbackend.DatagramSocket``, or ``None``,
+ the socket to use for the UDP query. If ``None``, the default, a
+ socket is created. Note that if a socket is provided the *source*,
+ *source_port*, and *backend* are ignored for the UDP query.
+
+ *tcp_sock*, a ``dns.asyncbackend.StreamSocket``, or ``None``, the
+ socket to use for the TCP query. If ``None``, the default, a
+ socket is created. Note that if a socket is provided *where*,
+ *source*, *source_port*, and *backend* are ignored for the TCP query.
+
+ *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
+ the default, then dnspython will use the default backend.
+
+ See :py:func:`dns.query.udp_with_fallback()` for the documentation
+ of the other parameters, exceptions, and return type of this
+ method.
+ """
+ try:
+ response = await udp(
+ q,
+ where,
+ timeout,
+ port,
+ source,
+ source_port,
+ ignore_unexpected,
+ one_rr_per_rrset,
+ ignore_trailing,
+ True,
+ udp_sock,
+ backend,
+ ignore_errors,
+ )
+ return (response, False)
+ except dns.message.Truncated:
+ response = await tcp(
+ q,
+ where,
+ timeout,
+ port,
+ source,
+ source_port,
+ one_rr_per_rrset,
+ ignore_trailing,
+ tcp_sock,
+ backend,
+ )
+ return (response, True)
+
+
+async def send_tcp(
+ sock: dns.asyncbackend.StreamSocket,
+ what: dns.message.Message | bytes,
+ expiration: float | None = None,
+) -> Tuple[int, float]:
+ """Send a DNS message to the specified TCP socket.
+
+ *sock*, a ``dns.asyncbackend.StreamSocket``.
+
+ See :py:func:`dns.query.send_tcp()` for the documentation of the other
+ parameters, exceptions, and return type of this method.
+ """
+
+ if isinstance(what, dns.message.Message):
+ tcpmsg = what.to_wire(prepend_length=True)
+ else:
+ # copying the wire into tcpmsg is inefficient, but lets us
+ # avoid writev() or doing a short write that would get pushed
+ # onto the net
+ tcpmsg = len(what).to_bytes(2, "big") + what
+ sent_time = time.time()
+ await sock.sendall(tcpmsg, _timeout(expiration, sent_time))
+ return (len(tcpmsg), sent_time)
+
+
+async def _read_exactly(sock, count, expiration):
+ """Read the specified number of bytes from stream. Keep trying until we
+ either get the desired amount, or we hit EOF.
+ """
+ s = b""
+ while count > 0:
+ n = await sock.recv(count, _timeout(expiration))
+ if n == b"":
+ raise EOFError("EOF")
+ count = count - len(n)
+ s = s + n
+ return s
+
+
+async def receive_tcp(
+ sock: dns.asyncbackend.StreamSocket,
+ expiration: float | None = None,
+ one_rr_per_rrset: bool = False,
+ keyring: Dict[dns.name.Name, dns.tsig.Key] | None = None,
+ request_mac: bytes | None = b"",
+ ignore_trailing: bool = False,
+) -> Tuple[dns.message.Message, float]:
+ """Read a DNS message from a TCP socket.
+
+ *sock*, a ``dns.asyncbackend.StreamSocket``.
+
+ See :py:func:`dns.query.receive_tcp()` for the documentation of the other
+ parameters, exceptions, and return type of this method.
+ """
+
+ ldata = await _read_exactly(sock, 2, expiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = await _read_exactly(sock, l, expiration)
+ received_time = time.time()
+ r = dns.message.from_wire(
+ wire,
+ keyring=keyring,
+ request_mac=request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ return (r, received_time)
+
+
+async def tcp(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 53,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ sock: dns.asyncbackend.StreamSocket | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via TCP.
+
+ *sock*, a ``dns.asyncbackend.StreamSocket``, or ``None``, the
+ socket to use for the query. If ``None``, the default, a socket
+ is created. Note that if a socket is provided
+ *where*, *port*, *source*, *source_port*, and *backend* are ignored.
+
+ *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
+ the default, then dnspython will use the default backend.
+
+ See :py:func:`dns.query.tcp()` for the documentation of the other
+ parameters, exceptions, and return type of this method.
+ """
+
+ wire = q.to_wire()
+ (begin_time, expiration) = _compute_times(timeout)
+ if sock:
+ # Verify that the socket is connected, as if it's not connected,
+ # it's not writable, and the polling in send_tcp() will time out or
+ # hang forever.
+ await sock.getpeername()
+ cm: contextlib.AbstractAsyncContextManager = NullContext(sock)
+ else:
+ # These are simple (address, port) pairs, not family-dependent tuples
+ # you pass to low-level socket code.
+ af = dns.inet.af_for_address(where)
+ stuple = _source_tuple(af, source, source_port)
+ dtuple = (where, port)
+ if not backend:
+ backend = dns.asyncbackend.get_default_backend()
+ cm = await backend.make_socket(
+ af, socket.SOCK_STREAM, 0, stuple, dtuple, timeout
+ )
+ async with cm as s:
+ await send_tcp(s, wire, expiration) # pyright: ignore
+ (r, received_time) = await receive_tcp(
+ s, # pyright: ignore
+ expiration,
+ one_rr_per_rrset,
+ q.keyring,
+ q.mac,
+ ignore_trailing,
+ )
+ r.time = received_time - begin_time
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+async def tls(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 853,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ sock: dns.asyncbackend.StreamSocket | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+ ssl_context: ssl.SSLContext | None = None,
+ server_hostname: str | None = None,
+ verify: bool | str = True,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via TLS.
+
+ *sock*, an ``asyncbackend.StreamSocket``, or ``None``, the socket
+ to use for the query. If ``None``, the default, a socket is
+ created. Note that if a socket is provided, it must be a
+ connected SSL stream socket, and *where*, *port*,
+ *source*, *source_port*, *backend*, *ssl_context*, and *server_hostname*
+ are ignored.
+
+ *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
+ the default, then dnspython will use the default backend.
+
+ See :py:func:`dns.query.tls()` for the documentation of the other
+ parameters, exceptions, and return type of this method.
+ """
+ (begin_time, expiration) = _compute_times(timeout)
+ if sock:
+ cm: contextlib.AbstractAsyncContextManager = NullContext(sock)
+ else:
+ if ssl_context is None:
+ ssl_context = make_ssl_context(verify, server_hostname is not None, ["dot"])
+ af = dns.inet.af_for_address(where)
+ stuple = _source_tuple(af, source, source_port)
+ dtuple = (where, port)
+ if not backend:
+ backend = dns.asyncbackend.get_default_backend()
+ cm = await backend.make_socket(
+ af,
+ socket.SOCK_STREAM,
+ 0,
+ stuple,
+ dtuple,
+ timeout,
+ ssl_context,
+ server_hostname,
+ )
+ async with cm as s:
+ timeout = _timeout(expiration)
+ response = await tcp(
+ q,
+ where,
+ timeout,
+ port,
+ source,
+ source_port,
+ one_rr_per_rrset,
+ ignore_trailing,
+ s,
+ backend,
+ )
+ end_time = time.time()
+ response.time = end_time - begin_time
+ return response
+
+
+def _maybe_get_resolver(
+ resolver: Optional["dns.asyncresolver.Resolver"], # pyright: ignore
+) -> "dns.asyncresolver.Resolver": # pyright: ignore
+ # We need a separate function for this to avoid shadowing the global
+ # module "dns" with the as-yet undefined local variable "dns"
+ # in https().
+ if resolver is None:
+ # pylint: disable=import-outside-toplevel,redefined-outer-name
+ import dns.asyncresolver
+
+ resolver = dns.asyncresolver.Resolver()
+ return resolver
+
+
+async def https(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 443,
+ source: str | None = None,
+ source_port: int = 0, # pylint: disable=W0613
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ client: Optional["httpx.AsyncClient|dns.quic.AsyncQuicConnection"] = None,
+ path: str = "/dns-query",
+ post: bool = True,
+ verify: bool | str | ssl.SSLContext = True,
+ bootstrap_address: str | None = None,
+ resolver: Optional["dns.asyncresolver.Resolver"] = None, # pyright: ignore
+ family: int = socket.AF_UNSPEC,
+ http_version: HTTPVersion = HTTPVersion.DEFAULT,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via DNS-over-HTTPS.
+
+ *client*, a ``httpx.AsyncClient``. If provided, the client to use for
+ the query.
+
+ Unlike the other dnspython async functions, a backend cannot be provided
+ in this function because httpx always auto-detects the async backend.
+
+ See :py:func:`dns.query.https()` for the documentation of the other
+ parameters, exceptions, and return type of this method.
+ """
+
+ try:
+ af = dns.inet.af_for_address(where)
+ except ValueError:
+ af = None
+ # We bind url here and then override it below, as pyright can't see that all paths bind it.
+ url = where
+ if af is not None and dns.inet.is_address(where):
+ if af == socket.AF_INET:
+ url = f"https://{where}:{port}{path}"
+ elif af == socket.AF_INET6:
+ url = f"https://[{where}]:{port}{path}"
+
+ extensions = {}
+ if bootstrap_address is None:
+ # pylint: disable=possibly-used-before-assignment
+ parsed = urllib.parse.urlparse(url)
+ if parsed.hostname is None:
+ raise ValueError("no hostname in URL")
+ if dns.inet.is_address(parsed.hostname):
+ bootstrap_address = parsed.hostname
+ extensions["sni_hostname"] = parsed.hostname
+ if parsed.port is not None:
+ port = parsed.port
+
+ if http_version == HTTPVersion.H3 or (
+ http_version == HTTPVersion.DEFAULT and not have_doh
+ ):
+ if bootstrap_address is None:
+ resolver = _maybe_get_resolver(resolver)
+ assert parsed.hostname is not None # pyright: ignore
+ answers = await resolver.resolve_name( # pyright: ignore
+ parsed.hostname, family # pyright: ignore
+ )
+ bootstrap_address = random.choice(list(answers.addresses()))
+ if client and not isinstance(
+ client, dns.quic.AsyncQuicConnection
+ ): # pyright: ignore
+ raise ValueError("client parameter must be a dns.quic.AsyncQuicConnection.")
+ assert client is None or isinstance(client, dns.quic.AsyncQuicConnection)
+ return await _http3(
+ q,
+ bootstrap_address,
+ url,
+ timeout,
+ port,
+ source,
+ source_port,
+ one_rr_per_rrset,
+ ignore_trailing,
+ verify=verify,
+ post=post,
+ connection=client,
+ )
+
+ if not have_doh:
+ raise NoDOH # pragma: no cover
+ # pylint: disable=possibly-used-before-assignment
+ if client and not isinstance(client, httpx.AsyncClient): # pyright: ignore
+ raise ValueError("client parameter must be an httpx.AsyncClient")
+ # pylint: enable=possibly-used-before-assignment
+
+ wire = q.to_wire()
+ headers = {"accept": "application/dns-message"}
+
+ h1 = http_version in (HTTPVersion.H1, HTTPVersion.DEFAULT)
+ h2 = http_version in (HTTPVersion.H2, HTTPVersion.DEFAULT)
+
+ backend = dns.asyncbackend.get_default_backend()
+
+ if source is None:
+ local_address = None
+ local_port = 0
+ else:
+ local_address = source
+ local_port = source_port
+
+ if client:
+ cm: contextlib.AbstractAsyncContextManager = NullContext(client)
+ else:
+ transport = backend.get_transport_class()(
+ local_address=local_address,
+ http1=h1,
+ http2=h2,
+ verify=verify,
+ local_port=local_port,
+ bootstrap_address=bootstrap_address,
+ resolver=resolver,
+ family=family,
+ )
+
+ cm = httpx.AsyncClient( # pyright: ignore
+ http1=h1, http2=h2, verify=verify, transport=transport # type: ignore
+ )
+
+ async with cm as the_client:
+ # see https://tools.ietf.org/html/rfc8484#section-4.1.1 for DoH
+ # GET and POST examples
+ if post:
+ headers.update(
+ {
+ "content-type": "application/dns-message",
+ "content-length": str(len(wire)),
+ }
+ )
+ response = await backend.wait_for(
+ the_client.post( # pyright: ignore
+ url,
+ headers=headers,
+ content=wire,
+ extensions=extensions,
+ ),
+ timeout,
+ )
+ else:
+ wire = base64.urlsafe_b64encode(wire).rstrip(b"=")
+ twire = wire.decode() # httpx does a repr() if we give it bytes
+ response = await backend.wait_for(
+ the_client.get( # pyright: ignore
+ url,
+ headers=headers,
+ params={"dns": twire},
+ extensions=extensions,
+ ),
+ timeout,
+ )
+
+ # see https://tools.ietf.org/html/rfc8484#section-4.2.1 for info about DoH
+ # status codes
+ if response.status_code < 200 or response.status_code > 299:
+ raise ValueError(
+ f"{where} responded with status code {response.status_code}"
+ f"\nResponse body: {response.content!r}"
+ )
+ r = dns.message.from_wire(
+ response.content,
+ keyring=q.keyring,
+ request_mac=q.request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ r.time = response.elapsed.total_seconds()
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+async def _http3(
+ q: dns.message.Message,
+ where: str,
+ url: str,
+ timeout: float | None = None,
+ port: int = 443,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ verify: bool | str | ssl.SSLContext = True,
+ backend: dns.asyncbackend.Backend | None = None,
+ post: bool = True,
+ connection: dns.quic.AsyncQuicConnection | None = None,
+) -> dns.message.Message:
+ if not dns.quic.have_quic:
+ raise NoDOH("DNS-over-HTTP3 is not available.") # pragma: no cover
+
+ url_parts = urllib.parse.urlparse(url)
+ hostname = url_parts.hostname
+ assert hostname is not None
+ if url_parts.port is not None:
+ port = url_parts.port
+
+ q.id = 0
+ wire = q.to_wire()
+ the_connection: dns.quic.AsyncQuicConnection
+ if connection:
+ cfactory = dns.quic.null_factory
+ mfactory = dns.quic.null_factory
+ else:
+ (cfactory, mfactory) = dns.quic.factories_for_backend(backend)
+
+ async with cfactory() as context:
+ async with mfactory(
+ context, verify_mode=verify, server_name=hostname, h3=True
+ ) as the_manager:
+ if connection:
+ the_connection = connection
+ else:
+ the_connection = the_manager.connect( # pyright: ignore
+ where, port, source, source_port
+ )
+ (start, expiration) = _compute_times(timeout)
+ stream = await the_connection.make_stream(timeout) # pyright: ignore
+ async with stream:
+ # note that send_h3() does not need await
+ stream.send_h3(url, wire, post)
+ wire = await stream.receive(_remaining(expiration))
+ _check_status(stream.headers(), where, wire)
+ finish = time.time()
+ r = dns.message.from_wire(
+ wire,
+ keyring=q.keyring,
+ request_mac=q.request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ r.time = max(finish - start, 0.0)
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+async def quic(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 853,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ connection: dns.quic.AsyncQuicConnection | None = None,
+ verify: bool | str = True,
+ backend: dns.asyncbackend.Backend | None = None,
+ hostname: str | None = None,
+ server_hostname: str | None = None,
+) -> dns.message.Message:
+ """Return the response obtained after sending an asynchronous query via
+ DNS-over-QUIC.
+
+ *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
+ the default, then dnspython will use the default backend.
+
+ See :py:func:`dns.query.quic()` for the documentation of the other
+ parameters, exceptions, and return type of this method.
+ """
+
+ if not dns.quic.have_quic:
+ raise NoDOQ("DNS-over-QUIC is not available.") # pragma: no cover
+
+ if server_hostname is not None and hostname is None:
+ hostname = server_hostname
+
+ q.id = 0
+ wire = q.to_wire()
+ the_connection: dns.quic.AsyncQuicConnection
+ if connection:
+ cfactory = dns.quic.null_factory
+ mfactory = dns.quic.null_factory
+ the_connection = connection
+ else:
+ (cfactory, mfactory) = dns.quic.factories_for_backend(backend)
+
+ async with cfactory() as context:
+ async with mfactory(
+ context,
+ verify_mode=verify,
+ server_name=server_hostname,
+ ) as the_manager:
+ if not connection:
+ the_connection = the_manager.connect( # pyright: ignore
+ where, port, source, source_port
+ )
+ (start, expiration) = _compute_times(timeout)
+ stream = await the_connection.make_stream(timeout) # pyright: ignore
+ async with stream:
+ await stream.send(wire, True)
+ wire = await stream.receive(_remaining(expiration))
+ finish = time.time()
+ r = dns.message.from_wire(
+ wire,
+ keyring=q.keyring,
+ request_mac=q.request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ r.time = max(finish - start, 0.0)
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+async def _inbound_xfr(
+ txn_manager: dns.transaction.TransactionManager,
+ s: dns.asyncbackend.Socket,
+ query: dns.message.Message,
+ serial: int | None,
+ timeout: float | None,
+ expiration: float,
+) -> Any:
+ """Given a socket, does the zone transfer."""
+ rdtype = query.question[0].rdtype
+ is_ixfr = rdtype == dns.rdatatype.IXFR
+ origin = txn_manager.from_wire_origin()
+ wire = query.to_wire()
+ is_udp = s.type == socket.SOCK_DGRAM
+ if is_udp:
+ udp_sock = cast(dns.asyncbackend.DatagramSocket, s)
+ await udp_sock.sendto(wire, None, _timeout(expiration))
+ else:
+ tcp_sock = cast(dns.asyncbackend.StreamSocket, s)
+ tcpmsg = struct.pack("!H", len(wire)) + wire
+ await tcp_sock.sendall(tcpmsg, expiration)
+ with dns.xfr.Inbound(txn_manager, rdtype, serial, is_udp) as inbound:
+ done = False
+ tsig_ctx = None
+ r: dns.message.Message | None = None
+ while not done:
+ (_, mexpiration) = _compute_times(timeout)
+ if mexpiration is None or (
+ expiration is not None and mexpiration > expiration
+ ):
+ mexpiration = expiration
+ if is_udp:
+ timeout = _timeout(mexpiration)
+ (rwire, _) = await udp_sock.recvfrom(65535, timeout) # pyright: ignore
+ else:
+ ldata = await _read_exactly(tcp_sock, 2, mexpiration) # pyright: ignore
+ (l,) = struct.unpack("!H", ldata)
+ rwire = await _read_exactly(tcp_sock, l, mexpiration) # pyright: ignore
+ r = dns.message.from_wire(
+ rwire,
+ keyring=query.keyring,
+ request_mac=query.mac,
+ xfr=True,
+ origin=origin,
+ tsig_ctx=tsig_ctx,
+ multi=(not is_udp),
+ one_rr_per_rrset=is_ixfr,
+ )
+ done = inbound.process_message(r)
+ yield r
+ tsig_ctx = r.tsig_ctx
+ if query.keyring and r is not None and not r.had_tsig:
+ raise dns.exception.FormError("missing TSIG")
+
+
+async def inbound_xfr(
+ where: str,
+ txn_manager: dns.transaction.TransactionManager,
+ query: dns.message.Message | None = None,
+ port: int = 53,
+ timeout: float | None = None,
+ lifetime: float | None = None,
+ source: str | None = None,
+ source_port: int = 0,
+ udp_mode: UDPMode = UDPMode.NEVER,
+ backend: dns.asyncbackend.Backend | None = None,
+) -> None:
+ """Conduct an inbound transfer and apply it via a transaction from the
+ txn_manager.
+
+ *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
+ the default, then dnspython will use the default backend.
+
+ See :py:func:`dns.query.inbound_xfr()` for the documentation of
+ the other parameters, exceptions, and return type of this method.
+ """
+ if query is None:
+ (query, serial) = dns.xfr.make_query(txn_manager)
+ else:
+ serial = dns.xfr.extract_serial_from_query(query)
+ af = dns.inet.af_for_address(where)
+ stuple = _source_tuple(af, source, source_port)
+ dtuple = (where, port)
+ if not backend:
+ backend = dns.asyncbackend.get_default_backend()
+ (_, expiration) = _compute_times(lifetime)
+ if query.question[0].rdtype == dns.rdatatype.IXFR and udp_mode != UDPMode.NEVER:
+ s = await backend.make_socket(
+ af, socket.SOCK_DGRAM, 0, stuple, dtuple, _timeout(expiration)
+ )
+ async with s:
+ try:
+ async for _ in _inbound_xfr( # pyright: ignore
+ txn_manager,
+ s,
+ query,
+ serial,
+ timeout,
+ expiration, # pyright: ignore
+ ):
+ pass
+ return
+ except dns.xfr.UseTCP:
+ if udp_mode == UDPMode.ONLY:
+ raise
+
+ s = await backend.make_socket(
+ af, socket.SOCK_STREAM, 0, stuple, dtuple, _timeout(expiration)
+ )
+ async with s:
+ async for _ in _inbound_xfr( # pyright: ignore
+ txn_manager, s, query, serial, timeout, expiration # pyright: ignore
+ ):
+ pass
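
Tying the module together, a minimal sketch of a UDP query with TCP fallback
under asyncio; `192.0.2.1` is a placeholder resolver address:

```python
import asyncio

import dns.asyncquery
import dns.message


async def main():
    q = dns.message.make_query("example.com", "A")
    (r, used_tcp) = await dns.asyncquery.udp_with_fallback(
        q, "192.0.2.1", timeout=2.0
    )
    print(used_tcp, r.answer)


asyncio.run(main())
```
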
diff --git a/tapdown/lib/python3.11/site-packages/dns/asyncresolver.py b/tapdown/lib/python3.11/site-packages/dns/asyncresolver.py
new file mode 100644
index 0000000..6f8c69f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/asyncresolver.py
@@ -0,0 +1,478 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Asynchronous DNS stub resolver."""
+
+import socket
+import time
+from typing import Any, Dict, List
+
+import dns._ddr
+import dns.asyncbackend
+import dns.asyncquery
+import dns.exception
+import dns.inet
+import dns.name
+import dns.nameserver
+import dns.query
+import dns.rdataclass
+import dns.rdatatype
+import dns.resolver # lgtm[py/import-and-import-from]
+import dns.reversename
+
+# import some resolver symbols for brevity
+from dns.resolver import NXDOMAIN, NoAnswer, NoRootSOA, NotAbsolute
+
+# for indentation purposes below
+_udp = dns.asyncquery.udp
+_tcp = dns.asyncquery.tcp
+
+
+class Resolver(dns.resolver.BaseResolver):
+ """Asynchronous DNS stub resolver."""
+
+ async def resolve(
+ self,
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+ search: bool | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+ ) -> dns.resolver.Answer:
+ """Query nameservers asynchronously to find the answer to the question.
+
+ *backend*, a ``dns.asyncbackend.Backend``, or ``None``. If ``None``,
+ the default, then dnspython will use the default backend.
+
+ See :py:func:`dns.resolver.Resolver.resolve()` for the
+ documentation of the other parameters, exceptions, and return
+ type of this method.
+ """
+
+ resolution = dns.resolver._Resolution(
+ self, qname, rdtype, rdclass, tcp, raise_on_no_answer, search
+ )
+ if not backend:
+ backend = dns.asyncbackend.get_default_backend()
+ start = time.time()
+ while True:
+ (request, answer) = resolution.next_request()
+ # Note we need to say "if answer is not None" and not just
+ # "if answer" because answer implements __len__, and python
+ # will call that. We want to return if we have an answer
+ # object, including in cases where its length is 0.
+ if answer is not None:
+ # cache hit!
+ return answer
+ assert request is not None # needed for type checking
+ done = False
+ while not done:
+ (nameserver, tcp, backoff) = resolution.next_nameserver()
+ if backoff:
+ await backend.sleep(backoff)
+ timeout = self._compute_timeout(start, lifetime, resolution.errors)
+ try:
+ response = await nameserver.async_query(
+ request,
+ timeout=timeout,
+ source=source,
+ source_port=source_port,
+ max_size=tcp,
+ backend=backend,
+ )
+ except Exception as ex:
+ (_, done) = resolution.query_result(None, ex)
+ continue
+ (answer, done) = resolution.query_result(response, None)
+ # Note we need to say "if answer is not None" and not just
+ # "if answer" because answer implements __len__, and python
+ # will call that. We want to return if we have an answer
+ # object, including in cases where its length is 0.
+ if answer is not None:
+ return answer
+
+ async def resolve_address(
+ self, ipaddr: str, *args: Any, **kwargs: Any
+ ) -> dns.resolver.Answer:
+ """Use an asynchronous resolver to run a reverse query for PTR
+ records.
+
+ This utilizes the resolve() method to perform a PTR lookup on the
+ specified IP address.
+
+ *ipaddr*, a ``str``, the IPv4 or IPv6 address you want to get
+ the PTR record for.
+
+ All other arguments that can be passed to the resolve() function
+ except for rdtype and rdclass are also supported by this
+ function.
+
+ """
+ # We make a modified kwargs for type checking happiness, as otherwise
+ # we get a legit warning about possibly having rdtype and rdclass
+ # in the kwargs more than once.
+ modified_kwargs: Dict[str, Any] = {}
+ modified_kwargs.update(kwargs)
+ modified_kwargs["rdtype"] = dns.rdatatype.PTR
+ modified_kwargs["rdclass"] = dns.rdataclass.IN
+ return await self.resolve(
+ dns.reversename.from_address(ipaddr), *args, **modified_kwargs
+ )
+
+ async def resolve_name(
+ self,
+ name: dns.name.Name | str,
+ family: int = socket.AF_UNSPEC,
+ **kwargs: Any,
+ ) -> dns.resolver.HostAnswers:
+ """Use an asynchronous resolver to query for address records.
+
+ This utilizes the resolve() method to perform A and/or AAAA lookups on
+ the specified name.
+
+ *name*, a ``dns.name.Name`` or ``str``, the name to resolve.
+
+ *family*, an ``int``, the address family. If socket.AF_UNSPEC
+ (the default), both A and AAAA records will be retrieved.
+
+ All other arguments that can be passed to the resolve() function
+ except for rdtype and rdclass are also supported by this
+ function.
+ """
+ # We make a modified kwargs for type checking happiness, as otherwise
+ # we get a legit warning about possibly having rdtype and rdclass
+ # in the kwargs more than once.
+ modified_kwargs: Dict[str, Any] = {}
+ modified_kwargs.update(kwargs)
+ modified_kwargs.pop("rdtype", None)
+ modified_kwargs["rdclass"] = dns.rdataclass.IN
+
+ if family == socket.AF_INET:
+ v4 = await self.resolve(name, dns.rdatatype.A, **modified_kwargs)
+ return dns.resolver.HostAnswers.make(v4=v4)
+ elif family == socket.AF_INET6:
+ v6 = await self.resolve(name, dns.rdatatype.AAAA, **modified_kwargs)
+ return dns.resolver.HostAnswers.make(v6=v6)
+ elif family != socket.AF_UNSPEC:
+ raise NotImplementedError(f"unknown address family {family}")
+
+ raise_on_no_answer = modified_kwargs.pop("raise_on_no_answer", True)
+ lifetime = modified_kwargs.pop("lifetime", None)
+ start = time.time()
+ v6 = await self.resolve(
+ name,
+ dns.rdatatype.AAAA,
+ raise_on_no_answer=False,
+ lifetime=self._compute_timeout(start, lifetime),
+ **modified_kwargs,
+ )
+ # Note that setting name ensures we query the same name
+ # for A as we did for AAAA. (This is just in case search lists
+ # are active by default in the resolver configuration and
+ # we might be talking to a server that says NXDOMAIN when it
+ # wants to say NOERROR no data.)
+ name = v6.qname
+ v4 = await self.resolve(
+ name,
+ dns.rdatatype.A,
+ raise_on_no_answer=False,
+ lifetime=self._compute_timeout(start, lifetime),
+ **modified_kwargs,
+ )
+ answers = dns.resolver.HostAnswers.make(
+ v6=v6, v4=v4, add_empty=not raise_on_no_answer
+ )
+ if not answers:
+ raise NoAnswer(response=v6.response)
+ return answers
+
+ # pylint: disable=redefined-outer-name
+
+ async def canonical_name(self, name: dns.name.Name | str) -> dns.name.Name:
+ """Determine the canonical name of *name*.
+
+ The canonical name is the name the resolver uses for queries
+ after all CNAME and DNAME renamings have been applied.
+
+ *name*, a ``dns.name.Name`` or ``str``, the query name.
+
+ This method can raise any exception that ``resolve()`` can
+ raise, other than ``dns.resolver.NoAnswer`` and
+ ``dns.resolver.NXDOMAIN``.
+
+ Returns a ``dns.name.Name``.
+ """
+ try:
+ answer = await self.resolve(name, raise_on_no_answer=False)
+ canonical_name = answer.canonical_name
+ except dns.resolver.NXDOMAIN as e:
+ canonical_name = e.canonical_name
+ return canonical_name
+
+ async def try_ddr(self, lifetime: float = 5.0) -> None:
+ """Try to update the resolver's nameservers using Discovery of Designated
+ Resolvers (DDR). If successful, the resolver will subsequently use
+ DNS-over-HTTPS or DNS-over-TLS for future queries.
+
+ *lifetime*, a float, is the maximum time to spend attempting DDR. The default
+ is 5 seconds.
+
+ If the SVCB query is successful and results in a non-empty list of nameservers,
+ then the resolver's nameservers are set to the returned servers in priority
+ order.
+
+ The current implementation does not use any address hints from the SVCB record,
+ nor does it resolve addresses for the SVCB target name; rather, it assumes that
+ the bootstrap nameserver will always be one of the addresses and uses it.
+ A future revision to the code may offer fuller support. The code verifies that
+ the bootstrap nameserver is in the Subject Alternative Name field of the
+ TLS certificate.
+ """
+ try:
+ expiration = time.time() + lifetime
+ answer = await self.resolve(
+ dns._ddr._local_resolver_name, "svcb", lifetime=lifetime
+ )
+ timeout = dns.query._remaining(expiration)
+ nameservers = await dns._ddr._get_nameservers_async(answer, timeout)
+ if len(nameservers) > 0:
+ self.nameservers = nameservers
+ except Exception:
+ pass
+
+
+default_resolver = None
+
+
+def get_default_resolver() -> Resolver:
+ """Get the default asynchronous resolver, initializing it if necessary."""
+ if default_resolver is None:
+ reset_default_resolver()
+ assert default_resolver is not None
+ return default_resolver
+
+
+def reset_default_resolver() -> None:
+ """Re-initialize default asynchronous resolver.
+
+ Note that the resolver configuration (i.e. /etc/resolv.conf on UNIX
+ systems) will be re-read immediately.
+ """
+
+ global default_resolver
+ default_resolver = Resolver()
+
+
+async def resolve(
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+ search: bool | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+) -> dns.resolver.Answer:
+ """Query nameservers asynchronously to find the answer to the question.
+
+ This is a convenience function that uses the default resolver
+ object to make the query.
+
+ See :py:func:`dns.asyncresolver.Resolver.resolve` for more
+ information on the parameters.
+ """
+
+ return await get_default_resolver().resolve(
+ qname,
+ rdtype,
+ rdclass,
+ tcp,
+ source,
+ raise_on_no_answer,
+ source_port,
+ lifetime,
+ search,
+ backend,
+ )
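+
+# A minimal usage sketch for the convenience function above, assuming a working
+# network and an asyncio event loop; "dns.google." is an arbitrary example name.
+#
+#     import asyncio
+#     import dns.asyncresolver
+#
+#     async def main() -> None:
+#         answer = await dns.asyncresolver.resolve("dns.google.", "A")
+#         for rdata in answer:
+#             print(rdata.address)
+#
+#     asyncio.run(main())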
+
+
+async def resolve_address(
+ ipaddr: str, *args: Any, **kwargs: Any
+) -> dns.resolver.Answer:
+ """Use a resolver to run a reverse query for PTR records.
+
+ See :py:func:`dns.asyncresolver.Resolver.resolve_address` for more
+ information on the parameters.
+ """
+
+ return await get_default_resolver().resolve_address(ipaddr, *args, **kwargs)
+
+
+async def resolve_name(
+ name: dns.name.Name | str, family: int = socket.AF_UNSPEC, **kwargs: Any
+) -> dns.resolver.HostAnswers:
+ """Use a resolver to asynchronously query for address records.
+
+ See :py:func:`dns.asyncresolver.Resolver.resolve_name` for more
+ information on the parameters.
+ """
+
+ return await get_default_resolver().resolve_name(name, family, **kwargs)
+
+
+async def canonical_name(name: dns.name.Name | str) -> dns.name.Name:
+ """Determine the canonical name of *name*.
+
+    See :py:func:`dns.asyncresolver.Resolver.canonical_name` for more
+    information on the parameters and possible exceptions.
+ """
+
+ return await get_default_resolver().canonical_name(name)
+
+
+async def try_ddr(timeout: float = 5.0) -> None:
+ """Try to update the default resolver's nameservers using Discovery of Designated
+ Resolvers (DDR). If successful, the resolver will subsequently use
+ DNS-over-HTTPS or DNS-over-TLS for future queries.
+
+    See :py:func:`dns.asyncresolver.Resolver.try_ddr` for more information.
+ """
+ return await get_default_resolver().try_ddr(timeout)
+
+
+async def zone_for_name(
+ name: dns.name.Name | str,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ tcp: bool = False,
+ resolver: Resolver | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+) -> dns.name.Name:
+ """Find the name of the zone which contains the specified name.
+
+    See :py:func:`dns.resolver.zone_for_name` for more
+    information on the parameters and possible exceptions.
+ """
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, dns.name.root)
+ if resolver is None:
+ resolver = get_default_resolver()
+ if not name.is_absolute():
+ raise NotAbsolute(name)
+ while True:
+ try:
+ answer = await resolver.resolve(
+ name, dns.rdatatype.SOA, rdclass, tcp, backend=backend
+ )
+ assert answer.rrset is not None
+ if answer.rrset.name == name:
+ return name
+ # otherwise we were CNAMEd or DNAMEd and need to look higher
+ except (NXDOMAIN, NoAnswer):
+ pass
+ try:
+ name = name.parent()
+ except dns.name.NoParent: # pragma: no cover
+ raise NoRootSOA
+
+
+async def make_resolver_at(
+ where: dns.name.Name | str,
+ port: int = 53,
+ family: int = socket.AF_UNSPEC,
+ resolver: Resolver | None = None,
+) -> Resolver:
+ """Make a stub resolver using the specified destination as the full resolver.
+
+    *where*, a ``dns.name.Name`` or ``str``, the domain name or IP address of the
+    full resolver.
+
+ *port*, an ``int``, the port to use. If not specified, the default is 53.
+
+ *family*, an ``int``, the address family to use. This parameter is used if
+ *where* is not an address. The default is ``socket.AF_UNSPEC`` in which case
+ the first address returned by ``resolve_name()`` will be used, otherwise the
+ first address of the specified family will be used.
+
+ *resolver*, a ``dns.asyncresolver.Resolver`` or ``None``, the resolver to use for
+ resolution of hostnames. If not specified, the default resolver will be used.
+
+    Returns a ``dns.asyncresolver.Resolver`` or raises an exception.
+ """
+ if resolver is None:
+ resolver = get_default_resolver()
+ nameservers: List[str | dns.nameserver.Nameserver] = []
+ if isinstance(where, str) and dns.inet.is_address(where):
+ nameservers.append(dns.nameserver.Do53Nameserver(where, port))
+ else:
+ answers = await resolver.resolve_name(where, family)
+ for address in answers.addresses():
+ nameservers.append(dns.nameserver.Do53Nameserver(address, port))
+ res = Resolver(configure=False)
+ res.nameservers = nameservers
+ return res
+
+
+async def resolve_at(
+ where: dns.name.Name | str,
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+ search: bool | None = None,
+ backend: dns.asyncbackend.Backend | None = None,
+ port: int = 53,
+ family: int = socket.AF_UNSPEC,
+ resolver: Resolver | None = None,
+) -> dns.resolver.Answer:
+ """Query nameservers to find the answer to the question.
+
+ This is a convenience function that calls ``dns.asyncresolver.make_resolver_at()``
+ to make a resolver, and then uses it to resolve the query.
+
+ See ``dns.asyncresolver.Resolver.resolve`` for more information on the resolution
+ parameters, and ``dns.asyncresolver.make_resolver_at`` for information about the
+ resolver parameters *where*, *port*, *family*, and *resolver*.
+
+ If making more than one query, it is more efficient to call
+ ``dns.asyncresolver.make_resolver_at()`` and then use that resolver for the queries
+ instead of calling ``resolve_at()`` multiple times.
+ """
+ res = await make_resolver_at(where, port, family, resolver)
+ return await res.resolve(
+ qname,
+ rdtype,
+ rdclass,
+ tcp,
+ source,
+ raise_on_no_answer,
+ source_port,
+ lifetime,
+ search,
+ backend,
+ )
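+
+# A minimal sketch contrasting the two styles above (run from within a
+# coroutine), assuming "9.9.9.9" as an example server; resolve_at() suits
+# one-shot queries, while repeated queries should reuse make_resolver_at():
+#
+#     answer = await dns.asyncresolver.resolve_at("9.9.9.9", "example.com.", "A")
+#
+#     res = await dns.asyncresolver.make_resolver_at("9.9.9.9")
+#     a = await res.resolve("example.com.", "A")
+#     aaaa = await res.resolve("example.com.", "AAAA")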
diff --git a/tapdown/lib/python3.11/site-packages/dns/btree.py b/tapdown/lib/python3.11/site-packages/dns/btree.py
new file mode 100644
index 0000000..12da9f5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/btree.py
@@ -0,0 +1,850 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+"""
+A BTree in the style of Cormen, Leiserson, and Rivest's "Introduction to
+Algorithms", with copy-on-write node updates, cursors, and optional space
+optimization for mostly-in-order insertion.
+"""
+
+from collections.abc import MutableMapping, MutableSet
+from typing import Any, Callable, Generic, Optional, Tuple, TypeVar, cast
+
+DEFAULT_T = 127
+
+KT = TypeVar("KT") # the type of a key in Element
+
+
+class Element(Generic[KT]):
+ """All items stored in the BTree are Elements."""
+
+ def key(self) -> KT:
+ """The key for this element; the returned type must implement comparison."""
+ raise NotImplementedError # pragma: no cover
+
+
+ET = TypeVar("ET", bound=Element) # the type of a value in a _KV
+
+
+def _MIN(t: int) -> int:
+ """The minimum number of keys in a non-root node for a BTree with the specified
+ ``t``
+ """
+ return t - 1
+
+
+def _MAX(t: int) -> int:
+ """The maximum number of keys in node for a BTree with the specified ``t``"""
+ return 2 * t - 1
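+
+# For example, with t == 3, _MIN(3) == 2 and _MAX(3) == 5: every non-root node
+# holds between 2 and 5 elements, and an internal node has one more child than
+# it has elements.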
+
+
+class _Creator:
+ """A _Creator class instance is used as a unique id for the BTree which created
+ a node.
+
+ We use a dedicated creator rather than just a BTree reference to avoid circularity
+ that would complicate GC.
+ """
+
+ def __str__(self): # pragma: no cover
+ return f"{id(self):x}"
+
+
+class _Node(Generic[KT, ET]):
+ """A Node in the BTree.
+
+ A Node (leaf or internal) of the BTree.
+ """
+
+ __slots__ = ["t", "creator", "is_leaf", "elts", "children"]
+
+ def __init__(self, t: int, creator: _Creator, is_leaf: bool):
+ assert t >= 3
+ self.t = t
+ self.creator = creator
+ self.is_leaf = is_leaf
+ self.elts: list[ET] = []
+ self.children: list[_Node[KT, ET]] = []
+
+ def is_maximal(self) -> bool:
+ """Does this node have the maximal number of keys?"""
+ assert len(self.elts) <= _MAX(self.t)
+ return len(self.elts) == _MAX(self.t)
+
+ def is_minimal(self) -> bool:
+ """Does this node have the minimal number of keys?"""
+ assert len(self.elts) >= _MIN(self.t)
+ return len(self.elts) == _MIN(self.t)
+
+ def search_in_node(self, key: KT) -> tuple[int, bool]:
+ """Get the index of the ``Element`` matching ``key`` or the index of its
+ least successor.
+
+        Returns a tuple of the index and an ``equal`` boolean that is ``True`` if
+        and only if the key was found.
+ """
+ l = len(self.elts)
+ if l > 0 and key > self.elts[l - 1].key():
+ # This is optimizing near in-order insertion.
+ return l, False
+ l = 0
+ i = len(self.elts)
+ r = i - 1
+ equal = False
+ while l <= r:
+ m = (l + r) // 2
+ k = self.elts[m].key()
+ if key == k:
+ i = m
+ equal = True
+ break
+ elif key < k:
+ i = m
+ r = m - 1
+ else:
+ l = m + 1
+ return i, equal
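+
+    # For example, if this node's element keys are [10, 20, 40]:
+    #
+    #     search_in_node(20)  ->  (1, True)   # exact match at index 1
+    #     search_in_node(30)  ->  (2, False)  # least successor (40) is at index 2
+    #     search_in_node(50)  ->  (3, False)  # fast path: greater than the last key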
+
+ def maybe_cow_child(self, index: int) -> "_Node[KT, ET]":
+ assert not self.is_leaf
+ child = self.children[index]
+ cloned = child.maybe_cow(self.creator)
+ if cloned:
+ self.children[index] = cloned
+ return cloned
+ else:
+ return child
+
+ def _get_node(self, key: KT) -> Tuple[Optional["_Node[KT, ET]"], int]:
+ """Get the node associated with key and its index, doing
+ copy-on-write if we have to descend.
+
+ Returns a tuple of the node and the index, or the tuple ``(None, 0)``
+ if the key was not found.
+ """
+ i, equal = self.search_in_node(key)
+ if equal:
+ return (self, i)
+ elif self.is_leaf:
+ return (None, 0)
+ else:
+ child = self.maybe_cow_child(i)
+ return child._get_node(key)
+
+ def get(self, key: KT) -> ET | None:
+ """Get the element associated with *key* or return ``None``"""
+ i, equal = self.search_in_node(key)
+ if equal:
+ return self.elts[i]
+ elif self.is_leaf:
+ return None
+ else:
+ return self.children[i].get(key)
+
+ def optimize_in_order_insertion(self, index: int) -> None:
+ """Try to minimize the number of Nodes in a BTree where the insertion
+ is done in-order or close to it, by stealing as much as we can from our
+ right sibling.
+
+ If we don't do this, then an in-order insertion will produce a BTree
+ where most of the nodes are minimal.
+ """
+ if index == 0:
+ return
+ left = self.children[index - 1]
+ if len(left.elts) == _MAX(self.t):
+ return
+ left = self.maybe_cow_child(index - 1)
+ while len(left.elts) < _MAX(self.t):
+ if not left.try_right_steal(self, index - 1):
+ break
+
+ def insert_nonfull(self, element: ET, in_order: bool) -> ET | None:
+ assert not self.is_maximal()
+ while True:
+ key = element.key()
+ i, equal = self.search_in_node(key)
+ if equal:
+ # replace
+ old = self.elts[i]
+ self.elts[i] = element
+ return old
+ elif self.is_leaf:
+ self.elts.insert(i, element)
+ return None
+ else:
+ child = self.maybe_cow_child(i)
+ if child.is_maximal():
+ self.adopt(*child.split())
+ # Splitting might result in our target moving to us, so
+ # search again.
+ continue
+ oelt = child.insert_nonfull(element, in_order)
+ if in_order:
+ self.optimize_in_order_insertion(i)
+ return oelt
+
+ def split(self) -> tuple["_Node[KT, ET]", ET, "_Node[KT, ET]"]:
+ """Split a maximal node into two minimal ones and a central element."""
+ assert self.is_maximal()
+ right = self.__class__(self.t, self.creator, self.is_leaf)
+ right.elts = list(self.elts[_MIN(self.t) + 1 :])
+ middle = self.elts[_MIN(self.t)]
+ self.elts = list(self.elts[: _MIN(self.t)])
+ if not self.is_leaf:
+ right.children = list(self.children[_MIN(self.t) + 1 :])
+ self.children = list(self.children[: _MIN(self.t) + 1])
+ return self, middle, right
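+
+    # For example, with t == 3, a maximal node holding keys [1, 2, 3, 4, 5]
+    # splits into a left node [1, 2], the median element 3 (which the caller
+    # pushes into the parent via adopt()), and a right node [4, 5].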
+
+ def try_left_steal(self, parent: "_Node[KT, ET]", index: int) -> bool:
+ """Try to steal from this Node's left sibling for balancing purposes.
+
+ Returns ``True`` if the theft was successful, or ``False`` if not.
+ """
+ if index != 0:
+ left = parent.children[index - 1]
+ if not left.is_minimal():
+ left = parent.maybe_cow_child(index - 1)
+ elt = parent.elts[index - 1]
+ parent.elts[index - 1] = left.elts.pop()
+ self.elts.insert(0, elt)
+ if not left.is_leaf:
+ assert not self.is_leaf
+ child = left.children.pop()
+ self.children.insert(0, child)
+ return True
+ return False
+
+ def try_right_steal(self, parent: "_Node[KT, ET]", index: int) -> bool:
+ """Try to steal from this Node's right sibling for balancing purposes.
+
+ Returns ``True`` if the theft was successful, or ``False`` if not.
+ """
+ if index + 1 < len(parent.children):
+ right = parent.children[index + 1]
+ if not right.is_minimal():
+ right = parent.maybe_cow_child(index + 1)
+ elt = parent.elts[index]
+ parent.elts[index] = right.elts.pop(0)
+ self.elts.append(elt)
+ if not right.is_leaf:
+ assert not self.is_leaf
+ child = right.children.pop(0)
+ self.children.append(child)
+ return True
+ return False
+
+ def adopt(self, left: "_Node[KT, ET]", middle: ET, right: "_Node[KT, ET]") -> None:
+ """Adopt left, middle, and right into our Node (which must not be maximal,
+ and which must not be a leaf). In the case were we are not the new root,
+ then the left child must already be in the Node."""
+ assert not self.is_maximal()
+ assert not self.is_leaf
+ key = middle.key()
+ i, equal = self.search_in_node(key)
+ assert not equal
+ self.elts.insert(i, middle)
+ if len(self.children) == 0:
+ # We are the new root
+ self.children = [left, right]
+ else:
+ assert self.children[i] == left
+ self.children.insert(i + 1, right)
+
+ def merge(self, parent: "_Node[KT, ET]", index: int) -> None:
+ """Merge this node's parent and its right sibling into this node."""
+ right = parent.children.pop(index + 1)
+ self.elts.append(parent.elts.pop(index))
+ self.elts.extend(right.elts)
+ if not self.is_leaf:
+ self.children.extend(right.children)
+
+ def minimum(self) -> ET:
+ """The least element in this subtree."""
+ if self.is_leaf:
+ return self.elts[0]
+ else:
+ return self.children[0].minimum()
+
+ def maximum(self) -> ET:
+ """The greatest element in this subtree."""
+ if self.is_leaf:
+ return self.elts[-1]
+ else:
+ return self.children[-1].maximum()
+
+ def balance(self, parent: "_Node[KT, ET]", index: int) -> None:
+ """This Node is minimal, and we want to make it non-minimal so we can delete.
+ We try to steal from our siblings, and if that doesn't work we will merge
+ with one of them."""
+ assert not parent.is_leaf
+ if self.try_left_steal(parent, index):
+ return
+ if self.try_right_steal(parent, index):
+ return
+ # Stealing didn't work, so both siblings must be minimal.
+ if index == 0:
+ # We are the left-most node so merge with our right sibling.
+ self.merge(parent, index)
+ else:
+ # Have our left sibling merge with us. This lets us only have "merge right"
+ # code.
+ left = parent.maybe_cow_child(index - 1)
+ left.merge(parent, index - 1)
+
+ def delete(
+ self, key: KT, parent: Optional["_Node[KT, ET]"], exact: ET | None
+ ) -> ET | None:
+ """Delete an element matching *key* if it exists. If *exact* is not ``None``
+ then it must be an exact match with that element. The Node must not be
+ minimal unless it is the root."""
+ assert parent is None or not self.is_minimal()
+ i, equal = self.search_in_node(key)
+ original_key = None
+ if equal:
+ # Note we use "is" here as we meant "exactly this object".
+ if exact is not None and self.elts[i] is not exact:
+ raise ValueError("exact delete did not match existing elt")
+ if self.is_leaf:
+ return self.elts.pop(i)
+ # Note we need to ensure exact is None going forward as we've
+ # already checked exactness and are about to change our target key
+ # to the least successor.
+ exact = None
+ original_key = key
+ least_successor = self.children[i + 1].minimum()
+ key = least_successor.key()
+ i = i + 1
+ if self.is_leaf:
+ # No match
+ if exact is not None:
+ raise ValueError("exact delete had no match")
+ return None
+ # recursively delete in the appropriate child
+ child = self.maybe_cow_child(i)
+ if child.is_minimal():
+ child.balance(self, i)
+ # Things may have moved.
+ i, equal = self.search_in_node(key)
+ assert not equal
+ child = self.children[i]
+ assert not child.is_minimal()
+ elt = child.delete(key, self, exact)
+ if original_key is not None:
+ node, i = self._get_node(original_key)
+ assert node is not None
+ assert elt is not None
+ oelt = node.elts[i]
+ node.elts[i] = elt
+ elt = oelt
+ return elt
+
+ def visit_in_order(self, visit: Callable[[ET], None]) -> None:
+ """Call *visit* on all of the elements in order."""
+ for i, elt in enumerate(self.elts):
+ if not self.is_leaf:
+ self.children[i].visit_in_order(visit)
+ visit(elt)
+ if not self.is_leaf:
+ self.children[-1].visit_in_order(visit)
+
+ def _visit_preorder_by_node(self, visit: Callable[["_Node[KT, ET]"], None]) -> None:
+ """Visit nodes in preorder. This method is only used for testing."""
+ visit(self)
+ if not self.is_leaf:
+ for child in self.children:
+ child._visit_preorder_by_node(visit)
+
+ def maybe_cow(self, creator: _Creator) -> Optional["_Node[KT, ET]"]:
+ """Return a clone of this Node if it was not created by *creator*, or ``None``
+ otherwise (i.e. copy for copy-on-write if we haven't already copied it)."""
+ if self.creator is not creator:
+ return self.clone(creator)
+ else:
+ return None
+
+ def clone(self, creator: _Creator) -> "_Node[KT, ET]":
+ """Make a shallow-copy duplicate of this node."""
+ cloned = self.__class__(self.t, creator, self.is_leaf)
+ cloned.elts.extend(self.elts)
+ if not self.is_leaf:
+ cloned.children.extend(self.children)
+ return cloned
+
+ def __str__(self): # pragma: no cover
+ if not self.is_leaf:
+ children = " " + " ".join([f"{id(c):x}" for c in self.children])
+ else:
+ children = ""
+ return f"{id(self):x} {self.creator} {self.elts}{children}"
+
+
+class Cursor(Generic[KT, ET]):
+ """A seekable cursor for a BTree.
+
+ If you are going to use a cursor on a mutable BTree, you should use it
+ in a ``with`` block so that any mutations of the BTree automatically park
+ the cursor.
+ """
+
+ def __init__(self, btree: "BTree[KT, ET]"):
+ self.btree = btree
+ self.current_node: _Node | None = None
+ # The current index is the element index within the current node, or
+ # if there is no current node then it is 0 on the left boundary and 1
+ # on the right boundary.
+ self.current_index: int = 0
+ self.recurse = False
+ self.increasing = True
+ self.parents: list[tuple[_Node, int]] = []
+ self.parked = False
+ self.parking_key: KT | None = None
+ self.parking_key_read = False
+
+ def _seek_least(self) -> None:
+ # seek to the least value in the subtree beneath the current index of the
+ # current node
+ assert self.current_node is not None
+ while not self.current_node.is_leaf:
+ self.parents.append((self.current_node, self.current_index))
+ self.current_node = self.current_node.children[self.current_index]
+ assert self.current_node is not None
+ self.current_index = 0
+
+ def _seek_greatest(self) -> None:
+ # seek to the greatest value in the subtree beneath the current index of the
+ # current node
+ assert self.current_node is not None
+ while not self.current_node.is_leaf:
+ self.parents.append((self.current_node, self.current_index))
+ self.current_node = self.current_node.children[self.current_index]
+ assert self.current_node is not None
+ self.current_index = len(self.current_node.elts)
+
+ def park(self):
+ """Park the cursor.
+
+ A cursor must be "parked" before mutating the BTree to avoid undefined behavior.
+ Cursors created in a ``with`` block register with their BTree and will park
+ automatically. Note that a parked cursor may not observe some changes made when
+ it is parked; for example a cursor being iterated with next() will not see items
+ inserted before its current position.
+ """
+ if not self.parked:
+ self.parked = True
+
+ def _maybe_unpark(self):
+ if self.parked:
+ if self.parking_key is not None:
+ # remember our increasing hint, as seeking might change it
+ increasing = self.increasing
+ if self.parking_key_read:
+ # We've already returned the parking key, so we want to be before it
+ # if decreasing and after it if increasing.
+ before = not self.increasing
+ else:
+ # We haven't returned the parking key, so we've parked right
+ # after seeking or are on a boundary. Either way, the before
+ # hint we want is the value of self.increasing.
+ before = self.increasing
+ self.seek(self.parking_key, before)
+ self.increasing = increasing # might have been altered by seek()
+ self.parked = False
+ self.parking_key = None
+
+ def prev(self) -> ET | None:
+ """Get the previous element, or return None if on the left boundary."""
+ self._maybe_unpark()
+ self.parking_key = None
+ if self.current_node is None:
+ # on a boundary
+ if self.current_index == 0:
+ # left boundary, there is no prev
+ return None
+ else:
+ assert self.current_index == 1
+ # right boundary; seek to the actual boundary
+ # so we can do a prev()
+ self.current_node = self.btree.root
+ self.current_index = len(self.btree.root.elts)
+ self._seek_greatest()
+ while True:
+ if self.recurse:
+ if not self.increasing:
+ # We only want to recurse if we are continuing in the decreasing
+ # direction.
+ self._seek_greatest()
+ self.recurse = False
+ self.increasing = False
+ self.current_index -= 1
+ if self.current_index >= 0:
+ elt = self.current_node.elts[self.current_index]
+ if not self.current_node.is_leaf:
+ self.recurse = True
+ self.parking_key = elt.key()
+ self.parking_key_read = True
+ return elt
+ else:
+ if len(self.parents) > 0:
+ self.current_node, self.current_index = self.parents.pop()
+ else:
+ self.current_node = None
+ self.current_index = 0
+ return None
+
+ def next(self) -> ET | None:
+ """Get the next element, or return None if on the right boundary."""
+ self._maybe_unpark()
+ self.parking_key = None
+ if self.current_node is None:
+ # on a boundary
+ if self.current_index == 1:
+ # right boundary, there is no next
+ return None
+ else:
+ assert self.current_index == 0
+ # left boundary; seek to the actual boundary
+ # so we can do a next()
+ self.current_node = self.btree.root
+ self.current_index = 0
+ self._seek_least()
+ while True:
+ if self.recurse:
+ if self.increasing:
+ # We only want to recurse if we are continuing in the increasing
+ # direction.
+ self._seek_least()
+ self.recurse = False
+ self.increasing = True
+ if self.current_index < len(self.current_node.elts):
+ elt = self.current_node.elts[self.current_index]
+ self.current_index += 1
+ if not self.current_node.is_leaf:
+ self.recurse = True
+ self.parking_key = elt.key()
+ self.parking_key_read = True
+ return elt
+ else:
+ if len(self.parents) > 0:
+ self.current_node, self.current_index = self.parents.pop()
+ else:
+ self.current_node = None
+ self.current_index = 1
+ return None
+
+ def _adjust_for_before(self, before: bool, i: int) -> None:
+ if before:
+ self.current_index = i
+ else:
+ self.current_index = i + 1
+
+ def seek(self, key: KT, before: bool = True) -> None:
+ """Seek to the specified key.
+
+ If *before* is ``True`` (the default) then the cursor is positioned just
+ before *key* if it exists, or before its least successor if it doesn't. A
+ subsequent next() will retrieve this value. If *before* is ``False``, then
+        the cursor is positioned just after *key* if it exists, or after its greatest
+        predecessor if it doesn't. A subsequent prev() will return this value.
+ """
+ self.current_node = self.btree.root
+ assert self.current_node is not None
+ self.recurse = False
+ self.parents = []
+ self.increasing = before
+ self.parked = False
+ self.parking_key = key
+ self.parking_key_read = False
+ while not self.current_node.is_leaf:
+ i, equal = self.current_node.search_in_node(key)
+ if equal:
+ self._adjust_for_before(before, i)
+ if before:
+ self._seek_greatest()
+ else:
+ self._seek_least()
+ return
+ self.parents.append((self.current_node, i))
+ self.current_node = self.current_node.children[i]
+ assert self.current_node is not None
+ i, equal = self.current_node.search_in_node(key)
+ if equal:
+ self._adjust_for_before(before, i)
+ else:
+ self.current_index = i
+
+ def seek_first(self) -> None:
+ """Seek to the left boundary (i.e. just before the least element).
+
+ A subsequent next() will return the least element if the BTree isn't empty."""
+ self.current_node = None
+ self.current_index = 0
+ self.recurse = False
+ self.increasing = True
+ self.parents = []
+ self.parked = False
+ self.parking_key = None
+
+ def seek_last(self) -> None:
+ """Seek to the right boundary (i.e. just after the greatest element).
+
+ A subsequent prev() will return the greatest element if the BTree isn't empty.
+ """
+ self.current_node = None
+ self.current_index = 1
+ self.recurse = False
+ self.increasing = False
+ self.parents = []
+ self.parked = False
+ self.parking_key = None
+
+ def __enter__(self):
+ self.btree.register_cursor(self)
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.btree.deregister_cursor(self)
+ return False
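+
+# A minimal cursor sketch, assuming a BTreeDict (defined below) with integer
+# keys; seek() positions the cursor between elements, and next()/prev() walk
+# in either direction from there.
+#
+#     d = BTreeDict()
+#     for i in range(10):
+#         d[i] = str(i)
+#     with d.cursor() as cursor:    # registers for automatic parking
+#         cursor.seek(5)            # position just before key 5
+#         elt = cursor.next()       # KV(5, "5")
+#         elt = cursor.prev()       # KV(5, "5") again: reversing re-reads it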
+
+
+class Immutable(Exception):
+ """The BTree is immutable."""
+
+
+class BTree(Generic[KT, ET]):
+ """An in-memory BTree with copy-on-write and cursors."""
+
+ def __init__(self, *, t: int = DEFAULT_T, original: Optional["BTree"] = None):
+ """Create a BTree.
+
+ If *original* is not ``None``, then the BTree is shallow-cloned from
+ *original* using copy-on-write. Otherwise a new BTree with the specified
+ *t* value is created.
+
+ The BTree is not thread-safe.
+ """
+ # We don't use a reference to ourselves as a creator as we don't want
+ # to prevent GC of old btrees.
+ self.creator = _Creator()
+ self._immutable = False
+ self.t: int
+ self.root: _Node
+ self.size: int
+ self.cursors: set[Cursor] = set()
+ if original is not None:
+ if not original._immutable:
+ raise ValueError("original BTree is not immutable")
+ self.t = original.t
+ self.root = original.root
+ self.size = original.size
+ else:
+ if t < 3:
+ raise ValueError("t must be >= 3")
+ self.t = t
+ self.root = _Node(self.t, self.creator, True)
+ self.size = 0
+
+ def make_immutable(self):
+ """Make the BTree immutable.
+
+ Attempts to alter the BTree after making it immutable will raise an
+ Immutable exception. This operation cannot be undone.
+ """
+ if not self._immutable:
+ self._immutable = True
+
+ def _check_mutable_and_park(self) -> None:
+ if self._immutable:
+ raise Immutable
+ for cursor in self.cursors:
+ cursor.park()
+
+ # Note that we don't use insert() and delete() but rather insert_element() and
+ # delete_key() so that BTreeDict can be a proper MutableMapping and supply the
+ # rest of the standard mapping API.
+
+ def insert_element(self, elt: ET, in_order: bool = False) -> ET | None:
+ """Insert the element into the BTree.
+
+ If *in_order* is ``True``, then extra work will be done to make left siblings
+        full, which optimizes storage space when the elements are inserted in-order
+ or close to it.
+
+ Returns the previously existing element at the element's key or ``None``.
+ """
+ self._check_mutable_and_park()
+ cloned = self.root.maybe_cow(self.creator)
+ if cloned:
+ self.root = cloned
+ if self.root.is_maximal():
+ old_root = self.root
+ self.root = _Node(self.t, self.creator, False)
+ self.root.adopt(*old_root.split())
+ oelt = self.root.insert_nonfull(elt, in_order)
+ if oelt is None:
+ # We did not replace, so something was added.
+ self.size += 1
+ return oelt
+
+ def get_element(self, key: KT) -> ET | None:
+ """Get the element matching *key* from the BTree, or return ``None`` if it
+ does not exist.
+ """
+ return self.root.get(key)
+
+ def _delete(self, key: KT, exact: ET | None) -> ET | None:
+ self._check_mutable_and_park()
+ cloned = self.root.maybe_cow(self.creator)
+ if cloned:
+ self.root = cloned
+ elt = self.root.delete(key, None, exact)
+ if elt is not None:
+ # We deleted something
+ self.size -= 1
+ if len(self.root.elts) == 0:
+ # The root is now empty. If there is a child, then collapse this root
+ # level and make the child the new root.
+ if not self.root.is_leaf:
+ assert len(self.root.children) == 1
+ self.root = self.root.children[0]
+ return elt
+
+ def delete_key(self, key: KT) -> ET | None:
+ """Delete the element matching *key* from the BTree.
+
+ Returns the matching element or ``None`` if it does not exist.
+ """
+ return self._delete(key, None)
+
+ def delete_exact(self, element: ET) -> ET | None:
+ """Delete *element* from the BTree.
+
+        Returns *element*; raises ``ValueError`` if it was not in the BTree.
+ """
+ delt = self._delete(element.key(), element)
+ assert delt is element
+ return delt
+
+ def __len__(self):
+ return self.size
+
+ def visit_in_order(self, visit: Callable[[ET], None]) -> None:
+ """Call *visit*(element) on all elements in the tree in sorted order."""
+ self.root.visit_in_order(visit)
+
+ def _visit_preorder_by_node(self, visit: Callable[[_Node], None]) -> None:
+ self.root._visit_preorder_by_node(visit)
+
+ def cursor(self) -> Cursor[KT, ET]:
+ """Create a cursor."""
+ return Cursor(self)
+
+ def register_cursor(self, cursor: Cursor) -> None:
+ """Register a cursor for the automatic parking service."""
+ self.cursors.add(cursor)
+
+ def deregister_cursor(self, cursor: Cursor) -> None:
+ """Deregister a cursor from the automatic parking service."""
+ self.cursors.discard(cursor)
+
+ def __copy__(self):
+ return self.__class__(original=self)
+
+ def __iter__(self):
+ with self.cursor() as cursor:
+ while True:
+ elt = cursor.next()
+ if elt is None:
+ break
+ yield elt.key()
+
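+# A minimal copy-on-write sketch: a tree must be made immutable before it can
+# be cloned; the clone (shown here with BTreeDict, defined below) shares nodes
+# with the original and copies them lazily as it is mutated.
+#
+#     import copy
+#
+#     original = BTreeDict()
+#     original["a"] = 1
+#     original.make_immutable()
+#     clone = copy.copy(original)   # CoW clone sharing original's nodes
+#     clone["b"] = 2                # copies touched nodes; original unchanged
+#     assert "b" not in original and "b" in clone
+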
+
+VT = TypeVar("VT") # the type of a value in a BTreeDict
+
+
+class KV(Element, Generic[KT, VT]):
+ """The BTree element type used in a ``BTreeDict``."""
+
+ def __init__(self, key: KT, value: VT):
+ self._key = key
+ self._value = value
+
+ def key(self) -> KT:
+ return self._key
+
+ def value(self) -> VT:
+ return self._value
+
+ def __str__(self): # pragma: no cover
+ return f"KV({self._key}, {self._value})"
+
+ def __repr__(self): # pragma: no cover
+ return f"KV({self._key}, {self._value})"
+
+
+class BTreeDict(Generic[KT, VT], BTree[KT, KV[KT, VT]], MutableMapping[KT, VT]):
+ """A MutableMapping implemented with a BTree.
+
+ Unlike a normal Python dict, the BTreeDict may be mutated while iterating.
+ """
+
+ def __init__(
+ self,
+ *,
+ t: int = DEFAULT_T,
+ original: BTree | None = None,
+ in_order: bool = False,
+ ):
+ super().__init__(t=t, original=original)
+ self.in_order = in_order
+
+ def __getitem__(self, key: KT) -> VT:
+ elt = self.get_element(key)
+ if elt is None:
+ raise KeyError
+ else:
+ return cast(KV, elt).value()
+
+ def __setitem__(self, key: KT, value: VT) -> None:
+ elt = KV(key, value)
+ self.insert_element(elt, self.in_order)
+
+ def __delitem__(self, key: KT) -> None:
+ if self.delete_key(key) is None:
+ raise KeyError
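+
+# A minimal sketch of mutating while iterating, which the automatic cursor
+# parking above makes safe (a plain dict would raise "dictionary changed size
+# during iteration" here):
+#
+#     d = BTreeDict()
+#     for i in range(100):
+#         d[i] = i * i
+#     for key in d:        # iteration uses a registered cursor
+#         if key % 2 == 0:
+#             del d[key]   # parks the cursor; it re-seeks on the next step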
+
+
+class Member(Element, Generic[KT]):
+ """The BTree element type used in a ``BTreeSet``."""
+
+ def __init__(self, key: KT):
+ self._key = key
+
+ def key(self) -> KT:
+ return self._key
+
+
+class BTreeSet(BTree, Generic[KT], MutableSet[KT]):
+ """A MutableSet implemented with a BTree.
+
+ Unlike a normal Python set, the BTreeSet may be mutated while iterating.
+ """
+
+ def __init__(
+ self,
+ *,
+ t: int = DEFAULT_T,
+ original: BTree | None = None,
+ in_order: bool = False,
+ ):
+ super().__init__(t=t, original=original)
+ self.in_order = in_order
+
+ def __contains__(self, key: Any) -> bool:
+ return self.get_element(key) is not None
+
+ def add(self, value: KT) -> None:
+ elt = Member(value)
+ self.insert_element(elt, self.in_order)
+
+ def discard(self, value: KT) -> None:
+ self.delete_key(value)
diff --git a/tapdown/lib/python3.11/site-packages/dns/btreezone.py b/tapdown/lib/python3.11/site-packages/dns/btreezone.py
new file mode 100644
index 0000000..27b5bb6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/btreezone.py
@@ -0,0 +1,367 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# A derivative of a dnspython VersionedZone and related classes, using a BTreeDict and
+# a separate per-version delegation index. These additions let us
+#
+# 1) Do efficient CoW versioning (useful for future online updates).
+# 2) Maintain sort order.
+# 3) Allow delegations to be found easily.
+# 4) Handle glue.
+# 5) Add Node flags ORIGIN, DELEGATION, and GLUE whenever relevant. The ORIGIN
+#    flag is set at the origin node, the DELEGATION flag is set at delegation
+#    points, and the GLUE flag is set on nodes beneath delegation points.
+
+import enum
+from dataclasses import dataclass
+from typing import Callable, MutableMapping, Tuple, cast
+
+import dns.btree
+import dns.immutable
+import dns.name
+import dns.node
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.versioned
+import dns.zone
+
+
+class NodeFlags(enum.IntFlag):
+ ORIGIN = 0x01
+ DELEGATION = 0x02
+ GLUE = 0x04
+
+
+class Node(dns.node.Node):
+ __slots__ = ["flags", "id"]
+
+ def __init__(self, flags: NodeFlags | None = None):
+ super().__init__()
+ if flags is None:
+ # We allow optional flags rather than a default
+ # as pyright doesn't like assigning a literal 0
+ # to flags.
+ flags = NodeFlags(0)
+ self.flags = flags
+ self.id = 0
+
+ def is_delegation(self):
+ return (self.flags & NodeFlags.DELEGATION) != 0
+
+ def is_glue(self):
+ return (self.flags & NodeFlags.GLUE) != 0
+
+ def is_origin(self):
+ return (self.flags & NodeFlags.ORIGIN) != 0
+
+ def is_origin_or_glue(self):
+ return (self.flags & (NodeFlags.ORIGIN | NodeFlags.GLUE)) != 0
+
+
+@dns.immutable.immutable
+class ImmutableNode(Node):
+ def __init__(self, node: Node):
+ super().__init__()
+ self.id = node.id
+ self.rdatasets = tuple( # type: ignore
+ [dns.rdataset.ImmutableRdataset(rds) for rds in node.rdatasets]
+ )
+ self.flags = node.flags
+
+ def find_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset:
+ if create:
+ raise TypeError("immutable")
+ return super().find_rdataset(rdclass, rdtype, covers, False)
+
+ def get_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset | None:
+ if create:
+ raise TypeError("immutable")
+ return super().get_rdataset(rdclass, rdtype, covers, False)
+
+ def delete_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ ) -> None:
+ raise TypeError("immutable")
+
+ def replace_rdataset(self, replacement: dns.rdataset.Rdataset) -> None:
+ raise TypeError("immutable")
+
+ def is_immutable(self) -> bool:
+ return True
+
+
+class Delegations(dns.btree.BTreeSet[dns.name.Name]):
+ def get_delegation(self, name: dns.name.Name) -> Tuple[dns.name.Name | None, bool]:
+ """Get the delegation applicable to *name*, if it exists.
+
+        If there is a delegation, then return a tuple consisting of the name of
+        the delegation point and a boolean which is `True` if the name is a proper
+        subdomain of the delegation point and `False` if it is equal to the
+        delegation point. If there is no applicable delegation, return (None, False).
+ """
+ cursor = self.cursor()
+ cursor.seek(name, before=False)
+ prev = cursor.prev()
+ if prev is None:
+ return None, False
+ cut = prev.key()
+ reln, _, _ = name.fullcompare(cut)
+ is_subdomain = reln == dns.name.NameRelation.SUBDOMAIN
+ if is_subdomain or reln == dns.name.NameRelation.EQUAL:
+ return cut, is_subdomain
+ else:
+ return None, False
+
+ def is_glue(self, name: dns.name.Name) -> bool:
+ """Is *name* glue, i.e. is it beneath a delegation?"""
+ cut, is_subdomain = self.get_delegation(name)
+ if cut is None:
+ return False
+ return is_subdomain
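+
+# A small illustration, assuming absolute names and a single delegation at
+# "sub.example.":
+#
+#     cuts = Delegations()
+#     cuts.add(dns.name.from_text("sub.example."))
+#     cuts.get_delegation(dns.name.from_text("sub.example."))
+#         # -> (<sub.example.>, False): at the delegation point itself
+#     cuts.get_delegation(dns.name.from_text("ns1.sub.example."))
+#         # -> (<sub.example.>, True): a proper subdomain, i.e. glue
+#     cuts.get_delegation(dns.name.from_text("www.example."))
+#         # -> (None, False): not covered by any delegation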
+
+
+class WritableVersion(dns.zone.WritableVersion):
+ def __init__(self, zone: dns.zone.Zone, replacement: bool = False):
+ super().__init__(zone, True)
+ if not replacement:
+ assert isinstance(zone, dns.versioned.Zone)
+ version = zone._versions[-1]
+ self.nodes: dns.btree.BTreeDict[dns.name.Name, Node] = dns.btree.BTreeDict(
+ original=version.nodes # type: ignore
+ )
+ self.delegations = Delegations(original=version.delegations) # type: ignore
+ else:
+ self.delegations = Delegations()
+
+ def _is_origin(self, name: dns.name.Name) -> bool:
+ # Assumes name has already been validated (and thus adjusted to the right
+ # relativity too)
+ if self.zone.relativize:
+ return name == dns.name.empty
+ else:
+ return name == self.zone.origin
+
+ def _maybe_cow_with_name(
+ self, name: dns.name.Name
+ ) -> Tuple[dns.node.Node, dns.name.Name]:
+ (node, name) = super()._maybe_cow_with_name(name)
+ node = cast(Node, node)
+ if self._is_origin(name):
+ node.flags |= NodeFlags.ORIGIN
+ elif self.delegations.is_glue(name):
+ node.flags |= NodeFlags.GLUE
+ return (node, name)
+
+ def update_glue_flag(self, name: dns.name.Name, is_glue: bool) -> None:
+ cursor = self.nodes.cursor() # type: ignore
+ cursor.seek(name, False)
+ updates = []
+ while True:
+ elt = cursor.next()
+ if elt is None:
+ break
+ ename = elt.key()
+ if not ename.is_subdomain(name):
+ break
+ node = cast(dns.node.Node, elt.value())
+ if ename not in self.changed:
+ new_node = self.zone.node_factory()
+ new_node.id = self.id # type: ignore
+ new_node.rdatasets.extend(node.rdatasets)
+ self.changed.add(ename)
+ node = new_node
+ assert isinstance(node, Node)
+ if is_glue:
+ node.flags |= NodeFlags.GLUE
+ else:
+ node.flags &= ~NodeFlags.GLUE
+ # We don't update node here as any insertion could disturb the
+ # btree and invalidate our cursor. We could use the cursor in a
+ # with block and avoid this, but it would do a lot of parking and
+ # unparking so the deferred update mode may still be better.
+ updates.append((ename, node))
+ for ename, node in updates:
+ self.nodes[ename] = node
+
+ def delete_node(self, name: dns.name.Name) -> None:
+ name = self._validate_name(name)
+ node = self.nodes.get(name)
+ if node is not None:
+ if node.is_delegation(): # type: ignore
+ self.delegations.discard(name)
+ self.update_glue_flag(name, False)
+ del self.nodes[name]
+ self.changed.add(name)
+
+ def put_rdataset(
+ self, name: dns.name.Name, rdataset: dns.rdataset.Rdataset
+ ) -> None:
+ (node, name) = self._maybe_cow_with_name(name)
+ if (
+ rdataset.rdtype == dns.rdatatype.NS and not node.is_origin_or_glue() # type: ignore
+ ):
+ node.flags |= NodeFlags.DELEGATION # type: ignore
+ if name not in self.delegations:
+ self.delegations.add(name)
+ self.update_glue_flag(name, True)
+ node.replace_rdataset(rdataset)
+
+ def delete_rdataset(
+ self,
+ name: dns.name.Name,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType,
+ ) -> None:
+ (node, name) = self._maybe_cow_with_name(name)
+ if rdtype == dns.rdatatype.NS and name in self.delegations: # type: ignore
+ node.flags &= ~NodeFlags.DELEGATION # type: ignore
+ self.delegations.discard(name) # type: ignore
+ self.update_glue_flag(name, False)
+ node.delete_rdataset(self.zone.rdclass, rdtype, covers)
+ if len(node) == 0:
+ del self.nodes[name]
+
+
+@dataclass(frozen=True)
+class Bounds:
+ name: dns.name.Name
+ left: dns.name.Name
+ right: dns.name.Name | None
+ closest_encloser: dns.name.Name
+ is_equal: bool
+ is_delegation: bool
+
+ def __str__(self):
+ if self.is_equal:
+ op = "="
+ else:
+ op = "<"
+ if self.is_delegation:
+ zonecut = " zonecut"
+ else:
+ zonecut = ""
+ return (
+ f"{self.left} {op} {self.name} < {self.right}{zonecut}; "
+ f"{self.closest_encloser}"
+ )
+
+
+@dns.immutable.immutable
+class ImmutableVersion(dns.zone.Version):
+ def __init__(self, version: dns.zone.Version):
+ if not isinstance(version, WritableVersion):
+ raise ValueError(
+ "a dns.btreezone.ImmutableVersion requires a "
+ "dns.btreezone.WritableVersion"
+ )
+ super().__init__(version.zone, True)
+ self.id = version.id
+ self.origin = version.origin
+ for name in version.changed:
+ node = version.nodes.get(name)
+ if node:
+ version.nodes[name] = ImmutableNode(node)
+ # the cast below is for mypy
+ self.nodes = cast(MutableMapping[dns.name.Name, dns.node.Node], version.nodes)
+ self.nodes.make_immutable() # type: ignore
+ self.delegations = version.delegations
+ self.delegations.make_immutable()
+
+ def bounds(self, name: dns.name.Name | str) -> Bounds:
+ """Return the 'bounds' of *name* in its zone.
+
+ The bounds information is useful when making an authoritative response, as
+ it can be used to determine whether the query name is at or beneath a delegation
+ point. The other data in the ``Bounds`` object is useful for making on-the-fly
+ DNSSEC signatures.
+
+ The left bound of *name* is *name* itself if it is in the zone, or the greatest
+ predecessor which is in the zone.
+
+ The right bound of *name* is the least successor of *name*, or ``None`` if
+ no name in the zone is greater than *name*.
+
+ The closest encloser of *name* is *name* itself, if *name* is in the zone;
+ otherwise it is the name with the largest number of labels in common with
+ *name* that is in the zone, either explicitly or by the implied existence
+ of empty non-terminals.
+
+ The bounds *is_equal* field is ``True`` if and only if *name* is equal to
+ its left bound.
+
+ The bounds *is_delegation* field is ``True`` if and only if the left bound is a
+ delegation point.
+ """
+ assert self.origin is not None
+ # validate the origin because we may need to relativize
+ origin = self.zone._validate_name(self.origin)
+ name = self.zone._validate_name(name)
+ cut, _ = self.delegations.get_delegation(name)
+ if cut is not None:
+ target = cut
+ is_delegation = True
+ else:
+ target = name
+ is_delegation = False
+ c = cast(dns.btree.BTreeDict, self.nodes).cursor()
+ c.seek(target, False)
+ left = c.prev()
+ assert left is not None
+ c.next() # skip over left
+ while True:
+ right = c.next()
+ if right is None or not right.value().is_glue():
+ break
+ left_comparison = left.key().fullcompare(name)
+ if right is not None:
+ right_key = right.key()
+ right_comparison = right_key.fullcompare(name)
+ else:
+ right_comparison = (
+ dns.name.NAMERELN_COMMONANCESTOR,
+ -1,
+ len(origin),
+ )
+ right_key = None
+ closest_encloser = dns.name.Name(
+ name[-max(left_comparison[2], right_comparison[2]) :]
+ )
+ return Bounds(
+ name,
+ left.key(),
+ right_key,
+ closest_encloser,
+ left_comparison[0] == dns.name.NameRelation.EQUAL,
+ is_delegation,
+ )
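+
+    # A small illustration, assuming an absolute-name zone "example." whose
+    # only nodes are the origin and "m.example.": bounds("g.example.") has
+    # left bound example. (its greatest predecessor in the zone), right bound
+    # m.example., is_equal False, and closest encloser example., while
+    # bounds("m.example.") has left bound m.example. with is_equal True.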
+
+
+class Zone(dns.versioned.Zone):
+ node_factory: Callable[[], dns.node.Node] = Node
+ map_factory: Callable[[], MutableMapping[dns.name.Name, dns.node.Node]] = cast(
+ Callable[[], MutableMapping[dns.name.Name, dns.node.Node]],
+ dns.btree.BTreeDict[dns.name.Name, Node],
+ )
+ writable_version_factory: (
+ Callable[[dns.zone.Zone, bool], dns.zone.Version] | None
+ ) = WritableVersion
+ immutable_version_factory: Callable[[dns.zone.Version], dns.zone.Version] | None = (
+ ImmutableVersion
+ )
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssec.py b/tapdown/lib/python3.11/site-packages/dns/dnssec.py
new file mode 100644
index 0000000..0b2aa70
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssec.py
@@ -0,0 +1,1242 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNSSEC-related functions and constants."""
+
+# pylint: disable=unused-import
+
+import base64
+import contextlib
+import functools
+import hashlib
+import struct
+import time
+from datetime import datetime
+from typing import Callable, Dict, List, Set, Tuple, Union, cast
+
+import dns._features
+import dns.name
+import dns.node
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.rrset
+import dns.transaction
+import dns.zone
+from dns.dnssectypes import Algorithm, DSDigest, NSEC3Hash
+from dns.exception import AlgorithmKeyMismatch as AlgorithmKeyMismatch
+from dns.exception import DeniedByPolicy, UnsupportedAlgorithm, ValidationFailure
+from dns.rdtypes.ANY.CDNSKEY import CDNSKEY
+from dns.rdtypes.ANY.CDS import CDS
+from dns.rdtypes.ANY.DNSKEY import DNSKEY
+from dns.rdtypes.ANY.DS import DS
+from dns.rdtypes.ANY.NSEC import NSEC, Bitmap
+from dns.rdtypes.ANY.NSEC3PARAM import NSEC3PARAM
+from dns.rdtypes.ANY.RRSIG import RRSIG, sigtime_to_posixtime
+from dns.rdtypes.dnskeybase import Flag
+
+PublicKey = Union[
+ "GenericPublicKey",
+ "rsa.RSAPublicKey",
+ "ec.EllipticCurvePublicKey",
+ "ed25519.Ed25519PublicKey",
+ "ed448.Ed448PublicKey",
+]
+
+PrivateKey = Union[
+ "GenericPrivateKey",
+ "rsa.RSAPrivateKey",
+ "ec.EllipticCurvePrivateKey",
+ "ed25519.Ed25519PrivateKey",
+ "ed448.Ed448PrivateKey",
+]
+
+RRsetSigner = Callable[[dns.transaction.Transaction, dns.rrset.RRset], None]
+
+
+def algorithm_from_text(text: str) -> Algorithm:
+ """Convert text into a DNSSEC algorithm value.
+
+ *text*, a ``str``, the text to convert to into an algorithm value.
+
+ Returns an ``int``.
+ """
+
+ return Algorithm.from_text(text)
+
+
+def algorithm_to_text(value: Algorithm | int) -> str:
+ """Convert a DNSSEC algorithm value to text
+
+ *value*, a ``dns.dnssec.Algorithm``.
+
+ Returns a ``str``, the name of a DNSSEC algorithm.
+ """
+
+ return Algorithm.to_text(value)
+
+
+def to_timestamp(value: datetime | str | float | int) -> int:
+ """Convert various format to a timestamp"""
+ if isinstance(value, datetime):
+ return int(value.timestamp())
+ elif isinstance(value, str):
+ return sigtime_to_posixtime(value)
+ elif isinstance(value, float):
+ return int(value)
+ elif isinstance(value, int):
+ return value
+ else:
+ raise TypeError("Unsupported timestamp type")
+
+
+def key_id(key: DNSKEY | CDNSKEY) -> int:
+ """Return the key id (a 16-bit number) for the specified key.
+
+ *key*, a ``dns.rdtypes.ANY.DNSKEY.DNSKEY``
+
+ Returns an ``int`` between 0 and 65535
+ """
+
+ rdata = key.to_wire()
+ assert rdata is not None # for mypy
+ if key.algorithm == Algorithm.RSAMD5:
+ return (rdata[-3] << 8) + rdata[-2]
+ else:
+ total = 0
+ for i in range(len(rdata) // 2):
+ total += (rdata[2 * i] << 8) + rdata[2 * i + 1]
+ if len(rdata) % 2 != 0:
+ total += rdata[len(rdata) - 1] << 8
+ total += (total >> 16) & 0xFFFF
+ return total & 0xFFFF
+
+
+class Policy:
+ def __init__(self):
+ pass
+
+ def ok_to_sign(self, key: DNSKEY) -> bool: # pragma: no cover
+ return False
+
+ def ok_to_validate(self, key: DNSKEY) -> bool: # pragma: no cover
+ return False
+
+ def ok_to_create_ds(self, algorithm: DSDigest) -> bool: # pragma: no cover
+ return False
+
+ def ok_to_validate_ds(self, algorithm: DSDigest) -> bool: # pragma: no cover
+ return False
+
+
+class SimpleDeny(Policy):
+ def __init__(self, deny_sign, deny_validate, deny_create_ds, deny_validate_ds):
+ super().__init__()
+ self._deny_sign = deny_sign
+ self._deny_validate = deny_validate
+ self._deny_create_ds = deny_create_ds
+ self._deny_validate_ds = deny_validate_ds
+
+ def ok_to_sign(self, key: DNSKEY) -> bool:
+ return key.algorithm not in self._deny_sign
+
+ def ok_to_validate(self, key: DNSKEY) -> bool:
+ return key.algorithm not in self._deny_validate
+
+ def ok_to_create_ds(self, algorithm: DSDigest) -> bool:
+ return algorithm not in self._deny_create_ds
+
+ def ok_to_validate_ds(self, algorithm: DSDigest) -> bool:
+ return algorithm not in self._deny_validate_ds
+
+
+rfc_8624_policy = SimpleDeny(
+ {Algorithm.RSAMD5, Algorithm.DSA, Algorithm.DSANSEC3SHA1, Algorithm.ECCGOST},
+ {Algorithm.RSAMD5, Algorithm.DSA, Algorithm.DSANSEC3SHA1},
+ {DSDigest.NULL, DSDigest.SHA1, DSDigest.GOST},
+ {DSDigest.NULL},
+)
+
+allow_all_policy = SimpleDeny(set(), set(), set(), set())
+
+
+default_policy = rfc_8624_policy
+
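+# A minimal sketch of a stricter custom policy than the RFC 8624 default,
+# additionally denying RSASHA1-family signing and validation as well as SHA-1
+# DS validation (the names below are the classes and enums defined above):
+#
+#     strict_policy = SimpleDeny(
+#         {Algorithm.RSAMD5, Algorithm.DSA, Algorithm.DSANSEC3SHA1,
+#          Algorithm.ECCGOST, Algorithm.RSASHA1, Algorithm.RSASHA1NSEC3SHA1},
+#         {Algorithm.RSAMD5, Algorithm.DSA, Algorithm.DSANSEC3SHA1,
+#          Algorithm.RSASHA1, Algorithm.RSASHA1NSEC3SHA1},
+#         {DSDigest.NULL, DSDigest.SHA1, DSDigest.GOST},
+#         {DSDigest.NULL, DSDigest.SHA1},
+#     )
+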
+
+def make_ds(
+ name: dns.name.Name | str,
+ key: dns.rdata.Rdata,
+ algorithm: DSDigest | str,
+ origin: dns.name.Name | None = None,
+ policy: Policy | None = None,
+ validating: bool = False,
+) -> DS:
+ """Create a DS record for a DNSSEC key.
+
+ *name*, a ``dns.name.Name`` or ``str``, the owner name of the DS record.
+
+ *key*, a ``dns.rdtypes.ANY.DNSKEY.DNSKEY`` or ``dns.rdtypes.ANY.DNSKEY.CDNSKEY``,
+ the key the DS is about.
+
+ *algorithm*, a ``str`` or ``int`` specifying the hash algorithm.
+ The currently supported hashes are "SHA1", "SHA256", and "SHA384". Case
+ does not matter for these strings.
+
+ *origin*, a ``dns.name.Name`` or ``None``. If *key* is a relative name,
+ then it will be made absolute using the specified origin.
+
+ *policy*, a ``dns.dnssec.Policy`` or ``None``. If ``None``, the default policy,
+ ``dns.dnssec.default_policy`` is used; this policy defaults to that of RFC 8624.
+
+ *validating*, a ``bool``. If ``True``, then policy is checked in
+ validating mode, i.e. "Is it ok to validate using this digest algorithm?".
+ Otherwise the policy is checked in creating mode, i.e. "Is it ok to create a DS with
+ this digest algorithm?".
+
+ Raises ``UnsupportedAlgorithm`` if the algorithm is unknown.
+
+ Raises ``DeniedByPolicy`` if the algorithm is denied by policy.
+
+ Returns a ``dns.rdtypes.ANY.DS.DS``
+ """
+
+ if policy is None:
+ policy = default_policy
+ try:
+ if isinstance(algorithm, str):
+ algorithm = DSDigest[algorithm.upper()]
+ except Exception:
+ raise UnsupportedAlgorithm(f'unsupported algorithm "{algorithm}"')
+ if validating:
+ check = policy.ok_to_validate_ds
+ else:
+ check = policy.ok_to_create_ds
+ if not check(algorithm):
+ raise DeniedByPolicy
+ if not isinstance(key, DNSKEY | CDNSKEY):
+ raise ValueError("key is not a DNSKEY | CDNSKEY")
+ if algorithm == DSDigest.SHA1:
+ dshash = hashlib.sha1()
+ elif algorithm == DSDigest.SHA256:
+ dshash = hashlib.sha256()
+ elif algorithm == DSDigest.SHA384:
+ dshash = hashlib.sha384()
+ else:
+ raise UnsupportedAlgorithm(f'unsupported algorithm "{algorithm}"')
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, origin)
+ wire = name.canonicalize().to_wire()
+ kwire = key.to_wire(origin=origin)
+ assert wire is not None and kwire is not None # for mypy
+ dshash.update(wire)
+ dshash.update(kwire)
+ digest = dshash.digest()
+
+ dsrdata = struct.pack("!HBB", key_id(key), key.algorithm, algorithm) + digest
+ ds = dns.rdata.from_wire(
+ dns.rdataclass.IN, dns.rdatatype.DS, dsrdata, 0, len(dsrdata)
+ )
+ return cast(DS, ds)
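+
+# A minimal usage sketch, assuming *dnskey* is a DNSKEY rdata obtained
+# elsewhere (for example, from a resolved DNSKEY rrset for "example."):
+#
+#     ds = make_ds("example.", dnskey, "SHA256")
+#     print(ds.key_tag, ds.algorithm, ds.digest_type)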
+
+
+def make_cds(
+ name: dns.name.Name | str,
+ key: dns.rdata.Rdata,
+ algorithm: DSDigest | str,
+ origin: dns.name.Name | None = None,
+) -> CDS:
+ """Create a CDS record for a DNSSEC key.
+
+ *name*, a ``dns.name.Name`` or ``str``, the owner name of the DS record.
+
+ *key*, a ``dns.rdtypes.ANY.DNSKEY.DNSKEY`` or ``dns.rdtypes.ANY.DNSKEY.CDNSKEY``,
+ the key the DS is about.
+
+ *algorithm*, a ``str`` or ``int`` specifying the hash algorithm.
+ The currently supported hashes are "SHA1", "SHA256", and "SHA384". Case
+ does not matter for these strings.
+
+ *origin*, a ``dns.name.Name`` or ``None``. If *key* is a relative name,
+ then it will be made absolute using the specified origin.
+
+ Raises ``UnsupportedAlgorithm`` if the algorithm is unknown.
+
+ Returns a ``dns.rdtypes.ANY.DS.CDS``
+ """
+
+ ds = make_ds(name, key, algorithm, origin)
+ return CDS(
+ rdclass=ds.rdclass,
+ rdtype=dns.rdatatype.CDS,
+ key_tag=ds.key_tag,
+ algorithm=ds.algorithm,
+ digest_type=ds.digest_type,
+ digest=ds.digest,
+ )
+
+
+def _find_candidate_keys(
+ keys: Dict[dns.name.Name, dns.rdataset.Rdataset | dns.node.Node], rrsig: RRSIG
+) -> List[DNSKEY] | None:
+ value = keys.get(rrsig.signer)
+ if isinstance(value, dns.node.Node):
+ rdataset = value.get_rdataset(dns.rdataclass.IN, dns.rdatatype.DNSKEY)
+ else:
+ rdataset = value
+ if rdataset is None:
+ return None
+ return [
+ cast(DNSKEY, rd)
+ for rd in rdataset
+ if rd.algorithm == rrsig.algorithm
+ and key_id(rd) == rrsig.key_tag
+ and (rd.flags & Flag.ZONE) == Flag.ZONE # RFC 4034 2.1.1
+ and rd.protocol == 3 # RFC 4034 2.1.2
+ ]
+
+
+def _get_rrname_rdataset(
+ rrset: dns.rrset.RRset | Tuple[dns.name.Name, dns.rdataset.Rdataset],
+) -> Tuple[dns.name.Name, dns.rdataset.Rdataset]:
+ if isinstance(rrset, tuple):
+ return rrset[0], rrset[1]
+ else:
+ return rrset.name, rrset
+
+
+def _validate_signature(sig: bytes, data: bytes, key: DNSKEY) -> None:
+ # pylint: disable=possibly-used-before-assignment
+ public_cls = get_algorithm_cls_from_dnskey(key).public_cls
+ try:
+ public_key = public_cls.from_dnskey(key)
+ except ValueError:
+ raise ValidationFailure("invalid public key")
+ public_key.verify(sig, data)
+
+
+def _validate_rrsig(
+ rrset: dns.rrset.RRset | Tuple[dns.name.Name, dns.rdataset.Rdataset],
+ rrsig: RRSIG,
+ keys: Dict[dns.name.Name, dns.node.Node | dns.rdataset.Rdataset],
+ origin: dns.name.Name | None = None,
+ now: float | None = None,
+ policy: Policy | None = None,
+) -> None:
+ """Validate an RRset against a single signature rdata, throwing an
+ exception if validation is not successful.
+
+ *rrset*, the RRset to validate. This can be a
+ ``dns.rrset.RRset`` or a (``dns.name.Name``, ``dns.rdataset.Rdataset``)
+ tuple.
+
+ *rrsig*, a ``dns.rdata.Rdata``, the signature to validate.
+
+ *keys*, the key dictionary, used to find the DNSKEY associated
+ with a given name. The dictionary is keyed by a
+ ``dns.name.Name``, and has ``dns.node.Node`` or
+ ``dns.rdataset.Rdataset`` values.
+
+ *origin*, a ``dns.name.Name`` or ``None``, the origin to use for relative
+ names.
+
+ *now*, a ``float`` or ``None``, the time, in seconds since the epoch, to
+ use as the current time when validating. If ``None``, the actual current
+ time is used.
+
+ *policy*, a ``dns.dnssec.Policy`` or ``None``. If ``None``, the default policy,
+ ``dns.dnssec.default_policy`` is used; this policy defaults to that of RFC 8624.
+
+ Raises ``ValidationFailure`` if the signature is expired, not yet valid,
+ the public key is invalid, the algorithm is unknown, the verification
+ fails, etc.
+
+ Raises ``UnsupportedAlgorithm`` if the algorithm is recognized by
+ dnspython but not implemented.
+ """
+
+ if policy is None:
+ policy = default_policy
+
+ candidate_keys = _find_candidate_keys(keys, rrsig)
+ if candidate_keys is None:
+ raise ValidationFailure("unknown key")
+
+ if now is None:
+ now = time.time()
+ if rrsig.expiration < now:
+ raise ValidationFailure("expired")
+ if rrsig.inception > now:
+ raise ValidationFailure("not yet valid")
+
+ data = _make_rrsig_signature_data(rrset, rrsig, origin)
+
+ # pylint: disable=possibly-used-before-assignment
+ for candidate_key in candidate_keys:
+ if not policy.ok_to_validate(candidate_key):
+ continue
+ try:
+ _validate_signature(rrsig.signature, data, candidate_key)
+ return
+ except (InvalidSignature, ValidationFailure):
+ # this happens on an individual validation failure
+ continue
+ # nothing verified -- raise failure:
+ raise ValidationFailure("verify failure")
+
+
+def _validate(
+ rrset: dns.rrset.RRset | Tuple[dns.name.Name, dns.rdataset.Rdataset],
+ rrsigset: dns.rrset.RRset | Tuple[dns.name.Name, dns.rdataset.Rdataset],
+ keys: Dict[dns.name.Name, dns.node.Node | dns.rdataset.Rdataset],
+ origin: dns.name.Name | None = None,
+ now: float | None = None,
+ policy: Policy | None = None,
+) -> None:
+ """Validate an RRset against a signature RRset, throwing an exception
+ if none of the signatures validate.
+
+ *rrset*, the RRset to validate. This can be a
+ ``dns.rrset.RRset`` or a (``dns.name.Name``, ``dns.rdataset.Rdataset``)
+ tuple.
+
+ *rrsigset*, the signature RRset. This can be a
+ ``dns.rrset.RRset`` or a (``dns.name.Name``, ``dns.rdataset.Rdataset``)
+ tuple.
+
+ *keys*, the key dictionary, used to find the DNSKEY associated
+ with a given name. The dictionary is keyed by a
+ ``dns.name.Name``, and has ``dns.node.Node`` or
+ ``dns.rdataset.Rdataset`` values.
+
+    *origin*, a ``dns.name.Name`` or ``None``, the origin to use for relative
+    names.
+
+    *now*, a ``float`` or ``None``, the time, in seconds since the epoch, to
+ use as the current time when validating. If ``None``, the actual current
+ time is used.
+
+ *policy*, a ``dns.dnssec.Policy`` or ``None``. If ``None``, the default policy,
+ ``dns.dnssec.default_policy`` is used; this policy defaults to that of RFC 8624.
+
+ Raises ``ValidationFailure`` if the signature is expired, not yet valid,
+ the public key is invalid, the algorithm is unknown, the verification
+ fails, etc.
+ """
+
+ if policy is None:
+ policy = default_policy
+
+ if isinstance(origin, str):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ if isinstance(rrset, tuple):
+ rrname = rrset[0]
+ else:
+ rrname = rrset.name
+
+ if isinstance(rrsigset, tuple):
+ rrsigname = rrsigset[0]
+ rrsigrdataset = rrsigset[1]
+ else:
+ rrsigname = rrsigset.name
+ rrsigrdataset = rrsigset
+
+ rrname = rrname.choose_relativity(origin)
+ rrsigname = rrsigname.choose_relativity(origin)
+ if rrname != rrsigname:
+ raise ValidationFailure("owner names do not match")
+
+ for rrsig in rrsigrdataset:
+ if not isinstance(rrsig, RRSIG):
+ raise ValidationFailure("expected an RRSIG")
+ try:
+ _validate_rrsig(rrset, rrsig, keys, origin, now, policy)
+ return
+ except (ValidationFailure, UnsupportedAlgorithm):
+ pass
+ raise ValidationFailure("no RRSIGs validated")
+
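+# Editor's sketch (not part of dnspython): how the public ``validate`` alias,
+# bound to ``_validate`` at the bottom of this module, might be called. It
+# assumes the "dnssec" feature (the ``cryptography`` package) is available.
+def _example_validate(rrset, rrsigset, dnskey_rrset):  # pragma: no cover
+    # The key dictionary maps owner names to DNSKEY rdatasets (or nodes);
+    # an RRset works here because it subclasses Rdataset.
+    keys = {dnskey_rrset.name: dnskey_rrset}
+    try:
+        validate(rrset, rrsigset, keys)  # returns None; raises on failure
+        return True
+    except ValidationFailure:
+        return False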
+
+def _sign(
+ rrset: dns.rrset.RRset | Tuple[dns.name.Name, dns.rdataset.Rdataset],
+ private_key: PrivateKey,
+ signer: dns.name.Name,
+ dnskey: DNSKEY,
+ inception: datetime | str | int | float | None = None,
+ expiration: datetime | str | int | float | None = None,
+ lifetime: int | None = None,
+ verify: bool = False,
+ policy: Policy | None = None,
+ origin: dns.name.Name | None = None,
+ deterministic: bool = True,
+) -> RRSIG:
+ """Sign RRset using private key.
+
+    *rrset*, the RRset to sign. This can be a
+ ``dns.rrset.RRset`` or a (``dns.name.Name``, ``dns.rdataset.Rdataset``)
+ tuple.
+
+ *private_key*, the private key to use for signing, a
+ ``cryptography.hazmat.primitives.asymmetric`` private key class applicable
+ for DNSSEC.
+
+ *signer*, a ``dns.name.Name``, the Signer's name.
+
+ *dnskey*, a ``DNSKEY`` matching ``private_key``.
+
+ *inception*, a ``datetime``, ``str``, ``int``, ``float`` or ``None``, the
+ signature inception time. If ``None``, the current time is used. If a ``str``, the
+ format is "YYYYMMDDHHMMSS" or alternatively the number of seconds since the UNIX
+    epoch in text form; this is the same as the RRSIG rdata's text form.
+ Values of type `int` or `float` are interpreted as seconds since the UNIX epoch.
+
+ *expiration*, a ``datetime``, ``str``, ``int``, ``float`` or ``None``, the signature
+ expiration time. If ``None``, the expiration time will be the inception time plus
+ the value of the *lifetime* parameter. See the description of *inception* above
+ for how the various parameter types are interpreted.
+
+ *lifetime*, an ``int`` or ``None``, the signature lifetime in seconds. This
+ parameter is only meaningful if *expiration* is ``None``.
+
+ *verify*, a ``bool``. If set to ``True``, the signer will verify signatures
+ after they are created; the default is ``False``.
+
+ *policy*, a ``dns.dnssec.Policy`` or ``None``. If ``None``, the default policy,
+ ``dns.dnssec.default_policy`` is used; this policy defaults to that of RFC 8624.
+
+ *origin*, a ``dns.name.Name`` or ``None``. If ``None``, the default, then all
+ names in the rrset (including its owner name) must be absolute; otherwise the
+ specified origin will be used to make names absolute when signing.
+
+ *deterministic*, a ``bool``. If ``True``, the default, use deterministic
+ (reproducible) signatures when supported by the algorithm used for signing.
+ Currently, this only affects ECDSA.
+
+ Raises ``DeniedByPolicy`` if the signature is denied by policy.
+ """
+
+ if policy is None:
+ policy = default_policy
+ if not policy.ok_to_sign(dnskey):
+ raise DeniedByPolicy
+
+ if isinstance(rrset, tuple):
+ rdclass = rrset[1].rdclass
+ rdtype = rrset[1].rdtype
+ rrname = rrset[0]
+ original_ttl = rrset[1].ttl
+ else:
+ rdclass = rrset.rdclass
+ rdtype = rrset.rdtype
+ rrname = rrset.name
+ original_ttl = rrset.ttl
+
+ if inception is not None:
+ rrsig_inception = to_timestamp(inception)
+ else:
+ rrsig_inception = int(time.time())
+
+ if expiration is not None:
+ rrsig_expiration = to_timestamp(expiration)
+ elif lifetime is not None:
+ rrsig_expiration = rrsig_inception + lifetime
+ else:
+ raise ValueError("expiration or lifetime must be specified")
+
+ # Derelativize now because we need a correct labels length for the
+ # rrsig_template.
+ if origin is not None:
+ rrname = rrname.derelativize(origin)
+ labels = len(rrname) - 1
+
+ # Adjust labels appropriately for wildcards.
+ if rrname.is_wild():
+ labels -= 1
+
+ rrsig_template = RRSIG(
+ rdclass=rdclass,
+ rdtype=dns.rdatatype.RRSIG,
+ type_covered=rdtype,
+ algorithm=dnskey.algorithm,
+ labels=labels,
+ original_ttl=original_ttl,
+ expiration=rrsig_expiration,
+ inception=rrsig_inception,
+ key_tag=key_id(dnskey),
+ signer=signer,
+ signature=b"",
+ )
+
+ data = _make_rrsig_signature_data(rrset, rrsig_template, origin)
+
+ # pylint: disable=possibly-used-before-assignment
+ if isinstance(private_key, GenericPrivateKey):
+ signing_key = private_key
+ else:
+ try:
+ private_cls = get_algorithm_cls_from_dnskey(dnskey)
+ signing_key = private_cls(key=private_key)
+ except UnsupportedAlgorithm:
+ raise TypeError("Unsupported key algorithm")
+
+ signature = signing_key.sign(data, verify, deterministic)
+
+ return cast(RRSIG, rrsig_template.replace(signature=signature))
+
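+# Editor's sketch (not part of dnspython): signing an RRset with a freshly
+# generated Ed25519 key via the public ``sign`` alias bound at the bottom of
+# this module. Assumes *rrset* has an absolute owner name.
+def _example_sign(rrset):  # pragma: no cover
+    from dns.dnssecalgs.eddsa import PrivateED25519
+
+    private_key = PrivateED25519.generate()
+    dnskey = private_key.public_key().to_dnskey()
+    signer = dns.name.from_text("example.")  # hypothetical signer name
+    return sign(rrset, private_key, signer, dnskey, lifetime=3600, verify=True)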
+
+def _make_rrsig_signature_data(
+ rrset: dns.rrset.RRset | Tuple[dns.name.Name, dns.rdataset.Rdataset],
+ rrsig: RRSIG,
+ origin: dns.name.Name | None = None,
+) -> bytes:
+ """Create signature rdata.
+
+ *rrset*, the RRset to sign/validate. This can be a
+ ``dns.rrset.RRset`` or a (``dns.name.Name``, ``dns.rdataset.Rdataset``)
+ tuple.
+
+ *rrsig*, a ``dns.rdata.Rdata``, the signature to validate, or the
+ signature template used when signing.
+
+ *origin*, a ``dns.name.Name`` or ``None``, the origin to use for relative
+ names.
+
+ Raises ``UnsupportedAlgorithm`` if the algorithm is recognized by
+ dnspython but not implemented.
+ """
+
+ if isinstance(origin, str):
+ origin = dns.name.from_text(origin, dns.name.root)
+
+ signer = rrsig.signer
+ if not signer.is_absolute():
+ if origin is None:
+ raise ValidationFailure("relative RR name without an origin specified")
+ signer = signer.derelativize(origin)
+
+ # For convenience, allow the rrset to be specified as a (name,
+ # rdataset) tuple as well as a proper rrset
+ rrname, rdataset = _get_rrname_rdataset(rrset)
+
+ data = b""
+ wire = rrsig.to_wire(origin=signer)
+ assert wire is not None # for mypy
+ data += wire[:18]
+ data += rrsig.signer.to_digestable(signer)
+
+ # Derelativize the name before considering labels.
+ if not rrname.is_absolute():
+ if origin is None:
+ raise ValidationFailure("relative RR name without an origin specified")
+ rrname = rrname.derelativize(origin)
+
+ name_len = len(rrname)
+ if rrname.is_wild() and rrsig.labels != name_len - 2:
+ raise ValidationFailure("wild owner name has wrong label length")
+ if name_len - 1 < rrsig.labels:
+ raise ValidationFailure("owner name longer than RRSIG labels")
+ elif rrsig.labels < name_len - 1:
+ suffix = rrname.split(rrsig.labels + 1)[1]
+ rrname = dns.name.from_text("*", suffix)
+ rrnamebuf = rrname.to_digestable()
+ rrfixed = struct.pack("!HHI", rdataset.rdtype, rdataset.rdclass, rrsig.original_ttl)
+ rdatas = [rdata.to_digestable(origin) for rdata in rdataset]
+ for rdata in sorted(rdatas):
+ data += rrnamebuf
+ data += rrfixed
+ rrlen = struct.pack("!H", len(rdata))
+ data += rrlen
+ data += rdata
+
+ return data
+
+
+def _make_dnskey(
+ public_key: PublicKey,
+ algorithm: int | str,
+ flags: int = Flag.ZONE,
+ protocol: int = 3,
+) -> DNSKEY:
+ """Convert a public key to DNSKEY Rdata
+
+ *public_key*, a ``PublicKey`` (``GenericPublicKey`` or
+ ``cryptography.hazmat.primitives.asymmetric``) to convert.
+
+ *algorithm*, a ``str`` or ``int`` specifying the DNSKEY algorithm.
+
+ *flags*: DNSKEY flags field as an integer.
+
+ *protocol*: DNSKEY protocol field as an integer.
+
+    Raises ``ValueError`` if the specified key algorithm parameters are not
+    supported, ``TypeError`` if the key type is unsupported,
+    ``UnsupportedAlgorithm`` if the algorithm is unknown, and
+    ``AlgorithmKeyMismatch`` if the algorithm does not match the key type.
+
+ Return DNSKEY ``Rdata``.
+ """
+
+ algorithm = Algorithm.make(algorithm)
+
+ # pylint: disable=possibly-used-before-assignment
+ if isinstance(public_key, GenericPublicKey):
+ return public_key.to_dnskey(flags=flags, protocol=protocol)
+ else:
+ public_cls = get_algorithm_cls(algorithm).public_cls
+ return public_cls(key=public_key).to_dnskey(flags=flags, protocol=protocol)
+
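+# Editor's sketch (not part of dnspython): converting a raw ``cryptography``
+# public key into DNSKEY rdata via the public ``make_dnskey`` alias bound at
+# the bottom of this module.
+def _example_make_dnskey():  # pragma: no cover
+    from cryptography.hazmat.primitives.asymmetric import ed25519 as _ed25519
+
+    public_key = _ed25519.Ed25519PrivateKey.generate().public_key()
+    return make_dnskey(public_key, algorithm=Algorithm.ED25519)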
+
+def _make_cdnskey(
+ public_key: PublicKey,
+ algorithm: int | str,
+ flags: int = Flag.ZONE,
+ protocol: int = 3,
+) -> CDNSKEY:
+ """Convert a public key to CDNSKEY Rdata
+
+ *public_key*, the public key to convert, a
+ ``cryptography.hazmat.primitives.asymmetric`` public key class applicable
+ for DNSSEC.
+
+ *algorithm*, a ``str`` or ``int`` specifying the DNSKEY algorithm.
+
+ *flags*: DNSKEY flags field as an integer.
+
+ *protocol*: DNSKEY protocol field as an integer.
+
+    Raises ``ValueError`` if the specified key algorithm parameters are not
+    supported, ``TypeError`` if the key type is unsupported,
+    ``UnsupportedAlgorithm`` if the algorithm is unknown, and
+    ``AlgorithmKeyMismatch`` if the algorithm does not match the key type.
+
+ Return CDNSKEY ``Rdata``.
+ """
+
+ dnskey = _make_dnskey(public_key, algorithm, flags, protocol)
+
+ return CDNSKEY(
+ rdclass=dnskey.rdclass,
+ rdtype=dns.rdatatype.CDNSKEY,
+ flags=dnskey.flags,
+ protocol=dnskey.protocol,
+ algorithm=dnskey.algorithm,
+ key=dnskey.key,
+ )
+
+
+def nsec3_hash(
+ domain: dns.name.Name | str,
+ salt: str | bytes | None,
+ iterations: int,
+ algorithm: int | str,
+) -> str:
+ """
+ Calculate the NSEC3 hash, according to
+ https://tools.ietf.org/html/rfc5155#section-5
+
+ *domain*, a ``dns.name.Name`` or ``str``, the name to hash.
+
+ *salt*, a ``str``, ``bytes``, or ``None``, the hash salt. If a
+ string, it is decoded as a hex string.
+
+ *iterations*, an ``int``, the number of iterations.
+
+ *algorithm*, a ``str`` or ``int``, the hash algorithm.
+ The only defined algorithm is SHA1.
+
+ Returns a ``str``, the encoded NSEC3 hash.
+ """
+
+ b32_conversion = str.maketrans(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567", "0123456789ABCDEFGHIJKLMNOPQRSTUV"
+ )
+
+ try:
+ if isinstance(algorithm, str):
+ algorithm = NSEC3Hash[algorithm.upper()]
+ except Exception:
+ raise ValueError("Wrong hash algorithm (only SHA1 is supported)")
+
+ if algorithm != NSEC3Hash.SHA1:
+ raise ValueError("Wrong hash algorithm (only SHA1 is supported)")
+
+ if salt is None:
+ salt_encoded = b""
+ elif isinstance(salt, str):
+ if len(salt) % 2 == 0:
+ salt_encoded = bytes.fromhex(salt)
+ else:
+ raise ValueError("Invalid salt length")
+ else:
+ salt_encoded = salt
+
+ if not isinstance(domain, dns.name.Name):
+ domain = dns.name.from_text(domain)
+ domain_encoded = domain.canonicalize().to_wire()
+ assert domain_encoded is not None
+
+ digest = hashlib.sha1(domain_encoded + salt_encoded).digest()
+ for _ in range(iterations):
+ digest = hashlib.sha1(digest + salt_encoded).digest()
+
+ output = base64.b32encode(digest).decode("utf-8")
+ output = output.translate(b32_conversion)
+
+ return output
+
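+# Editor's note (sketch, not part of dnspython): the RFC 5155 Appendix A test
+# vector is a convenient sanity check for this function; with salt "aabbccdd"
+# and 12 iterations,
+#
+#     nsec3_hash("example", salt="aabbccdd", iterations=12, algorithm="SHA1")
+#
+# is expected to return "0P9MHAVEQVM6T7VBL5LOP2U3T2RP3TOM" (the RFC prints the
+# same base32hex digest in lowercase).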
+
+def make_ds_rdataset(
+ rrset: dns.rrset.RRset | Tuple[dns.name.Name, dns.rdataset.Rdataset],
+ algorithms: Set[DSDigest | str],
+ origin: dns.name.Name | None = None,
+) -> dns.rdataset.Rdataset:
+ """Create a DS record from DNSKEY/CDNSKEY/CDS.
+
+ *rrset*, the RRset to create DS Rdataset for. This can be a
+ ``dns.rrset.RRset`` or a (``dns.name.Name``, ``dns.rdataset.Rdataset``)
+ tuple.
+
+ *algorithms*, a set of ``str`` or ``int`` specifying the hash algorithms.
+ The currently supported hashes are "SHA1", "SHA256", and "SHA384". Case
+    does not matter for these strings. If the RRset is a CDS, only rdatas
+    whose digest type is in *algorithms* are accepted.
+
+    *origin*, a ``dns.name.Name`` or ``None``. If the RRset's owner name is
+    relative, it will be made absolute using the specified origin.
+
+ Raises ``UnsupportedAlgorithm`` if any of the algorithms are unknown and
+ ``ValueError`` if the given RRset is not usable.
+
+ Returns a ``dns.rdataset.Rdataset``
+ """
+
+ rrname, rdataset = _get_rrname_rdataset(rrset)
+
+ if rdataset.rdtype not in (
+ dns.rdatatype.DNSKEY,
+ dns.rdatatype.CDNSKEY,
+ dns.rdatatype.CDS,
+ ):
+ raise ValueError("rrset not a DNSKEY/CDNSKEY/CDS")
+
+ _algorithms = set()
+ for algorithm in algorithms:
+ try:
+ if isinstance(algorithm, str):
+ algorithm = DSDigest[algorithm.upper()]
+ except Exception:
+ raise UnsupportedAlgorithm(f'unsupported algorithm "{algorithm}"')
+ _algorithms.add(algorithm)
+
+ if rdataset.rdtype == dns.rdatatype.CDS:
+ res = []
+ for rdata in cds_rdataset_to_ds_rdataset(rdataset):
+ if rdata.digest_type in _algorithms:
+ res.append(rdata)
+ if len(res) == 0:
+ raise ValueError("no acceptable CDS rdata found")
+ return dns.rdataset.from_rdata_list(rdataset.ttl, res)
+
+ res = []
+ for algorithm in _algorithms:
+ res.extend(dnskey_rdataset_to_cds_rdataset(rrname, rdataset, algorithm, origin))
+ return dns.rdataset.from_rdata_list(rdataset.ttl, res)
+
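+# Editor's sketch (not part of dnspython): deriving DS rdata from a zone's
+# DNSKEY RRset, e.g. for publication in the parent zone. Assumes
+# *dnskey_rrset* has an absolute owner name.
+def _example_make_ds_rdataset(dnskey_rrset):  # pragma: no cover
+    return make_ds_rdataset(dnskey_rrset, algorithms={"SHA256"})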
+
+def cds_rdataset_to_ds_rdataset(
+ rdataset: dns.rdataset.Rdataset,
+) -> dns.rdataset.Rdataset:
+ """Create a CDS record from DS.
+
+ *rdataset*, a ``dns.rdataset.Rdataset``, to create DS Rdataset for.
+
+ Raises ``ValueError`` if the rdataset is not CDS.
+
+ Returns a ``dns.rdataset.Rdataset``
+ """
+
+ if rdataset.rdtype != dns.rdatatype.CDS:
+ raise ValueError("rdataset not a CDS")
+ res = []
+ for rdata in rdataset:
+ res.append(
+ CDS(
+ rdclass=rdata.rdclass,
+ rdtype=dns.rdatatype.DS,
+ key_tag=rdata.key_tag,
+ algorithm=rdata.algorithm,
+ digest_type=rdata.digest_type,
+ digest=rdata.digest,
+ )
+ )
+ return dns.rdataset.from_rdata_list(rdataset.ttl, res)
+
+
+def dnskey_rdataset_to_cds_rdataset(
+ name: dns.name.Name | str,
+ rdataset: dns.rdataset.Rdataset,
+ algorithm: DSDigest | str,
+ origin: dns.name.Name | None = None,
+) -> dns.rdataset.Rdataset:
+ """Create a CDS record from DNSKEY/CDNSKEY.
+
+ *name*, a ``dns.name.Name`` or ``str``, the owner name of the CDS record.
+
+    *rdataset*, a ``dns.rdataset.Rdataset``, to create the CDS Rdataset for.
+
+ *algorithm*, a ``str`` or ``int`` specifying the hash algorithm.
+ The currently supported hashes are "SHA1", "SHA256", and "SHA384". Case
+ does not matter for these strings.
+
+    *origin*, a ``dns.name.Name`` or ``None``. If *name* is relative, it will
+    be made absolute using the specified origin.
+
+ Raises ``UnsupportedAlgorithm`` if the algorithm is unknown or
+ ``ValueError`` if the rdataset is not DNSKEY/CDNSKEY.
+
+ Returns a ``dns.rdataset.Rdataset``
+ """
+
+ if rdataset.rdtype not in (dns.rdatatype.DNSKEY, dns.rdatatype.CDNSKEY):
+ raise ValueError("rdataset not a DNSKEY/CDNSKEY")
+ res = []
+ for rdata in rdataset:
+ res.append(make_cds(name, rdata, algorithm, origin))
+ return dns.rdataset.from_rdata_list(rdataset.ttl, res)
+
+
+def dnskey_rdataset_to_cdnskey_rdataset(
+ rdataset: dns.rdataset.Rdataset,
+) -> dns.rdataset.Rdataset:
+ """Create a CDNSKEY record from DNSKEY.
+
+    *rdataset*, a ``dns.rdataset.Rdataset``, to create CDNSKEY Rdataset for.
+
+    Raises ``ValueError`` if the rdataset is not DNSKEY.
+
+    Returns a ``dns.rdataset.Rdataset``
+ """
+
+ if rdataset.rdtype != dns.rdatatype.DNSKEY:
+ raise ValueError("rdataset not a DNSKEY")
+ res = []
+ for rdata in rdataset:
+ res.append(
+ CDNSKEY(
+ rdclass=rdataset.rdclass,
+ rdtype=rdataset.rdtype,
+ flags=rdata.flags,
+ protocol=rdata.protocol,
+ algorithm=rdata.algorithm,
+ key=rdata.key,
+ )
+ )
+ return dns.rdataset.from_rdata_list(rdataset.ttl, res)
+
+
+def default_rrset_signer(
+ txn: dns.transaction.Transaction,
+ rrset: dns.rrset.RRset,
+ signer: dns.name.Name,
+ ksks: List[Tuple[PrivateKey, DNSKEY]],
+ zsks: List[Tuple[PrivateKey, DNSKEY]],
+ inception: datetime | str | int | float | None = None,
+ expiration: datetime | str | int | float | None = None,
+ lifetime: int | None = None,
+ policy: Policy | None = None,
+ origin: dns.name.Name | None = None,
+ deterministic: bool = True,
+) -> None:
+ """Default RRset signer"""
+
+ if rrset.rdtype in set(
+ [
+ dns.rdatatype.RdataType.DNSKEY,
+ dns.rdatatype.RdataType.CDS,
+ dns.rdatatype.RdataType.CDNSKEY,
+ ]
+ ):
+ keys = ksks
+ else:
+ keys = zsks
+
+ for private_key, dnskey in keys:
+ rrsig = sign(
+ rrset=rrset,
+ private_key=private_key,
+ dnskey=dnskey,
+ inception=inception,
+ expiration=expiration,
+ lifetime=lifetime,
+ signer=signer,
+ policy=policy,
+ origin=origin,
+ deterministic=deterministic,
+ )
+ txn.add(rrset.name, rrset.ttl, rrsig)
+
+
+def sign_zone(
+ zone: dns.zone.Zone,
+ txn: dns.transaction.Transaction | None = None,
+ keys: List[Tuple[PrivateKey, DNSKEY]] | None = None,
+ add_dnskey: bool = True,
+ dnskey_ttl: int | None = None,
+ inception: datetime | str | int | float | None = None,
+ expiration: datetime | str | int | float | None = None,
+ lifetime: int | None = None,
+ nsec3: NSEC3PARAM | None = None,
+ rrset_signer: RRsetSigner | None = None,
+ policy: Policy | None = None,
+ deterministic: bool = True,
+) -> None:
+ """Sign zone.
+
+ *zone*, a ``dns.zone.Zone``, the zone to sign.
+
+ *txn*, a ``dns.transaction.Transaction``, an optional transaction to use for
+ signing.
+
+ *keys*, a list of (``PrivateKey``, ``DNSKEY``) tuples, to use for signing. KSK/ZSK
+ roles are assigned automatically if the SEP flag is used, otherwise all RRsets are
+ signed by all keys.
+
+ *add_dnskey*, a ``bool``. If ``True``, the default, all specified DNSKEYs are
+ automatically added to the zone on signing.
+
+    *dnskey_ttl*, an ``int``, specifies the TTL for DNSKEY RRs. If not specified,
+    the TTL of the existing DNSKEY RRset is used, or else the TTL of the SOA RRset.
+
+ *inception*, a ``datetime``, ``str``, ``int``, ``float`` or ``None``, the signature
+ inception time. If ``None``, the current time is used. If a ``str``, the format is
+ "YYYYMMDDHHMMSS" or alternatively the number of seconds since the UNIX epoch in text
+    form; this is the same as the RRSIG rdata's text form. Values of type `int` or `float`
+ are interpreted as seconds since the UNIX epoch.
+
+ *expiration*, a ``datetime``, ``str``, ``int``, ``float`` or ``None``, the signature
+ expiration time. If ``None``, the expiration time will be the inception time plus
+ the value of the *lifetime* parameter. See the description of *inception* above for
+ how the various parameter types are interpreted.
+
+ *lifetime*, an ``int`` or ``None``, the signature lifetime in seconds. This
+ parameter is only meaningful if *expiration* is ``None``.
+
+ *nsec3*, a ``NSEC3PARAM`` Rdata, configures signing using NSEC3. Not yet
+ implemented.
+
+    *rrset_signer*, a ``Callable``, an optional function for signing RRsets. The
+    function requires two arguments: transaction and RRset. If not specified,
+    ``dns.dnssec.default_rrset_signer`` will be used.
+
+    *policy*, a ``dns.dnssec.Policy`` or ``None``. If ``None``, the default policy,
+    ``dns.dnssec.default_policy`` is used; this policy defaults to that of RFC 8624.
+
+ *deterministic*, a ``bool``. If ``True``, the default, use deterministic
+ (reproducible) signatures when supported by the algorithm used for signing.
+ Currently, this only affects ECDSA.
+
+ Returns ``None``.
+ """
+
+ ksks = []
+ zsks = []
+
+ # if we have both KSKs and ZSKs, split by SEP flag. if not, sign all
+ # records with all keys
+ if keys:
+ for key in keys:
+ if key[1].flags & Flag.SEP:
+ ksks.append(key)
+ else:
+ zsks.append(key)
+ if not ksks:
+ ksks = keys
+ if not zsks:
+ zsks = keys
+ else:
+ keys = []
+
+ if txn:
+ cm: contextlib.AbstractContextManager = contextlib.nullcontext(txn)
+ else:
+ cm = zone.writer()
+
+ if zone.origin is None:
+ raise ValueError("no zone origin")
+
+ with cm as _txn:
+ if add_dnskey:
+ if dnskey_ttl is None:
+ dnskey = _txn.get(zone.origin, dns.rdatatype.DNSKEY)
+ if dnskey:
+ dnskey_ttl = dnskey.ttl
+ else:
+ soa = _txn.get(zone.origin, dns.rdatatype.SOA)
+ dnskey_ttl = soa.ttl
+ for _, dnskey in keys:
+ _txn.add(zone.origin, dnskey_ttl, dnskey)
+
+ if nsec3:
+ raise NotImplementedError("Signing with NSEC3 not yet implemented")
+ else:
+ _rrset_signer = rrset_signer or functools.partial(
+ default_rrset_signer,
+ signer=zone.origin,
+ ksks=ksks,
+ zsks=zsks,
+ inception=inception,
+ expiration=expiration,
+ lifetime=lifetime,
+ policy=policy,
+ origin=zone.origin,
+ deterministic=deterministic,
+ )
+ return _sign_zone_nsec(zone, _txn, _rrset_signer)
+
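+# Editor's sketch (not part of dnspython): NSEC-signing a zone with a single
+# freshly generated Ed25519 key; with only one key it acts as both KSK and
+# ZSK. Assumes *zone* has a SOA RRset at its origin.
+def _example_sign_zone(zone):  # pragma: no cover
+    from dns.dnssecalgs.eddsa import PrivateED25519
+
+    private_key = PrivateED25519.generate()
+    dnskey = private_key.public_key().to_dnskey()
+    sign_zone(zone, keys=[(private_key, dnskey)], lifetime=86400)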
+
+def _sign_zone_nsec(
+ zone: dns.zone.Zone,
+ txn: dns.transaction.Transaction,
+ rrset_signer: RRsetSigner | None = None,
+) -> None:
+ """NSEC zone signer"""
+
+ def _txn_add_nsec(
+ txn: dns.transaction.Transaction,
+ name: dns.name.Name,
+ next_secure: dns.name.Name | None,
+ rdclass: dns.rdataclass.RdataClass,
+ ttl: int,
+ rrset_signer: RRsetSigner | None = None,
+ ) -> None:
+ """NSEC zone signer helper"""
+ mandatory_types = set(
+ [dns.rdatatype.RdataType.RRSIG, dns.rdatatype.RdataType.NSEC]
+ )
+ node = txn.get_node(name)
+ if node and next_secure:
+ types = (
+ set([rdataset.rdtype for rdataset in node.rdatasets]) | mandatory_types
+ )
+ windows = Bitmap.from_rdtypes(list(types))
+ rrset = dns.rrset.from_rdata(
+ name,
+ ttl,
+ NSEC(
+ rdclass=rdclass,
+ rdtype=dns.rdatatype.RdataType.NSEC,
+ next=next_secure,
+ windows=windows,
+ ),
+ )
+ txn.add(rrset)
+ if rrset_signer:
+ rrset_signer(txn, rrset)
+
+ rrsig_ttl = zone.get_soa(txn).minimum
+ delegation = None
+ last_secure = None
+
+ for name in sorted(txn.iterate_names()):
+ if delegation and name.is_subdomain(delegation):
+ # names below delegations are not secure
+ continue
+ elif txn.get(name, dns.rdatatype.NS) and name != zone.origin:
+ # inside delegation
+ delegation = name
+ else:
+ # outside delegation
+ delegation = None
+
+ if rrset_signer:
+ node = txn.get_node(name)
+ if node:
+ for rdataset in node.rdatasets:
+ if rdataset.rdtype == dns.rdatatype.RRSIG:
+ # do not sign RRSIGs
+ continue
+ elif delegation and rdataset.rdtype != dns.rdatatype.DS:
+ # do not sign delegations except DS records
+ continue
+ else:
+ rrset = dns.rrset.from_rdata(name, rdataset.ttl, *rdataset)
+ rrset_signer(txn, rrset)
+
+ # We need "is not None" as the empty name is False because its length is 0.
+ if last_secure is not None:
+ _txn_add_nsec(txn, last_secure, name, zone.rdclass, rrsig_ttl, rrset_signer)
+ last_secure = name
+
+ if last_secure:
+ _txn_add_nsec(
+ txn, last_secure, zone.origin, zone.rdclass, rrsig_ttl, rrset_signer
+ )
+
+
+def _need_pyca(*args, **kwargs):
+ raise ImportError(
+ "DNSSEC validation requires python cryptography"
+ ) # pragma: no cover
+
+
+if dns._features.have("dnssec"):
+ from cryptography.exceptions import InvalidSignature
+ from cryptography.hazmat.primitives.asymmetric import ec # pylint: disable=W0611
+ from cryptography.hazmat.primitives.asymmetric import ed448 # pylint: disable=W0611
+ from cryptography.hazmat.primitives.asymmetric import rsa # pylint: disable=W0611
+ from cryptography.hazmat.primitives.asymmetric import ( # pylint: disable=W0611
+ ed25519,
+ )
+
+ from dns.dnssecalgs import ( # pylint: disable=C0412
+ get_algorithm_cls,
+ get_algorithm_cls_from_dnskey,
+ )
+ from dns.dnssecalgs.base import GenericPrivateKey, GenericPublicKey
+
+ validate = _validate # type: ignore
+ validate_rrsig = _validate_rrsig # type: ignore
+ sign = _sign
+ make_dnskey = _make_dnskey
+ make_cdnskey = _make_cdnskey
+ _have_pyca = True
+else: # pragma: no cover
+ validate = _need_pyca
+ validate_rrsig = _need_pyca
+ sign = _need_pyca
+ make_dnskey = _need_pyca
+ make_cdnskey = _need_pyca
+ _have_pyca = False
+
+### BEGIN generated Algorithm constants
+
+RSAMD5 = Algorithm.RSAMD5
+DH = Algorithm.DH
+DSA = Algorithm.DSA
+ECC = Algorithm.ECC
+RSASHA1 = Algorithm.RSASHA1
+DSANSEC3SHA1 = Algorithm.DSANSEC3SHA1
+RSASHA1NSEC3SHA1 = Algorithm.RSASHA1NSEC3SHA1
+RSASHA256 = Algorithm.RSASHA256
+RSASHA512 = Algorithm.RSASHA512
+ECCGOST = Algorithm.ECCGOST
+ECDSAP256SHA256 = Algorithm.ECDSAP256SHA256
+ECDSAP384SHA384 = Algorithm.ECDSAP384SHA384
+ED25519 = Algorithm.ED25519
+ED448 = Algorithm.ED448
+INDIRECT = Algorithm.INDIRECT
+PRIVATEDNS = Algorithm.PRIVATEDNS
+PRIVATEOID = Algorithm.PRIVATEOID
+
+### END generated Algorithm constants
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/__init__.py b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/__init__.py
new file mode 100644
index 0000000..0810b19
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/__init__.py
@@ -0,0 +1,124 @@
+from typing import Dict, Tuple, Type
+
+import dns._features
+import dns.name
+from dns.dnssecalgs.base import GenericPrivateKey
+from dns.dnssectypes import Algorithm
+from dns.exception import UnsupportedAlgorithm
+from dns.rdtypes.ANY.DNSKEY import DNSKEY
+
+# pyright: reportPossiblyUnboundVariable=false
+
+if dns._features.have("dnssec"):
+ from dns.dnssecalgs.dsa import PrivateDSA, PrivateDSANSEC3SHA1
+ from dns.dnssecalgs.ecdsa import PrivateECDSAP256SHA256, PrivateECDSAP384SHA384
+ from dns.dnssecalgs.eddsa import PrivateED448, PrivateED25519
+ from dns.dnssecalgs.rsa import (
+ PrivateRSAMD5,
+ PrivateRSASHA1,
+ PrivateRSASHA1NSEC3SHA1,
+ PrivateRSASHA256,
+ PrivateRSASHA512,
+ )
+
+ _have_cryptography = True
+else:
+ _have_cryptography = False
+
+AlgorithmPrefix = bytes | dns.name.Name | None
+
+algorithms: Dict[Tuple[Algorithm, AlgorithmPrefix], Type[GenericPrivateKey]] = {}
+if _have_cryptography:
+ # pylint: disable=possibly-used-before-assignment
+ algorithms.update(
+ {
+ (Algorithm.RSAMD5, None): PrivateRSAMD5,
+ (Algorithm.DSA, None): PrivateDSA,
+ (Algorithm.RSASHA1, None): PrivateRSASHA1,
+ (Algorithm.DSANSEC3SHA1, None): PrivateDSANSEC3SHA1,
+ (Algorithm.RSASHA1NSEC3SHA1, None): PrivateRSASHA1NSEC3SHA1,
+ (Algorithm.RSASHA256, None): PrivateRSASHA256,
+ (Algorithm.RSASHA512, None): PrivateRSASHA512,
+ (Algorithm.ECDSAP256SHA256, None): PrivateECDSAP256SHA256,
+ (Algorithm.ECDSAP384SHA384, None): PrivateECDSAP384SHA384,
+ (Algorithm.ED25519, None): PrivateED25519,
+ (Algorithm.ED448, None): PrivateED448,
+ }
+ )
+
+
+def get_algorithm_cls(
+ algorithm: int | str, prefix: AlgorithmPrefix = None
+) -> Type[GenericPrivateKey]:
+ """Get Private Key class from Algorithm.
+
+ *algorithm*, a ``str`` or ``int`` specifying the DNSKEY algorithm.
+
+ Raises ``UnsupportedAlgorithm`` if the algorithm is unknown.
+
+ Returns a ``dns.dnssecalgs.GenericPrivateKey``
+ """
+ algorithm = Algorithm.make(algorithm)
+ cls = algorithms.get((algorithm, prefix))
+ if cls:
+ return cls
+ raise UnsupportedAlgorithm(
+ f'algorithm "{Algorithm.to_text(algorithm)}" not supported by dnspython'
+ )
+
+
+def get_algorithm_cls_from_dnskey(dnskey: DNSKEY) -> Type[GenericPrivateKey]:
+ """Get Private Key class from DNSKEY.
+
+ *dnskey*, a ``DNSKEY`` to get Algorithm class for.
+
+ Raises ``UnsupportedAlgorithm`` if the algorithm is unknown.
+
+ Returns a ``dns.dnssecalgs.GenericPrivateKey``
+ """
+ prefix: AlgorithmPrefix = None
+ if dnskey.algorithm == Algorithm.PRIVATEDNS:
+ prefix, _ = dns.name.from_wire(dnskey.key, 0)
+ elif dnskey.algorithm == Algorithm.PRIVATEOID:
+ length = int(dnskey.key[0])
+ prefix = dnskey.key[0 : length + 1]
+ return get_algorithm_cls(dnskey.algorithm, prefix)
+
+
+def register_algorithm_cls(
+ algorithm: int | str,
+ algorithm_cls: Type[GenericPrivateKey],
+ name: dns.name.Name | str | None = None,
+ oid: bytes | None = None,
+) -> None:
+ """Register Algorithm Private Key class.
+
+ *algorithm*, a ``str`` or ``int`` specifying the DNSKEY algorithm.
+
+    *algorithm_cls*: A ``GenericPrivateKey`` class.
+
+    *name*, an optional ``dns.name.Name`` or ``str``, for PRIVATEDNS algorithms.
+
+    *oid*: an optional BER-encoded ``bytes`` for PRIVATEOID algorithms.
+
+ Raises ``ValueError`` if a name or oid is specified incorrectly.
+ """
+ if not issubclass(algorithm_cls, GenericPrivateKey):
+ raise TypeError("Invalid algorithm class")
+ algorithm = Algorithm.make(algorithm)
+ prefix: AlgorithmPrefix = None
+ if algorithm == Algorithm.PRIVATEDNS:
+ if name is None:
+ raise ValueError("Name required for PRIVATEDNS algorithms")
+ if isinstance(name, str):
+ name = dns.name.from_text(name)
+ prefix = name
+ elif algorithm == Algorithm.PRIVATEOID:
+ if oid is None:
+ raise ValueError("OID required for PRIVATEOID algorithms")
+ prefix = bytes([len(oid)]) + oid
+ elif name:
+ raise ValueError("Name only supported for PRIVATEDNS algorithm")
+ elif oid:
+ raise ValueError("OID only supported for PRIVATEOID algorithm")
+ algorithms[(algorithm, prefix)] = algorithm_cls
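+
+
+# Editor's note (sketch, not part of dnspython): registering a hypothetical
+# PRIVATEDNS implementation, where ``MyPrivateKey`` stands in for a concrete
+# ``GenericPrivateKey`` subclass supplied by the caller:
+#
+#     register_algorithm_cls(
+#         algorithm=Algorithm.PRIVATEDNS,
+#         algorithm_cls=MyPrivateKey,
+#         name="algo.example.",
+#     )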
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/base.py b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/base.py
new file mode 100644
index 0000000..0334fe6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/base.py
@@ -0,0 +1,89 @@
+from abc import ABC, abstractmethod # pylint: disable=no-name-in-module
+from typing import Any, Type
+
+import dns.rdataclass
+import dns.rdatatype
+from dns.dnssectypes import Algorithm
+from dns.exception import AlgorithmKeyMismatch
+from dns.rdtypes.ANY.DNSKEY import DNSKEY
+from dns.rdtypes.dnskeybase import Flag
+
+
+class GenericPublicKey(ABC):
+ algorithm: Algorithm
+
+ @abstractmethod
+ def __init__(self, key: Any) -> None:
+ pass
+
+ @abstractmethod
+ def verify(self, signature: bytes, data: bytes) -> None:
+ """Verify signed DNSSEC data"""
+
+ @abstractmethod
+ def encode_key_bytes(self) -> bytes:
+ """Encode key as bytes for DNSKEY"""
+
+ @classmethod
+ def _ensure_algorithm_key_combination(cls, key: DNSKEY) -> None:
+ if key.algorithm != cls.algorithm:
+ raise AlgorithmKeyMismatch
+
+ def to_dnskey(self, flags: int = Flag.ZONE, protocol: int = 3) -> DNSKEY:
+ """Return public key as DNSKEY"""
+ return DNSKEY(
+ rdclass=dns.rdataclass.IN,
+ rdtype=dns.rdatatype.DNSKEY,
+ flags=flags,
+ protocol=protocol,
+ algorithm=self.algorithm,
+ key=self.encode_key_bytes(),
+ )
+
+ @classmethod
+ @abstractmethod
+ def from_dnskey(cls, key: DNSKEY) -> "GenericPublicKey":
+ """Create public key from DNSKEY"""
+
+ @classmethod
+ @abstractmethod
+ def from_pem(cls, public_pem: bytes) -> "GenericPublicKey":
+ """Create public key from PEM-encoded SubjectPublicKeyInfo as specified
+ in RFC 5280"""
+
+ @abstractmethod
+ def to_pem(self) -> bytes:
+ """Return public-key as PEM-encoded SubjectPublicKeyInfo as specified
+ in RFC 5280"""
+
+
+class GenericPrivateKey(ABC):
+ public_cls: Type[GenericPublicKey]
+
+ @abstractmethod
+ def __init__(self, key: Any) -> None:
+ pass
+
+ @abstractmethod
+ def sign(
+ self,
+ data: bytes,
+ verify: bool = False,
+ deterministic: bool = True,
+ ) -> bytes:
+ """Sign DNSSEC data"""
+
+ @abstractmethod
+ def public_key(self) -> "GenericPublicKey":
+ """Return public key instance"""
+
+ @classmethod
+ @abstractmethod
+ def from_pem(
+ cls, private_pem: bytes, password: bytes | None = None
+ ) -> "GenericPrivateKey":
+ """Create private key from PEM-encoded PKCS#8"""
+
+ @abstractmethod
+ def to_pem(self, password: bytes | None = None) -> bytes:
+ """Return private key as PEM-encoded PKCS#8"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/cryptography.py b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/cryptography.py
new file mode 100644
index 0000000..a5dde6a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/cryptography.py
@@ -0,0 +1,68 @@
+from typing import Any, Type
+
+from cryptography.hazmat.primitives import serialization
+
+from dns.dnssecalgs.base import GenericPrivateKey, GenericPublicKey
+from dns.exception import AlgorithmKeyMismatch
+
+
+class CryptographyPublicKey(GenericPublicKey):
+ key: Any = None
+ key_cls: Any = None
+
+ def __init__(self, key: Any) -> None: # pylint: disable=super-init-not-called
+ if self.key_cls is None:
+            raise TypeError("Undefined public key class")
+ if not isinstance( # pylint: disable=isinstance-second-argument-not-valid-type
+ key, self.key_cls
+ ):
+ raise AlgorithmKeyMismatch
+ self.key = key
+
+ @classmethod
+ def from_pem(cls, public_pem: bytes) -> "GenericPublicKey":
+ key = serialization.load_pem_public_key(public_pem)
+ return cls(key=key)
+
+ def to_pem(self) -> bytes:
+ return self.key.public_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo,
+ )
+
+
+class CryptographyPrivateKey(GenericPrivateKey):
+ key: Any = None
+ key_cls: Any = None
+ public_cls: Type[CryptographyPublicKey] # pyright: ignore
+
+ def __init__(self, key: Any) -> None: # pylint: disable=super-init-not-called
+ if self.key_cls is None:
+ raise TypeError("Undefined private key class")
+ if not isinstance( # pylint: disable=isinstance-second-argument-not-valid-type
+ key, self.key_cls
+ ):
+ raise AlgorithmKeyMismatch
+ self.key = key
+
+ def public_key(self) -> "CryptographyPublicKey":
+ return self.public_cls(key=self.key.public_key())
+
+ @classmethod
+ def from_pem(
+ cls, private_pem: bytes, password: bytes | None = None
+ ) -> "GenericPrivateKey":
+ key = serialization.load_pem_private_key(private_pem, password=password)
+ return cls(key=key)
+
+ def to_pem(self, password: bytes | None = None) -> bytes:
+ encryption_algorithm: serialization.KeySerializationEncryption
+ if password:
+ encryption_algorithm = serialization.BestAvailableEncryption(password)
+ else:
+ encryption_algorithm = serialization.NoEncryption()
+ return self.key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.PKCS8,
+ encryption_algorithm=encryption_algorithm,
+ )
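+
+
+# Editor's sketch (not part of dnspython): every concrete subclass inherits a
+# PKCS#8 PEM round trip from this base class; Ed25519 is used here only as an
+# example.
+def _example_pem_round_trip():  # pragma: no cover
+    from dns.dnssecalgs.eddsa import PrivateED25519
+
+    key = PrivateED25519.generate()
+    pem = key.to_pem(password=b"secret")
+    return PrivateED25519.from_pem(pem, password=b"secret")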
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/dsa.py b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/dsa.py
new file mode 100644
index 0000000..a4eb987
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/dsa.py
@@ -0,0 +1,108 @@
+import struct
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import dsa, utils
+
+from dns.dnssecalgs.cryptography import CryptographyPrivateKey, CryptographyPublicKey
+from dns.dnssectypes import Algorithm
+from dns.rdtypes.ANY.DNSKEY import DNSKEY
+
+
+class PublicDSA(CryptographyPublicKey):
+ key: dsa.DSAPublicKey
+ key_cls = dsa.DSAPublicKey
+ algorithm = Algorithm.DSA
+ chosen_hash = hashes.SHA1()
+
+ def verify(self, signature: bytes, data: bytes) -> None:
+ sig_r = signature[1:21]
+ sig_s = signature[21:]
+ sig = utils.encode_dss_signature(
+ int.from_bytes(sig_r, "big"), int.from_bytes(sig_s, "big")
+ )
+ self.key.verify(sig, data, self.chosen_hash)
+
+ def encode_key_bytes(self) -> bytes:
+ """Encode a public key per RFC 2536, section 2."""
+ pn = self.key.public_numbers()
+ dsa_t = (self.key.key_size // 8 - 64) // 8
+ if dsa_t > 8:
+ raise ValueError("unsupported DSA key size")
+ octets = 64 + dsa_t * 8
+ res = struct.pack("!B", dsa_t)
+ res += pn.parameter_numbers.q.to_bytes(20, "big")
+ res += pn.parameter_numbers.p.to_bytes(octets, "big")
+ res += pn.parameter_numbers.g.to_bytes(octets, "big")
+ res += pn.y.to_bytes(octets, "big")
+ return res
+
+ @classmethod
+ def from_dnskey(cls, key: DNSKEY) -> "PublicDSA":
+ cls._ensure_algorithm_key_combination(key)
+ keyptr = key.key
+ (t,) = struct.unpack("!B", keyptr[0:1])
+ keyptr = keyptr[1:]
+ octets = 64 + t * 8
+ dsa_q = keyptr[0:20]
+ keyptr = keyptr[20:]
+ dsa_p = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_g = keyptr[0:octets]
+ keyptr = keyptr[octets:]
+ dsa_y = keyptr[0:octets]
+ return cls(
+ key=dsa.DSAPublicNumbers( # type: ignore
+ int.from_bytes(dsa_y, "big"),
+ dsa.DSAParameterNumbers(
+ int.from_bytes(dsa_p, "big"),
+ int.from_bytes(dsa_q, "big"),
+ int.from_bytes(dsa_g, "big"),
+ ),
+ ).public_key(default_backend()),
+ )
+
+
+class PrivateDSA(CryptographyPrivateKey):
+ key: dsa.DSAPrivateKey
+ key_cls = dsa.DSAPrivateKey
+ public_cls = PublicDSA
+
+ def sign(
+ self,
+ data: bytes,
+ verify: bool = False,
+ deterministic: bool = True,
+ ) -> bytes:
+ """Sign using a private key per RFC 2536, section 3."""
+ public_dsa_key = self.key.public_key()
+ if public_dsa_key.key_size > 1024:
+ raise ValueError("DSA key size overflow")
+ der_signature = self.key.sign(
+ data, self.public_cls.chosen_hash # pyright: ignore
+ )
+ dsa_r, dsa_s = utils.decode_dss_signature(der_signature)
+ dsa_t = (public_dsa_key.key_size // 8 - 64) // 8
+ octets = 20
+ signature = (
+ struct.pack("!B", dsa_t)
+ + int.to_bytes(dsa_r, length=octets, byteorder="big")
+ + int.to_bytes(dsa_s, length=octets, byteorder="big")
+ )
+ if verify:
+ self.public_key().verify(signature, data)
+ return signature
+
+ @classmethod
+ def generate(cls, key_size: int) -> "PrivateDSA":
+ return cls(
+ key=dsa.generate_private_key(key_size=key_size),
+ )
+
+
+class PublicDSANSEC3SHA1(PublicDSA):
+ algorithm = Algorithm.DSANSEC3SHA1
+
+
+class PrivateDSANSEC3SHA1(PrivateDSA):
+ public_cls = PublicDSANSEC3SHA1
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/ecdsa.py b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/ecdsa.py
new file mode 100644
index 0000000..e3f3f06
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/ecdsa.py
@@ -0,0 +1,100 @@
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import ec, utils
+
+from dns.dnssecalgs.cryptography import CryptographyPrivateKey, CryptographyPublicKey
+from dns.dnssectypes import Algorithm
+from dns.rdtypes.ANY.DNSKEY import DNSKEY
+
+
+class PublicECDSA(CryptographyPublicKey):
+ key: ec.EllipticCurvePublicKey
+ key_cls = ec.EllipticCurvePublicKey
+ algorithm: Algorithm
+ chosen_hash: hashes.HashAlgorithm
+ curve: ec.EllipticCurve
+ octets: int
+
+ def verify(self, signature: bytes, data: bytes) -> None:
+ sig_r = signature[0 : self.octets]
+ sig_s = signature[self.octets :]
+ sig = utils.encode_dss_signature(
+ int.from_bytes(sig_r, "big"), int.from_bytes(sig_s, "big")
+ )
+ self.key.verify(sig, data, ec.ECDSA(self.chosen_hash))
+
+ def encode_key_bytes(self) -> bytes:
+ """Encode a public key per RFC 6605, section 4."""
+ pn = self.key.public_numbers()
+ return pn.x.to_bytes(self.octets, "big") + pn.y.to_bytes(self.octets, "big")
+
+ @classmethod
+ def from_dnskey(cls, key: DNSKEY) -> "PublicECDSA":
+ cls._ensure_algorithm_key_combination(key)
+ ecdsa_x = key.key[0 : cls.octets]
+ ecdsa_y = key.key[cls.octets : cls.octets * 2]
+ return cls(
+ key=ec.EllipticCurvePublicNumbers(
+ curve=cls.curve,
+ x=int.from_bytes(ecdsa_x, "big"),
+ y=int.from_bytes(ecdsa_y, "big"),
+ ).public_key(default_backend()),
+ )
+
+
+class PrivateECDSA(CryptographyPrivateKey):
+ key: ec.EllipticCurvePrivateKey
+ key_cls = ec.EllipticCurvePrivateKey
+ public_cls = PublicECDSA
+
+ def sign(
+ self,
+ data: bytes,
+ verify: bool = False,
+ deterministic: bool = True,
+ ) -> bytes:
+ """Sign using a private key per RFC 6605, section 4."""
+ algorithm = ec.ECDSA(
+ self.public_cls.chosen_hash, # pyright: ignore
+ deterministic_signing=deterministic,
+ )
+ der_signature = self.key.sign(data, algorithm)
+ dsa_r, dsa_s = utils.decode_dss_signature(der_signature)
+ signature = int.to_bytes(
+ dsa_r, length=self.public_cls.octets, byteorder="big" # pyright: ignore
+ ) + int.to_bytes(
+ dsa_s, length=self.public_cls.octets, byteorder="big" # pyright: ignore
+ )
+ if verify:
+ self.public_key().verify(signature, data)
+ return signature
+
+ @classmethod
+ def generate(cls) -> "PrivateECDSA":
+ return cls(
+ key=ec.generate_private_key(
+ curve=cls.public_cls.curve, backend=default_backend() # pyright: ignore
+ ),
+ )
+
+
+class PublicECDSAP256SHA256(PublicECDSA):
+ algorithm = Algorithm.ECDSAP256SHA256
+ chosen_hash = hashes.SHA256()
+ curve = ec.SECP256R1()
+ octets = 32
+
+
+class PrivateECDSAP256SHA256(PrivateECDSA):
+ public_cls = PublicECDSAP256SHA256
+
+
+class PublicECDSAP384SHA384(PublicECDSA):
+ algorithm = Algorithm.ECDSAP384SHA384
+ chosen_hash = hashes.SHA384()
+ curve = ec.SECP384R1()
+ octets = 48
+
+
+class PrivateECDSAP384SHA384(PrivateECDSA):
+ public_cls = PublicECDSAP384SHA384
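+
+
+# Editor's sketch (not part of dnspython): with ``deterministic=True`` and a
+# backend that supports RFC 6979 deterministic signing, repeated signatures
+# over the same data are identical.
+def _example_deterministic_ecdsa(data: bytes):  # pragma: no cover
+    key = PrivateECDSAP256SHA256.generate()
+    sig1 = key.sign(data, deterministic=True)
+    sig2 = key.sign(data, deterministic=True)
+    assert sig1 == sig2
+    return sig1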
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/eddsa.py b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/eddsa.py
new file mode 100644
index 0000000..1cbb407
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/eddsa.py
@@ -0,0 +1,70 @@
+from typing import Type
+
+from cryptography.hazmat.primitives import serialization
+from cryptography.hazmat.primitives.asymmetric import ed448, ed25519
+
+from dns.dnssecalgs.cryptography import CryptographyPrivateKey, CryptographyPublicKey
+from dns.dnssectypes import Algorithm
+from dns.rdtypes.ANY.DNSKEY import DNSKEY
+
+
+class PublicEDDSA(CryptographyPublicKey):
+ def verify(self, signature: bytes, data: bytes) -> None:
+ self.key.verify(signature, data)
+
+ def encode_key_bytes(self) -> bytes:
+ """Encode a public key per RFC 8080, section 3."""
+ return self.key.public_bytes(
+ encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw
+ )
+
+ @classmethod
+ def from_dnskey(cls, key: DNSKEY) -> "PublicEDDSA":
+ cls._ensure_algorithm_key_combination(key)
+ return cls(
+ key=cls.key_cls.from_public_bytes(key.key),
+ )
+
+
+class PrivateEDDSA(CryptographyPrivateKey):
+ public_cls: Type[PublicEDDSA] # pyright: ignore
+
+ def sign(
+ self,
+ data: bytes,
+ verify: bool = False,
+ deterministic: bool = True,
+ ) -> bytes:
+ """Sign using a private key per RFC 8080, section 4."""
+ signature = self.key.sign(data)
+ if verify:
+ self.public_key().verify(signature, data)
+ return signature
+
+ @classmethod
+ def generate(cls) -> "PrivateEDDSA":
+ return cls(key=cls.key_cls.generate())
+
+
+class PublicED25519(PublicEDDSA):
+ key: ed25519.Ed25519PublicKey
+ key_cls = ed25519.Ed25519PublicKey
+ algorithm = Algorithm.ED25519
+
+
+class PrivateED25519(PrivateEDDSA):
+ key: ed25519.Ed25519PrivateKey
+ key_cls = ed25519.Ed25519PrivateKey
+ public_cls = PublicED25519
+
+
+class PublicED448(PublicEDDSA):
+ key: ed448.Ed448PublicKey
+ key_cls = ed448.Ed448PublicKey
+ algorithm = Algorithm.ED448
+
+
+class PrivateED448(PrivateEDDSA):
+ key: ed448.Ed448PrivateKey
+ key_cls = ed448.Ed448PrivateKey
+ public_cls = PublicED448
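+
+
+# Editor's sketch (not part of dnspython): an Ed25519 sign/verify round trip
+# through the DNSKEY wire encoding defined in RFC 8080.
+def _example_ed25519_round_trip(data: bytes):  # pragma: no cover
+    private_key = PrivateED25519.generate()
+    dnskey = private_key.public_key().to_dnskey()
+    signature = private_key.sign(data)
+    # from_dnskey() reconstructs the public key; verify() raises on mismatch.
+    PublicED25519.from_dnskey(dnskey).verify(signature, data)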
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/rsa.py b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/rsa.py
new file mode 100644
index 0000000..de9160b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssecalgs/rsa.py
@@ -0,0 +1,126 @@
+import math
+import struct
+
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives import hashes
+from cryptography.hazmat.primitives.asymmetric import padding, rsa
+
+from dns.dnssecalgs.cryptography import CryptographyPrivateKey, CryptographyPublicKey
+from dns.dnssectypes import Algorithm
+from dns.rdtypes.ANY.DNSKEY import DNSKEY
+
+
+class PublicRSA(CryptographyPublicKey):
+ key: rsa.RSAPublicKey
+ key_cls = rsa.RSAPublicKey
+ algorithm: Algorithm
+ chosen_hash: hashes.HashAlgorithm
+
+ def verify(self, signature: bytes, data: bytes) -> None:
+ self.key.verify(signature, data, padding.PKCS1v15(), self.chosen_hash)
+
+ def encode_key_bytes(self) -> bytes:
+ """Encode a public key per RFC 3110, section 2."""
+ pn = self.key.public_numbers()
+ _exp_len = math.ceil(int.bit_length(pn.e) / 8)
+ exp = int.to_bytes(pn.e, length=_exp_len, byteorder="big")
+ if _exp_len > 255:
+ exp_header = b"\0" + struct.pack("!H", _exp_len)
+ else:
+ exp_header = struct.pack("!B", _exp_len)
+ if pn.n.bit_length() < 512 or pn.n.bit_length() > 4096:
+ raise ValueError("unsupported RSA key length")
+ return exp_header + exp + pn.n.to_bytes((pn.n.bit_length() + 7) // 8, "big")
+
+ @classmethod
+ def from_dnskey(cls, key: DNSKEY) -> "PublicRSA":
+ cls._ensure_algorithm_key_combination(key)
+ keyptr = key.key
+ (bytes_,) = struct.unpack("!B", keyptr[0:1])
+ keyptr = keyptr[1:]
+ if bytes_ == 0:
+ (bytes_,) = struct.unpack("!H", keyptr[0:2])
+ keyptr = keyptr[2:]
+ rsa_e = keyptr[0:bytes_]
+ rsa_n = keyptr[bytes_:]
+ return cls(
+ key=rsa.RSAPublicNumbers(
+ int.from_bytes(rsa_e, "big"), int.from_bytes(rsa_n, "big")
+ ).public_key(default_backend())
+ )
+
+
+class PrivateRSA(CryptographyPrivateKey):
+ key: rsa.RSAPrivateKey
+ key_cls = rsa.RSAPrivateKey
+ public_cls = PublicRSA
+ default_public_exponent = 65537
+
+ def sign(
+ self,
+ data: bytes,
+ verify: bool = False,
+ deterministic: bool = True,
+ ) -> bytes:
+ """Sign using a private key per RFC 3110, section 3."""
+ signature = self.key.sign(
+ data, padding.PKCS1v15(), self.public_cls.chosen_hash # pyright: ignore
+ )
+ if verify:
+ self.public_key().verify(signature, data)
+ return signature
+
+ @classmethod
+ def generate(cls, key_size: int) -> "PrivateRSA":
+ return cls(
+ key=rsa.generate_private_key(
+ public_exponent=cls.default_public_exponent,
+ key_size=key_size,
+ backend=default_backend(),
+ )
+ )
+
+
+class PublicRSAMD5(PublicRSA):
+ algorithm = Algorithm.RSAMD5
+ chosen_hash = hashes.MD5()
+
+
+class PrivateRSAMD5(PrivateRSA):
+ public_cls = PublicRSAMD5
+
+
+class PublicRSASHA1(PublicRSA):
+ algorithm = Algorithm.RSASHA1
+ chosen_hash = hashes.SHA1()
+
+
+class PrivateRSASHA1(PrivateRSA):
+ public_cls = PublicRSASHA1
+
+
+class PublicRSASHA1NSEC3SHA1(PublicRSA):
+ algorithm = Algorithm.RSASHA1NSEC3SHA1
+ chosen_hash = hashes.SHA1()
+
+
+class PrivateRSASHA1NSEC3SHA1(PrivateRSA):
+ public_cls = PublicRSASHA1NSEC3SHA1
+
+
+class PublicRSASHA256(PublicRSA):
+ algorithm = Algorithm.RSASHA256
+ chosen_hash = hashes.SHA256()
+
+
+class PrivateRSASHA256(PrivateRSA):
+ public_cls = PublicRSASHA256
+
+
+class PublicRSASHA512(PublicRSA):
+ algorithm = Algorithm.RSASHA512
+ chosen_hash = hashes.SHA512()
+
+
+class PrivateRSASHA512(PrivateRSA):
+ public_cls = PublicRSASHA512
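+
+
+# Editor's sketch (not part of dnspython): generating an RSASHA256 key pair
+# and encoding the public half as DNSKEY rdata per RFC 3110.
+def _example_rsa_dnskey():  # pragma: no cover
+    key = PrivateRSASHA256.generate(key_size=2048)
+    return key.public_key().to_dnskey()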
diff --git a/tapdown/lib/python3.11/site-packages/dns/dnssectypes.py b/tapdown/lib/python3.11/site-packages/dns/dnssectypes.py
new file mode 100644
index 0000000..02131e0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/dnssectypes.py
@@ -0,0 +1,71 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNSSEC-related types."""
+
+# This is a separate file to avoid import circularity between dns.dnssec and
+# the implementations of the DS and DNSKEY types.
+
+import dns.enum
+
+
+class Algorithm(dns.enum.IntEnum):
+ RSAMD5 = 1
+ DH = 2
+ DSA = 3
+ ECC = 4
+ RSASHA1 = 5
+ DSANSEC3SHA1 = 6
+ RSASHA1NSEC3SHA1 = 7
+ RSASHA256 = 8
+ RSASHA512 = 10
+ ECCGOST = 12
+ ECDSAP256SHA256 = 13
+ ECDSAP384SHA384 = 14
+ ED25519 = 15
+ ED448 = 16
+ INDIRECT = 252
+ PRIVATEDNS = 253
+ PRIVATEOID = 254
+
+ @classmethod
+ def _maximum(cls):
+ return 255
+
+
+class DSDigest(dns.enum.IntEnum):
+ """DNSSEC Delegation Signer Digest Algorithm"""
+
+ NULL = 0
+ SHA1 = 1
+ SHA256 = 2
+ GOST = 3
+ SHA384 = 4
+
+ @classmethod
+ def _maximum(cls):
+ return 255
+
+
+class NSEC3Hash(dns.enum.IntEnum):
+ """NSEC3 hash algorithm"""
+
+ SHA1 = 1
+
+ @classmethod
+ def _maximum(cls):
+ return 255
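+
+
+# Editor's note (sketch, not part of dnspython): ``dns.enum.IntEnum.make``
+# accepts mnemonics as well as numbers, so for example
+#
+#     Algorithm.make("ED25519") == Algorithm.make(15) == Algorithm.ED25519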
diff --git a/tapdown/lib/python3.11/site-packages/dns/e164.py b/tapdown/lib/python3.11/site-packages/dns/e164.py
new file mode 100644
index 0000000..942d2c0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/e164.py
@@ -0,0 +1,116 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS E.164 helpers."""
+
+from typing import Iterable
+
+import dns.exception
+import dns.name
+import dns.resolver
+
+#: The public E.164 domain.
+public_enum_domain = dns.name.from_text("e164.arpa.")
+
+
+def from_e164(
+ text: str, origin: dns.name.Name | None = public_enum_domain
+) -> dns.name.Name:
+ """Convert an E.164 number in textual form into a Name object whose
+ value is the ENUM domain name for that number.
+
+ Non-digits in the text are ignored, i.e. "16505551212",
+ "+1.650.555.1212" and "1 (650) 555-1212" are all the same.
+
+ *text*, a ``str``, is an E.164 number in textual form.
+
+ *origin*, a ``dns.name.Name``, the domain in which the number
+ should be constructed. The default is ``e164.arpa.``.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ parts = [d for d in text if d.isdigit()]
+ parts.reverse()
+ return dns.name.from_text(".".join(parts), origin=origin)
+
+
+def to_e164(
+ name: dns.name.Name,
+ origin: dns.name.Name | None = public_enum_domain,
+ want_plus_prefix: bool = True,
+) -> str:
+ """Convert an ENUM domain name into an E.164 number.
+
+ Note that dnspython does not have any information about preferred
+ number formats within national numbering plans, so all numbers are
+ emitted as a simple string of digits, prefixed by a '+' (unless
+ *want_plus_prefix* is ``False``).
+
+ *name* is a ``dns.name.Name``, the ENUM domain name.
+
+ *origin* is a ``dns.name.Name``, a domain containing the ENUM
+ domain name. The name is relativized to this domain before being
+ converted to text. If ``None``, no relativization is done.
+
+ *want_plus_prefix* is a ``bool``. If True, add a '+' to the beginning of
+ the returned number.
+
+ Returns a ``str``.
+
+ """
+ if origin is not None:
+ name = name.relativize(origin)
+ dlabels = [d for d in name.labels if d.isdigit() and len(d) == 1]
+ if len(dlabels) != len(name.labels):
+ raise dns.exception.SyntaxError("non-digit labels in ENUM domain name")
+ dlabels.reverse()
+ text = b"".join(dlabels)
+ if want_plus_prefix:
+ text = b"+" + text
+ return text.decode()
+
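+# Editor's sketch (not part of dnspython): the two conversions above are
+# inverses for well-formed input.
+def _example_e164_round_trip():  # pragma: no cover
+    name = from_e164("+1 650 555 1212")
+    # name is 2.1.2.1.5.5.5.0.5.6.1.e164.arpa.
+    return to_e164(name)  # "+16505551212"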
+
+def query(
+ number: str,
+ domains: Iterable[dns.name.Name | str],
+ resolver: dns.resolver.Resolver | None = None,
+) -> dns.resolver.Answer:
+ """Look for NAPTR RRs for the specified number in the specified domains.
+
+    e.g. query('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
+
+    *number*, a ``str``, is the number to look for.
+
+ *domains* is an iterable containing ``dns.name.Name`` values.
+
+ *resolver*, a ``dns.resolver.Resolver``, is the resolver to use. If
+ ``None``, the default resolver is used.
+ """
+
+ if resolver is None:
+ resolver = dns.resolver.get_default_resolver()
+ e_nx = dns.resolver.NXDOMAIN()
+ for domain in domains:
+ if isinstance(domain, str):
+ domain = dns.name.from_text(domain)
+ qname = from_e164(number, domain)
+ try:
+ return resolver.resolve(qname, "NAPTR")
+ except dns.resolver.NXDOMAIN as e:
+ e_nx += e
+ raise e_nx
diff --git a/tapdown/lib/python3.11/site-packages/dns/edns.py b/tapdown/lib/python3.11/site-packages/dns/edns.py
new file mode 100644
index 0000000..eb98548
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/edns.py
@@ -0,0 +1,591 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2009-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""EDNS Options"""
+
+import binascii
+import math
+import socket
+import struct
+from typing import Any, Dict
+
+import dns.enum
+import dns.inet
+import dns.ipv4
+import dns.ipv6
+import dns.name
+import dns.rdata
+import dns.wire
+
+
+class OptionType(dns.enum.IntEnum):
+ """EDNS option type codes"""
+
+ #: NSID
+ NSID = 3
+ #: DAU
+ DAU = 5
+ #: DHU
+ DHU = 6
+ #: N3U
+ N3U = 7
+ #: ECS (client-subnet)
+ ECS = 8
+ #: EXPIRE
+ EXPIRE = 9
+ #: COOKIE
+ COOKIE = 10
+ #: KEEPALIVE
+ KEEPALIVE = 11
+ #: PADDING
+ PADDING = 12
+ #: CHAIN
+ CHAIN = 13
+ #: EDE (extended-dns-error)
+ EDE = 15
+ #: REPORTCHANNEL
+ REPORTCHANNEL = 18
+
+ @classmethod
+ def _maximum(cls):
+ return 65535
+
+
+class Option:
+ """Base class for all EDNS option types."""
+
+ def __init__(self, otype: OptionType | str):
+ """Initialize an option.
+
+ *otype*, a ``dns.edns.OptionType``, is the option type.
+ """
+ self.otype = OptionType.make(otype)
+
+ def to_wire(self, file: Any | None = None) -> bytes | None:
+ """Convert an option to wire format.
+
+ Returns a ``bytes`` or ``None``.
+
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def to_text(self) -> str:
+ raise NotImplementedError # pragma: no cover
+
+ def to_generic(self) -> "GenericOption":
+ """Creates a dns.edns.GenericOption equivalent of this rdata.
+
+ Returns a ``dns.edns.GenericOption``.
+ """
+ wire = self.to_wire()
+ assert wire is not None # for mypy
+ return GenericOption(self.otype, wire)
+
+ @classmethod
+ def from_wire_parser(cls, otype: OptionType, parser: "dns.wire.Parser") -> "Option":
+ """Build an EDNS option object from wire format.
+
+ *otype*, a ``dns.edns.OptionType``, is the option type.
+
+ *parser*, a ``dns.wire.Parser``, the parser, which should be
+        restricted to the option length.
+
+ Returns a ``dns.edns.Option``.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def _cmp(self, other):
+ """Compare an EDNS option with another option of the same type.
+
+ Returns < 0 if < *other*, 0 if == *other*, and > 0 if > *other*.
+ """
+ wire = self.to_wire()
+ owire = other.to_wire()
+ if wire == owire:
+ return 0
+ if wire > owire:
+ return 1
+ return -1
+
+ def __eq__(self, other):
+ if not isinstance(other, Option):
+ return False
+ if self.otype != other.otype:
+ return False
+ return self._cmp(other) == 0
+
+ def __ne__(self, other):
+ if not isinstance(other, Option):
+ return True
+ if self.otype != other.otype:
+ return True
+ return self._cmp(other) != 0
+
+ def __lt__(self, other):
+ if not isinstance(other, Option) or self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ if not isinstance(other, Option) or self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) <= 0
+
+ def __ge__(self, other):
+ if not isinstance(other, Option) or self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) >= 0
+
+ def __gt__(self, other):
+ if not isinstance(other, Option) or self.otype != other.otype:
+ return NotImplemented
+ return self._cmp(other) > 0
+
+ def __str__(self):
+ return self.to_text()
+
+
+class GenericOption(Option): # lgtm[py/missing-equals]
+ """Generic Option Class
+
+ This class is used for EDNS option types for which we have no better
+ implementation.
+ """
+
+ def __init__(self, otype: OptionType | str, data: bytes | str):
+ super().__init__(otype)
+ self.data = dns.rdata.Rdata._as_bytes(data, True)
+
+ def to_wire(self, file: Any | None = None) -> bytes | None:
+ if file:
+ file.write(self.data)
+ return None
+ else:
+ return self.data
+
+ def to_text(self) -> str:
+ return f"Generic {self.otype}"
+
+ def to_generic(self) -> "GenericOption":
+ return self
+
+ @classmethod
+ def from_wire_parser(
+ cls, otype: OptionType | str, parser: "dns.wire.Parser"
+ ) -> Option:
+ return cls(otype, parser.get_remaining())
+
+
+class ECSOption(Option): # lgtm[py/missing-equals]
+ """EDNS Client Subnet (ECS, RFC7871)"""
+
+ def __init__(self, address: str, srclen: int | None = None, scopelen: int = 0):
+ """*address*, a ``str``, is the client address information.
+
+ *srclen*, an ``int``, the source prefix length, which is the
+ leftmost number of bits of the address to be used for the
+ lookup. The default is 24 for IPv4 and 56 for IPv6.
+
+ *scopelen*, an ``int``, the scope prefix length. This value
+ must be 0 in queries, and should be set in responses.
+ """
+
+ super().__init__(OptionType.ECS)
+ af = dns.inet.af_for_address(address)
+
+ if af == socket.AF_INET6:
+ self.family = 2
+ if srclen is None:
+ srclen = 56
+ address = dns.rdata.Rdata._as_ipv6_address(address)
+ srclen = dns.rdata.Rdata._as_int(srclen, 0, 128)
+ scopelen = dns.rdata.Rdata._as_int(scopelen, 0, 128)
+ elif af == socket.AF_INET:
+ self.family = 1
+ if srclen is None:
+ srclen = 24
+ address = dns.rdata.Rdata._as_ipv4_address(address)
+ srclen = dns.rdata.Rdata._as_int(srclen, 0, 32)
+ scopelen = dns.rdata.Rdata._as_int(scopelen, 0, 32)
+ else: # pragma: no cover (this will never happen)
+ raise ValueError("Bad address family")
+
+ assert srclen is not None
+ self.address = address
+ self.srclen = srclen
+ self.scopelen = scopelen
+
+ addrdata = dns.inet.inet_pton(af, address)
+ nbytes = int(math.ceil(srclen / 8.0))
+
+ # Truncate to srclen and pad to the end of the last octet needed
+        # See RFC 7871, Section 6.
+ self.addrdata = addrdata[:nbytes]
+ nbits = srclen % 8
+ if nbits != 0:
+ last = struct.pack("B", ord(self.addrdata[-1:]) & (0xFF << (8 - nbits)))
+ self.addrdata = self.addrdata[:-1] + last
+
+ def to_text(self) -> str:
+ return f"ECS {self.address}/{self.srclen} scope/{self.scopelen}"
+
+ @staticmethod
+ def from_text(text: str) -> Option:
+        """Convert a string into a ``dns.edns.ECSOption``.
+
+        *text*, a ``str``, the text form of the option.
+
+        Returns a ``dns.edns.ECSOption``.
+
+ Examples:
+
+ >>> import dns.edns
+ >>>
+ >>> # basic example
+ >>> dns.edns.ECSOption.from_text('1.2.3.4/24')
+ >>>
+ >>> # also understands scope
+ >>> dns.edns.ECSOption.from_text('1.2.3.4/24/32')
+ >>>
+ >>> # IPv6
+ >>> dns.edns.ECSOption.from_text('2001:4b98::1/64/64')
+ >>>
+ >>> # it understands results from `dns.edns.ECSOption.to_text()`
+ >>> dns.edns.ECSOption.from_text('ECS 1.2.3.4/24/32')
+ """
+ optional_prefix = "ECS"
+ tokens = text.split()
+ ecs_text = None
+ if len(tokens) == 1:
+ ecs_text = tokens[0]
+ elif len(tokens) == 2:
+ if tokens[0] != optional_prefix:
+ raise ValueError(f'could not parse ECS from "{text}"')
+ ecs_text = tokens[1]
+ else:
+ raise ValueError(f'could not parse ECS from "{text}"')
+ n_slashes = ecs_text.count("/")
+ if n_slashes == 1:
+ address, tsrclen = ecs_text.split("/")
+ tscope = "0"
+ elif n_slashes == 2:
+ address, tsrclen, tscope = ecs_text.split("/")
+ else:
+ raise ValueError(f'could not parse ECS from "{text}"')
+ try:
+ scope = int(tscope)
+ except ValueError:
+ raise ValueError("invalid scope " + f'"{tscope}": scope must be an integer')
+ try:
+ srclen = int(tsrclen)
+ except ValueError:
+ raise ValueError(
+ "invalid srclen " + f'"{tsrclen}": srclen must be an integer'
+ )
+ return ECSOption(address, srclen, scope)
+
+ def to_wire(self, file: Any | None = None) -> bytes | None:
+ value = (
+ struct.pack("!HBB", self.family, self.srclen, self.scopelen) + self.addrdata
+ )
+ if file:
+ file.write(value)
+ return None
+ else:
+ return value
+
+ @classmethod
+ def from_wire_parser(
+ cls, otype: OptionType | str, parser: "dns.wire.Parser"
+ ) -> Option:
+ family, src, scope = parser.get_struct("!HBB")
+ addrlen = int(math.ceil(src / 8.0))
+ prefix = parser.get_bytes(addrlen)
+ if family == 1:
+ pad = 4 - addrlen
+ addr = dns.ipv4.inet_ntoa(prefix + b"\x00" * pad)
+ elif family == 2:
+ pad = 16 - addrlen
+ addr = dns.ipv6.inet_ntoa(prefix + b"\x00" * pad)
+ else:
+ raise ValueError("unsupported family")
+
+ return cls(addr, src, scope)
+
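+# A minimal usage sketch for ECSOption (doctest-style; option_from_wire() is
+# defined later in this module).  Note that to_wire() truncates the address to
+# *srclen* bits, so the parsed copy comes back with the host bits zeroed:
+#
+# >>> import dns.edns
+# >>> opt = dns.edns.ECSOption('1.2.3.4', 24)
+# >>> opt.to_text()
+# 'ECS 1.2.3.4/24 scope/0'
+# >>> wire = opt.to_wire()
+# >>> dns.edns.option_from_wire(dns.edns.ECS, wire, 0, len(wire)).to_text()
+# 'ECS 1.2.3.0/24 scope/0'
+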
+
+class EDECode(dns.enum.IntEnum):
+ """Extended DNS Error (EDE) codes"""
+
+ OTHER = 0
+ UNSUPPORTED_DNSKEY_ALGORITHM = 1
+ UNSUPPORTED_DS_DIGEST_TYPE = 2
+ STALE_ANSWER = 3
+ FORGED_ANSWER = 4
+ DNSSEC_INDETERMINATE = 5
+ DNSSEC_BOGUS = 6
+ SIGNATURE_EXPIRED = 7
+ SIGNATURE_NOT_YET_VALID = 8
+ DNSKEY_MISSING = 9
+ RRSIGS_MISSING = 10
+ NO_ZONE_KEY_BIT_SET = 11
+ NSEC_MISSING = 12
+ CACHED_ERROR = 13
+ NOT_READY = 14
+ BLOCKED = 15
+ CENSORED = 16
+ FILTERED = 17
+ PROHIBITED = 18
+ STALE_NXDOMAIN_ANSWER = 19
+ NOT_AUTHORITATIVE = 20
+ NOT_SUPPORTED = 21
+ NO_REACHABLE_AUTHORITY = 22
+ NETWORK_ERROR = 23
+ INVALID_DATA = 24
+
+ @classmethod
+ def _maximum(cls):
+ return 65535
+
+
+class EDEOption(Option): # lgtm[py/missing-equals]
+ """Extended DNS Error (EDE, RFC8914)"""
+
+ _preserve_case = {"DNSKEY", "DS", "DNSSEC", "RRSIGs", "NSEC", "NXDOMAIN"}
+
+ def __init__(self, code: EDECode | str, text: str | None = None):
+ """*code*, a ``dns.edns.EDECode`` or ``str``, the info code of the
+ extended error.
+
+ *text*, a ``str`` or ``None``, specifying additional information about
+ the error.
+ """
+
+ super().__init__(OptionType.EDE)
+
+ self.code = EDECode.make(code)
+ if text is not None and not isinstance(text, str):
+ raise ValueError("text must be string or None")
+ self.text = text
+
+ def to_text(self) -> str:
+ output = f"EDE {self.code}"
+ if self.code in EDECode:
+ desc = EDECode.to_text(self.code)
+ desc = " ".join(
+ word if word in self._preserve_case else word.title()
+ for word in desc.split("_")
+ )
+ output += f" ({desc})"
+ if self.text is not None:
+ output += f": {self.text}"
+ return output
+
+ def to_wire(self, file: Any | None = None) -> bytes | None:
+ value = struct.pack("!H", self.code)
+ if self.text is not None:
+ value += self.text.encode("utf8")
+
+ if file:
+ file.write(value)
+ return None
+ else:
+ return value
+
+ @classmethod
+ def from_wire_parser(
+ cls, otype: OptionType | str, parser: "dns.wire.Parser"
+ ) -> Option:
+ code = EDECode.make(parser.get_uint16())
+ text = parser.get_remaining()
+
+ if text:
+ if text[-1] == 0: # text MAY be null-terminated
+ text = text[:-1]
+ btext = text.decode("utf8")
+ else:
+ btext = None
+
+ return cls(code, btext)
+
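+# A minimal usage sketch for EDEOption (doctest-style):
+#
+# >>> import dns.edns
+# >>> dns.edns.EDEOption(dns.edns.EDECode.BLOCKED, 'by policy').to_text()
+# 'EDE 15 (Blocked): by policy'
+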
+
+class NSIDOption(Option):
+ def __init__(self, nsid: bytes):
+ super().__init__(OptionType.NSID)
+ self.nsid = nsid
+
+ def to_wire(self, file: Any = None) -> bytes | None:
+ if file:
+ file.write(self.nsid)
+ return None
+ else:
+ return self.nsid
+
+ def to_text(self) -> str:
+ if all(c >= 0x20 and c <= 0x7E for c in self.nsid):
+ # All ASCII printable, so it's probably a string.
+ value = self.nsid.decode()
+ else:
+ value = binascii.hexlify(self.nsid).decode()
+ return f"NSID {value}"
+
+ @classmethod
+ def from_wire_parser(
+ cls, otype: OptionType | str, parser: dns.wire.Parser
+ ) -> Option:
+ return cls(parser.get_remaining())
+
+
+class CookieOption(Option):
+ def __init__(self, client: bytes, server: bytes):
+ super().__init__(OptionType.COOKIE)
+ self.client = client
+ self.server = server
+ if len(client) != 8:
+ raise ValueError("client cookie must be 8 bytes")
+ if len(server) != 0 and (len(server) < 8 or len(server) > 32):
+ raise ValueError("server cookie must be empty or between 8 and 32 bytes")
+
+ def to_wire(self, file: Any = None) -> bytes | None:
+ if file:
+ file.write(self.client)
+ if len(self.server) > 0:
+ file.write(self.server)
+ return None
+ else:
+ return self.client + self.server
+
+ def to_text(self) -> str:
+ client = binascii.hexlify(self.client).decode()
+ if len(self.server) > 0:
+ server = binascii.hexlify(self.server).decode()
+ else:
+ server = ""
+ return f"COOKIE {client}{server}"
+
+ @classmethod
+ def from_wire_parser(
+ cls, otype: OptionType | str, parser: dns.wire.Parser
+ ) -> Option:
+ return cls(parser.get_bytes(8), parser.get_remaining())
+
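+# A minimal usage sketch for CookieOption (doctest-style); a client-only
+# cookie is 8 bytes with an empty server part:
+#
+# >>> import dns.edns
+# >>> dns.edns.CookieOption(bytes(range(8)), b'').to_text()
+# 'COOKIE 0001020304050607'
+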
+
+class ReportChannelOption(Option):
+ # RFC 9567
+ def __init__(self, agent_domain: dns.name.Name):
+ super().__init__(OptionType.REPORTCHANNEL)
+ self.agent_domain = agent_domain
+
+ def to_wire(self, file: Any = None) -> bytes | None:
+ return self.agent_domain.to_wire(file)
+
+ def to_text(self) -> str:
+ return "REPORTCHANNEL " + self.agent_domain.to_text()
+
+ @classmethod
+ def from_wire_parser(
+ cls, otype: OptionType | str, parser: dns.wire.Parser
+ ) -> Option:
+ return cls(parser.get_name())
+
+
+_type_to_class: Dict[OptionType, Any] = {
+ OptionType.ECS: ECSOption,
+ OptionType.EDE: EDEOption,
+ OptionType.NSID: NSIDOption,
+ OptionType.COOKIE: CookieOption,
+ OptionType.REPORTCHANNEL: ReportChannelOption,
+}
+
+
+def get_option_class(otype: OptionType) -> Any:
+ """Return the class for the specified option type.
+
+ The GenericOption class is used if a more specific class is not
+ known.
+ """
+
+ cls = _type_to_class.get(otype)
+ if cls is None:
+ cls = GenericOption
+ return cls
+
+
+def option_from_wire_parser(
+ otype: OptionType | str, parser: "dns.wire.Parser"
+) -> Option:
+ """Build an EDNS option object from wire format.
+
+    *otype*, a ``dns.edns.OptionType`` or ``str``, is the option type.
+
+ *parser*, a ``dns.wire.Parser``, the parser, which should be
+ restricted to the option length.
+
+ Returns an instance of a subclass of ``dns.edns.Option``.
+ """
+ otype = OptionType.make(otype)
+ cls = get_option_class(otype)
+ return cls.from_wire_parser(otype, parser)
+
+
+def option_from_wire(
+ otype: OptionType | str, wire: bytes, current: int, olen: int
+) -> Option:
+ """Build an EDNS option object from wire format.
+
+    *otype*, a ``dns.edns.OptionType`` or ``str``, is the option type.
+
+ *wire*, a ``bytes``, is the wire-format message.
+
+ *current*, an ``int``, is the offset in *wire* of the beginning
+ of the rdata.
+
+    *olen*, an ``int``, is the length of the wire-format option data.
+
+ Returns an instance of a subclass of ``dns.edns.Option``.
+ """
+ parser = dns.wire.Parser(wire, current)
+ with parser.restrict_to(olen):
+ return option_from_wire_parser(otype, parser)
+
+
+def register_type(implementation: Any, otype: OptionType) -> None:
+ """Register the implementation of an option type.
+
+ *implementation*, a ``class``, is a subclass of ``dns.edns.Option``.
+
+ *otype*, an ``int``, is the option type.
+ """
+
+ _type_to_class[otype] = implementation
+
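+# A minimal sketch of a custom option (doctest-style).  The class name and the
+# option code 65001 are illustrative only; 65001 is in the local/experimental
+# range and is not assigned to any real option.  Once registered,
+# option_from_wire_parser() returns MyOption instead of GenericOption for
+# that code:
+#
+# >>> import dns.edns
+# >>> class MyOption(dns.edns.Option):
+# ...     def __init__(self, data):
+# ...         super().__init__(65001)
+# ...         self.data = data
+# ...     def to_wire(self, file=None):
+# ...         if file:
+# ...             file.write(self.data)
+# ...             return None
+# ...         return self.data
+# ...     def to_text(self):
+# ...         return f'MYOPT {self.data.hex()}'
+# ...     @classmethod
+# ...     def from_wire_parser(cls, otype, parser):
+# ...         return cls(parser.get_remaining())
+# >>> dns.edns.register_type(MyOption, 65001)
+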
+
+### BEGIN generated OptionType constants
+
+NSID = OptionType.NSID
+DAU = OptionType.DAU
+DHU = OptionType.DHU
+N3U = OptionType.N3U
+ECS = OptionType.ECS
+EXPIRE = OptionType.EXPIRE
+COOKIE = OptionType.COOKIE
+KEEPALIVE = OptionType.KEEPALIVE
+PADDING = OptionType.PADDING
+CHAIN = OptionType.CHAIN
+EDE = OptionType.EDE
+REPORTCHANNEL = OptionType.REPORTCHANNEL
+
+### END generated OptionType constants
diff --git a/tapdown/lib/python3.11/site-packages/dns/entropy.py b/tapdown/lib/python3.11/site-packages/dns/entropy.py
new file mode 100644
index 0000000..6430926
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/entropy.py
@@ -0,0 +1,130 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2009-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import hashlib
+import os
+import random
+import threading
+import time
+from typing import Any
+
+
+class EntropyPool:
+ # This is an entropy pool for Python implementations that do not
+ # have a working SystemRandom. I'm not sure there are any, but
+    # leaving this code doesn't hurt anything, as SystemRandom
+    # is used when it is available.
+
+ def __init__(self, seed: bytes | None = None):
+ self.pool_index = 0
+ self.digest: bytearray | None = None
+ self.next_byte = 0
+ self.lock = threading.Lock()
+ self.hash = hashlib.sha1()
+ self.hash_len = 20
+ self.pool = bytearray(b"\0" * self.hash_len)
+ if seed is not None:
+ self._stir(seed)
+ self.seeded = True
+ self.seed_pid = os.getpid()
+ else:
+ self.seeded = False
+ self.seed_pid = 0
+
+ def _stir(self, entropy: bytes | bytearray) -> None:
+ for c in entropy:
+ if self.pool_index == self.hash_len:
+ self.pool_index = 0
+ b = c & 0xFF
+ self.pool[self.pool_index] ^= b
+ self.pool_index += 1
+
+ def stir(self, entropy: bytes | bytearray) -> None:
+ with self.lock:
+ self._stir(entropy)
+
+ def _maybe_seed(self) -> None:
+ if not self.seeded or self.seed_pid != os.getpid():
+ try:
+ seed = os.urandom(16)
+ except Exception: # pragma: no cover
+ try:
+ with open("/dev/urandom", "rb", 0) as r:
+ seed = r.read(16)
+ except Exception:
+ seed = str(time.time()).encode()
+ self.seeded = True
+ self.seed_pid = os.getpid()
+ self.digest = None
+ seed = bytearray(seed)
+ self._stir(seed)
+
+ def random_8(self) -> int:
+ with self.lock:
+ self._maybe_seed()
+ if self.digest is None or self.next_byte == self.hash_len:
+ self.hash.update(bytes(self.pool))
+ self.digest = bytearray(self.hash.digest())
+ self._stir(self.digest)
+ self.next_byte = 0
+ value = self.digest[self.next_byte]
+ self.next_byte += 1
+ return value
+
+ def random_16(self) -> int:
+ return self.random_8() * 256 + self.random_8()
+
+ def random_32(self) -> int:
+ return self.random_16() * 65536 + self.random_16()
+
+ def random_between(self, first: int, last: int) -> int:
+ size = last - first + 1
+ if size > 4294967296:
+ raise ValueError("too big")
+ if size > 65536:
+ rand = self.random_32
+ max = 4294967295
+ elif size > 256:
+ rand = self.random_16
+ max = 65535
+ else:
+ rand = self.random_8
+ max = 255
+ return first + size * rand() // (max + 1)
+
+
+pool = EntropyPool()
+
+system_random: Any | None
+try:
+ system_random = random.SystemRandom()
+except Exception: # pragma: no cover
+ system_random = None
+
+
+def random_16() -> int:
+ if system_random is not None:
+ return system_random.randrange(0, 65536)
+ else:
+ return pool.random_16()
+
+
+def between(first: int, last: int) -> int:
+ if system_random is not None:
+ return system_random.randrange(first, last + 1)
+ else:
+ return pool.random_between(first, last)
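+
+
+# A minimal usage sketch (doctest-style; results vary from run to run, so the
+# outputs are not shown):
+#
+# >>> import dns.entropy
+# >>> r = dns.entropy.random_16()    # 0 <= r <= 65535
+# >>> d = dns.entropy.between(1, 6)  # 1 <= d <= 6, like a die roll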
diff --git a/tapdown/lib/python3.11/site-packages/dns/enum.py b/tapdown/lib/python3.11/site-packages/dns/enum.py
new file mode 100644
index 0000000..822c995
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/enum.py
@@ -0,0 +1,113 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import enum
+from typing import Any, Type, TypeVar
+
+TIntEnum = TypeVar("TIntEnum", bound="IntEnum")
+
+
+class IntEnum(enum.IntEnum):
+ @classmethod
+ def _missing_(cls, value):
+ cls._check_value(value)
+ val = int.__new__(cls, value) # pyright: ignore
+ val._name_ = cls._extra_to_text(value, None) or f"{cls._prefix()}{value}"
+ val._value_ = value # pyright: ignore
+ return val
+
+ @classmethod
+ def _check_value(cls, value):
+ max = cls._maximum()
+ if not isinstance(value, int):
+ raise TypeError
+ if value < 0 or value > max:
+ name = cls._short_name()
+            raise ValueError(f"{name} must be an int that is >= 0 and <= {max}")
+
+ @classmethod
+ def from_text(cls: Type[TIntEnum], text: str) -> TIntEnum:
+ text = text.upper()
+ try:
+ return cls[text]
+ except KeyError:
+ pass
+ value = cls._extra_from_text(text)
+ if value:
+ return value
+ prefix = cls._prefix()
+ if text.startswith(prefix) and text[len(prefix) :].isdigit():
+ value = int(text[len(prefix) :])
+ cls._check_value(value)
+ return cls(value)
+ raise cls._unknown_exception_class()
+
+ @classmethod
+ def to_text(cls: Type[TIntEnum], value: int) -> str:
+ cls._check_value(value)
+ try:
+ text = cls(value).name
+ except ValueError:
+ text = None
+ text = cls._extra_to_text(value, text)
+ if text is None:
+ text = f"{cls._prefix()}{value}"
+ return text
+
+ @classmethod
+ def make(cls: Type[TIntEnum], value: int | str) -> TIntEnum:
+ """Convert text or a value into an enumerated type, if possible.
+
+ *value*, the ``int`` or ``str`` to convert.
+
+ Raises a class-specific exception if a ``str`` is provided that
+ cannot be converted.
+
+ Raises ``ValueError`` if the value is out of range.
+
+ Returns an enumeration from the calling class corresponding to the
+ value, if one is defined, or an ``int`` otherwise.
+ """
+
+ if isinstance(value, str):
+ return cls.from_text(value)
+ cls._check_value(value)
+ return cls(value)
+
+ @classmethod
+ def _maximum(cls):
+ raise NotImplementedError # pragma: no cover
+
+ @classmethod
+ def _short_name(cls):
+ return cls.__name__.lower()
+
+ @classmethod
+ def _prefix(cls) -> str:
+ return ""
+
+ @classmethod
+ def _extra_from_text(cls, text: str) -> Any | None: # pylint: disable=W0613
+ return None
+
+ @classmethod
+ def _extra_to_text(cls, value, current_text): # pylint: disable=W0613
+ return current_text
+
+ @classmethod
+ def _unknown_exception_class(cls) -> Type[Exception]:
+ return ValueError
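+
+
+# A minimal sketch of an IntEnum subclass (doctest-style; the Color enum is
+# illustrative only).  Unknown values inside the allowed range become
+# synthetic members rather than raising:
+#
+# >>> import dns.enum
+# >>> class Color(dns.enum.IntEnum):
+# ...     RED = 1
+# ...     @classmethod
+# ...     def _maximum(cls):
+# ...         return 255
+# >>> Color.make('red') == Color.RED
+# True
+# >>> int(Color.make(2))
+# 2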
diff --git a/tapdown/lib/python3.11/site-packages/dns/exception.py b/tapdown/lib/python3.11/site-packages/dns/exception.py
new file mode 100644
index 0000000..c3d42ff
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/exception.py
@@ -0,0 +1,169 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Common DNS Exceptions.
+
+Dnspython modules may also define their own exceptions, which will
+always be subclasses of ``DNSException``.
+"""
+
+
+from typing import Set
+
+
+class DNSException(Exception):
+ """Abstract base class shared by all dnspython exceptions.
+
+ It supports two basic modes of operation:
+
+ a) Old/compatible mode is used if ``__init__`` was called with
+ empty *kwargs*. In compatible mode all *args* are passed
+ to the standard Python Exception class as before and all *args* are
+ printed by the standard ``__str__`` implementation. Class variable
+ ``msg`` (or doc string if ``msg`` is ``None``) is returned from ``str()``
+ if *args* is empty.
+
+ b) New/parametrized mode is used if ``__init__`` was called with
+ non-empty *kwargs*.
+ In the new mode *args* must be empty and all kwargs must match
+ those set in class variable ``supp_kwargs``. All kwargs are stored inside
+ ``self.kwargs`` and used in a new ``__str__`` implementation to construct
+       a formatted message based on the ``fmt`` class variable, a ``str``.
+
+ In the simplest case it is enough to override the ``supp_kwargs``
+ and ``fmt`` class variables to get nice parametrized messages.
+ """
+
+ msg: str | None = None # non-parametrized message
+ supp_kwargs: Set[str] = set() # accepted parameters for _fmt_kwargs (sanity check)
+ fmt: str | None = None # message parametrized with results from _fmt_kwargs
+
+ def __init__(self, *args, **kwargs):
+ self._check_params(*args, **kwargs)
+ if kwargs:
+ # This call to a virtual method from __init__ is ok in our usage
+ self.kwargs = self._check_kwargs(**kwargs) # lgtm[py/init-calls-subclass]
+ self.msg = str(self)
+ else:
+ self.kwargs = dict() # defined but empty for old mode exceptions
+ if self.msg is None:
+ # doc string is better implicit message than empty string
+ self.msg = self.__doc__
+ if args:
+ super().__init__(*args)
+ else:
+ super().__init__(self.msg)
+
+ def _check_params(self, *args, **kwargs):
+ """Old exceptions supported only args and not kwargs.
+
+        For sanity we do not allow mixing old and new behavior."""
+ if args or kwargs:
+ assert bool(args) != bool(
+ kwargs
+ ), "keyword arguments are mutually exclusive with positional args"
+
+ def _check_kwargs(self, **kwargs):
+ if kwargs:
+ assert (
+ set(kwargs.keys()) == self.supp_kwargs
+ ), f"following set of keyword args is required: {self.supp_kwargs}"
+ return kwargs
+
+ def _fmt_kwargs(self, **kwargs):
+ """Format kwargs before printing them.
+
+ Resulting dictionary has to have keys necessary for str.format call
+ on fmt class variable.
+ """
+ fmtargs = {}
+ for kw, data in kwargs.items():
+ if isinstance(data, list | set):
+                # convert a list of <someobj> to a list of str(<someobj>)
+ fmtargs[kw] = list(map(str, data))
+ if len(fmtargs[kw]) == 1:
+ # remove list brackets [] from single-item lists
+ fmtargs[kw] = fmtargs[kw].pop()
+ else:
+ fmtargs[kw] = data
+ return fmtargs
+
+ def __str__(self):
+ if self.kwargs and self.fmt:
+ # provide custom message constructed from keyword arguments
+ fmtargs = self._fmt_kwargs(**self.kwargs)
+ return self.fmt.format(**fmtargs)
+ else:
+ # print *args directly in the same way as old DNSException
+ return super().__str__()
+
+
+class FormError(DNSException):
+ """DNS message is malformed."""
+
+
+class SyntaxError(DNSException):
+ """Text input is malformed."""
+
+
+class UnexpectedEnd(SyntaxError):
+ """Text input ended unexpectedly."""
+
+
+class TooBig(DNSException):
+ """The DNS message is too big."""
+
+
+class Timeout(DNSException):
+ """The DNS operation timed out."""
+
+ supp_kwargs = {"timeout"}
+ fmt = "The DNS operation timed out after {timeout:.3f} seconds"
+
+    # We define __init__ as otherwise mypy complains about the unexpected
+    # keyword argument "timeout".
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
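+# A minimal usage sketch of the parametrized mode (doctest-style):
+#
+# >>> import dns.exception
+# >>> str(dns.exception.Timeout(timeout=2.5))
+# 'The DNS operation timed out after 2.500 seconds'
+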
+
+class UnsupportedAlgorithm(DNSException):
+ """The DNSSEC algorithm is not supported."""
+
+
+class AlgorithmKeyMismatch(UnsupportedAlgorithm):
+ """The DNSSEC algorithm is not supported for the given key type."""
+
+
+class ValidationFailure(DNSException):
+ """The DNSSEC signature is invalid."""
+
+
+class DeniedByPolicy(DNSException):
+ """Denied by DNSSEC policy."""
+
+
+class ExceptionWrapper:
+ def __init__(self, exception_class):
+ self.exception_class = exception_class
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if exc_type is not None and not isinstance(exc_val, self.exception_class):
+ raise self.exception_class(str(exc_val)) from exc_val
+ return False
diff --git a/tapdown/lib/python3.11/site-packages/dns/flags.py b/tapdown/lib/python3.11/site-packages/dns/flags.py
new file mode 100644
index 0000000..4c60be1
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/flags.py
@@ -0,0 +1,123 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Message Flags."""
+
+import enum
+from typing import Any
+
+# Standard DNS flags
+
+
+class Flag(enum.IntFlag):
+ #: Query Response
+ QR = 0x8000
+ #: Authoritative Answer
+ AA = 0x0400
+ #: Truncated Response
+ TC = 0x0200
+ #: Recursion Desired
+ RD = 0x0100
+ #: Recursion Available
+ RA = 0x0080
+ #: Authentic Data
+ AD = 0x0020
+ #: Checking Disabled
+ CD = 0x0010
+
+
+# EDNS flags
+
+
+class EDNSFlag(enum.IntFlag):
+ #: DNSSEC answer OK
+ DO = 0x8000
+
+
+def _from_text(text: str, enum_class: Any) -> int:
+ flags = 0
+ tokens = text.split()
+ for t in tokens:
+ flags |= enum_class[t.upper()]
+ return flags
+
+
+def _to_text(flags: int, enum_class: Any) -> str:
+ text_flags = []
+ for k, v in enum_class.__members__.items():
+ if flags & v != 0:
+ text_flags.append(k)
+ return " ".join(text_flags)
+
+
+def from_text(text: str) -> int:
+ """Convert a space-separated list of flag text values into a flags
+ value.
+
+ Returns an ``int``
+ """
+
+ return _from_text(text, Flag)
+
+
+def to_text(flags: int) -> str:
+ """Convert a flags value into a space-separated list of flag text
+ values.
+
+ Returns a ``str``.
+ """
+
+ return _to_text(flags, Flag)
+
+
+def edns_from_text(text: str) -> int:
+ """Convert a space-separated list of EDNS flag text values into a EDNS
+ flags value.
+
+ Returns an ``int``
+ """
+
+ return _from_text(text, EDNSFlag)
+
+
+def edns_to_text(flags: int) -> str:
+ """Convert an EDNS flags value into a space-separated list of EDNS flag
+ text values.
+
+ Returns a ``str``.
+ """
+
+ return _to_text(flags, EDNSFlag)
+
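+# A minimal usage sketch (doctest-style):
+#
+# >>> import dns.flags
+# >>> int(dns.flags.from_text('QR RD')) == 0x8100
+# True
+# >>> dns.flags.to_text(0x8100)
+# 'QR RD'
+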
+
+### BEGIN generated Flag constants
+
+QR = Flag.QR
+AA = Flag.AA
+TC = Flag.TC
+RD = Flag.RD
+RA = Flag.RA
+AD = Flag.AD
+CD = Flag.CD
+
+### END generated Flag constants
+
+### BEGIN generated EDNSFlag constants
+
+DO = EDNSFlag.DO
+
+### END generated EDNSFlag constants
diff --git a/tapdown/lib/python3.11/site-packages/dns/grange.py b/tapdown/lib/python3.11/site-packages/dns/grange.py
new file mode 100644
index 0000000..8d366dc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/grange.py
@@ -0,0 +1,72 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2012-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS GENERATE range conversion."""
+
+from typing import Tuple
+
+import dns.exception
+
+
+def from_text(text: str) -> Tuple[int, int, int]:
+    """Convert the text form of a range in a ``$GENERATE`` statement to a
+    tuple of integers.
+
+ *text*, a ``str``, the textual range in ``$GENERATE`` form.
+
+ Returns a tuple of three ``int`` values ``(start, stop, step)``.
+ """
+
+ start = -1
+ stop = -1
+ step = 1
+ cur = ""
+ state = 0
+ # state 0 1 2
+ # x - y / z
+
+ if text and text[0] == "-":
+ raise dns.exception.SyntaxError("Start cannot be a negative number")
+
+ for c in text:
+ if c == "-" and state == 0:
+ start = int(cur)
+ cur = ""
+ state = 1
+ elif c == "/":
+ stop = int(cur)
+ cur = ""
+ state = 2
+ elif c.isdigit():
+ cur += c
+ else:
+ raise dns.exception.SyntaxError(f"Could not parse {c}")
+
+ if state == 0:
+ raise dns.exception.SyntaxError("no stop value specified")
+ elif state == 1:
+ stop = int(cur)
+ else:
+ assert state == 2
+ step = int(cur)
+
+ assert step >= 1
+ assert start >= 0
+ if start > stop:
+ raise dns.exception.SyntaxError("start must be <= stop")
+
+ return (start, stop, step)
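+
+
+# A minimal usage sketch (doctest-style); a missing step defaults to 1:
+#
+# >>> import dns.grange
+# >>> dns.grange.from_text('1-10/2')
+# (1, 10, 2)
+# >>> dns.grange.from_text('32-64')
+# (32, 64, 1)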
diff --git a/tapdown/lib/python3.11/site-packages/dns/immutable.py b/tapdown/lib/python3.11/site-packages/dns/immutable.py
new file mode 100644
index 0000000..36b0362
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/immutable.py
@@ -0,0 +1,68 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import collections.abc
+from typing import Any, Callable
+
+from dns._immutable_ctx import immutable
+
+
+@immutable
+class Dict(collections.abc.Mapping): # lgtm[py/missing-equals]
+ def __init__(
+ self,
+ dictionary: Any,
+ no_copy: bool = False,
+ map_factory: Callable[[], collections.abc.MutableMapping] = dict,
+ ):
+ """Make an immutable dictionary from the specified dictionary.
+
+        If *no_copy* is ``True``, then *dictionary* will be wrapped instead
+ of copied. Only set this if you are sure there will be no external
+ references to the dictionary.
+ """
+ if no_copy and isinstance(dictionary, collections.abc.MutableMapping):
+ self._odict = dictionary
+ else:
+ self._odict = map_factory()
+ self._odict.update(dictionary)
+ self._hash = None
+
+ def __getitem__(self, key):
+ return self._odict.__getitem__(key)
+
+ def __hash__(self): # pylint: disable=invalid-hash-returned
+ if self._hash is None:
+ h = 0
+ for key in sorted(self._odict.keys()):
+ h ^= hash(key)
+ object.__setattr__(self, "_hash", h)
+ # this does return an int, but pylint doesn't figure that out
+ return self._hash
+
+ def __len__(self):
+ return len(self._odict)
+
+ def __iter__(self):
+ return iter(self._odict)
+
+
+def constify(o: Any) -> Any:
+ """
+ Convert mutable types to immutable types.
+ """
+ if isinstance(o, bytearray):
+ return bytes(o)
+ if isinstance(o, tuple):
+ try:
+ hash(o)
+ return o
+ except Exception:
+ return tuple(constify(elt) for elt in o)
+ if isinstance(o, list):
+ return tuple(constify(elt) for elt in o)
+ if isinstance(o, dict):
+ cdict = dict()
+ for k, v in o.items():
+ cdict[k] = constify(v)
+ return Dict(cdict, True)
+ return o
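+
+
+# A minimal usage sketch (doctest-style); lists become tuples and plain dicts
+# become immutable Dict instances:
+#
+# >>> import dns.immutable
+# >>> d = dns.immutable.constify({'a': [1, 2]})
+# >>> d['a']
+# (1, 2)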
diff --git a/tapdown/lib/python3.11/site-packages/dns/inet.py b/tapdown/lib/python3.11/site-packages/dns/inet.py
new file mode 100644
index 0000000..765203b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/inet.py
@@ -0,0 +1,195 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Generic Internet address helper functions."""
+
+import socket
+from typing import Any, Tuple
+
+import dns.ipv4
+import dns.ipv6
+
+# We assume that AF_INET and AF_INET6 are always defined. We keep
+# these here for the benefit of any old code (unlikely though that
+# is!).
+AF_INET = socket.AF_INET
+AF_INET6 = socket.AF_INET6
+
+
+def inet_pton(family: int, text: str) -> bytes:
+ """Convert the textual form of a network address into its binary form.
+
+ *family* is an ``int``, the address family.
+
+ *text* is a ``str``, the textual address.
+
+ Raises ``NotImplementedError`` if the address family specified is not
+ implemented.
+
+ Returns a ``bytes``.
+ """
+
+ if family == AF_INET:
+ return dns.ipv4.inet_aton(text)
+ elif family == AF_INET6:
+ return dns.ipv6.inet_aton(text, True)
+ else:
+ raise NotImplementedError
+
+
+def inet_ntop(family: int, address: bytes) -> str:
+ """Convert the binary form of a network address into its textual form.
+
+ *family* is an ``int``, the address family.
+
+ *address* is a ``bytes``, the network address in binary form.
+
+ Raises ``NotImplementedError`` if the address family specified is not
+ implemented.
+
+ Returns a ``str``.
+ """
+
+ if family == AF_INET:
+ return dns.ipv4.inet_ntoa(address)
+ elif family == AF_INET6:
+ return dns.ipv6.inet_ntoa(address)
+ else:
+ raise NotImplementedError
+
+
+def af_for_address(text: str) -> int:
+ """Determine the address family of a textual-form network address.
+
+ *text*, a ``str``, the textual address.
+
+ Raises ``ValueError`` if the address family cannot be determined
+ from the input.
+
+ Returns an ``int``.
+ """
+
+ try:
+ dns.ipv4.inet_aton(text)
+ return AF_INET
+ except Exception:
+ try:
+ dns.ipv6.inet_aton(text, True)
+ return AF_INET6
+ except Exception:
+ raise ValueError
+
+
+def is_multicast(text: str) -> bool:
+ """Is the textual-form network address a multicast address?
+
+ *text*, a ``str``, the textual address.
+
+ Raises ``ValueError`` if the address family cannot be determined
+ from the input.
+
+ Returns a ``bool``.
+ """
+
+ try:
+ first = dns.ipv4.inet_aton(text)[0]
+ return first >= 224 and first <= 239
+ except Exception:
+ try:
+ first = dns.ipv6.inet_aton(text, True)[0]
+ return first == 255
+ except Exception:
+ raise ValueError
+
+
+def is_address(text: str) -> bool:
+ """Is the specified string an IPv4 or IPv6 address?
+
+ *text*, a ``str``, the textual address.
+
+ Returns a ``bool``.
+ """
+
+ try:
+ dns.ipv4.inet_aton(text)
+ return True
+ except Exception:
+ try:
+ dns.ipv6.inet_aton(text, True)
+ return True
+ except Exception:
+ return False
+
+
+def low_level_address_tuple(high_tuple: Tuple[str, int], af: int | None = None) -> Any:
+    """Given a "high-level" address tuple, i.e. an (address, port) pair,
+    return the appropriate "low-level" address tuple
+ suitable for use in socket calls.
+
+ If an *af* other than ``None`` is provided, it is assumed the
+ address in the high-level tuple is valid and has that af. If af
+ is ``None``, then af_for_address will be called.
+ """
+ address, port = high_tuple
+ if af is None:
+ af = af_for_address(address)
+ if af == AF_INET:
+ return (address, port)
+ elif af == AF_INET6:
+ i = address.find("%")
+ if i < 0:
+ # no scope, shortcut!
+ return (address, port, 0, 0)
+ # try to avoid getaddrinfo()
+ addrpart = address[:i]
+ scope = address[i + 1 :]
+ if scope.isdigit():
+ return (addrpart, port, 0, int(scope))
+ try:
+ return (addrpart, port, 0, socket.if_nametoindex(scope))
+ except AttributeError: # pragma: no cover (we can't really test this)
+ ai_flags = socket.AI_NUMERICHOST
+ ((*_, tup), *_) = socket.getaddrinfo(address, port, flags=ai_flags)
+ return tup
+ else:
+ raise NotImplementedError(f"unknown address family {af}")
+
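+# A minimal usage sketch (doctest-style):
+#
+# >>> import socket
+# >>> import dns.inet
+# >>> dns.inet.af_for_address('1.2.3.4') == socket.AF_INET
+# True
+# >>> dns.inet.low_level_address_tuple(('2001:db8::1', 53))
+# ('2001:db8::1', 53, 0, 0)
+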
+
+def any_for_af(af):
+ """Return the 'any' address for the specified address family."""
+ if af == socket.AF_INET:
+ return "0.0.0.0"
+ elif af == socket.AF_INET6:
+ return "::"
+ raise NotImplementedError(f"unknown address family {af}")
+
+
+def canonicalize(text: str) -> str:
+    """Verify that *text* is a valid text form IPv4 or IPv6 address and return its
+ canonical text form. IPv6 addresses with scopes are rejected.
+
+ *text*, a ``str``, the address in textual form.
+
+ Raises ``ValueError`` if the text is not valid.
+ """
+ try:
+ return dns.ipv6.canonicalize(text)
+ except Exception:
+ try:
+ return dns.ipv4.canonicalize(text)
+ except Exception:
+ raise ValueError
diff --git a/tapdown/lib/python3.11/site-packages/dns/ipv4.py b/tapdown/lib/python3.11/site-packages/dns/ipv4.py
new file mode 100644
index 0000000..a7161bc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/ipv4.py
@@ -0,0 +1,76 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv4 helper functions."""
+
+import struct
+
+import dns.exception
+
+
+def inet_ntoa(address: bytes) -> str:
+ """Convert an IPv4 address in binary form to text form.
+
+ *address*, a ``bytes``, the IPv4 address in binary form.
+
+ Returns a ``str``.
+ """
+
+ if len(address) != 4:
+ raise dns.exception.SyntaxError
+ return f"{address[0]}.{address[1]}.{address[2]}.{address[3]}"
+
+
+def inet_aton(text: str | bytes) -> bytes:
+ """Convert an IPv4 address in text form to binary form.
+
+ *text*, a ``str`` or ``bytes``, the IPv4 address in textual form.
+
+ Returns a ``bytes``.
+ """
+
+ if not isinstance(text, bytes):
+ btext = text.encode()
+ else:
+ btext = text
+ parts = btext.split(b".")
+ if len(parts) != 4:
+ raise dns.exception.SyntaxError
+ for part in parts:
+ if not part.isdigit():
+ raise dns.exception.SyntaxError
+ if len(part) > 1 and part[0] == ord("0"):
+ # No leading zeros
+ raise dns.exception.SyntaxError
+ try:
+ b = [int(part) for part in parts]
+ return struct.pack("BBBB", *b)
+ except Exception:
+ raise dns.exception.SyntaxError
+
+
+def canonicalize(text: str | bytes) -> str:
+    """Verify that *text* is a valid text form IPv4 address and return its
+ canonical text form.
+
+ *text*, a ``str`` or ``bytes``, the IPv4 address in textual form.
+
+ Raises ``dns.exception.SyntaxError`` if the text is not valid.
+ """
+    # Note that inet_aton() only accepts canonical form, but we still run through
+ # inet_ntoa() to ensure the output is a str.
+ return inet_ntoa(inet_aton(text))
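+
+
+# A minimal usage sketch (doctest-style):
+#
+# >>> import dns.ipv4
+# >>> dns.ipv4.inet_aton('10.0.0.1')
+# b'\n\x00\x00\x01'
+# >>> dns.ipv4.canonicalize('10.0.0.1')
+# '10.0.0.1'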
diff --git a/tapdown/lib/python3.11/site-packages/dns/ipv6.py b/tapdown/lib/python3.11/site-packages/dns/ipv6.py
new file mode 100644
index 0000000..eaa0f6c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/ipv6.py
@@ -0,0 +1,217 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""IPv6 helper functions."""
+
+import binascii
+import re
+from typing import List
+
+import dns.exception
+import dns.ipv4
+
+_leading_zero = re.compile(r"0+([0-9a-f]+)")
+
+
+def inet_ntoa(address: bytes) -> str:
+ """Convert an IPv6 address in binary form to text form.
+
+ *address*, a ``bytes``, the IPv6 address in binary form.
+
+ Raises ``ValueError`` if the address isn't 16 bytes long.
+ Returns a ``str``.
+ """
+
+ if len(address) != 16:
+ raise ValueError("IPv6 addresses are 16 bytes long")
+ hex = binascii.hexlify(address)
+ chunks = []
+ i = 0
+ l = len(hex)
+ while i < l:
+ chunk = hex[i : i + 4].decode()
+ # strip leading zeros. we do this with an re instead of
+ # with lstrip() because lstrip() didn't support chars until
+ # python 2.2.2
+ m = _leading_zero.match(chunk)
+ if m is not None:
+ chunk = m.group(1)
+ chunks.append(chunk)
+ i += 4
+ #
+ # Compress the longest subsequence of 0-value chunks to ::
+ #
+ best_start = 0
+ best_len = 0
+ start = -1
+ last_was_zero = False
+ for i in range(8):
+ if chunks[i] != "0":
+ if last_was_zero:
+ end = i
+ current_len = end - start
+ if current_len > best_len:
+ best_start = start
+ best_len = current_len
+ last_was_zero = False
+ elif not last_was_zero:
+ start = i
+ last_was_zero = True
+ if last_was_zero:
+ end = 8
+ current_len = end - start
+ if current_len > best_len:
+ best_start = start
+ best_len = current_len
+ if best_len > 1:
+ if best_start == 0 and (best_len == 6 or best_len == 5 and chunks[5] == "ffff"):
+ # We have an embedded IPv4 address
+ if best_len == 6:
+ prefix = "::"
+ else:
+ prefix = "::ffff:"
+ thex = prefix + dns.ipv4.inet_ntoa(address[12:])
+ else:
+ thex = (
+ ":".join(chunks[:best_start])
+ + "::"
+ + ":".join(chunks[best_start + best_len :])
+ )
+ else:
+ thex = ":".join(chunks)
+ return thex
+
+
+_v4_ending = re.compile(rb"(.*):(\d+\.\d+\.\d+\.\d+)$")
+_colon_colon_start = re.compile(rb"::.*")
+_colon_colon_end = re.compile(rb".*::$")
+
+
+def inet_aton(text: str | bytes, ignore_scope: bool = False) -> bytes:
+ """Convert an IPv6 address in text form to binary form.
+
+ *text*, a ``str`` or ``bytes``, the IPv6 address in textual form.
+
+ *ignore_scope*, a ``bool``. If ``True``, a scope will be ignored.
+ If ``False``, the default, it is an error for a scope to be present.
+
+ Returns a ``bytes``.
+ """
+
+ #
+ # Our aim here is not something fast; we just want something that works.
+ #
+ if not isinstance(text, bytes):
+ btext = text.encode()
+ else:
+ btext = text
+
+ if ignore_scope:
+ parts = btext.split(b"%")
+ l = len(parts)
+ if l == 2:
+ btext = parts[0]
+ elif l > 2:
+ raise dns.exception.SyntaxError
+
+ if btext == b"":
+ raise dns.exception.SyntaxError
+ elif btext.endswith(b":") and not btext.endswith(b"::"):
+ raise dns.exception.SyntaxError
+ elif btext.startswith(b":") and not btext.startswith(b"::"):
+ raise dns.exception.SyntaxError
+ elif btext == b"::":
+ btext = b"0::"
+ #
+ # Get rid of the icky dot-quad syntax if we have it.
+ #
+ m = _v4_ending.match(btext)
+ if m is not None:
+ b = dns.ipv4.inet_aton(m.group(2))
+ btext = (
+ f"{m.group(1).decode()}:{b[0]:02x}{b[1]:02x}:{b[2]:02x}{b[3]:02x}"
+ ).encode()
+ #
+    # Try to turn '::<whatever>' into ':<whatever>'; if no match try to
+    # turn '<whatever>::' into '<whatever>:'
+ #
+ m = _colon_colon_start.match(btext)
+ if m is not None:
+ btext = btext[1:]
+ else:
+ m = _colon_colon_end.match(btext)
+ if m is not None:
+ btext = btext[:-1]
+ #
+ # Now canonicalize into 8 chunks of 4 hex digits each
+ #
+ chunks = btext.split(b":")
+ l = len(chunks)
+ if l > 8:
+ raise dns.exception.SyntaxError
+ seen_empty = False
+ canonical: List[bytes] = []
+ for c in chunks:
+ if c == b"":
+ if seen_empty:
+ raise dns.exception.SyntaxError
+ seen_empty = True
+ for _ in range(0, 8 - l + 1):
+ canonical.append(b"0000")
+ else:
+ lc = len(c)
+ if lc > 4:
+ raise dns.exception.SyntaxError
+ if lc != 4:
+ c = (b"0" * (4 - lc)) + c
+ canonical.append(c)
+ if l < 8 and not seen_empty:
+ raise dns.exception.SyntaxError
+ btext = b"".join(canonical)
+
+ #
+ # Finally we can go to binary.
+ #
+ try:
+ return binascii.unhexlify(btext)
+ except (binascii.Error, TypeError):
+ raise dns.exception.SyntaxError
+
+
+_mapped_prefix = b"\x00" * 10 + b"\xff\xff"
+
+
+def is_mapped(address: bytes) -> bool:
+ """Is the specified address a mapped IPv4 address?
+
+    *address*, a ``bytes``, is an IPv6 address in binary form.
+
+ Returns a ``bool``.
+ """
+
+ return address.startswith(_mapped_prefix)
+
+
+def canonicalize(text: str | bytes) -> str:
+    """Verify that *text* is a valid text form IPv6 address and return its
+ canonical text form. Addresses with scopes are rejected.
+
+ *text*, a ``str`` or ``bytes``, the IPv6 address in textual form.
+
+ Raises ``dns.exception.SyntaxError`` if the text is not valid.
+ """
+ return inet_ntoa(inet_aton(text))
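+
+
+# A minimal usage sketch (doctest-style):
+#
+# >>> import dns.ipv6
+# >>> dns.ipv6.canonicalize('2001:0db8:0000:0000:0000:0000:0000:0001')
+# '2001:db8::1'
+# >>> dns.ipv6.is_mapped(dns.ipv6.inet_aton('::ffff:1.2.3.4'))
+# True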
diff --git a/tapdown/lib/python3.11/site-packages/dns/message.py b/tapdown/lib/python3.11/site-packages/dns/message.py
new file mode 100644
index 0000000..bbfccfc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/message.py
@@ -0,0 +1,1954 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Messages"""
+
+import contextlib
+import enum
+import io
+import time
+from typing import Any, Dict, List, Tuple, cast
+
+import dns.edns
+import dns.entropy
+import dns.enum
+import dns.exception
+import dns.flags
+import dns.name
+import dns.opcode
+import dns.rcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rdtypes.ANY.OPT
+import dns.rdtypes.ANY.SOA
+import dns.rdtypes.ANY.TSIG
+import dns.renderer
+import dns.rrset
+import dns.tokenizer
+import dns.tsig
+import dns.ttl
+import dns.wire
+
+
+class ShortHeader(dns.exception.FormError):
+ """The DNS packet passed to from_wire() is too short."""
+
+
+class TrailingJunk(dns.exception.FormError):
+ """The DNS packet passed to from_wire() has extra junk at the end of it."""
+
+
+class UnknownHeaderField(dns.exception.DNSException):
+ """The header field name was not recognized when converting from text
+ into a message."""
+
+
+class BadEDNS(dns.exception.FormError):
+ """An OPT record occurred somewhere other than
+ the additional data section."""
+
+
+class BadTSIG(dns.exception.FormError):
+ """A TSIG record occurred somewhere other than the end of
+ the additional data section."""
+
+
+class UnknownTSIGKey(dns.exception.DNSException):
+ """A TSIG with an unknown key was received."""
+
+
+class Truncated(dns.exception.DNSException):
+ """The truncated flag is set."""
+
+ supp_kwargs = {"message"}
+
+    # We define __init__ as otherwise mypy complains about the unexpected
+    # keyword argument "message".
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def message(self):
+ """As much of the message as could be processed.
+
+ Returns a ``dns.message.Message``.
+ """
+ return self.kwargs["message"]
+
+
+class NotQueryResponse(dns.exception.DNSException):
+ """Message is not a response to a query."""
+
+
+class ChainTooLong(dns.exception.DNSException):
+ """The CNAME chain is too long."""
+
+
+class AnswerForNXDOMAIN(dns.exception.DNSException):
+ """The rcode is NXDOMAIN but an answer was found."""
+
+
+class NoPreviousName(dns.exception.SyntaxError):
+ """No previous name was known."""
+
+
+class MessageSection(dns.enum.IntEnum):
+ """Message sections"""
+
+ QUESTION = 0
+ ANSWER = 1
+ AUTHORITY = 2
+ ADDITIONAL = 3
+
+ @classmethod
+ def _maximum(cls):
+ return 3
+
+
+class MessageError:
+ def __init__(self, exception: Exception, offset: int):
+ self.exception = exception
+ self.offset = offset
+
+
+DEFAULT_EDNS_PAYLOAD = 1232
+MAX_CHAIN = 16
+
+IndexKeyType = Tuple[
+ int,
+ dns.name.Name,
+ dns.rdataclass.RdataClass,
+ dns.rdatatype.RdataType,
+ dns.rdatatype.RdataType | None,
+ dns.rdataclass.RdataClass | None,
+]
+IndexType = Dict[IndexKeyType, dns.rrset.RRset]
+SectionType = int | str | List[dns.rrset.RRset]
+
+
+class Message:
+ """A DNS message."""
+
+ _section_enum = MessageSection
+
+ def __init__(self, id: int | None = None):
+ if id is None:
+ self.id = dns.entropy.random_16()
+ else:
+ self.id = id
+ self.flags = 0
+ self.sections: List[List[dns.rrset.RRset]] = [[], [], [], []]
+ self.opt: dns.rrset.RRset | None = None
+ self.request_payload = 0
+ self.pad = 0
+ self.keyring: Any = None
+ self.tsig: dns.rrset.RRset | None = None
+ self.want_tsig_sign = False
+ self.request_mac = b""
+ self.xfr = False
+ self.origin: dns.name.Name | None = None
+ self.tsig_ctx: Any | None = None
+ self.index: IndexType = {}
+ self.errors: List[MessageError] = []
+ self.time = 0.0
+ self.wire: bytes | None = None
+
+ @property
+ def question(self) -> List[dns.rrset.RRset]:
+ """The question section."""
+ return self.sections[0]
+
+ @question.setter
+ def question(self, v):
+ self.sections[0] = v
+
+ @property
+ def answer(self) -> List[dns.rrset.RRset]:
+ """The answer section."""
+ return self.sections[1]
+
+ @answer.setter
+ def answer(self, v):
+ self.sections[1] = v
+
+ @property
+ def authority(self) -> List[dns.rrset.RRset]:
+ """The authority section."""
+ return self.sections[2]
+
+ @authority.setter
+ def authority(self, v):
+ self.sections[2] = v
+
+ @property
+ def additional(self) -> List[dns.rrset.RRset]:
+ """The additional data section."""
+ return self.sections[3]
+
+ @additional.setter
+ def additional(self, v):
+ self.sections[3] = v
+
+ def __repr__(self):
+        return "<DNS message, ID " + repr(self.id) + ">"
+
+ def __str__(self):
+ return self.to_text()
+
+ def to_text(
+ self,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ **kw: Dict[str, Any],
+ ) -> str:
+ """Convert the message to text.
+
+ The *origin*, *relativize*, and any other keyword
+        arguments are passed to the RRset ``to_text()`` method.
+
+ Returns a ``str``.
+ """
+
+ s = io.StringIO()
+ s.write(f"id {self.id}\n")
+ s.write(f"opcode {dns.opcode.to_text(self.opcode())}\n")
+ s.write(f"rcode {dns.rcode.to_text(self.rcode())}\n")
+ s.write(f"flags {dns.flags.to_text(self.flags)}\n")
+ if self.edns >= 0:
+ s.write(f"edns {self.edns}\n")
+ if self.ednsflags != 0:
+ s.write(f"eflags {dns.flags.edns_to_text(self.ednsflags)}\n")
+ s.write(f"payload {self.payload}\n")
+ for opt in self.options:
+ s.write(f"option {opt.to_text()}\n")
+ for name, which in self._section_enum.__members__.items():
+ s.write(f";{name}\n")
+ for rrset in self.section_from_number(which):
+ s.write(rrset.to_text(origin, relativize, **kw))
+ s.write("\n")
+ if self.tsig is not None:
+ s.write(self.tsig.to_text(origin, relativize, **kw))
+ s.write("\n")
+ #
+ # We strip off the final \n so the caller can print the result without
+ # doing weird things to get around eccentricities in Python print
+ # formatting
+ #
+ return s.getvalue()[:-1]
+
+ def __eq__(self, other):
+ """Two messages are equal if they have the same content in the
+ header, question, answer, and authority sections.
+
+ Returns a ``bool``.
+ """
+
+ if not isinstance(other, Message):
+ return False
+ if self.id != other.id:
+ return False
+ if self.flags != other.flags:
+ return False
+ for i, section in enumerate(self.sections):
+ other_section = other.sections[i]
+ for n in section:
+ if n not in other_section:
+ return False
+ for n in other_section:
+ if n not in section:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def is_response(self, other: "Message") -> bool:
+ """Is *other*, also a ``dns.message.Message``, a response to this
+ message?
+
+ Returns a ``bool``.
+ """
+
+ if (
+ other.flags & dns.flags.QR == 0
+ or self.id != other.id
+ or dns.opcode.from_flags(self.flags) != dns.opcode.from_flags(other.flags)
+ ):
+ return False
+ if other.rcode() in {
+ dns.rcode.FORMERR,
+ dns.rcode.SERVFAIL,
+ dns.rcode.NOTIMP,
+ dns.rcode.REFUSED,
+ }:
+ # We don't check the question section in these cases if
+ # the other question section is empty, even though they
+ # still really ought to have a question section.
+ if len(other.question) == 0:
+ return True
+ if dns.opcode.is_update(self.flags):
+ # This is assuming the "sender doesn't include anything
+ # from the update", but we don't care to check the other
+ # case, which is that all the sections are returned and
+ # identical.
+ return True
+ for n in self.question:
+ if n not in other.question:
+ return False
+ for n in other.question:
+ if n not in self.question:
+ return False
+ return True
+
+ def section_number(self, section: List[dns.rrset.RRset]) -> int:
+ """Return the "section number" of the specified section for use
+ in indexing.
+
+ *section* is one of the section attributes of this message.
+
+ Raises ``ValueError`` if the section isn't known.
+
+ Returns an ``int``.
+ """
+
+ for i, our_section in enumerate(self.sections):
+ if section is our_section:
+ return self._section_enum(i)
+ raise ValueError("unknown section")
+
+ def section_from_number(self, number: int) -> List[dns.rrset.RRset]:
+ """Return the section list associated with the specified section
+ number.
+
+        *number* is a section number ``int`` or the text form of a section
+ name.
+
+ Raises ``ValueError`` if the section isn't known.
+
+ Returns a ``list``.
+ """
+
+ section = self._section_enum.make(number)
+ return self.sections[section]
+
+ def find_rrset(
+ self,
+ section: SectionType,
+ name: dns.name.Name,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ deleting: dns.rdataclass.RdataClass | None = None,
+ create: bool = False,
+ force_unique: bool = False,
+ idna_codec: dns.name.IDNACodec | None = None,
+ ) -> dns.rrset.RRset:
+ """Find the RRset with the given attributes in the specified section.
+
+ *section*, an ``int`` section number, a ``str`` section name, or one of
+        the section attributes of this message. This specifies
+ the section of the message to search. For example::
+
+ my_message.find_rrset(my_message.answer, name, rdclass, rdtype)
+ my_message.find_rrset(dns.message.ANSWER, name, rdclass, rdtype)
+ my_message.find_rrset("ANSWER", name, rdclass, rdtype)
+
+ *name*, a ``dns.name.Name`` or ``str``, the name of the RRset.
+
+ *rdclass*, an ``int`` or ``str``, the class of the RRset.
+
+ *rdtype*, an ``int`` or ``str``, the type of the RRset.
+
+ *covers*, an ``int`` or ``str``, the covers value of the RRset.
+ The default is ``dns.rdatatype.NONE``.
+
+ *deleting*, an ``int``, ``str``, or ``None``, the deleting value of the
+ RRset. The default is ``None``.
+
+ *create*, a ``bool``. If ``True``, create the RRset if it is not found.
+ The created RRset is appended to *section*.
+
+ *force_unique*, a ``bool``. If ``True`` and *create* is also ``True``,
+ create a new RRset regardless of whether a matching RRset exists
+ already. The default is ``False``. This is useful when creating
+ DDNS Update messages, as order matters for them.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ Raises ``KeyError`` if the RRset was not found and create was
+ ``False``.
+
+ Returns a ``dns.rrset.RRset object``.
+ """
+
+ if isinstance(section, int):
+ section_number = section
+ section = self.section_from_number(section_number)
+ elif isinstance(section, str):
+ section_number = self._section_enum.from_text(section)
+ section = self.section_from_number(section_number)
+ else:
+ section_number = self.section_number(section)
+ if isinstance(name, str):
+ name = dns.name.from_text(name, idna_codec=idna_codec)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ covers = dns.rdatatype.RdataType.make(covers)
+ if deleting is not None:
+ deleting = dns.rdataclass.RdataClass.make(deleting)
+ key = (section_number, name, rdclass, rdtype, covers, deleting)
+ if not force_unique:
+ if self.index is not None:
+ rrset = self.index.get(key)
+ if rrset is not None:
+ return rrset
+ else:
+ for rrset in section:
+ if rrset.full_match(name, rdclass, rdtype, covers, deleting):
+ return rrset
+ if not create:
+ raise KeyError
+ rrset = dns.rrset.RRset(name, rdclass, rdtype, covers, deleting)
+ section.append(rrset)
+ if self.index is not None:
+ self.index[key] = rrset
+ return rrset
+
+ def get_rrset(
+ self,
+ section: SectionType,
+ name: dns.name.Name,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ deleting: dns.rdataclass.RdataClass | None = None,
+ create: bool = False,
+ force_unique: bool = False,
+ idna_codec: dns.name.IDNACodec | None = None,
+ ) -> dns.rrset.RRset | None:
+ """Get the RRset with the given attributes in the specified section.
+
+ If the RRset is not found, None is returned.
+
+ *section*, an ``int`` section number, a ``str`` section name, or one of
+        the section attributes of this message. This specifies the
+        section of the message to search. For example::
+
+ my_message.get_rrset(my_message.answer, name, rdclass, rdtype)
+ my_message.get_rrset(dns.message.ANSWER, name, rdclass, rdtype)
+ my_message.get_rrset("ANSWER", name, rdclass, rdtype)
+
+ *name*, a ``dns.name.Name`` or ``str``, the name of the RRset.
+
+ *rdclass*, an ``int`` or ``str``, the class of the RRset.
+
+ *rdtype*, an ``int`` or ``str``, the type of the RRset.
+
+ *covers*, an ``int`` or ``str``, the covers value of the RRset.
+ The default is ``dns.rdatatype.NONE``.
+
+ *deleting*, an ``int``, ``str``, or ``None``, the deleting value of the
+ RRset. The default is ``None``.
+
+ *create*, a ``bool``. If ``True``, create the RRset if it is not found.
+ The created RRset is appended to *section*.
+
+ *force_unique*, a ``bool``. If ``True`` and *create* is also ``True``,
+ create a new RRset regardless of whether a matching RRset exists
+ already. The default is ``False``. This is useful when creating
+ DDNS Update messages, as order matters for them.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+        Returns a ``dns.rrset.RRset`` object or ``None``.
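+
+        An illustrative sketch of the no-exception style this method allows
+        (``name`` is a placeholder)::
+
+            rrset = my_message.get_rrset(
+                my_message.answer, name, dns.rdataclass.IN, dns.rdatatype.A
+            )
+            if rrset is None:
+                ...  # no matching RRset in the answer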
+ """
+
+ try:
+ rrset = self.find_rrset(
+ section,
+ name,
+ rdclass,
+ rdtype,
+ covers,
+ deleting,
+ create,
+ force_unique,
+ idna_codec,
+ )
+ except KeyError:
+ rrset = None
+ return rrset
+
+ def section_count(self, section: SectionType) -> int:
+ """Returns the number of records in the specified section.
+
+ *section*, an ``int`` section number, a ``str`` section name, or one of
+        the section attributes of this message. This specifies the
+        section of the message to count. For example::
+
+ my_message.section_count(my_message.answer)
+ my_message.section_count(dns.message.ANSWER)
+ my_message.section_count("ANSWER")
+ """
+
+ if isinstance(section, int):
+ section_number = section
+ section = self.section_from_number(section_number)
+ elif isinstance(section, str):
+ section_number = self._section_enum.from_text(section)
+ section = self.section_from_number(section_number)
+ else:
+ section_number = self.section_number(section)
+ count = sum(max(1, len(rrs)) for rrs in section)
+ if section_number == MessageSection.ADDITIONAL:
+ if self.opt is not None:
+ count += 1
+ if self.tsig is not None:
+ count += 1
+ return count
+
+ def _compute_opt_reserve(self) -> int:
+ """Compute the size required for the OPT RR, padding excluded"""
+ if not self.opt:
+ return 0
+ # 1 byte for the root name, 10 for the standard RR fields
+ size = 11
+ # This would be more efficient if options had a size() method, but we won't
+ # worry about that for now. We also don't worry if there is an existing padding
+ # option, as it is unlikely and probably harmless, as the worst case is that we
+ # may add another, and this seems to be legal.
+ opt_rdata = cast(dns.rdtypes.ANY.OPT.OPT, self.opt[0])
+ for option in opt_rdata.options:
+ wire = option.to_wire()
+ # We add 4 here to account for the option type and length
+ size += len(wire) + 4
+ if self.pad:
+ # Padding will be added, so again add the option type and length.
+ size += 4
+ return size
+
+ def _compute_tsig_reserve(self) -> int:
+ """Compute the size required for the TSIG RR"""
+        # This would be more efficient if TSIGs had a size method, but we won't
+        # worry about that for now. Also, we can't really cope with the potential
+        # compressibility of the TSIG owner name, so we estimate with the uncompressed
+        # size. We will disable compression when TSIG and padding are both active
+        # so that the padding comes out right.
+ if not self.tsig:
+ return 0
+ f = io.BytesIO()
+ self.tsig.to_wire(f)
+ return len(f.getvalue())
+
+ def to_wire(
+ self,
+ origin: dns.name.Name | None = None,
+ max_size: int = 0,
+ multi: bool = False,
+ tsig_ctx: Any | None = None,
+ prepend_length: bool = False,
+ prefer_truncation: bool = False,
+ **kw: Dict[str, Any],
+ ) -> bytes:
+ """Return a string containing the message in DNS compressed wire
+ format.
+
+ Additional keyword arguments are passed to the RRset ``to_wire()``
+ method.
+
+ *origin*, a ``dns.name.Name`` or ``None``, the origin to be appended
+ to any relative names. If ``None``, and the message has an origin
+ attribute that is not ``None``, then it will be used.
+
+ *max_size*, an ``int``, the maximum size of the wire format
+ output; default is 0, which means "the message's request
+ payload, if nonzero, or 65535".
+
+ *multi*, a ``bool``, should be set to ``True`` if this message is
+ part of a multiple message sequence.
+
+ *tsig_ctx*, a ``dns.tsig.HMACTSig`` or ``dns.tsig.GSSTSig`` object, the
+ ongoing TSIG context, used when signing zone transfers.
+
+ *prepend_length*, a ``bool``, should be set to ``True`` if the caller
+ wants the message length prepended to the message itself. This is
+ useful for messages sent over TCP, TLS (DoT), or QUIC (DoQ).
+
+ *prefer_truncation*, a ``bool``, should be set to ``True`` if the caller
+ wants the message to be truncated if it would otherwise exceed the
+ maximum length. If the truncation occurs before the additional section,
+ the TC bit will be set.
+
+ Raises ``dns.exception.TooBig`` if *max_size* was exceeded.
+
+ Returns a ``bytes``.
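+
+        An illustrative sketch of rendering for a stream transport, where
+        the two-octet length prefix is needed::
+
+            wire = my_message.to_wire(prepend_length=True)
+            # wire can now be written to a TCP stream as-is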
+ """
+
+ if origin is None and self.origin is not None:
+ origin = self.origin
+ if max_size == 0:
+ if self.request_payload != 0:
+ max_size = self.request_payload
+ else:
+ max_size = 65535
+ if max_size < 512:
+ max_size = 512
+ elif max_size > 65535:
+ max_size = 65535
+ r = dns.renderer.Renderer(self.id, self.flags, max_size, origin)
+ opt_reserve = self._compute_opt_reserve()
+ r.reserve(opt_reserve)
+ tsig_reserve = self._compute_tsig_reserve()
+ r.reserve(tsig_reserve)
+ try:
+ for rrset in self.question:
+ r.add_question(rrset.name, rrset.rdtype, rrset.rdclass)
+ for rrset in self.answer:
+ r.add_rrset(dns.renderer.ANSWER, rrset, **kw)
+ for rrset in self.authority:
+ r.add_rrset(dns.renderer.AUTHORITY, rrset, **kw)
+ for rrset in self.additional:
+ r.add_rrset(dns.renderer.ADDITIONAL, rrset, **kw)
+ except dns.exception.TooBig:
+ if prefer_truncation:
+ if r.section < dns.renderer.ADDITIONAL:
+ r.flags |= dns.flags.TC
+ else:
+ raise
+ r.release_reserved()
+ if self.opt is not None:
+ r.add_opt(self.opt, self.pad, opt_reserve, tsig_reserve)
+ r.write_header()
+ if self.tsig is not None:
+ if self.want_tsig_sign:
+ (new_tsig, ctx) = dns.tsig.sign(
+ r.get_wire(),
+ self.keyring,
+ self.tsig[0],
+ int(time.time()),
+ self.request_mac,
+ tsig_ctx,
+ multi,
+ )
+ self.tsig.clear()
+ self.tsig.add(new_tsig)
+ if multi:
+ self.tsig_ctx = ctx
+ r.add_rrset(dns.renderer.ADDITIONAL, self.tsig)
+ r.write_header()
+ wire = r.get_wire()
+ self.wire = wire
+ if prepend_length:
+ wire = len(wire).to_bytes(2, "big") + wire
+ return wire
+
+ @staticmethod
+ def _make_tsig(
+ keyname, algorithm, time_signed, fudge, mac, original_id, error, other
+ ):
+ tsig = dns.rdtypes.ANY.TSIG.TSIG(
+ dns.rdataclass.ANY,
+ dns.rdatatype.TSIG,
+ algorithm,
+ time_signed,
+ fudge,
+ mac,
+ original_id,
+ error,
+ other,
+ )
+ return dns.rrset.from_rdata(keyname, 0, tsig)
+
+ def use_tsig(
+ self,
+ keyring: Any,
+ keyname: dns.name.Name | str | None = None,
+ fudge: int = 300,
+ original_id: int | None = None,
+ tsig_error: int = 0,
+ other_data: bytes = b"",
+ algorithm: dns.name.Name | str = dns.tsig.default_algorithm,
+ ) -> None:
+ """When sending, a TSIG signature using the specified key
+ should be added.
+
+ *keyring*, a ``dict``, ``callable`` or ``dns.tsig.Key``, is either
+ the TSIG keyring or key to use.
+
+        A keyring ``dict`` maps a TSIG key name, a ``dns.name.Name``, to
+        either a ``dns.tsig.Key`` or a TSIG secret, a ``bytes``.
+ If a ``dict`` *keyring* is specified but a *keyname* is not, the key
+ used will be the first key in the *keyring*. Note that the order of
+ keys in a dictionary is not defined, so applications should supply a
+ keyname when a ``dict`` keyring is used, unless they know the keyring
+ contains only one key. If a ``callable`` keyring is specified, the
+ callable will be called with the message and the keyname, and is
+ expected to return a key.
+
+ *keyname*, a ``dns.name.Name``, ``str`` or ``None``, the name of
+ this TSIG key to use; defaults to ``None``. If *keyring* is a
+ ``dict``, the key must be defined in it. If *keyring* is a
+ ``dns.tsig.Key``, this is ignored.
+
+ *fudge*, an ``int``, the TSIG time fudge.
+
+ *original_id*, an ``int``, the TSIG original id. If ``None``,
+ the message's id is used.
+
+ *tsig_error*, an ``int``, the TSIG error code.
+
+ *other_data*, a ``bytes``, the TSIG other data.
+
+ *algorithm*, a ``dns.name.Name`` or ``str``, the TSIG algorithm to use. This is
+ only used if *keyring* is a ``dict``, and the key entry is a ``bytes``.
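+
+        An illustrative sketch; the key name and base64 secret below are
+        placeholders, not working values::
+
+            key = dns.tsig.Key("keyname.", "c2VjcmV0MTIz")
+            my_message.use_tsig(key)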
+ """
+
+ if isinstance(keyring, dns.tsig.Key):
+ key = keyring
+ keyname = key.name
+ elif callable(keyring):
+ key = keyring(self, keyname)
+ else:
+ if isinstance(keyname, str):
+ keyname = dns.name.from_text(keyname)
+ if keyname is None:
+ keyname = next(iter(keyring))
+ key = keyring[keyname]
+ if isinstance(key, bytes):
+ key = dns.tsig.Key(keyname, key, algorithm)
+ self.keyring = key
+ if original_id is None:
+ original_id = self.id
+ self.tsig = self._make_tsig(
+ keyname,
+ self.keyring.algorithm,
+ 0,
+ fudge,
+ b"\x00" * dns.tsig.mac_sizes[self.keyring.algorithm],
+ original_id,
+ tsig_error,
+ other_data,
+ )
+ self.want_tsig_sign = True
+
+ @property
+ def keyname(self) -> dns.name.Name | None:
+ if self.tsig:
+ return self.tsig.name
+ else:
+ return None
+
+ @property
+ def keyalgorithm(self) -> dns.name.Name | None:
+ if self.tsig:
+ rdata = cast(dns.rdtypes.ANY.TSIG.TSIG, self.tsig[0])
+ return rdata.algorithm
+ else:
+ return None
+
+ @property
+ def mac(self) -> bytes | None:
+ if self.tsig:
+ rdata = cast(dns.rdtypes.ANY.TSIG.TSIG, self.tsig[0])
+ return rdata.mac
+ else:
+ return None
+
+ @property
+ def tsig_error(self) -> int | None:
+ if self.tsig:
+ rdata = cast(dns.rdtypes.ANY.TSIG.TSIG, self.tsig[0])
+ return rdata.error
+ else:
+ return None
+
+ @property
+ def had_tsig(self) -> bool:
+ return bool(self.tsig)
+
+ @staticmethod
+ def _make_opt(flags=0, payload=DEFAULT_EDNS_PAYLOAD, options=None):
+ opt = dns.rdtypes.ANY.OPT.OPT(payload, dns.rdatatype.OPT, options or ())
+ return dns.rrset.from_rdata(dns.name.root, int(flags), opt)
+
+ def use_edns(
+ self,
+ edns: int | bool | None = 0,
+ ednsflags: int = 0,
+ payload: int = DEFAULT_EDNS_PAYLOAD,
+ request_payload: int | None = None,
+ options: List[dns.edns.Option] | None = None,
+ pad: int = 0,
+ ) -> None:
+ """Configure EDNS behavior.
+
+ *edns*, an ``int``, is the EDNS level to use. Specifying ``None``, ``False``,
+ or ``-1`` means "do not use EDNS", and in this case the other parameters are
+ ignored. Specifying ``True`` is equivalent to specifying 0, i.e. "use EDNS0".
+
+ *ednsflags*, an ``int``, the EDNS flag values.
+
+ *payload*, an ``int``, is the EDNS sender's payload field, which is the maximum
+ size of UDP datagram the sender can handle. I.e. how big a response to this
+ message can be.
+
+ *request_payload*, an ``int``, is the EDNS payload size to use when sending this
+ message. If not specified, defaults to the value of *payload*.
+
+ *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS options.
+
+ *pad*, a non-negative ``int``. If 0, the default, do not pad; otherwise add
+ padding bytes to make the message size a multiple of *pad*. Note that if
+ padding is non-zero, an EDNS PADDING option will always be added to the
+ message.
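+
+        An illustrative sketch enabling EDNS0 with a 1232-octet payload and
+        RFC 8467 style padding (the values are placeholders)::
+
+            my_message.use_edns(0, payload=1232, pad=468)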
+ """
+
+ if edns is None or edns is False:
+ edns = -1
+ elif edns is True:
+ edns = 0
+ if edns < 0:
+ self.opt = None
+ self.request_payload = 0
+ else:
+ # make sure the EDNS version in ednsflags agrees with edns
+ ednsflags &= 0xFF00FFFF
+ ednsflags |= edns << 16
+ if options is None:
+ options = []
+ self.opt = self._make_opt(ednsflags, payload, options)
+ if request_payload is None:
+ request_payload = payload
+ self.request_payload = request_payload
+ if pad < 0:
+ raise ValueError("pad must be non-negative")
+ self.pad = pad
+
+ @property
+ def edns(self) -> int:
+ if self.opt:
+ return (self.ednsflags & 0xFF0000) >> 16
+ else:
+ return -1
+
+ @property
+ def ednsflags(self) -> int:
+ if self.opt:
+ return self.opt.ttl
+ else:
+ return 0
+
+ @ednsflags.setter
+ def ednsflags(self, v):
+ if self.opt:
+ self.opt.ttl = v
+ elif v:
+ self.opt = self._make_opt(v)
+
+ @property
+ def payload(self) -> int:
+ if self.opt:
+ rdata = cast(dns.rdtypes.ANY.OPT.OPT, self.opt[0])
+ return rdata.payload
+ else:
+ return 0
+
+ @property
+ def options(self) -> Tuple:
+ if self.opt:
+ rdata = cast(dns.rdtypes.ANY.OPT.OPT, self.opt[0])
+ return rdata.options
+ else:
+ return ()
+
+ def want_dnssec(self, wanted: bool = True) -> None:
+ """Enable or disable 'DNSSEC desired' flag in requests.
+
+ *wanted*, a ``bool``. If ``True``, then DNSSEC data is
+ desired in the response, EDNS is enabled if required, and then
+ the DO bit is set. If ``False``, the DO bit is cleared if
+ EDNS is enabled.
+ """
+
+ if wanted:
+ self.ednsflags |= dns.flags.DO
+ elif self.opt:
+ self.ednsflags &= ~int(dns.flags.DO)
+
+ def rcode(self) -> dns.rcode.Rcode:
+ """Return the rcode.
+
+ Returns a ``dns.rcode.Rcode``.
+ """
+ return dns.rcode.from_flags(int(self.flags), int(self.ednsflags))
+
+ def set_rcode(self, rcode: dns.rcode.Rcode) -> None:
+ """Set the rcode.
+
+ *rcode*, a ``dns.rcode.Rcode``, is the rcode to set.
+ """
+ (value, evalue) = dns.rcode.to_flags(rcode)
+ self.flags &= 0xFFF0
+ self.flags |= value
+ self.ednsflags &= 0x00FFFFFF
+ self.ednsflags |= evalue
+
+ def opcode(self) -> dns.opcode.Opcode:
+ """Return the opcode.
+
+ Returns a ``dns.opcode.Opcode``.
+ """
+ return dns.opcode.from_flags(int(self.flags))
+
+ def set_opcode(self, opcode: dns.opcode.Opcode) -> None:
+ """Set the opcode.
+
+ *opcode*, a ``dns.opcode.Opcode``, is the opcode to set.
+ """
+ self.flags &= 0x87FF
+ self.flags |= dns.opcode.to_flags(opcode)
+
+ def get_options(self, otype: dns.edns.OptionType) -> List[dns.edns.Option]:
+ """Return the list of options of the specified type."""
+ return [option for option in self.options if option.otype == otype]
+
+ def extended_errors(self) -> List[dns.edns.EDEOption]:
+ """Return the list of Extended DNS Error (EDE) options in the message"""
+ return cast(List[dns.edns.EDEOption], self.get_options(dns.edns.OptionType.EDE))
+
+ def _get_one_rr_per_rrset(self, value):
+ # What the caller picked is fine.
+ return value
+
+ # pylint: disable=unused-argument
+
+ def _parse_rr_header(self, section, name, rdclass, rdtype):
+ return (rdclass, rdtype, None, False)
+
+ # pylint: enable=unused-argument
+
+ def _parse_special_rr_header(self, section, count, position, name, rdclass, rdtype):
+ if rdtype == dns.rdatatype.OPT:
+ if (
+ section != MessageSection.ADDITIONAL
+ or self.opt
+ or name != dns.name.root
+ ):
+ raise BadEDNS
+ elif rdtype == dns.rdatatype.TSIG:
+ if (
+ section != MessageSection.ADDITIONAL
+                or rdclass != dns.rdataclass.ANY
+ or position != count - 1
+ ):
+ raise BadTSIG
+ return (rdclass, rdtype, None, False)
+
+
+class ChainingResult:
+ """The result of a call to dns.message.QueryMessage.resolve_chaining().
+
+    The ``answer`` attribute is the answer RRset, or ``None`` if it doesn't
+ exist.
+
+ The ``canonical_name`` attribute is the canonical name after all
+    chaining has been applied (this is the same name as ``answer.name`` in
+    cases where ``answer`` is not ``None``).
+
+ The ``minimum_ttl`` attribute is the minimum TTL, i.e. the TTL to
+ use if caching the data. It is the smallest of all the CNAME TTLs
+ and either the answer TTL if it exists or the SOA TTL and SOA
+ minimum values for negative answers.
+
+ The ``cnames`` attribute is a list of all the CNAME RRSets followed to
+ get to the canonical name.
+ """
+
+ def __init__(
+ self,
+ canonical_name: dns.name.Name,
+ answer: dns.rrset.RRset | None,
+ minimum_ttl: int,
+ cnames: List[dns.rrset.RRset],
+ ):
+ self.canonical_name = canonical_name
+ self.answer = answer
+ self.minimum_ttl = minimum_ttl
+ self.cnames = cnames
+
+
+class QueryMessage(Message):
+ def resolve_chaining(self) -> ChainingResult:
+ """Follow the CNAME chain in the response to determine the answer
+ RRset.
+
+ Raises ``dns.message.NotQueryResponse`` if the message is not
+ a response.
+
+ Raises ``dns.message.ChainTooLong`` if the CNAME chain is too long.
+
+ Raises ``dns.message.AnswerForNXDOMAIN`` if the rcode is NXDOMAIN
+ but an answer was found.
+
+ Raises ``dns.exception.FormError`` if the question count is not 1.
+
+ Returns a ChainingResult object.
+ """
+ if self.flags & dns.flags.QR == 0:
+ raise NotQueryResponse
+ if len(self.question) != 1:
+ raise dns.exception.FormError
+ question = self.question[0]
+ qname = question.name
+ min_ttl = dns.ttl.MAX_TTL
+ answer = None
+ count = 0
+ cnames = []
+ while count < MAX_CHAIN:
+ try:
+ answer = self.find_rrset(
+ self.answer, qname, question.rdclass, question.rdtype
+ )
+ min_ttl = min(min_ttl, answer.ttl)
+ break
+ except KeyError:
+ if question.rdtype != dns.rdatatype.CNAME:
+ try:
+ crrset = self.find_rrset(
+ self.answer, qname, question.rdclass, dns.rdatatype.CNAME
+ )
+ cnames.append(crrset)
+ min_ttl = min(min_ttl, crrset.ttl)
+ for rd in crrset:
+ qname = rd.target
+ break
+ count += 1
+ continue
+ except KeyError:
+ # Exit the chaining loop
+ break
+ else:
+ # Exit the chaining loop
+ break
+ if count >= MAX_CHAIN:
+ raise ChainTooLong
+ if self.rcode() == dns.rcode.NXDOMAIN and answer is not None:
+ raise AnswerForNXDOMAIN
+ if answer is None:
+ # Further minimize the TTL with NCACHE.
+ auname = qname
+ while True:
+ # Look for an SOA RR whose owner name is a superdomain
+ # of qname.
+ try:
+ srrset = self.find_rrset(
+ self.authority, auname, question.rdclass, dns.rdatatype.SOA
+ )
+ srdata = cast(dns.rdtypes.ANY.SOA.SOA, srrset[0])
+ min_ttl = min(min_ttl, srrset.ttl, srdata.minimum)
+ break
+ except KeyError:
+ try:
+ auname = auname.parent()
+ except dns.name.NoParent:
+ break
+ return ChainingResult(qname, answer, min_ttl, cnames)
+
+ def canonical_name(self) -> dns.name.Name:
+ """Return the canonical name of the first name in the question
+ section.
+
+ Raises ``dns.message.NotQueryResponse`` if the message is not
+ a response.
+
+ Raises ``dns.message.ChainTooLong`` if the CNAME chain is too long.
+
+ Raises ``dns.message.AnswerForNXDOMAIN`` if the rcode is NXDOMAIN
+ but an answer was found.
+
+ Raises ``dns.exception.FormError`` if the question count is not 1.
+ """
+ return self.resolve_chaining().canonical_name
+
+
+def _maybe_import_update():
+ # We avoid circular imports by doing this here. We do it in another
+ # function as doing it in _message_factory_from_opcode() makes "dns"
+ # a local symbol, and the first line fails :)
+
+ # pylint: disable=redefined-outer-name,import-outside-toplevel,unused-import
+ import dns.update # noqa: F401
+
+
+def _message_factory_from_opcode(opcode):
+ if opcode == dns.opcode.QUERY:
+ return QueryMessage
+ elif opcode == dns.opcode.UPDATE:
+ _maybe_import_update()
+ return dns.update.UpdateMessage # pyright: ignore
+ else:
+ return Message
+
+
+class _WireReader:
+ """Wire format reader.
+
+ parser: the binary parser
+ message: The message object being built
+ initialize_message: Callback to set message parsing options
+ question_only: Are we only reading the question?
+ one_rr_per_rrset: Put each RR into its own RRset?
+ keyring: TSIG keyring
+ ignore_trailing: Ignore trailing junk at end of request?
+ multi: Is this message part of a multi-message sequence?
+ continue_on_error: try to extract as much information as possible from
+ the message, accumulating MessageErrors in the *errors* attribute instead of
+ raising them.
+ """
+
+ def __init__(
+ self,
+ wire,
+ initialize_message,
+ question_only=False,
+ one_rr_per_rrset=False,
+ ignore_trailing=False,
+ keyring=None,
+ multi=False,
+ continue_on_error=False,
+ ):
+ self.parser = dns.wire.Parser(wire)
+ self.message = None
+ self.initialize_message = initialize_message
+ self.question_only = question_only
+ self.one_rr_per_rrset = one_rr_per_rrset
+ self.ignore_trailing = ignore_trailing
+ self.keyring = keyring
+ self.multi = multi
+ self.continue_on_error = continue_on_error
+ self.errors = []
+
+ def _get_question(self, section_number, qcount):
+ """Read the next *qcount* records from the wire data and add them to
+ the question section.
+ """
+ assert self.message is not None
+ section = self.message.sections[section_number]
+ for _ in range(qcount):
+ qname = self.parser.get_name(self.message.origin)
+ (rdtype, rdclass) = self.parser.get_struct("!HH")
+ (rdclass, rdtype, _, _) = self.message._parse_rr_header(
+ section_number, qname, rdclass, rdtype
+ )
+ self.message.find_rrset(
+ section, qname, rdclass, rdtype, create=True, force_unique=True
+ )
+
+ def _add_error(self, e):
+ self.errors.append(MessageError(e, self.parser.current))
+
+ def _get_section(self, section_number, count):
+ """Read the next I{count} records from the wire data and add them to
+ the specified section.
+
+ section_number: the section of the message to which to add records
+ count: the number of records to read
+ """
+ assert self.message is not None
+ section = self.message.sections[section_number]
+ force_unique = self.one_rr_per_rrset
+ for i in range(count):
+ rr_start = self.parser.current
+ absolute_name = self.parser.get_name()
+ if self.message.origin is not None:
+ name = absolute_name.relativize(self.message.origin)
+ else:
+ name = absolute_name
+ (rdtype, rdclass, ttl, rdlen) = self.parser.get_struct("!HHIH")
+ if rdtype in (dns.rdatatype.OPT, dns.rdatatype.TSIG):
+ (
+ rdclass,
+ rdtype,
+ deleting,
+ empty,
+ ) = self.message._parse_special_rr_header(
+ section_number, count, i, name, rdclass, rdtype
+ )
+ else:
+ (rdclass, rdtype, deleting, empty) = self.message._parse_rr_header(
+ section_number, name, rdclass, rdtype
+ )
+ rdata_start = self.parser.current
+ try:
+ if empty:
+ if rdlen > 0:
+ raise dns.exception.FormError
+ rd = None
+ covers = dns.rdatatype.NONE
+ else:
+ with self.parser.restrict_to(rdlen):
+ rd = dns.rdata.from_wire_parser(
+ rdclass, # pyright: ignore
+ rdtype,
+ self.parser,
+ self.message.origin,
+ )
+ covers = rd.covers()
+ if self.message.xfr and rdtype == dns.rdatatype.SOA:
+ force_unique = True
+ if rdtype == dns.rdatatype.OPT:
+ self.message.opt = dns.rrset.from_rdata(name, ttl, rd)
+ elif rdtype == dns.rdatatype.TSIG:
+ trd = cast(dns.rdtypes.ANY.TSIG.TSIG, rd)
+ if self.keyring is None or self.keyring is True:
+ raise UnknownTSIGKey("got signed message without keyring")
+ elif isinstance(self.keyring, dict):
+ key = self.keyring.get(absolute_name)
+ if isinstance(key, bytes):
+ key = dns.tsig.Key(absolute_name, key, trd.algorithm)
+ elif callable(self.keyring):
+ key = self.keyring(self.message, absolute_name)
+ else:
+ key = self.keyring
+ if key is None:
+ raise UnknownTSIGKey(f"key '{name}' unknown")
+ if key:
+ self.message.keyring = key
+ self.message.tsig_ctx = dns.tsig.validate(
+ self.parser.wire,
+ key,
+ absolute_name,
+ rd,
+ int(time.time()),
+ self.message.request_mac,
+ rr_start,
+ self.message.tsig_ctx,
+ self.multi,
+ )
+ self.message.tsig = dns.rrset.from_rdata(absolute_name, 0, rd)
+ else:
+ rrset = self.message.find_rrset(
+ section,
+ name,
+ rdclass, # pyright: ignore
+ rdtype,
+ covers,
+ deleting,
+ True,
+ force_unique,
+ )
+ if rd is not None:
+ if ttl > 0x7FFFFFFF:
+ ttl = 0
+ rrset.add(rd, ttl)
+ except Exception as e:
+ if self.continue_on_error:
+ self._add_error(e)
+ self.parser.seek(rdata_start + rdlen)
+ else:
+ raise
+
+ def read(self):
+ """Read a wire format DNS message and build a dns.message.Message
+ object."""
+
+ if self.parser.remaining() < 12:
+ raise ShortHeader
+ (id, flags, qcount, ancount, aucount, adcount) = self.parser.get_struct(
+ "!HHHHHH"
+ )
+ factory = _message_factory_from_opcode(dns.opcode.from_flags(flags))
+ self.message = factory(id=id)
+ self.message.flags = dns.flags.Flag(flags)
+ self.message.wire = self.parser.wire
+ self.initialize_message(self.message)
+ self.one_rr_per_rrset = self.message._get_one_rr_per_rrset(
+ self.one_rr_per_rrset
+ )
+ try:
+ self._get_question(MessageSection.QUESTION, qcount)
+ if self.question_only:
+ return self.message
+ self._get_section(MessageSection.ANSWER, ancount)
+ self._get_section(MessageSection.AUTHORITY, aucount)
+ self._get_section(MessageSection.ADDITIONAL, adcount)
+ if not self.ignore_trailing and self.parser.remaining() != 0:
+ raise TrailingJunk
+ if self.multi and self.message.tsig_ctx and not self.message.had_tsig:
+ self.message.tsig_ctx.update(self.parser.wire)
+ except Exception as e:
+ if self.continue_on_error:
+ self._add_error(e)
+ else:
+ raise
+ return self.message
+
+
+def from_wire(
+ wire: bytes,
+ keyring: Any | None = None,
+ request_mac: bytes | None = b"",
+ xfr: bool = False,
+ origin: dns.name.Name | None = None,
+ tsig_ctx: dns.tsig.HMACTSig | dns.tsig.GSSTSig | None = None,
+ multi: bool = False,
+ question_only: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ raise_on_truncation: bool = False,
+ continue_on_error: bool = False,
+) -> Message:
+ """Convert a DNS wire format message into a message object.
+
+ *keyring*, a ``dns.tsig.Key``, ``dict``, ``bool``, or ``None``, the key or keyring
+ to use if the message is signed. If ``None`` or ``True``, then trying to decode
+ a message with a TSIG will fail as it cannot be validated. If ``False``, then
+ TSIG validation is disabled.
+
+ *request_mac*, a ``bytes`` or ``None``. If the message is a response to a
+ TSIG-signed request, *request_mac* should be set to the MAC of that request.
+
+ *xfr*, a ``bool``, should be set to ``True`` if this message is part of a zone
+ transfer.
+
+ *origin*, a ``dns.name.Name`` or ``None``. If the message is part of a zone
+ transfer, *origin* should be the origin name of the zone. If not ``None``, names
+ will be relativized to the origin.
+
+ *tsig_ctx*, a ``dns.tsig.HMACTSig`` or ``dns.tsig.GSSTSig`` object, the ongoing TSIG
+ context, used when validating zone transfers.
+
+ *multi*, a ``bool``, should be set to ``True`` if this message is part of a multiple
+ message sequence.
+
+ *question_only*, a ``bool``. If ``True``, read only up to the end of the question
+ section.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing junk at end of the
+ message.
+
+ *raise_on_truncation*, a ``bool``. If ``True``, raise an exception if the TC bit is
+ set.
+
+ *continue_on_error*, a ``bool``. If ``True``, try to continue parsing even if
+ errors occur. Erroneous rdata will be ignored. Errors will be accumulated as a
+ list of MessageError objects in the message's ``errors`` attribute. This option is
+ recommended only for DNS analysis tools, or for use in a server as part of an error
+ handling path. The default is ``False``.
+
+ Raises ``dns.message.ShortHeader`` if the message is less than 12 octets long.
+
+ Raises ``dns.message.TrailingJunk`` if there were octets in the message past the end
+ of the proper DNS message, and *ignore_trailing* is ``False``.
+
+ Raises ``dns.message.BadEDNS`` if an OPT record was in the wrong section, or
+ occurred more than once.
+
+ Raises ``dns.message.BadTSIG`` if a TSIG record was not the last record of the
+ additional data section.
+
+ Raises ``dns.message.Truncated`` if the TC flag is set and *raise_on_truncation* is
+ ``True``.
+
+ Returns a ``dns.message.Message``.
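+
+    An illustrative sketch; ``wire`` is assumed to be ``bytes`` read from a
+    socket::
+
+        response = dns.message.from_wire(wire)
+        for rrset in response.answer:
+            print(rrset)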
+ """
+
+ # We permit None for request_mac solely for backwards compatibility
+ if request_mac is None:
+ request_mac = b""
+
+ def initialize_message(message):
+ message.request_mac = request_mac
+ message.xfr = xfr
+ message.origin = origin
+ message.tsig_ctx = tsig_ctx
+
+ reader = _WireReader(
+ wire,
+ initialize_message,
+ question_only,
+ one_rr_per_rrset,
+ ignore_trailing,
+ keyring,
+ multi,
+ continue_on_error,
+ )
+ try:
+ m = reader.read()
+ except dns.exception.FormError:
+ if (
+ reader.message
+ and (reader.message.flags & dns.flags.TC)
+ and raise_on_truncation
+ ):
+ raise Truncated(message=reader.message)
+ else:
+ raise
+ # Reading a truncated message might not have any errors, so we
+ # have to do this check here too.
+ if m.flags & dns.flags.TC and raise_on_truncation:
+ raise Truncated(message=m)
+ if continue_on_error:
+ m.errors = reader.errors
+
+ return m
+
+
+class _TextReader:
+ """Text format reader.
+
+ tok: the tokenizer.
+ message: The message object being built.
+ last_name: The most recently read name when building a message object.
+ one_rr_per_rrset: Put each RR into its own RRset?
+ origin: The origin for relative names
+ relativize: relativize names?
+ relativize_to: the origin to relativize to.
+ """
+
+ def __init__(
+ self,
+ text: str,
+ idna_codec: dns.name.IDNACodec | None,
+ one_rr_per_rrset: bool = False,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ relativize_to: dns.name.Name | None = None,
+ ):
+ self.message: Message | None = None # mypy: ignore
+ self.tok = dns.tokenizer.Tokenizer(text, idna_codec=idna_codec)
+ self.last_name = None
+ self.one_rr_per_rrset = one_rr_per_rrset
+ self.origin = origin
+ self.relativize = relativize
+ self.relativize_to = relativize_to
+ self.id = None
+ self.edns = -1
+ self.ednsflags = 0
+ self.payload = DEFAULT_EDNS_PAYLOAD
+ self.rcode = None
+ self.opcode = dns.opcode.QUERY
+ self.flags = 0
+
+ def _header_line(self, _):
+ """Process one line from the text format header section."""
+
+ token = self.tok.get()
+ what = token.value
+ if what == "id":
+ self.id = self.tok.get_int()
+ elif what == "flags":
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.flags = self.flags | dns.flags.from_text(token.value)
+ elif what == "edns":
+ self.edns = self.tok.get_int()
+ self.ednsflags = self.ednsflags | (self.edns << 16)
+ elif what == "eflags":
+ if self.edns < 0:
+ self.edns = 0
+ while True:
+ token = self.tok.get()
+ if not token.is_identifier():
+ self.tok.unget(token)
+ break
+ self.ednsflags = self.ednsflags | dns.flags.edns_from_text(token.value)
+ elif what == "payload":
+ self.payload = self.tok.get_int()
+ if self.edns < 0:
+ self.edns = 0
+ elif what == "opcode":
+ text = self.tok.get_string()
+ self.opcode = dns.opcode.from_text(text)
+ self.flags = self.flags | dns.opcode.to_flags(self.opcode)
+ elif what == "rcode":
+ text = self.tok.get_string()
+ self.rcode = dns.rcode.from_text(text)
+ else:
+ raise UnknownHeaderField
+ self.tok.get_eol()
+
+ def _question_line(self, section_number):
+ """Process one line from the text format question section."""
+
+ assert self.message is not None
+ section = self.message.sections[section_number]
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = self.tok.as_name(
+ token, self.message.origin, self.relativize, self.relativize_to
+ )
+ name = self.last_name
+ if name is None:
+ raise NoPreviousName
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ (rdclass, rdtype, _, _) = self.message._parse_rr_header(
+ section_number, name, rdclass, rdtype
+ )
+ self.message.find_rrset(
+ section, name, rdclass, rdtype, create=True, force_unique=True
+ )
+ self.tok.get_eol()
+
+ def _rr_line(self, section_number):
+ """Process one line from the text format answer, authority, or
+ additional data sections.
+ """
+
+ assert self.message is not None
+ section = self.message.sections[section_number]
+ # Name
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = self.tok.as_name(
+ token, self.message.origin, self.relativize, self.relativize_to
+ )
+ name = self.last_name
+ if name is None:
+ raise NoPreviousName
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ # TTL
+ try:
+ ttl = int(token.value, 0)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ ttl = 0
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ rdclass = dns.rdataclass.IN
+ # Type
+ rdtype = dns.rdatatype.from_text(token.value)
+ (rdclass, rdtype, deleting, empty) = self.message._parse_rr_header(
+ section_number, name, rdclass, rdtype
+ )
+ token = self.tok.get()
+ if empty and not token.is_eol_or_eof():
+ raise dns.exception.SyntaxError
+ if not empty and token.is_eol_or_eof():
+ raise dns.exception.UnexpectedEnd
+ if not token.is_eol_or_eof():
+ self.tok.unget(token)
+ rd = dns.rdata.from_text(
+ rdclass,
+ rdtype,
+ self.tok,
+ self.message.origin,
+ self.relativize,
+ self.relativize_to,
+ )
+ covers = rd.covers()
+ else:
+ rd = None
+ covers = dns.rdatatype.NONE
+ rrset = self.message.find_rrset(
+ section,
+ name,
+ rdclass,
+ rdtype,
+ covers,
+ deleting,
+ True,
+ self.one_rr_per_rrset,
+ )
+ if rd is not None:
+ rrset.add(rd, ttl)
+
+ def _make_message(self):
+ factory = _message_factory_from_opcode(self.opcode)
+ message = factory(id=self.id)
+ message.flags = self.flags
+ if self.edns >= 0:
+ message.use_edns(self.edns, self.ednsflags, self.payload)
+ if self.rcode:
+ message.set_rcode(self.rcode)
+ if self.origin:
+ message.origin = self.origin
+ return message
+
+ def read(self):
+ """Read a text format DNS message and build a dns.message.Message
+ object."""
+
+ line_method = self._header_line
+ section_number = None
+        while True:
+ token = self.tok.get(True, True)
+ if token.is_eol_or_eof():
+ break
+ if token.is_comment():
+ u = token.value.upper()
+ if u == "HEADER":
+ line_method = self._header_line
+
+ if self.message:
+ message = self.message
+ else:
+ # If we don't have a message, create one with the current
+ # opcode, so that we know which section names to parse.
+ message = self._make_message()
+ try:
+ section_number = message._section_enum.from_text(u)
+ # We found a section name. If we don't have a message,
+ # use the one we just created.
+ if not self.message:
+ self.message = message
+ self.one_rr_per_rrset = message._get_one_rr_per_rrset(
+ self.one_rr_per_rrset
+ )
+ if section_number == MessageSection.QUESTION:
+ line_method = self._question_line
+ else:
+ line_method = self._rr_line
+ except Exception:
+ # It's just a comment.
+ pass
+ self.tok.get_eol()
+ continue
+ self.tok.unget(token)
+ line_method(section_number)
+ if not self.message:
+ self.message = self._make_message()
+ return self.message
+
+
+def from_text(
+ text: str,
+ idna_codec: dns.name.IDNACodec | None = None,
+ one_rr_per_rrset: bool = False,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ relativize_to: dns.name.Name | None = None,
+) -> Message:
+ """Convert the text format message into a message object.
+
+ The reader stops after reading the first blank line in the input to
+ facilitate reading multiple messages from a single file with
+ ``dns.message.from_file()``.
+
+ *text*, a ``str``, the text format message.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, then each RR is put
+ into its own rrset. The default is ``False``.
+
+ *origin*, a ``dns.name.Name`` (or ``None``), the
+ origin to use for relative names.
+
+ *relativize*, a ``bool``. If true, name will be relativized.
+
+ *relativize_to*, a ``dns.name.Name`` (or ``None``), the origin to use
+ when relativizing names. If not set, the *origin* value will be used.
+
+ Raises ``dns.message.UnknownHeaderField`` if a header is unknown.
+
+ Raises ``dns.exception.SyntaxError`` if the text is badly formed.
+
+    Returns a ``dns.message.Message`` object.
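+
+    An illustrative sketch using the ``to_text()`` round trip (``my_message``
+    is a placeholder)::
+
+        text = my_message.to_text()
+        same_message = dns.message.from_text(text)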
+ """
+
+ # 'text' can also be a file, but we don't publish that fact
+ # since it's an implementation detail. The official file
+ # interface is from_file().
+
+ reader = _TextReader(
+ text, idna_codec, one_rr_per_rrset, origin, relativize, relativize_to
+ )
+ return reader.read()
+
+
+def from_file(
+ f: Any,
+ idna_codec: dns.name.IDNACodec | None = None,
+ one_rr_per_rrset: bool = False,
+) -> Message:
+ """Read the next text format message from the specified file.
+
+ Message blocks are separated by a single blank line.
+
+ *f*, a ``file`` or ``str``. If *f* is text, it is treated as the
+ pathname of a file to open.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, then each RR is put
+ into its own rrset. The default is ``False``.
+
+ Raises ``dns.message.UnknownHeaderField`` if a header is unknown.
+
+ Raises ``dns.exception.SyntaxError`` if the text is badly formed.
+
+    Returns a ``dns.message.Message`` object.
+ """
+
+ if isinstance(f, str):
+ cm: contextlib.AbstractContextManager = open(f, encoding="utf-8")
+ else:
+ cm = contextlib.nullcontext(f)
+ with cm as f:
+ return from_text(f, idna_codec, one_rr_per_rrset)
+ assert False # for mypy lgtm[py/unreachable-statement]
+
+
+def make_query(
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ use_edns: int | bool | None = None,
+ want_dnssec: bool = False,
+ ednsflags: int | None = None,
+ payload: int | None = None,
+ request_payload: int | None = None,
+ options: List[dns.edns.Option] | None = None,
+ idna_codec: dns.name.IDNACodec | None = None,
+ id: int | None = None,
+ flags: int = dns.flags.RD,
+ pad: int = 0,
+) -> QueryMessage:
+ """Make a query message.
+
+ The query name, type, and class may all be specified either
+ as objects of the appropriate type, or as strings.
+
+    By default, the query will have a randomly chosen query id and its DNS
+    flags will be set to ``dns.flags.RD``; both can be overridden with the
+    *id* and *flags* parameters.
+
+    *qname*, a ``dns.name.Name`` or ``str``, the query name.
+
+ *rdtype*, an ``int`` or ``str``, the desired rdata type.
+
+ *rdclass*, an ``int`` or ``str``, the desired rdata class; the default
+ is class IN.
+
+ *use_edns*, an ``int``, ``bool`` or ``None``. The EDNS level to use; the
+ default is ``None``. If ``None``, EDNS will be enabled only if other
+ parameters (*ednsflags*, *payload*, *request_payload*, or *options*) are
+ set.
+ See the description of dns.message.Message.use_edns() for the possible
+ values for use_edns and their meanings.
+
+ *want_dnssec*, a ``bool``. If ``True``, DNSSEC data is desired.
+
+ *ednsflags*, an ``int``, the EDNS flag values.
+
+ *payload*, an ``int``, is the EDNS sender's payload field, which is the
+ maximum size of UDP datagram the sender can handle. I.e. how big
+ a response to this message can be.
+
+ *request_payload*, an ``int``, is the EDNS payload size to use when
+ sending this message. If not specified, defaults to the value of
+ *payload*.
+
+ *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS
+ options.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ *id*, an ``int`` or ``None``, the desired query id. The default is
+ ``None``, which generates a random query id.
+
+ *flags*, an ``int``, the desired query flags. The default is
+ ``dns.flags.RD``.
+
+ *pad*, a non-negative ``int``. If 0, the default, do not pad; otherwise add
+ padding bytes to make the message size a multiple of *pad*. Note that if
+ padding is non-zero, an EDNS PADDING option will always be added to the
+ message.
+
+    Returns a ``dns.message.QueryMessage``.
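+
+    An illustrative sketch::
+
+        q = dns.message.make_query("www.example.", dns.rdatatype.A)
+        wire = q.to_wire()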
+ """
+
+ if isinstance(qname, str):
+ qname = dns.name.from_text(qname, idna_codec=idna_codec)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ m = QueryMessage(id=id)
+ m.flags = dns.flags.Flag(flags)
+ m.find_rrset(m.question, qname, rdclass, rdtype, create=True, force_unique=True)
+ # only pass keywords on to use_edns if they have been set to a
+ # non-None value. Setting a field will turn EDNS on if it hasn't
+ # been configured.
+ kwargs: Dict[str, Any] = {}
+ if ednsflags is not None:
+ kwargs["ednsflags"] = ednsflags
+ if payload is not None:
+ kwargs["payload"] = payload
+ if request_payload is not None:
+ kwargs["request_payload"] = request_payload
+ if options is not None:
+ kwargs["options"] = options
+ if kwargs and use_edns is None:
+ use_edns = 0
+ kwargs["edns"] = use_edns
+ kwargs["pad"] = pad
+ m.use_edns(**kwargs)
+ if want_dnssec:
+ m.want_dnssec(want_dnssec)
+ return m
+
+
+class CopyMode(enum.Enum):
+ """
+ How should sections be copied when making an update response?
+ """
+
+ NOTHING = 0
+ QUESTION = 1
+ EVERYTHING = 2
+
+
+def make_response(
+ query: Message,
+ recursion_available: bool = False,
+ our_payload: int = 8192,
+ fudge: int = 300,
+ tsig_error: int = 0,
+ pad: int | None = None,
+ copy_mode: CopyMode | None = None,
+) -> Message:
+ """Make a message which is a response for the specified query.
+ The message returned is really a response skeleton; it has all of the infrastructure
+ required of a response, but none of the content.
+
+ Response section(s) which are copied are shallow copies of the matching section(s)
+ in the query, so the query's RRsets should not be changed.
+
+ *query*, a ``dns.message.Message``, the query to respond to.
+
+ *recursion_available*, a ``bool``, should RA be set in the response?
+
+ *our_payload*, an ``int``, the payload size to advertise in EDNS responses.
+
+ *fudge*, an ``int``, the TSIG time fudge.
+
+ *tsig_error*, an ``int``, the TSIG error.
+
+    *pad*, a non-negative ``int`` or ``None``. If 0, do not pad; if a positive
+    ``int``, add padding bytes to make the message size a multiple of *pad*. Note
+    that if padding is non-zero, an EDNS PADDING option will always be added to the
+    message. If ``None`` (the default), add padding following RFC 8467, namely if
+    the request is padded, pad the response to a multiple of 468; otherwise do not
+    pad.
+
+ *copy_mode*, a ``dns.message.CopyMode`` or ``None``, determines how sections are
+ copied. The default, ``None`` copies sections according to the default for the
+ message's opcode, which is currently ``dns.message.CopyMode.QUESTION`` for all
+ opcodes. ``dns.message.CopyMode.QUESTION`` copies only the question section.
+ ``dns.message.CopyMode.EVERYTHING`` copies all sections other than OPT or TSIG
+ records, which are created appropriately if needed. ``dns.message.CopyMode.NOTHING``
+ copies no sections; note that this mode is for server testing purposes and is
+ otherwise not recommended for use. In particular, ``dns.message.is_response()``
+ will be ``False`` if you create a response this way and the rcode is not
+ ``FORMERR``, ``SERVFAIL``, ``NOTIMP``, or ``REFUSED``.
+
+ Returns a ``dns.message.Message`` object whose specific class is appropriate for the
+ query. For example, if query is a ``dns.update.UpdateMessage``, the response will
+ be one too.
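+
+    An illustrative sketch of a minimal NXDOMAIN response (``query`` is a
+    previously parsed request)::
+
+        response = dns.message.make_response(query)
+        response.set_rcode(dns.rcode.NXDOMAIN)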
+ """
+
+ if query.flags & dns.flags.QR:
+ raise dns.exception.FormError("specified query message is not a query")
+ opcode = query.opcode()
+ factory = _message_factory_from_opcode(opcode)
+ response = factory(id=query.id)
+ response.flags = dns.flags.QR | (query.flags & dns.flags.RD)
+ if recursion_available:
+ response.flags |= dns.flags.RA
+ response.set_opcode(opcode)
+ if copy_mode is None:
+ copy_mode = CopyMode.QUESTION
+ if copy_mode != CopyMode.NOTHING:
+ response.question = list(query.question)
+ if copy_mode == CopyMode.EVERYTHING:
+ response.answer = list(query.answer)
+ response.authority = list(query.authority)
+ response.additional = list(query.additional)
+ if query.edns >= 0:
+ if pad is None:
+ # Set response padding per RFC 8467
+ pad = 0
+ for option in query.options:
+ if option.otype == dns.edns.OptionType.PADDING:
+ pad = 468
+ response.use_edns(0, 0, our_payload, query.payload, pad=pad)
+ if query.had_tsig and query.keyring:
+ assert query.mac is not None
+ assert query.keyalgorithm is not None
+ response.use_tsig(
+ query.keyring,
+ query.keyname,
+ fudge,
+ None,
+ tsig_error,
+ b"",
+ query.keyalgorithm,
+ )
+ response.request_mac = query.mac
+ return response
+
+
+### BEGIN generated MessageSection constants
+
+QUESTION = MessageSection.QUESTION
+ANSWER = MessageSection.ANSWER
+AUTHORITY = MessageSection.AUTHORITY
+ADDITIONAL = MessageSection.ADDITIONAL
+
+### END generated MessageSection constants
diff --git a/tapdown/lib/python3.11/site-packages/dns/name.py b/tapdown/lib/python3.11/site-packages/dns/name.py
new file mode 100644
index 0000000..45c8f45
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/name.py
@@ -0,0 +1,1289 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Names."""
+
+import copy
+import encodings.idna # type: ignore
+import functools
+import struct
+from typing import Any, Callable, Dict, Iterable, Optional, Tuple
+
+import dns._features
+import dns.enum
+import dns.exception
+import dns.immutable
+import dns.wire
+
+# Dnspython will never access idna if the import fails, but pyright can't figure
+# that out, so...
+#
+# pyright: reportAttributeAccessIssue = false, reportPossiblyUnboundVariable = false
+
+if dns._features.have("idna"):
+ import idna # type: ignore
+
+ have_idna_2008 = True
+else: # pragma: no cover
+ have_idna_2008 = False
+
+
+CompressType = Dict["Name", int]
+
+
+class NameRelation(dns.enum.IntEnum):
+ """Name relation result from fullcompare()."""
+
+ # This is an IntEnum for backwards compatibility in case anyone
+ # has hardwired the constants.
+
+ #: The compared names have no relationship to each other.
+ NONE = 0
+    #: The first name is a superdomain of the second.
+ SUPERDOMAIN = 1
+ #: The first name is a subdomain of the second.
+ SUBDOMAIN = 2
+ #: The compared names are equal.
+ EQUAL = 3
+ #: The compared names have a common ancestor.
+ COMMONANCESTOR = 4
+
+ @classmethod
+ def _maximum(cls):
+ return cls.COMMONANCESTOR # pragma: no cover
+
+ @classmethod
+ def _short_name(cls):
+ return cls.__name__ # pragma: no cover
+
+
+# Backwards compatibility
+NAMERELN_NONE = NameRelation.NONE
+NAMERELN_SUPERDOMAIN = NameRelation.SUPERDOMAIN
+NAMERELN_SUBDOMAIN = NameRelation.SUBDOMAIN
+NAMERELN_EQUAL = NameRelation.EQUAL
+NAMERELN_COMMONANCESTOR = NameRelation.COMMONANCESTOR
+
+
+class EmptyLabel(dns.exception.SyntaxError):
+ """A DNS label is empty."""
+
+
+class BadEscape(dns.exception.SyntaxError):
+ """An escaped code in a text format of DNS name is invalid."""
+
+
+class BadPointer(dns.exception.FormError):
+ """A DNS compression pointer points forward instead of backward."""
+
+
+class BadLabelType(dns.exception.FormError):
+ """The label type in DNS name wire format is unknown."""
+
+
+class NeedAbsoluteNameOrOrigin(dns.exception.DNSException):
+ """An attempt was made to convert a non-absolute name to
+ wire when there was also a non-absolute (or missing) origin."""
+
+
+class NameTooLong(dns.exception.FormError):
+ """A DNS name is > 255 octets long."""
+
+
+class LabelTooLong(dns.exception.SyntaxError):
+ """A DNS label is > 63 octets long."""
+
+
+class AbsoluteConcatenation(dns.exception.DNSException):
+ """An attempt was made to append anything other than the
+ empty name to an absolute DNS name."""
+
+
+class NoParent(dns.exception.DNSException):
+ """An attempt was made to get the parent of the root name
+ or the empty name."""
+
+
+class NoIDNA2008(dns.exception.DNSException):
+ """IDNA 2008 processing was requested but the idna module is not
+ available."""
+
+
+class IDNAException(dns.exception.DNSException):
+ """IDNA processing raised an exception."""
+
+ supp_kwargs = {"idna_exception"}
+ fmt = "IDNA processing exception: {idna_exception}"
+
+ # We do this as otherwise mypy complains about unexpected keyword argument
+ # idna_exception
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+
+class NeedSubdomainOfOrigin(dns.exception.DNSException):
+ """An absolute name was provided that is not a subdomain of the specified origin."""
+
+
+_escaped = b'"().;\\@$'
+_escaped_text = '"().;\\@$'
+
+
+def _escapify(label: bytes | str) -> str:
+ """Escape the characters in label which need it.
+ @returns: the escaped string
+ @rtype: string"""
+ if isinstance(label, bytes):
+ # Ordinary DNS label mode. Escape special characters and values
+ # < 0x20 or > 0x7f.
+ text = ""
+ for c in label:
+ if c in _escaped:
+ text += "\\" + chr(c)
+ elif c > 0x20 and c < 0x7F:
+ text += chr(c)
+ else:
+ text += f"\\{c:03d}"
+ return text
+
+ # Unicode label mode. Escape only special characters and values < 0x20
+ text = ""
+ for uc in label:
+ if uc in _escaped_text:
+ text += "\\" + uc
+ elif uc <= "\x20":
+ text += f"\\{ord(uc):03d}"
+ else:
+ text += uc
+ return text
+
+
+class IDNACodec:
+ """Abstract base class for IDNA encoder/decoders."""
+
+ def __init__(self):
+ pass
+
+ def is_idna(self, label: bytes) -> bool:
+ return label.lower().startswith(b"xn--")
+
+ def encode(self, label: str) -> bytes:
+ raise NotImplementedError # pragma: no cover
+
+ def decode(self, label: bytes) -> str:
+ # We do not apply any IDNA policy on decode.
+ if self.is_idna(label):
+ try:
+ slabel = label[4:].decode("punycode")
+ return _escapify(slabel)
+ except Exception as e:
+ raise IDNAException(idna_exception=e)
+ else:
+ return _escapify(label)
+
+
+class IDNA2003Codec(IDNACodec):
+ """IDNA 2003 encoder/decoder."""
+
+ def __init__(self, strict_decode: bool = False):
+ """Initialize the IDNA 2003 encoder/decoder.
+
+ *strict_decode* is a ``bool``. If `True`, then IDNA2003 checking
+ is done when decoding. This can cause failures if the name
+ was encoded with IDNA2008. The default is `False`.
+ """
+
+ super().__init__()
+ self.strict_decode = strict_decode
+
+ def encode(self, label: str) -> bytes:
+ """Encode *label*."""
+
+ if label == "":
+ return b""
+ try:
+ return encodings.idna.ToASCII(label)
+ except UnicodeError:
+ raise LabelTooLong
+
+ def decode(self, label: bytes) -> str:
+ """Decode *label*."""
+ if not self.strict_decode:
+ return super().decode(label)
+ if label == b"":
+ return ""
+ try:
+ return _escapify(encodings.idna.ToUnicode(label))
+ except Exception as e:
+ raise IDNAException(idna_exception=e)
+
+
+class IDNA2008Codec(IDNACodec):
+ """IDNA 2008 encoder/decoder."""
+
+ def __init__(
+ self,
+ uts_46: bool = False,
+ transitional: bool = False,
+ allow_pure_ascii: bool = False,
+ strict_decode: bool = False,
+ ):
+ """Initialize the IDNA 2008 encoder/decoder.
+
+ *uts_46* is a ``bool``. If True, apply Unicode IDNA
+ compatibility processing as described in Unicode Technical
+ Standard #46 (https://unicode.org/reports/tr46/).
+ If False, do not apply the mapping. The default is False.
+
+ *transitional* is a ``bool``: If True, use the
+ "transitional" mode described in Unicode Technical Standard
+ #46. The default is False.
+
+ *allow_pure_ascii* is a ``bool``. If True, then a label which
+ consists of only ASCII characters is allowed. This is less
+ strict than regular IDNA 2008, but is also necessary for mixed
+        names, e.g. a name starting with "_sip._tcp." and ending
+        in an IDN suffix, which would otherwise be disallowed. The
+ default is False.
+
+ *strict_decode* is a ``bool``: If True, then IDNA2008 checking
+ is done when decoding. This can cause failures if the name
+ was encoded with IDNA2003. The default is False.
+ """
+ super().__init__()
+ self.uts_46 = uts_46
+ self.transitional = transitional
+ self.allow_pure_ascii = allow_pure_ascii
+ self.strict_decode = strict_decode
+
+ def encode(self, label: str) -> bytes:
+ if label == "":
+ return b""
+ if self.allow_pure_ascii and is_all_ascii(label):
+ encoded = label.encode("ascii")
+ if len(encoded) > 63:
+ raise LabelTooLong
+ return encoded
+ if not have_idna_2008:
+ raise NoIDNA2008
+ try:
+ if self.uts_46:
+ # pylint: disable=possibly-used-before-assignment
+ label = idna.uts46_remap(label, False, self.transitional)
+ return idna.alabel(label)
+ except idna.IDNAError as e:
+ if e.args[0] == "Label too long":
+ raise LabelTooLong
+ else:
+ raise IDNAException(idna_exception=e)
+
+ def decode(self, label: bytes) -> str:
+ if not self.strict_decode:
+ return super().decode(label)
+ if label == b"":
+ return ""
+ if not have_idna_2008:
+ raise NoIDNA2008
+ try:
+ ulabel = idna.ulabel(label)
+ if self.uts_46:
+ ulabel = idna.uts46_remap(ulabel, False, self.transitional)
+ return _escapify(ulabel)
+ except (idna.IDNAError, UnicodeError) as e:
+ raise IDNAException(idna_exception=e)
+
+
+IDNA_2003_Practical = IDNA2003Codec(False)
+IDNA_2003_Strict = IDNA2003Codec(True)
+IDNA_2003 = IDNA_2003_Practical
+IDNA_2008_Practical = IDNA2008Codec(True, False, True, False)
+IDNA_2008_UTS_46 = IDNA2008Codec(True, False, False, False)
+IDNA_2008_Strict = IDNA2008Codec(False, False, False, True)
+IDNA_2008_Transitional = IDNA2008Codec(True, True, False, False)
+IDNA_2008 = IDNA_2008_Practical
+
+
+def _validate_labels(labels: Tuple[bytes, ...]) -> None:
+ """Check for empty labels in the middle of a label sequence,
+ labels that are too long, and for too many labels.
+
+ Raises ``dns.name.NameTooLong`` if the name as a whole is too long.
+
+    Raises ``dns.name.LabelTooLong`` if a label is more than 63 octets long.
+
+    Raises ``dns.name.EmptyLabel`` if a label is empty (i.e. the root
+    label) and appears in a position other than the end of the label
+    sequence.
+
+ """
+
+ l = len(labels)
+ total = 0
+ i = -1
+ j = 0
+ for label in labels:
+ ll = len(label)
+ total += ll + 1
+ if ll > 63:
+ raise LabelTooLong
+ if i < 0 and label == b"":
+ i = j
+ j += 1
+ if total > 255:
+ raise NameTooLong
+ if i >= 0 and i != l - 1:
+ raise EmptyLabel
+
+
+def _maybe_convert_to_binary(label: bytes | str) -> bytes:
+ """If label is ``str``, convert it to ``bytes``. If it is already
+ ``bytes`` just return it.
+
+ """
+
+ if isinstance(label, bytes):
+ return label
+ if isinstance(label, str):
+ return label.encode()
+ raise ValueError # pragma: no cover
+
+
+@dns.immutable.immutable
+class Name:
+ """A DNS name.
+
+ The dns.name.Name class represents a DNS name as a tuple of
+ labels. Each label is a ``bytes`` in DNS wire format. Instances
+ of the class are immutable.
+ """
+
+ __slots__ = ["labels"]
+
+ def __init__(self, labels: Iterable[bytes | str]):
+ """*labels* is any iterable whose values are ``str`` or ``bytes``."""
+
+ blabels = [_maybe_convert_to_binary(x) for x in labels]
+ self.labels = tuple(blabels)
+ _validate_labels(self.labels)
+
+ def __copy__(self):
+ return Name(self.labels)
+
+ def __deepcopy__(self, memo):
+ return Name(copy.deepcopy(self.labels, memo))
+
+ def __getstate__(self):
+ # Names can be pickled
+ return {"labels": self.labels}
+
+ def __setstate__(self, state):
+ super().__setattr__("labels", state["labels"])
+ _validate_labels(self.labels)
+
+ def is_absolute(self) -> bool:
+ """Is the most significant label of this name the root label?
+
+ Returns a ``bool``.
+ """
+
+ return len(self.labels) > 0 and self.labels[-1] == b""
+
+ def is_wild(self) -> bool:
+ """Is this name wild? (I.e. Is the least significant label '*'?)
+
+ Returns a ``bool``.
+ """
+
+ return len(self.labels) > 0 and self.labels[0] == b"*"
+
+ def __hash__(self) -> int:
+ """Return a case-insensitive hash of the name.
+
+ Returns an ``int``.
+ """
+
+ h = 0
+ for label in self.labels:
+ for c in label.lower():
+ h += (h << 3) + c
+ return h
+
+ def fullcompare(self, other: "Name") -> Tuple[NameRelation, int, int]:
+ """Compare two names, returning a 3-tuple
+ ``(relation, order, nlabels)``.
+
+ *relation* describes the relationship between the names,
+ and is one of: ``dns.name.NameRelation.NONE``,
+ ``dns.name.NameRelation.SUPERDOMAIN``, ``dns.name.NameRelation.SUBDOMAIN``,
+ ``dns.name.NameRelation.EQUAL``, or ``dns.name.NameRelation.COMMONANCESTOR``.
+
+ *order* is < 0 if *self* < *other*, > 0 if *self* > *other*, and ==
+ 0 if *self* == *other*. A relative name is always less than an
+ absolute name. If both names have the same relativity, then
+ the DNSSEC order relation is used to order them.
+
+ *nlabels* is the number of significant labels that the two names
+ have in common.
+
+ Here are some examples. Names ending in "." are absolute names,
+ those not ending in "." are relative names.
+
+ ============= ============= =========== ===== =======
+ self other relation order nlabels
+ ============= ============= =========== ===== =======
+ www.example. www.example. equal 0 3
+ www.example. example. subdomain > 0 2
+ example. www.example. superdomain < 0 2
+ example1.com. example2.com. common anc. < 0 2
+ example1 example2. none < 0 0
+ example1. example2 none > 0 0
+ ============= ============= =========== ===== =======
+ """
+
+ sabs = self.is_absolute()
+ oabs = other.is_absolute()
+ if sabs != oabs:
+ if sabs:
+ return (NameRelation.NONE, 1, 0)
+ else:
+ return (NameRelation.NONE, -1, 0)
+ l1 = len(self.labels)
+ l2 = len(other.labels)
+ ldiff = l1 - l2
+ if ldiff < 0:
+ l = l1
+ else:
+ l = l2
+
+ order = 0
+ nlabels = 0
+ namereln = NameRelation.NONE
+ while l > 0:
+ l -= 1
+ l1 -= 1
+ l2 -= 1
+ label1 = self.labels[l1].lower()
+ label2 = other.labels[l2].lower()
+ if label1 < label2:
+ order = -1
+ if nlabels > 0:
+ namereln = NameRelation.COMMONANCESTOR
+ return (namereln, order, nlabels)
+ elif label1 > label2:
+ order = 1
+ if nlabels > 0:
+ namereln = NameRelation.COMMONANCESTOR
+ return (namereln, order, nlabels)
+ nlabels += 1
+ order = ldiff
+ if ldiff < 0:
+ namereln = NameRelation.SUPERDOMAIN
+ elif ldiff > 0:
+ namereln = NameRelation.SUBDOMAIN
+ else:
+ namereln = NameRelation.EQUAL
+ return (namereln, order, nlabels)
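+
+ # Illustrative sketch (doctest-style comment; not part of the upstream
+ # module) of fullcompare() on the second row of the table above:
+ #
+ # >>> import dns.name
+ # >>> a = dns.name.from_text("www.example.")
+ # >>> b = dns.name.from_text("example.")
+ # >>> rel, order, nlabels = a.fullcompare(b)
+ # >>> rel == dns.name.NameRelation.SUBDOMAIN, order > 0, nlabels
+ # (True, True, 2)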
+
+ def is_subdomain(self, other: "Name") -> bool:
+ """Is self a subdomain of other?
+
+ Note that the notion of subdomain includes equality, e.g.
+ "dnspython.org" is a subdomain of itself.
+
+ Returns a ``bool``.
+ """
+
+ (nr, _, _) = self.fullcompare(other)
+ if nr == NameRelation.SUBDOMAIN or nr == NameRelation.EQUAL:
+ return True
+ return False
+
+ def is_superdomain(self, other: "Name") -> bool:
+ """Is self a superdomain of other?
+
+ Note that the notion of superdomain includes equality, e.g.
+ "dnspython.org" is a superdomain of itself.
+
+ Returns a ``bool``.
+ """
+
+ (nr, _, _) = self.fullcompare(other)
+ if nr == NameRelation.SUPERDOMAIN or nr == NameRelation.EQUAL:
+ return True
+ return False
+
+ def canonicalize(self) -> "Name":
+ """Return a name which is equal to the current name, but is in
+ DNSSEC canonical form.
+ """
+
+ return Name([x.lower() for x in self.labels])
+
+ def __eq__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] == 0
+ else:
+ return False
+
+ def __ne__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] != 0
+ else:
+ return True
+
+ def __lt__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] < 0
+ else:
+ return NotImplemented
+
+ def __le__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] <= 0
+ else:
+ return NotImplemented
+
+ def __ge__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] >= 0
+ else:
+ return NotImplemented
+
+ def __gt__(self, other):
+ if isinstance(other, Name):
+ return self.fullcompare(other)[1] > 0
+ else:
+ return NotImplemented
+
+ def __repr__(self):
+ return ""
+
+ def __str__(self):
+ return self.to_text(False)
+
+ def to_text(self, omit_final_dot: bool = False) -> str:
+ """Convert name to DNS text format.
+
+ *omit_final_dot* is a ``bool``. If True, don't emit the final
+ dot (denoting the root label) for absolute names. The default
+ is False.
+
+ Returns a ``str``.
+ """
+
+ if len(self.labels) == 0:
+ return "@"
+ if len(self.labels) == 1 and self.labels[0] == b"":
+ return "."
+ if omit_final_dot and self.is_absolute():
+ l = self.labels[:-1]
+ else:
+ l = self.labels
+ s = ".".join(map(_escapify, l))
+ return s
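+
+ # Illustrative sketch (not part of the upstream module):
+ #
+ # >>> import dns.name
+ # >>> n = dns.name.from_text("www.dnspython.org.")
+ # >>> n.to_text()
+ # 'www.dnspython.org.'
+ # >>> n.to_text(omit_final_dot=True)
+ # 'www.dnspython.org'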
+
+ def to_unicode(
+ self, omit_final_dot: bool = False, idna_codec: IDNACodec | None = None
+ ) -> str:
+ """Convert name to Unicode text format.
+
+ IDN ACE labels are converted to Unicode.
+
+ *omit_final_dot* is a ``bool``. If True, don't emit the final
+ dot (denoting the root label) for absolute names. The default
+ is False.
+
+ *idna_codec* specifies the IDNA encoder/decoder. If ``None``, the
+ dns.name.IDNA_2003_Practical encoder/decoder is used.
+ The IDNA_2003_Practical decoder does not impose any policy; it just
+ decodes punycode, so if you don't want compliance checking, you can
+ use this decoder for IDNA 2008 names as well.
+
+ Returns a ``str``.
+ """
+
+ if len(self.labels) == 0:
+ return "@"
+ if len(self.labels) == 1 and self.labels[0] == b"":
+ return "."
+ if omit_final_dot and self.is_absolute():
+ l = self.labels[:-1]
+ else:
+ l = self.labels
+ if idna_codec is None:
+ idna_codec = IDNA_2003_Practical
+ return ".".join([idna_codec.decode(x) for x in l])
+
+ def to_digestable(self, origin: Optional["Name"] = None) -> bytes:
+ """Convert name to a format suitable for digesting in hashes.
+
+ The name is canonicalized and converted to uncompressed wire
+ format. All names in wire format are absolute. If the name
+ is a relative name, then an origin must be supplied.
+
+ *origin* is a ``dns.name.Name`` or ``None``. If the name is
+ relative and origin is not ``None``, then origin will be appended
+ to the name.
+
+ Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
+ relative and no origin was provided.
+
+ Returns a ``bytes``.
+ """
+
+ digest = self.to_wire(origin=origin, canonicalize=True)
+ assert digest is not None
+ return digest
+
+ def to_wire(
+ self,
+ file: Any | None = None,
+ compress: CompressType | None = None,
+ origin: Optional["Name"] = None,
+ canonicalize: bool = False,
+ ) -> bytes | None:
+ """Convert name to wire format, possibly compressing it.
+
+ *file* is the file where the name is emitted (typically an
+ io.BytesIO file). If ``None`` (the default), a ``bytes``
+ containing the wire name will be returned.
+
+ *compress*, a ``dict``, is the compression table to use. If
+ ``None`` (the default), names will not be compressed. Note that
+ the compression code assumes that compression offset 0 is the
+ start of *file*, and thus compression will not be correct
+ if this is not the case.
+
+ *origin* is a ``dns.name.Name`` or ``None``. If the name is
+ relative and origin is not ``None``, then *origin* will be appended
+ to it.
+
+ *canonicalize*, a ``bool``, indicates whether the name should
+ be canonicalized; that is, converted to a format suitable for
+ digesting in hashes.
+
+ Raises ``dns.name.NeedAbsoluteNameOrOrigin`` if the name is
+ relative and no origin was provided.
+
+ Returns a ``bytes`` or ``None``.
+ """
+
+ if file is None:
+ out = bytearray()
+ for label in self.labels:
+ out.append(len(label))
+ if canonicalize:
+ out += label.lower()
+ else:
+ out += label
+ if not self.is_absolute():
+ if origin is None or not origin.is_absolute():
+ raise NeedAbsoluteNameOrOrigin
+ for label in origin.labels:
+ out.append(len(label))
+ if canonicalize:
+ out += label.lower()
+ else:
+ out += label
+ return bytes(out)
+
+ labels: Iterable[bytes]
+ if not self.is_absolute():
+ if origin is None or not origin.is_absolute():
+ raise NeedAbsoluteNameOrOrigin
+ labels = list(self.labels)
+ labels.extend(list(origin.labels))
+ else:
+ labels = self.labels
+ i = 0
+ for label in labels:
+ n = Name(labels[i:])
+ i += 1
+ if compress is not None:
+ pos = compress.get(n)
+ else:
+ pos = None
+ if pos is not None:
+ value = 0xC000 + pos
+ s = struct.pack("!H", value)
+ file.write(s)
+ break
+ else:
+ if compress is not None and len(n) > 1:
+ pos = file.tell()
+ if pos <= 0x3FFF:
+ compress[n] = pos
+ l = len(label)
+ file.write(struct.pack("!B", l))
+ if l > 0:
+ if canonicalize:
+ file.write(label.lower())
+ else:
+ file.write(label)
+ return None
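+
+ # Illustrative sketch (not part of the upstream module): uncompressed
+ # wire format is a length byte followed by the label bytes, terminated
+ # by the zero-length root label.
+ #
+ # >>> import dns.name
+ # >>> dns.name.from_text("www.dnspython.org.").to_wire()
+ # b'\x03www\tdnspython\x03org\x00'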
+
+ def __len__(self) -> int:
+ """The length of the name (in labels).
+
+ Returns an ``int``.
+ """
+
+ return len(self.labels)
+
+ def __getitem__(self, index):
+ return self.labels[index]
+
+ def __add__(self, other):
+ return self.concatenate(other)
+
+ def __sub__(self, other):
+ return self.relativize(other)
+
+ def split(self, depth: int) -> Tuple["Name", "Name"]:
+ """Split a name into a prefix and suffix names at the specified depth.
+
+ *depth* is an ``int`` specifying the number of labels in the suffix
+
+ Raises ``ValueError`` if *depth* is not >= 0 and <= the length of the
+ name.
+
+ Returns the tuple ``(prefix, suffix)``.
+ """
+
+ l = len(self.labels)
+ if depth == 0:
+ return (self, dns.name.empty)
+ elif depth == l:
+ return (dns.name.empty, self)
+ elif depth < 0 or depth > l:
+ raise ValueError("depth must be >= 0 and <= the length of the name")
+ return (Name(self[:-depth]), Name(self[-depth:]))
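+
+ # Illustrative sketch (not part of the upstream module): *depth* counts
+ # labels in the suffix, including the root label for absolute names.
+ #
+ # >>> import dns.name
+ # >>> prefix, suffix = dns.name.from_text("foo.bar.example.").split(2)
+ # >>> str(prefix), str(suffix)
+ # ('foo.bar', 'example.')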
+
+ def concatenate(self, other: "Name") -> "Name":
+ """Return a new name which is the concatenation of self and other.
+
+ Raises ``dns.name.AbsoluteConcatenation`` if the name is
+ absolute and *other* is not the empty name.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if self.is_absolute() and len(other) > 0:
+ raise AbsoluteConcatenation
+ labels = list(self.labels)
+ labels.extend(list(other.labels))
+ return Name(labels)
+
+ def relativize(self, origin: "Name") -> "Name":
+ """If the name is a subdomain of *origin*, return a new name which is
+ the name relative to origin. Otherwise return the name.
+
+ For example, relativizing ``www.dnspython.org.`` to origin
+ ``dnspython.org.`` returns the name ``www``. Relativizing ``example.``
+ to origin ``dnspython.org.`` returns ``example.``.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if origin is not None and self.is_subdomain(origin):
+ return Name(self[: -len(origin)])
+ else:
+ return self
+
+ def derelativize(self, origin: "Name") -> "Name":
+ """If the name is a relative name, return a new name which is the
+ concatenation of the name and origin. Otherwise return the name.
+
+ For example, derelativizing ``www`` to origin ``dnspython.org.``
+ returns the name ``www.dnspython.org.``. Derelativizing ``example.``
+ to origin ``dnspython.org.`` returns ``example.``.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if not self.is_absolute():
+ return self.concatenate(origin)
+ else:
+ return self
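+
+ # Illustrative sketch (not part of the upstream module):
+ #
+ # >>> import dns.name
+ # >>> origin = dns.name.from_text("dnspython.org.")
+ # >>> www = dns.name.from_text("www.dnspython.org.")
+ # >>> str(www.relativize(origin))
+ # 'www'
+ # >>> str(dns.name.from_text("www", origin=None).derelativize(origin))
+ # 'www.dnspython.org.'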
+
+ def choose_relativity(
+ self, origin: Optional["Name"] = None, relativize: bool = True
+ ) -> "Name":
+ """Return a name with the relativity desired by the caller.
+
+ If *origin* is ``None``, then the name is returned.
+ Otherwise, if *relativize* is ``True`` the name is
+ relativized, and if *relativize* is ``False`` the name is
+ derelativized.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if origin:
+ if relativize:
+ return self.relativize(origin)
+ else:
+ return self.derelativize(origin)
+ else:
+ return self
+
+ def parent(self) -> "Name":
+ """Return the parent of the name.
+
+ For example, the parent of ``www.dnspython.org.`` is ``dnspython.org``.
+
+ Raises ``dns.name.NoParent`` if the name is either the root name or the
+ empty name, and thus has no parent.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if self == root or self == empty:
+ raise NoParent
+ return Name(self.labels[1:])
+
+ def predecessor(self, origin: "Name", prefix_ok: bool = True) -> "Name":
+ """Return the maximal predecessor of *name* in the DNSSEC ordering in the zone
+ whose origin is *origin*, or return the longest name under *origin* if the
+ name is origin (i.e. wrap around to the longest name, which may still be
+ *origin* due to length considerations.
+
+ The relativity of the name is preserved, so if this name is relative
+ then the method will return a relative name, and likewise if this name
+ is absolute then the predecessor will be absolute.
+
+ *prefix_ok* indicates if prefixing labels is allowed, and
+ defaults to ``True``. Normally it is good to allow this, but if computing
+ a maximal predecessor at a zone cut point then ``False`` must be specified.
+ """
+ return _handle_relativity_and_call(
+ _absolute_predecessor, self, origin, prefix_ok
+ )
+
+ def successor(self, origin: "Name", prefix_ok: bool = True) -> "Name":
+ """Return the minimal successor of *name* in the DNSSEC ordering in the zone
+ whose origin is *origin*, or return *origin* if the successor cannot be
+ computed due to name length limitations.
+
+ Note that *origin* is returned in the "too long" cases because wrapping
+ around to the origin is how NSEC records express "end of the zone".
+
+ The relativity of the name is preserved, so if this name is relative
+ then the method will return a relative name, and likewise if this name
+ is absolute then the successor will be absolute.
+
+ *prefix_ok* indicates if prefixing a new minimal label is allowed, and
+ defaults to ``True``. Normally it is good to allow this, but if computing
+ a minimal successor at a zone cut point then ``False`` must be specified.
+ """
+ return _handle_relativity_and_call(_absolute_successor, self, origin, prefix_ok)
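+
+ # Illustrative sketch (not part of the upstream module): the minimal
+ # successor normally prefixes a \000 label, and wrapping past the last
+ # possible name returns the origin.
+ #
+ # >>> import dns.name
+ # >>> origin = dns.name.from_text("example.")
+ # >>> str(dns.name.from_text("foo.example.").successor(origin))
+ # '\\000.foo.example.'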
+
+
+#: The root name, '.'
+root = Name([b""])
+
+#: The empty name.
+empty = Name([])
+
+
+def from_unicode(
+ text: str, origin: Name | None = root, idna_codec: IDNACodec | None = None
+) -> Name:
+ """Convert unicode text into a Name object.
+
+ Labels are encoded in IDN ACE form according to rules specified by
+ the IDNA codec.
+
+ *text*, a ``str``, is the text to convert into a name.
+
+ *origin*, a ``dns.name.Name``, specifies the origin to
+ append to non-absolute names. The default is the root name.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if not isinstance(text, str):
+ raise ValueError("input to from_unicode() must be a unicode string")
+ if not (origin is None or isinstance(origin, Name)):
+ raise ValueError("origin must be a Name or None")
+ labels = []
+ label = ""
+ escaping = False
+ edigits = 0
+ total = 0
+ if idna_codec is None:
+ idna_codec = IDNA_2003
+ if text == "@":
+ text = ""
+ if text:
+ if text in [".", "\u3002", "\uff0e", "\uff61"]:
+ return Name([b""]) # no Unicode "u" on this constant!
+ for c in text:
+ if escaping:
+ if edigits == 0:
+ if c.isdigit():
+ total = int(c)
+ edigits += 1
+ else:
+ label += c
+ escaping = False
+ else:
+ if not c.isdigit():
+ raise BadEscape
+ total *= 10
+ total += int(c)
+ edigits += 1
+ if edigits == 3:
+ escaping = False
+ label += chr(total)
+ elif c in [".", "\u3002", "\uff0e", "\uff61"]:
+ if len(label) == 0:
+ raise EmptyLabel
+ labels.append(idna_codec.encode(label))
+ label = ""
+ elif c == "\\":
+ escaping = True
+ edigits = 0
+ total = 0
+ else:
+ label += c
+ if escaping:
+ raise BadEscape
+ if len(label) > 0:
+ labels.append(idna_codec.encode(label))
+ else:
+ labels.append(b"")
+
+ if (len(labels) == 0 or labels[-1] != b"") and origin is not None:
+ labels.extend(list(origin.labels))
+ return Name(labels)
+
+
+def is_all_ascii(text: str) -> bool:
+ for c in text:
+ if ord(c) > 0x7F:
+ return False
+ return True
+
+
+def from_text(
+ text: bytes | str,
+ origin: Name | None = root,
+ idna_codec: IDNACodec | None = None,
+) -> Name:
+ """Convert text into a Name object.
+
+ *text*, a ``bytes`` or ``str``, is the text to convert into a name.
+
+ *origin*, a ``dns.name.Name``, specifies the origin to
+ append to non-absolute names. The default is the root name.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if isinstance(text, str):
+ if not is_all_ascii(text):
+ # Some codepoint in the input text is > 127, so IDNA applies.
+ return from_unicode(text, origin, idna_codec)
+ # The input is all ASCII, so treat this like an ordinary non-IDNA
+ # domain name. Note that "all ASCII" is about the input text,
+ # not the codepoints in the domain name. E.g. if text has value
+ #
+ # r'\150\151\152\153\154\155\156\157\158\159'
+ #
+ # then it's still "all ASCII" even though the domain name has
+ # codepoints > 127.
+ text = text.encode("ascii")
+ if not isinstance(text, bytes):
+ raise ValueError("input to from_text() must be a string")
+ if not (origin is None or isinstance(origin, Name)):
+ raise ValueError("origin must be a Name or None")
+ labels = []
+ label = b""
+ escaping = False
+ edigits = 0
+ total = 0
+ if text == b"@":
+ text = b""
+ if text:
+ if text == b".":
+ return Name([b""])
+ for c in text:
+ byte_ = struct.pack("!B", c)
+ if escaping:
+ if edigits == 0:
+ if byte_.isdigit():
+ total = int(byte_)
+ edigits += 1
+ else:
+ label += byte_
+ escaping = False
+ else:
+ if not byte_.isdigit():
+ raise BadEscape
+ total *= 10
+ total += int(byte_)
+ edigits += 1
+ if edigits == 3:
+ escaping = False
+ label += struct.pack("!B", total)
+ elif byte_ == b".":
+ if len(label) == 0:
+ raise EmptyLabel
+ labels.append(label)
+ label = b""
+ elif byte_ == b"\\":
+ escaping = True
+ edigits = 0
+ total = 0
+ else:
+ label += byte_
+ if escaping:
+ raise BadEscape
+ if len(label) > 0:
+ labels.append(label)
+ else:
+ labels.append(b"")
+ if (len(labels) == 0 or labels[-1] != b"") and origin is not None:
+ labels.extend(list(origin.labels))
+ return Name(labels)
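+
+# Illustrative sketch (not part of the upstream module) of the escaping
+# rules: "\." protects a dot inside a label, and "\DDD" is a decimal byte.
+#
+# >>> import dns.name
+# >>> dns.name.from_text(r"a\.b.example.").labels[0]
+# b'a.b'
+# >>> dns.name.from_text(r"\065.example.").labels[0]
+# b'A'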
+
+
+# We need 'dns.wire.Parser' quoted, as dns.name and dns.wire depend on each other.
+
+
+def from_wire_parser(parser: "dns.wire.Parser") -> Name:
+ """Convert possibly compressed wire format into a Name.
+
+ *parser* is a dns.wire.Parser.
+
+ Raises ``dns.name.BadPointer`` if a compression pointer did not
+ point backwards in the message.
+
+ Raises ``dns.name.BadLabelType`` if an invalid label type was encountered.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ labels = []
+ biggest_pointer = parser.current
+ with parser.restore_furthest():
+ count = parser.get_uint8()
+ while count != 0:
+ if count < 64:
+ labels.append(parser.get_bytes(count))
+ elif count >= 192:
+ current = (count & 0x3F) * 256 + parser.get_uint8()
+ if current >= biggest_pointer:
+ raise BadPointer
+ biggest_pointer = current
+ parser.seek(current)
+ else:
+ raise BadLabelType
+ count = parser.get_uint8()
+ labels.append(b"")
+ return Name(labels)
+
+
+def from_wire(message: bytes, current: int) -> Tuple[Name, int]:
+ """Convert possibly compressed wire format into a Name.
+
+ *message* is a ``bytes`` containing an entire DNS message in DNS
+ wire form.
+
+ *current*, an ``int``, is the offset of the beginning of the name
+ from the start of the message.
+
+ Raises ``dns.name.BadPointer`` if a compression pointer did not
+ point backwards in the message.
+
+ Raises ``dns.name.BadLabelType`` if an invalid label type was encountered.
+
+ Returns a ``(dns.name.Name, int)`` tuple consisting of the name
+ that was read and the number of bytes of the wire format message
+ which were consumed reading it.
+ """
+
+ if not isinstance(message, bytes):
+ raise ValueError("input to from_wire() must be a byte string")
+ parser = dns.wire.Parser(message, current)
+ name = from_wire_parser(parser)
+ return (name, parser.current - current)
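+
+# Illustrative sketch (not part of the upstream module):
+#
+# >>> import dns.name
+# >>> name, used = dns.name.from_wire(b"\x03www\x07example\x03com\x00", 0)
+# >>> str(name), used
+# ('www.example.com.', 17)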
+
+
+# RFC 4471 Support
+
+_MINIMAL_OCTET = b"\x00"
+_MINIMAL_OCTET_VALUE = ord(_MINIMAL_OCTET)
+_SUCCESSOR_PREFIX = Name([_MINIMAL_OCTET])
+_MAXIMAL_OCTET = b"\xff"
+_MAXIMAL_OCTET_VALUE = ord(_MAXIMAL_OCTET)
+_AT_SIGN_VALUE = ord("@")
+_LEFT_SQUARE_BRACKET_VALUE = ord("[")
+
+
+def _wire_length(labels):
+ return functools.reduce(lambda v, x: v + len(x) + 1, labels, 0)
+
+
+def _pad_to_max_name(name):
+ needed = 255 - _wire_length(name.labels)
+ new_labels = []
+ while needed > 64:
+ new_labels.append(_MAXIMAL_OCTET * 63)
+ needed -= 64
+ if needed >= 2:
+ new_labels.append(_MAXIMAL_OCTET * (needed - 1))
+ # Note that we're already maximal in the needed == 1 case: while we'd
+ # like to add one more byte as a new label, we can't, because a new
+ # non-empty label requires at least 2 bytes (a length byte plus content).
+ new_labels = list(reversed(new_labels))
+ new_labels.extend(name.labels)
+ return Name(new_labels)
+
+
+def _pad_to_max_label(label, suffix_labels):
+ length = len(label)
+ # We have to subtract one here to account for the length byte of label.
+ remaining = 255 - _wire_length(suffix_labels) - length - 1
+ if remaining <= 0:
+ # Shouldn't happen!
+ return label
+ needed = min(63 - length, remaining)
+ return label + _MAXIMAL_OCTET * needed
+
+
+def _absolute_predecessor(name: Name, origin: Name, prefix_ok: bool) -> Name:
+ # This is the RFC 4471 predecessor algorithm using the "absolute method" of section
+ # 3.1.1.
+ #
+ # Our caller must ensure that the name and origin are absolute, and that name is a
+ # subdomain of origin.
+ if name == origin:
+ return _pad_to_max_name(name)
+ least_significant_label = name[0]
+ if least_significant_label == _MINIMAL_OCTET:
+ return name.parent()
+ least_octet = least_significant_label[-1]
+ suffix_labels = name.labels[1:]
+ if least_octet == _MINIMAL_OCTET_VALUE:
+ new_labels = [least_significant_label[:-1]]
+ else:
+ octets = bytearray(least_significant_label)
+ octet = octets[-1]
+ if octet == _LEFT_SQUARE_BRACKET_VALUE:
+ octet = _AT_SIGN_VALUE
+ else:
+ octet -= 1
+ octets[-1] = octet
+ least_significant_label = bytes(octets)
+ new_labels = [_pad_to_max_label(least_significant_label, suffix_labels)]
+ new_labels.extend(suffix_labels)
+ name = Name(new_labels)
+ if prefix_ok:
+ return _pad_to_max_name(name)
+ else:
+ return name
+
+
+def _absolute_successor(name: Name, origin: Name, prefix_ok: bool) -> Name:
+ # This is the RFC 4471 successor algorithm using the "absolute method" of section
+ # 3.1.2.
+ #
+ # Our caller must ensure that the name and origin are absolute, and that name is a
+ # subdomain of origin.
+ if prefix_ok:
+ # Try prefixing \000 as new label
+ try:
+ return _SUCCESSOR_PREFIX.concatenate(name)
+ except NameTooLong:
+ pass
+ while name != origin:
+ # Try extending the least significant label.
+ least_significant_label = name[0]
+ if len(least_significant_label) < 63:
+ # We may be able to extend the least label with a minimal additional byte.
+ # This is only "may" because we could have a maximal length name even though
+ # the least significant label isn't maximally long.
+ new_labels = [least_significant_label + _MINIMAL_OCTET]
+ new_labels.extend(name.labels[1:])
+ try:
+ return dns.name.Name(new_labels)
+ except dns.name.NameTooLong:
+ pass
+ # We can't extend the label either, so we'll try to increment the least
+ # significant non-maximal byte in it.
+ octets = bytearray(least_significant_label)
+ # We do this reversed iteration with an explicit indexing variable because
+ # if we find something to increment, we're going to want to truncate everything
+ # to the right of it.
+ for i in range(len(octets) - 1, -1, -1):
+ octet = octets[i]
+ if octet == _MAXIMAL_OCTET_VALUE:
+ # We can't increment this, so keep looking.
+ continue
+ # Finally, something we can increment. We have to apply a special rule for
+ # incrementing "@", sending it to "[", because RFC 4034 6.1 says that when
+ # comparing names, uppercase letters compare as if they were their
+ # lower-case equivalents. If we increment "@" to "A", then it would compare
+ # as "a", which is after "[", "\", "]", "^", "_", and "`", so we would have
+ # skipped the most minimal successor, namely "[".
+ if octet == _AT_SIGN_VALUE:
+ octet = _LEFT_SQUARE_BRACKET_VALUE
+ else:
+ octet += 1
+ octets[i] = octet
+ # We can now truncate all of the maximal values we skipped (if any)
+ new_labels = [bytes(octets[: i + 1])]
+ new_labels.extend(name.labels[1:])
+ # We haven't changed the length of the name, so the Name constructor will
+ # always work.
+ return Name(new_labels)
+ # We couldn't increment, so chop off the least significant label and try
+ # again.
+ name = name.parent()
+
+ # We couldn't increment at all, so return the origin, as wrapping around is the
+ # DNSSEC way.
+ return origin
+
+
+def _handle_relativity_and_call(
+ function: Callable[[Name, Name, bool], Name],
+ name: Name,
+ origin: Name,
+ prefix_ok: bool,
+) -> Name:
+ # Make "name" absolute if needed, ensure that the origin is absolute,
+ # call function(), and then relativize the result if needed.
+ if not origin.is_absolute():
+ raise NeedAbsoluteNameOrOrigin
+ relative = not name.is_absolute()
+ if relative:
+ name = name.derelativize(origin)
+ elif not name.is_subdomain(origin):
+ raise NeedSubdomainOfOrigin
+ result_name = function(name, origin, prefix_ok)
+ if relative:
+ result_name = result_name.relativize(origin)
+ return result_name
diff --git a/tapdown/lib/python3.11/site-packages/dns/namedict.py b/tapdown/lib/python3.11/site-packages/dns/namedict.py
new file mode 100644
index 0000000..ca8b197
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/namedict.py
@@ -0,0 +1,109 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+# Copyright (C) 2016 Coresec Systems AB
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND CORESEC SYSTEMS AB DISCLAIMS ALL
+# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL CORESEC
+# SYSTEMS AB BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS name dictionary"""
+
+# pylint seems to be confused about this one!
+from collections.abc import MutableMapping # pylint: disable=no-name-in-module
+
+import dns.name
+
+
+class NameDict(MutableMapping):
+ """A dictionary whose keys are dns.name.Name objects.
+
+ In addition to being like a regular Python dictionary, this
+ dictionary can also get the deepest match for a given key.
+ """
+
+ __slots__ = ["max_depth", "max_depth_items", "__store"]
+
+ def __init__(self, *args, **kwargs):
+ super().__init__()
+ self.__store = dict()
+ #: the maximum depth of the keys that have ever been added
+ self.max_depth = 0
+ #: the number of items of maximum depth
+ self.max_depth_items = 0
+ self.update(dict(*args, **kwargs))
+
+ def __update_max_depth(self, key):
+ if len(key) == self.max_depth:
+ self.max_depth_items = self.max_depth_items + 1
+ elif len(key) > self.max_depth:
+ self.max_depth = len(key)
+ self.max_depth_items = 1
+
+ def __getitem__(self, key):
+ return self.__store[key]
+
+ def __setitem__(self, key, value):
+ if not isinstance(key, dns.name.Name):
+ raise ValueError("NameDict key must be a name")
+ self.__store[key] = value
+ self.__update_max_depth(key)
+
+ def __delitem__(self, key):
+ self.__store.pop(key)
+ if len(key) == self.max_depth:
+ self.max_depth_items = self.max_depth_items - 1
+ if self.max_depth_items == 0:
+ self.max_depth = 0
+ for k in self.__store:
+ self.__update_max_depth(k)
+
+ def __iter__(self):
+ return iter(self.__store)
+
+ def __len__(self):
+ return len(self.__store)
+
+ def has_key(self, key):
+ return key in self.__store
+
+ def get_deepest_match(self, name):
+ """Find the deepest match to *name* in the dictionary.
+
+ The deepest match is the longest name in the dictionary which is
+ a superdomain of *name*. Note that *superdomain* includes matching
+ *name* itself.
+
+ *name*, a ``dns.name.Name``, the name to find.
+
+ Returns a ``(key, value)`` where *key* is the deepest
+ ``dns.name.Name``, and *value* is the value associated with *key*.
+ """
+
+ depth = len(name)
+ if depth > self.max_depth:
+ depth = self.max_depth
+ for i in range(-depth, 0):
+ n = dns.name.Name(name[i:])
+ if n in self:
+ return (n, self[n])
+ v = self[dns.name.empty]
+ return (dns.name.empty, v)
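+
+ # Illustrative sketch (not part of the upstream module):
+ #
+ # >>> import dns.name, dns.namedict
+ # >>> d = dns.namedict.NameDict()
+ # >>> d[dns.name.from_text("example.")] = "zone data"
+ # >>> key, value = d.get_deepest_match(dns.name.from_text("www.example."))
+ # >>> str(key), value
+ # ('example.', 'zone data')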
diff --git a/tapdown/lib/python3.11/site-packages/dns/nameserver.py b/tapdown/lib/python3.11/site-packages/dns/nameserver.py
new file mode 100644
index 0000000..c9307d3
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/nameserver.py
@@ -0,0 +1,361 @@
+from urllib.parse import urlparse
+
+import dns.asyncbackend
+import dns.asyncquery
+import dns.message
+import dns.query
+
+
+class Nameserver:
+ def __init__(self):
+ pass
+
+ def __str__(self):
+ raise NotImplementedError
+
+ def kind(self) -> str:
+ raise NotImplementedError
+
+ def is_always_max_size(self) -> bool:
+ raise NotImplementedError
+
+ def answer_nameserver(self) -> str:
+ raise NotImplementedError
+
+ def answer_port(self) -> int:
+ raise NotImplementedError
+
+ def query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ raise NotImplementedError
+
+ async def async_query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool,
+ backend: dns.asyncbackend.Backend,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ raise NotImplementedError
+
+
+class AddressAndPortNameserver(Nameserver):
+ def __init__(self, address: str, port: int):
+ super().__init__()
+ self.address = address
+ self.port = port
+
+ def kind(self) -> str:
+ raise NotImplementedError
+
+ def is_always_max_size(self) -> bool:
+ return False
+
+ def __str__(self):
+ ns_kind = self.kind()
+ return f"{ns_kind}:{self.address}@{self.port}"
+
+ def answer_nameserver(self) -> str:
+ return self.address
+
+ def answer_port(self) -> int:
+ return self.port
+
+
+class Do53Nameserver(AddressAndPortNameserver):
+ def __init__(self, address: str, port: int = 53):
+ super().__init__(address, port)
+
+ def kind(self):
+ return "Do53"
+
+ def query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ if max_size:
+ response = dns.query.tcp(
+ request,
+ self.address,
+ timeout=timeout,
+ port=self.port,
+ source=source,
+ source_port=source_port,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ else:
+ response = dns.query.udp(
+ request,
+ self.address,
+ timeout=timeout,
+ port=self.port,
+ source=source,
+ source_port=source_port,
+ raise_on_truncation=True,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ ignore_errors=True,
+ ignore_unexpected=True,
+ )
+ return response
+
+ async def async_query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool,
+ backend: dns.asyncbackend.Backend,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ if max_size:
+ response = await dns.asyncquery.tcp(
+ request,
+ self.address,
+ timeout=timeout,
+ port=self.port,
+ source=source,
+ source_port=source_port,
+ backend=backend,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ else:
+ response = await dns.asyncquery.udp(
+ request,
+ self.address,
+ timeout=timeout,
+ port=self.port,
+ source=source,
+ source_port=source_port,
+ raise_on_truncation=True,
+ backend=backend,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ ignore_errors=True,
+ ignore_unexpected=True,
+ )
+ return response
+
+
+class DoHNameserver(Nameserver):
+ def __init__(
+ self,
+ url: str,
+ bootstrap_address: str | None = None,
+ verify: bool | str = True,
+ want_get: bool = False,
+ http_version: dns.query.HTTPVersion = dns.query.HTTPVersion.DEFAULT,
+ ):
+ super().__init__()
+ self.url = url
+ self.bootstrap_address = bootstrap_address
+ self.verify = verify
+ self.want_get = want_get
+ self.http_version = http_version
+
+ def kind(self):
+ return "DoH"
+
+ def is_always_max_size(self) -> bool:
+ return True
+
+ def __str__(self):
+ return self.url
+
+ def answer_nameserver(self) -> str:
+ return self.url
+
+ def answer_port(self) -> int:
+ port = urlparse(self.url).port
+ if port is None:
+ port = 443
+ return port
+
+ def query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ return dns.query.https(
+ request,
+ self.url,
+ timeout=timeout,
+ source=source,
+ source_port=source_port,
+ bootstrap_address=self.bootstrap_address,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ verify=self.verify,
+ post=(not self.want_get),
+ http_version=self.http_version,
+ )
+
+ async def async_query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool,
+ backend: dns.asyncbackend.Backend,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ return await dns.asyncquery.https(
+ request,
+ self.url,
+ timeout=timeout,
+ source=source,
+ source_port=source_port,
+ bootstrap_address=self.bootstrap_address,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ verify=self.verify,
+ post=(not self.want_get),
+ http_version=self.http_version,
+ )
+
+
+class DoTNameserver(AddressAndPortNameserver):
+ def __init__(
+ self,
+ address: str,
+ port: int = 853,
+ hostname: str | None = None,
+ verify: bool | str = True,
+ ):
+ super().__init__(address, port)
+ self.hostname = hostname
+ self.verify = verify
+
+ def kind(self):
+ return "DoT"
+
+ def query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ return dns.query.tls(
+ request,
+ self.address,
+ port=self.port,
+ timeout=timeout,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ server_hostname=self.hostname,
+ verify=self.verify,
+ )
+
+ async def async_query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool,
+ backend: dns.asyncbackend.Backend,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ return await dns.asyncquery.tls(
+ request,
+ self.address,
+ port=self.port,
+ timeout=timeout,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ server_hostname=self.hostname,
+ verify=self.verify,
+ )
+
+
+class DoQNameserver(AddressAndPortNameserver):
+ def __init__(
+ self,
+ address: str,
+ port: int = 853,
+ verify: bool | str = True,
+ server_hostname: str | None = None,
+ ):
+ super().__init__(address, port)
+ self.verify = verify
+ self.server_hostname = server_hostname
+
+ def kind(self):
+ return "DoQ"
+
+ def query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ return dns.query.quic(
+ request,
+ self.address,
+ port=self.port,
+ timeout=timeout,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ verify=self.verify,
+ server_hostname=self.server_hostname,
+ )
+
+ async def async_query(
+ self,
+ request: dns.message.QueryMessage,
+ timeout: float,
+ source: str | None,
+ source_port: int,
+ max_size: bool,
+ backend: dns.asyncbackend.Backend,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ ) -> dns.message.Message:
+ return await dns.asyncquery.quic(
+ request,
+ self.address,
+ port=self.port,
+ timeout=timeout,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ verify=self.verify,
+ server_hostname=self.server_hostname,
+ )
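+
+
+# Illustrative sketch (not part of the upstream module): each Nameserver
+# subclass differs mainly in transport; a resolver can mix kinds freely.
+#
+# >>> import dns.nameserver
+# >>> str(dns.nameserver.Do53Nameserver("8.8.8.8"))
+# 'Do53:8.8.8.8@53'
+# >>> dns.nameserver.DoHNameserver("https://dns.google/dns-query").answer_port()
+# 443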
diff --git a/tapdown/lib/python3.11/site-packages/dns/node.py b/tapdown/lib/python3.11/site-packages/dns/node.py
new file mode 100644
index 0000000..b2cbf1b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/node.py
@@ -0,0 +1,358 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS nodes. A node is a set of rdatasets."""
+
+import enum
+import io
+from typing import Any, Dict
+
+import dns.immutable
+import dns.name
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.rrset
+
+_cname_types = {
+ dns.rdatatype.CNAME,
+}
+
+# "neutral" types can coexist with a CNAME and thus are not "other data"
+_neutral_types = {
+ dns.rdatatype.NSEC, # RFC 4035 section 2.5
+ dns.rdatatype.NSEC3, # This is not likely to happen, but not impossible!
+ dns.rdatatype.KEY, # RFC 4035 section 2.5, RFC 3007
+}
+
+
+def _matches_type_or_its_signature(rdtypes, rdtype, covers):
+ return rdtype in rdtypes or (rdtype == dns.rdatatype.RRSIG and covers in rdtypes)
+
+
+@enum.unique
+class NodeKind(enum.Enum):
+ """Rdatasets in nodes"""
+
+ REGULAR = 0 # a.k.a "other data"
+ NEUTRAL = 1
+ CNAME = 2
+
+ @classmethod
+ def classify(
+ cls, rdtype: dns.rdatatype.RdataType, covers: dns.rdatatype.RdataType
+ ) -> "NodeKind":
+ if _matches_type_or_its_signature(_cname_types, rdtype, covers):
+ return NodeKind.CNAME
+ elif _matches_type_or_its_signature(_neutral_types, rdtype, covers):
+ return NodeKind.NEUTRAL
+ else:
+ return NodeKind.REGULAR
+
+ @classmethod
+ def classify_rdataset(cls, rdataset: dns.rdataset.Rdataset) -> "NodeKind":
+ return cls.classify(rdataset.rdtype, rdataset.covers)
+
+
+class Node:
+ """A Node is a set of rdatasets.
+
+ A node is either a CNAME node or an "other data" node. A CNAME
+ node contains only CNAME, KEY, NSEC, and NSEC3 rdatasets along with their
+ covering RRSIG rdatasets. An "other data" node contains any
+ rdataset other than a CNAME or RRSIG(CNAME) rdataset. When
+ changes are made to a node, the CNAME or "other data" state is
+ always consistent with the update, i.e. the most recent change
+ wins. For example, if you have a node which contains a CNAME
+ rdataset, and then add an MX rdataset to it, then the CNAME
+ rdataset will be deleted. Likewise if you have a node containing
+ an MX rdataset and add a CNAME rdataset, the MX rdataset will be
+ deleted.
+ """
+
+ __slots__ = ["rdatasets"]
+
+ def __init__(self):
+ # the set of rdatasets, represented as a list.
+ self.rdatasets = []
+
+ def to_text(self, name: dns.name.Name, **kw: Dict[str, Any]) -> str:
+ """Convert a node to text format.
+
+ Each rdataset at the node is printed. Any keyword arguments
+ to this method are passed on to the rdataset's to_text() method.
+
+ *name*, a ``dns.name.Name``, the owner name of the
+ rdatasets.
+
+ Returns a ``str``.
+
+ """
+
+ s = io.StringIO()
+ for rds in self.rdatasets:
+ if len(rds) > 0:
+ s.write(rds.to_text(name, **kw)) # type: ignore[arg-type]
+ s.write("\n")
+ return s.getvalue()[:-1]
+
+ def __repr__(self):
+ return ""
+
+ def __eq__(self, other):
+ #
+ # This is inefficient. Good thing we don't need to do it much.
+ #
+ for rd in self.rdatasets:
+ if rd not in other.rdatasets:
+ return False
+ for rd in other.rdatasets:
+ if rd not in self.rdatasets:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __len__(self):
+ return len(self.rdatasets)
+
+ def __iter__(self):
+ return iter(self.rdatasets)
+
+ def _append_rdataset(self, rdataset):
+ """Append rdataset to the node with special handling for CNAME and
+ other data conditions.
+
+ Specifically, if the rdataset being appended has ``NodeKind.CNAME``,
+ then all rdatasets other than KEY, NSEC, NSEC3, and their covering
+ RRSIGs are deleted. If the rdataset being appended has
+ ``NodeKind.REGULAR`` then CNAME and RRSIG(CNAME) are deleted.
+ """
+ # Make having just one rdataset at the node fast.
+ if len(self.rdatasets) > 0:
+ kind = NodeKind.classify_rdataset(rdataset)
+ if kind == NodeKind.CNAME:
+ self.rdatasets = [
+ rds
+ for rds in self.rdatasets
+ if NodeKind.classify_rdataset(rds) != NodeKind.REGULAR
+ ]
+ elif kind == NodeKind.REGULAR:
+ self.rdatasets = [
+ rds
+ for rds in self.rdatasets
+ if NodeKind.classify_rdataset(rds) != NodeKind.CNAME
+ ]
+ # Otherwise the rdataset is NodeKind.NEUTRAL and we do not need to
+ # edit self.rdatasets.
+ self.rdatasets.append(rdataset)
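+
+ # Illustrative sketch (not part of the upstream module) of the CNAME
+ # displacement rule described above:
+ #
+ # >>> import dns.node, dns.rdataclass, dns.rdatatype
+ # >>> node = dns.node.Node()
+ # >>> _ = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.MX, create=True)
+ # >>> _ = node.find_rdataset(dns.rdataclass.IN, dns.rdatatype.CNAME, create=True)
+ # >>> len(node)  # adding the CNAME deleted the regular MX rdataset
+ # 1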
+
+ def find_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset:
+ """Find an rdataset matching the specified properties in the
+ current node.
+
+ *rdclass*, a ``dns.rdataclass.RdataClass``, the class of the rdataset.
+
+ *rdtype*, a ``dns.rdatatype.RdataType``, the type of the rdataset.
+
+ *covers*, a ``dns.rdatatype.RdataType``, the covered type.
+ Usually this value is ``dns.rdatatype.NONE``, but if the
+ rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
+ then the covers value will be the rdata type the SIG/RRSIG
+ covers. The library treats the SIG and RRSIG types as if they
+ were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
+ This makes RRSIGs much easier to work with than if RRSIGs
+ covering different rdata types were aggregated into a single
+ RRSIG rdataset.
+
+ *create*, a ``bool``. If True, create the rdataset if it is not found.
+
+ Raises ``KeyError`` if an rdataset of the desired type and class does
+ not exist and *create* is not ``True``.
+
+ Returns a ``dns.rdataset.Rdataset``.
+ """
+
+ for rds in self.rdatasets:
+ if rds.match(rdclass, rdtype, covers):
+ return rds
+ if not create:
+ raise KeyError
+ rds = dns.rdataset.Rdataset(rdclass, rdtype, covers)
+ self._append_rdataset(rds)
+ return rds
+
+ def get_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset | None:
+ """Get an rdataset matching the specified properties in the
+ current node.
+
+ None is returned if an rdataset of the specified type and
+ class does not exist and *create* is not ``True``.
+
+ *rdclass*, an ``int``, the class of the rdataset.
+
+ *rdtype*, an ``int``, the type of the rdataset.
+
+ *covers*, an ``int``, the covered type. Usually this value is
+ dns.rdatatype.NONE, but if the rdtype is dns.rdatatype.SIG or
+ dns.rdatatype.RRSIG, then the covers value will be the rdata
+ type the SIG/RRSIG covers. The library treats the SIG and RRSIG
+ types as if they were a family of
+ types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This makes RRSIGs much
+ easier to work with than if RRSIGs covering different rdata
+ types were aggregated into a single RRSIG rdataset.
+
+ *create*, a ``bool``. If True, create the rdataset if it is not found.
+
+ Returns a ``dns.rdataset.Rdataset`` or ``None``.
+ """
+
+ try:
+ rds = self.find_rdataset(rdclass, rdtype, covers, create)
+ except KeyError:
+ rds = None
+ return rds
+
+ def delete_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ ) -> None:
+ """Delete the rdataset matching the specified properties in the
+ current node.
+
+ If a matching rdataset does not exist, it is not an error.
+
+ *rdclass*, an ``int``, the class of the rdataset.
+
+ *rdtype*, an ``int``, the type of the rdataset.
+
+ *covers*, an ``int``, the covered type.
+ """
+
+ rds = self.get_rdataset(rdclass, rdtype, covers)
+ if rds is not None:
+ self.rdatasets.remove(rds)
+
+ def replace_rdataset(self, replacement: dns.rdataset.Rdataset) -> None:
+ """Replace an rdataset.
+
+ It is not an error if there is no rdataset matching *replacement*.
+
+ Ownership of the *replacement* object is transferred to the node;
+ in other words, this method does not store a copy of *replacement*
+ at the node, it stores *replacement* itself.
+
+ *replacement*, a ``dns.rdataset.Rdataset``.
+
+ Raises ``ValueError`` if *replacement* is not a
+ ``dns.rdataset.Rdataset``.
+ """
+
+ if not isinstance(replacement, dns.rdataset.Rdataset):
+ raise ValueError("replacement is not an rdataset")
+ if isinstance(replacement, dns.rrset.RRset):
+ # RRsets are not good replacements as the match() method
+ # is not compatible.
+ replacement = replacement.to_rdataset()
+ self.delete_rdataset(
+ replacement.rdclass, replacement.rdtype, replacement.covers
+ )
+ self._append_rdataset(replacement)
+
+ def classify(self) -> NodeKind:
+ """Classify a node.
+
+ A node which contains a CNAME or RRSIG(CNAME) is a
+ ``NodeKind.CNAME`` node.
+
+ A node which contains only "neutral" types, i.e. types allowed to
+ co-exist with a CNAME, is a ``NodeKind.NEUTRAL`` node. The neutral
+ types are NSEC, NSEC3, KEY, and their associated RRSIGS. An empty node
+ is also considered neutral.
+
+ A node which contains some rdataset which is not a CNAME, RRSIG(CNAME),
+ or a neutral type is a ``NodeKind.REGULAR`` node. Regular nodes are
+ also commonly referred to as "other data".
+ """
+ for rdataset in self.rdatasets:
+ kind = NodeKind.classify(rdataset.rdtype, rdataset.covers)
+ if kind != NodeKind.NEUTRAL:
+ return kind
+ return NodeKind.NEUTRAL
+
+ def is_immutable(self) -> bool:
+ return False
+
+
+@dns.immutable.immutable
+class ImmutableNode(Node):
+ def __init__(self, node):
+ super().__init__()
+ self.rdatasets = tuple(
+ [dns.rdataset.ImmutableRdataset(rds) for rds in node.rdatasets]
+ )
+
+ def find_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset:
+ if create:
+ raise TypeError("immutable")
+ return super().find_rdataset(rdclass, rdtype, covers, False)
+
+ def get_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset | None:
+ if create:
+ raise TypeError("immutable")
+ return super().get_rdataset(rdclass, rdtype, covers, False)
+
+ def delete_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ ) -> None:
+ raise TypeError("immutable")
+
+ def replace_rdataset(self, replacement: dns.rdataset.Rdataset) -> None:
+ raise TypeError("immutable")
+
+ def is_immutable(self) -> bool:
+ return True
diff --git a/tapdown/lib/python3.11/site-packages/dns/opcode.py b/tapdown/lib/python3.11/site-packages/dns/opcode.py
new file mode 100644
index 0000000..3fa610d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/opcode.py
@@ -0,0 +1,119 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Opcodes."""
+
+from typing import Type
+
+import dns.enum
+import dns.exception
+
+
+class Opcode(dns.enum.IntEnum):
+ #: Query
+ QUERY = 0
+ #: Inverse Query (historical)
+ IQUERY = 1
+ #: Server Status (unspecified and unimplemented anywhere)
+ STATUS = 2
+ #: Notify
+ NOTIFY = 4
+ #: Dynamic Update
+ UPDATE = 5
+
+ @classmethod
+ def _maximum(cls):
+ return 15
+
+ @classmethod
+ def _unknown_exception_class(cls) -> Type[Exception]:
+ return UnknownOpcode
+
+
+class UnknownOpcode(dns.exception.DNSException):
+ """An DNS opcode is unknown."""
+
+
+def from_text(text: str) -> Opcode:
+ """Convert text into an opcode.
+
+ *text*, a ``str``, the textual opcode
+
+ Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
+
+ Returns an ``int``.
+ """
+
+ return Opcode.from_text(text)
+
+
+def from_flags(flags: int) -> Opcode:
+ """Extract an opcode from DNS message flags.
+
+ *flags*, an ``int``, the DNS flags.
+
+ Returns an ``int``.
+ """
+
+ return Opcode((flags & 0x7800) >> 11)
+
+
+def to_flags(value: Opcode) -> int:
+ """Convert an opcode to a value suitable for ORing into DNS message
+ flags.
+
+ *value*, an ``int``, the DNS opcode value.
+
+ Returns an ``int``.
+ """
+
+ return (value << 11) & 0x7800
+
+
+def to_text(value: Opcode) -> str:
+ """Convert an opcode to text.
+
+ *value*, an ``int``, the opcode value.
+
+ Raises ``dns.opcode.UnknownOpcode`` if the opcode is unknown.
+
+ Returns a ``str``.
+ """
+
+ return Opcode.to_text(value)
+
+
+def is_update(flags: int) -> bool:
+ """Is the opcode in flags UPDATE?
+
+ *flags*, an ``int``, the DNS message flags.
+
+ Returns a ``bool``.
+ """
+
+ return from_flags(flags) == Opcode.UPDATE
+
+
+### BEGIN generated Opcode constants
+
+QUERY = Opcode.QUERY
+IQUERY = Opcode.IQUERY
+STATUS = Opcode.STATUS
+NOTIFY = Opcode.NOTIFY
+UPDATE = Opcode.UPDATE
+
+### END generated Opcode constants
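+
+
+# Illustrative sketch (not part of the upstream module): the opcode occupies
+# bits 11-14 of the DNS flags word, which is what the masks above encode.
+#
+# >>> import dns.opcode
+# >>> flags = dns.opcode.to_flags(dns.opcode.UPDATE)
+# >>> hex(flags)
+# '0x2800'
+# >>> dns.opcode.is_update(flags)
+# True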
diff --git a/tapdown/lib/python3.11/site-packages/dns/py.typed b/tapdown/lib/python3.11/site-packages/dns/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/dns/query.py b/tapdown/lib/python3.11/site-packages/dns/query.py
new file mode 100644
index 0000000..17b1862
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/query.py
@@ -0,0 +1,1786 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Talk to a DNS server."""
+
+import base64
+import contextlib
+import enum
+import errno
+import os
+import random
+import selectors
+import socket
+import struct
+import time
+import urllib.parse
+from typing import Any, Callable, Dict, Optional, Tuple, cast
+
+import dns._features
+import dns._tls_util
+import dns.exception
+import dns.inet
+import dns.message
+import dns.name
+import dns.quic
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.transaction
+import dns.tsig
+import dns.xfr
+
+try:
+ import ssl
+except ImportError:
+ import dns._no_ssl as ssl # type: ignore
+
+
+def _remaining(expiration):
+ if expiration is None:
+ return None
+ timeout = expiration - time.time()
+ if timeout <= 0.0:
+ raise dns.exception.Timeout
+ return timeout
+
+
+def _expiration_for_this_attempt(timeout, expiration):
+ if expiration is None:
+ return None
+ return min(time.time() + timeout, expiration)
+
+
+_have_httpx = dns._features.have("doh")
+if _have_httpx:
+ import httpcore._backends.sync
+ import httpx
+
+ _CoreNetworkBackend = httpcore.NetworkBackend
+ _CoreSyncStream = httpcore._backends.sync.SyncStream
+
+ class _NetworkBackend(_CoreNetworkBackend):
+ def __init__(self, resolver, local_port, bootstrap_address, family):
+ super().__init__()
+ self._local_port = local_port
+ self._resolver = resolver
+ self._bootstrap_address = bootstrap_address
+ self._family = family
+
+ def connect_tcp(
+ self, host, port, timeout=None, local_address=None, socket_options=None
+ ): # pylint: disable=signature-differs
+ addresses = []
+ _, expiration = _compute_times(timeout)
+ if dns.inet.is_address(host):
+ addresses.append(host)
+ elif self._bootstrap_address is not None:
+ addresses.append(self._bootstrap_address)
+ else:
+ timeout = _remaining(expiration)
+ family = self._family
+ if local_address:
+ family = dns.inet.af_for_address(local_address)
+ answers = self._resolver.resolve_name(
+ host, family=family, lifetime=timeout
+ )
+ addresses = answers.addresses()
+ for address in addresses:
+ af = dns.inet.af_for_address(address)
+ if local_address is not None or self._local_port != 0:
+ if local_address is None:
+ local_address = "0.0.0.0"
+ source = dns.inet.low_level_address_tuple(
+ (local_address, self._local_port), af
+ )
+ else:
+ source = None
+ try:
+ sock = make_socket(af, socket.SOCK_STREAM, source)
+ attempt_expiration = _expiration_for_this_attempt(2.0, expiration)
+ _connect(
+ sock,
+ dns.inet.low_level_address_tuple((address, port), af),
+ attempt_expiration,
+ )
+ return _CoreSyncStream(sock)
+ except Exception:
+ pass
+ raise httpcore.ConnectError
+
+ def connect_unix_socket(
+ self, path, timeout=None, socket_options=None
+ ): # pylint: disable=signature-differs
+ raise NotImplementedError
+
+ class _HTTPTransport(httpx.HTTPTransport): # pyright: ignore
+ def __init__(
+ self,
+ *args,
+ local_port=0,
+ bootstrap_address=None,
+ resolver=None,
+ family=socket.AF_UNSPEC,
+ **kwargs,
+ ):
+ if resolver is None and bootstrap_address is None:
+ # pylint: disable=import-outside-toplevel,redefined-outer-name
+ import dns.resolver
+
+ resolver = dns.resolver.Resolver()
+ super().__init__(*args, **kwargs)
+ self._pool._network_backend = _NetworkBackend(
+ resolver, local_port, bootstrap_address, family
+ )
+
+else:
+
+ class _HTTPTransport: # type: ignore
+ def __init__(
+ self,
+ *args,
+ local_port=0,
+ bootstrap_address=None,
+ resolver=None,
+ family=socket.AF_UNSPEC,
+ **kwargs,
+ ):
+ pass
+
+ def connect_tcp(self, host, port, timeout, local_address):
+ raise NotImplementedError
+
+
+have_doh = _have_httpx
+
+
+def default_socket_factory(
+ af: socket.AddressFamily | int,
+ kind: socket.SocketKind,
+ proto: int,
+) -> socket.socket:
+ return socket.socket(af, kind, proto)
+
+
+# Function used to create a socket. Can be overridden if needed in special
+# situations.
+socket_factory: Callable[
+ [socket.AddressFamily | int, socket.SocketKind, int], socket.socket
+] = default_socket_factory
+
+
+class UnexpectedSource(dns.exception.DNSException):
+ """A DNS query response came from an unexpected address or port."""
+
+
+class BadResponse(dns.exception.FormError):
+ """A DNS query response does not respond to the question asked."""
+
+
+class NoDOH(dns.exception.DNSException):
+ """DNS over HTTPS (DOH) was requested but the httpx module is not
+ available."""
+
+
+class NoDOQ(dns.exception.DNSException):
+ """DNS over QUIC (DOQ) was requested but the aioquic module is not
+ available."""
+
+
+# for backwards compatibility
+TransferError = dns.xfr.TransferError
+
+
+def _compute_times(timeout):
+ now = time.time()
+ if timeout is None:
+ return (now, None)
+ else:
+ return (now, now + timeout)
+
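+# Throughout this module a relative *timeout* is converted once into an
+# absolute *expiration* time, and each blocking step recomputes its remaining
+# budget from that. A minimal sketch of the convention:
+#
+#     (start, expiration) = _compute_times(2.0)  # (now, now + 2.0)
+#     timeout = _remaining(expiration)  # seconds left, or raises Timeout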
+
+def _wait_for(fd, readable, writable, _, expiration):
+ # Use the selected selector class to wait for any of the specified
+ # events. An "expiration" absolute time is converted into a relative
+ # timeout.
+ #
+ # The unused parameter is 'error', which is always set when
+ # selecting for read or write, and we have no error-only selects.
+
+ if readable and isinstance(fd, ssl.SSLSocket) and fd.pending() > 0:
+ return True
+ with selectors.DefaultSelector() as sel:
+ events = 0
+ if readable:
+ events |= selectors.EVENT_READ
+ if writable:
+ events |= selectors.EVENT_WRITE
+ if events:
+ sel.register(fd, events) # pyright: ignore
+ if expiration is None:
+ timeout = None
+ else:
+ timeout = expiration - time.time()
+ if timeout <= 0.0:
+ raise dns.exception.Timeout
+ if not sel.select(timeout):
+ raise dns.exception.Timeout
+
+
+def _wait_for_readable(s, expiration):
+ _wait_for(s, True, False, True, expiration)
+
+
+def _wait_for_writable(s, expiration):
+ _wait_for(s, False, True, True, expiration)
+
+
+def _addresses_equal(af, a1, a2):
+ # Convert the first value of the tuple, which is a textual format
+ # address into binary form, so that we are not confused by different
+ # textual representations of the same address
+ try:
+ n1 = dns.inet.inet_pton(af, a1[0])
+ n2 = dns.inet.inet_pton(af, a2[0])
+ except dns.exception.SyntaxError:
+ return False
+ return n1 == n2 and a1[1:] == a2[1:]
+
+
+def _matches_destination(af, from_address, destination, ignore_unexpected):
+ # Check that from_address is appropriate for a response to a query
+ # sent to destination.
+ if not destination:
+ return True
+ if _addresses_equal(af, from_address, destination) or (
+ dns.inet.is_multicast(destination[0]) and from_address[1:] == destination[1:]
+ ):
+ return True
+ elif ignore_unexpected:
+ return False
+ raise UnexpectedSource(
+ f"got a response from {from_address} instead of " f"{destination}"
+ )
+
+
+def _destination_and_source(
+ where, port, source, source_port, where_must_be_address=True
+):
+ # Apply defaults and compute destination and source tuples
+ # suitable for use in connect(), sendto(), or bind().
+ af = None
+ destination = None
+ try:
+ af = dns.inet.af_for_address(where)
+ destination = where
+ except Exception:
+ if where_must_be_address:
+ raise
+ # URLs are ok so eat the exception
+ if source:
+ saf = dns.inet.af_for_address(source)
+ if af:
+ # We know the destination af, so source had better agree!
+ if saf != af:
+ raise ValueError(
+ "different address families for source and destination"
+ )
+ else:
+ # We didn't know the destination af, but we know the source,
+ # so that's our af.
+ af = saf
+ if source_port and not source:
+ # Caller has specified a source_port but not an address, so we
+ # need to return a source, and we need to use the appropriate
+ # wildcard address as the address.
+ try:
+ source = dns.inet.any_for_af(af)
+ except Exception:
+ # we catch this and raise ValueError for backwards compatibility
+ raise ValueError("source_port specified but address family is unknown")
+ # Convert high-level (address, port) tuples into low-level address
+ # tuples.
+ if destination:
+ destination = dns.inet.low_level_address_tuple((destination, port), af)
+ if source:
+ source = dns.inet.low_level_address_tuple((source, source_port), af)
+ return (af, destination, source)
+
+
+def make_socket(
+ af: socket.AddressFamily | int,
+ type: socket.SocketKind,
+ source: Any | None = None,
+) -> socket.socket:
+ """Make a socket.
+
+ This function uses the module's ``socket_factory`` to make a socket of the
+ specified address family and type.
+
+ *af*, a ``socket.AddressFamily`` or ``int`` is the address family, either
+ ``socket.AF_INET`` or ``socket.AF_INET6``.
+
+ *type*, a ``socket.SocketKind`` is the type of socket, e.g. ``socket.SOCK_DGRAM``,
+ a datagram socket, or ``socket.SOCK_STREAM``, a stream socket. Note that the
+ ``proto`` attribute of a socket is always zero with this API, so a datagram socket
+ will always be a UDP socket, and a stream socket will always be a TCP socket.
+
+ *source* is the source address and port to bind to, if any. The default is
+ ``None`` which will bind to the wildcard address and a randomly chosen port.
+ If not ``None``, it should be a (low-level) address tuple appropriate for *af*.
+ """
+ s = socket_factory(af, type, 0)
+ try:
+ s.setblocking(False)
+ if source is not None:
+ s.bind(source)
+ return s
+ except Exception:
+ s.close()
+ raise
+
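+# Example (illustrative sketch, not part of the library API): making a
+# nonblocking UDP socket for use with send_udp()/receive_udp():
+#
+#     import socket
+#     s = make_socket(socket.AF_INET, socket.SOCK_DGRAM)
+#     ...  # use the socket, then s.close()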
+
+def make_ssl_socket(
+ af: socket.AddressFamily | int,
+ type: socket.SocketKind,
+ ssl_context: ssl.SSLContext,
+ server_hostname: dns.name.Name | str | None = None,
+ source: Any | None = None,
+) -> ssl.SSLSocket:
+ """Make a socket.
+
+ This function uses the module's ``socket_factory`` to make a socket of the
+ specified address family and type.
+
+ *af*, a ``socket.AddressFamily`` or ``int`` is the address family, either
+ ``socket.AF_INET`` or ``socket.AF_INET6``.
+
+ *type*, a ``socket.SocketKind`` is the type of socket, e.g. ``socket.SOCK_DGRAM``,
+ a datagram socket, or ``socket.SOCK_STREAM``, a stream socket. Note that the
+ ``proto`` attribute of a socket is always zero with this API, so a datagram socket
+ will always be a UDP socket, and a stream socket will always be a TCP socket.
+
+    *ssl_context* specifies the SSL context to use, typically created with
+    ``make_ssl_context()``.
+
+ If *server_hostname* is not ``None``, then it is the hostname to use for server
+ certificate validation. A valid hostname must be supplied if *ssl_context*
+ requires hostname checking.
+
+ *source* is the source address and port to bind to, if any. The default is
+ ``None`` which will bind to the wildcard address and a randomly chosen port.
+ If not ``None``, it should be a (low-level) address tuple appropriate for *af*.
+ """
+ sock = make_socket(af, type, source)
+ if isinstance(server_hostname, dns.name.Name):
+ server_hostname = server_hostname.to_text()
+ # LGTM gets a false positive here, as our default context is OK
+ return ssl_context.wrap_socket(
+ sock,
+ do_handshake_on_connect=False, # lgtm[py/insecure-protocol]
+ server_hostname=server_hostname,
+ )
+
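+# Example (illustrative sketch; "dns.example" is a placeholder hostname):
+# wrapping a stream socket for DNS-over-TLS with a default context:
+#
+#     import socket
+#     ctx = make_ssl_context(alpns=["dot"])
+#     s = make_ssl_socket(
+#         socket.AF_INET, socket.SOCK_STREAM, ctx, server_hostname="dns.example"
+#     )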
+
+# for backwards compatibility
+def _make_socket(
+ af,
+ type,
+ source,
+ ssl_context,
+ server_hostname,
+):
+ if ssl_context is not None:
+ return make_ssl_socket(af, type, ssl_context, server_hostname, source)
+ else:
+ return make_socket(af, type, source)
+
+
+def _maybe_get_resolver(
+ resolver: Optional["dns.resolver.Resolver"], # pyright: ignore
+) -> "dns.resolver.Resolver": # pyright: ignore
+ # We need a separate method for this to avoid overriding the global
+ # variable "dns" with the as-yet undefined local variable "dns"
+ # in https().
+ if resolver is None:
+ # pylint: disable=import-outside-toplevel,redefined-outer-name
+ import dns.resolver
+
+ resolver = dns.resolver.Resolver()
+ return resolver
+
+
+class HTTPVersion(enum.IntEnum):
+ """Which version of HTTP should be used?
+
+ DEFAULT will select the first version from the list [2, 1.1, 3] that
+ is available.
+ """
+
+ DEFAULT = 0
+ HTTP_1 = 1
+ H1 = 1
+ HTTP_2 = 2
+ H2 = 2
+ HTTP_3 = 3
+ H3 = 3
+
+
+def https(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 443,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ session: Any | None = None,
+ path: str = "/dns-query",
+ post: bool = True,
+ bootstrap_address: str | None = None,
+ verify: bool | str | ssl.SSLContext = True,
+ resolver: Optional["dns.resolver.Resolver"] = None, # pyright: ignore
+ family: int = socket.AF_UNSPEC,
+ http_version: HTTPVersion = HTTPVersion.DEFAULT,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via DNS-over-HTTPS.
+
+ *q*, a ``dns.message.Message``, the query to send.
+
+ *where*, a ``str``, the nameserver IP address or the full URL. If an IP address is
+ given, the URL will be constructed using the following schema:
+    https://<IP-address>:<port>/<path>.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query
+ times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the query to. The default is 443.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying the source
+ address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message. The default is
+ 0.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing junk at end of the
+ received message.
+
+ *session*, an ``httpx.Client``. If provided, the client session to use to send the
+ queries.
+
+ *path*, a ``str``. If *where* is an IP address, then *path* will be used to
+ construct the URL to send the DNS query to.
+
+ *post*, a ``bool``. If ``True``, the default, POST method will be used.
+
+ *bootstrap_address*, a ``str``, the IP address to use to bypass resolution.
+
+    *verify*, a ``bool`` or ``str``. If ``True``, then TLS certificate verification
+    of the server is done using the default CA bundle; if ``False``, then no
+    verification is done; if a ``str``, then it specifies the path to a certificate
+    file or directory which will be used for verification.
+
+ *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use for
+ resolution of hostnames in URLs. If not specified, a new resolver with a default
+ configuration will be used; note this is *not* the default resolver as that resolver
+ might have been configured to use DoH causing a chicken-and-egg problem. This
+ parameter only has an effect if the HTTP library is httpx.
+
+ *family*, an ``int``, the address family. If socket.AF_UNSPEC (the default), both A
+ and AAAA records will be retrieved.
+
+ *http_version*, a ``dns.query.HTTPVersion``, indicating which HTTP version to use.
+
+ Returns a ``dns.message.Message``.
+ """
+
+ (af, _, the_source) = _destination_and_source(
+ where, port, source, source_port, False
+ )
+    # We bind url here and then override it below, as pyright can't see that
+    # all paths bind it.
+ url = where
+ if af is not None and dns.inet.is_address(where):
+ if af == socket.AF_INET:
+ url = f"https://{where}:{port}{path}"
+ elif af == socket.AF_INET6:
+ url = f"https://[{where}]:{port}{path}"
+
+ extensions = {}
+ if bootstrap_address is None:
+ # pylint: disable=possibly-used-before-assignment
+ parsed = urllib.parse.urlparse(url)
+ if parsed.hostname is None:
+ raise ValueError("no hostname in URL")
+ if dns.inet.is_address(parsed.hostname):
+ bootstrap_address = parsed.hostname
+ extensions["sni_hostname"] = parsed.hostname
+ if parsed.port is not None:
+ port = parsed.port
+
+ if http_version == HTTPVersion.H3 or (
+ http_version == HTTPVersion.DEFAULT and not have_doh
+ ):
+ if bootstrap_address is None:
+ resolver = _maybe_get_resolver(resolver)
+ assert parsed.hostname is not None # pyright: ignore
+ answers = resolver.resolve_name(parsed.hostname, family) # pyright: ignore
+ bootstrap_address = random.choice(list(answers.addresses()))
+ if session and not isinstance(
+ session, dns.quic.SyncQuicConnection
+ ): # pyright: ignore
+ raise ValueError("session parameter must be a dns.quic.SyncQuicConnection.")
+ return _http3(
+ q,
+ bootstrap_address,
+ url, # pyright: ignore
+ timeout,
+ port,
+ source,
+ source_port,
+ one_rr_per_rrset,
+ ignore_trailing,
+ verify=verify,
+ post=post,
+ connection=session,
+ )
+
+ if not have_doh:
+ raise NoDOH # pragma: no cover
+ if session and not isinstance(session, httpx.Client): # pyright: ignore
+ raise ValueError("session parameter must be an httpx.Client")
+
+ wire = q.to_wire()
+ headers = {"accept": "application/dns-message"}
+
+ h1 = http_version in (HTTPVersion.H1, HTTPVersion.DEFAULT)
+ h2 = http_version in (HTTPVersion.H2, HTTPVersion.DEFAULT)
+
+ # set source port and source address
+
+ if the_source is None:
+ local_address = None
+ local_port = 0
+ else:
+ local_address = the_source[0]
+ local_port = the_source[1]
+
+ if session:
+ cm: contextlib.AbstractContextManager = contextlib.nullcontext(session)
+ else:
+ transport = _HTTPTransport(
+ local_address=local_address,
+ http1=h1,
+ http2=h2,
+ verify=verify,
+ local_port=local_port,
+ bootstrap_address=bootstrap_address,
+ resolver=resolver,
+ family=family, # pyright: ignore
+ )
+
+ cm = httpx.Client( # type: ignore
+ http1=h1, http2=h2, verify=verify, transport=transport # type: ignore
+ )
+ with cm as session:
+ # see https://tools.ietf.org/html/rfc8484#section-4.1.1 for DoH
+ # GET and POST examples
+ assert session is not None
+ if post:
+ headers.update(
+ {
+ "content-type": "application/dns-message",
+ "content-length": str(len(wire)),
+ }
+ )
+ response = session.post(
+ url,
+ headers=headers,
+ content=wire,
+ timeout=timeout,
+ extensions=extensions,
+ )
+ else:
+ wire = base64.urlsafe_b64encode(wire).rstrip(b"=")
+ twire = wire.decode() # httpx does a repr() if we give it bytes
+ response = session.get(
+ url,
+ headers=headers,
+ timeout=timeout,
+ params={"dns": twire},
+ extensions=extensions,
+ )
+
+ # see https://tools.ietf.org/html/rfc8484#section-4.2.1 for info about DoH
+ # status codes
+ if response.status_code < 200 or response.status_code > 299:
+ raise ValueError(
+ f"{where} responded with status code {response.status_code}"
+ f"\nResponse body: {response.content}"
+ )
+ r = dns.message.from_wire(
+ response.content,
+ keyring=q.keyring,
+ request_mac=q.request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ r.time = response.elapsed.total_seconds()
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
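+# Example (illustrative sketch; the URL and address are placeholders): a DoH
+# query by full URL, or by bare address with a forced HTTP version:
+#
+#     import dns.message
+#     import dns.query
+#     q = dns.message.make_query("example.com", "A")
+#     r = dns.query.https(q, "https://dns.example/dns-query", timeout=5.0)
+#     r = dns.query.https(q, "192.0.2.1", http_version=dns.query.HTTPVersion.H2)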
+
+def _find_header(headers: dns.quic.Headers, name: bytes) -> bytes:
+ if headers is None:
+ raise KeyError
+ for header, value in headers:
+ if header == name:
+ return value
+ raise KeyError
+
+
+def _check_status(headers: dns.quic.Headers, peer: str, wire: bytes) -> None:
+ value = _find_header(headers, b":status")
+ if value is None:
+ raise SyntaxError("no :status header in response")
+ status = int(value)
+ if status < 0:
+ raise SyntaxError("status is negative")
+ if status < 200 or status > 299:
+ error = ""
+ if len(wire) > 0:
+ try:
+ error = ": " + wire.decode()
+ except Exception:
+ pass
+ raise ValueError(f"{peer} responded with status code {status}{error}")
+
+
+def _http3(
+ q: dns.message.Message,
+ where: str,
+ url: str,
+ timeout: float | None = None,
+ port: int = 443,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ verify: bool | str | ssl.SSLContext = True,
+ post: bool = True,
+ connection: dns.quic.SyncQuicConnection | None = None,
+) -> dns.message.Message:
+ if not dns.quic.have_quic:
+ raise NoDOH("DNS-over-HTTP3 is not available.") # pragma: no cover
+
+ url_parts = urllib.parse.urlparse(url)
+ hostname = url_parts.hostname
+ assert hostname is not None
+ if url_parts.port is not None:
+ port = url_parts.port
+
+ q.id = 0
+ wire = q.to_wire()
+ the_connection: dns.quic.SyncQuicConnection
+ the_manager: dns.quic.SyncQuicManager
+ if connection:
+ manager: contextlib.AbstractContextManager = contextlib.nullcontext(None)
+ else:
+ manager = dns.quic.SyncQuicManager(
+ verify_mode=verify, server_name=hostname, h3=True # pyright: ignore
+ )
+ the_manager = manager # for type checking happiness
+
+ with manager:
+ if connection:
+ the_connection = connection
+ else:
+ the_connection = the_manager.connect( # pyright: ignore
+ where, port, source, source_port
+ )
+ (start, expiration) = _compute_times(timeout)
+ with the_connection.make_stream(timeout) as stream: # pyright: ignore
+ stream.send_h3(url, wire, post)
+ wire = stream.receive(_remaining(expiration))
+ _check_status(stream.headers(), where, wire)
+ finish = time.time()
+ r = dns.message.from_wire(
+ wire,
+ keyring=q.keyring,
+ request_mac=q.request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ r.time = max(finish - start, 0.0)
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
+
+def _udp_recv(sock, max_size, expiration):
+ """Reads a datagram from the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ while True:
+ try:
+ return sock.recvfrom(max_size)
+ except BlockingIOError:
+ _wait_for_readable(sock, expiration)
+
+
+def _udp_send(sock, data, destination, expiration):
+ """Sends the specified datagram to destination over the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ while True:
+ try:
+ if destination:
+ return sock.sendto(data, destination)
+ else:
+ return sock.send(data)
+ except BlockingIOError: # pragma: no cover
+ _wait_for_writable(sock, expiration)
+
+
+def send_udp(
+ sock: Any,
+ what: dns.message.Message | bytes,
+ destination: Any,
+ expiration: float | None = None,
+) -> Tuple[int, float]:
+ """Send a DNS message to the specified UDP socket.
+
+ *sock*, a ``socket``.
+
+ *what*, a ``bytes`` or ``dns.message.Message``, the message to send.
+
+ *destination*, a destination tuple appropriate for the address family
+ of the socket, specifying where to send the query.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ Returns an ``(int, float)`` tuple of bytes sent and the sent time.
+ """
+
+ if isinstance(what, dns.message.Message):
+ what = what.to_wire()
+ sent_time = time.time()
+ n = _udp_send(sock, what, destination, expiration)
+ return (n, sent_time)
+
+
+def receive_udp(
+ sock: Any,
+ destination: Any | None = None,
+ expiration: float | None = None,
+ ignore_unexpected: bool = False,
+ one_rr_per_rrset: bool = False,
+ keyring: Dict[dns.name.Name, dns.tsig.Key] | None = None,
+ request_mac: bytes | None = b"",
+ ignore_trailing: bool = False,
+ raise_on_truncation: bool = False,
+ ignore_errors: bool = False,
+ query: dns.message.Message | None = None,
+) -> Any:
+ """Read a DNS message from a UDP socket.
+
+ *sock*, a ``socket``.
+
+ *destination*, a destination tuple appropriate for the address family
+ of the socket, specifying where the message is expected to arrive from.
+ When receiving a response, this would be where the associated query was
+ sent.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from
+ unexpected sources.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *keyring*, a ``dict``, the keyring to use for TSIG.
+
+ *request_mac*, a ``bytes`` or ``None``, the MAC of the request (for TSIG).
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+    *raise_on_truncation*, a ``bool``. If ``True``, raise an exception if
+    the TC bit is set.
+
+    *ignore_errors*, a ``bool``. If various format errors or response
+    mismatches occur, ignore them and keep listening for a valid response.
+    The default is ``False``.
+
+    *query*, a ``dns.message.Message`` or ``None``. If not ``None`` and
+    *ignore_errors* is ``True``, check that the received message is a response
+    to this query, and if not keep listening for a valid response.
+
+    Raises if the message is malformed, if network errors occur, or if
+    there is a timeout.
+
+    If *destination* is not ``None``, returns a ``(dns.message.Message, float)``
+    tuple of the received message and the received time.
+
+    If *destination* is ``None``, returns a
+    ``(dns.message.Message, float, tuple)``
+    tuple of the received message, the received time, and the address where
+    the message arrived from.
+    """
+
+ wire = b""
+ while True:
+ (wire, from_address) = _udp_recv(sock, 65535, expiration)
+ if not _matches_destination(
+ sock.family, from_address, destination, ignore_unexpected
+ ):
+ continue
+ received_time = time.time()
+ try:
+ r = dns.message.from_wire(
+ wire,
+ keyring=keyring,
+ request_mac=request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ raise_on_truncation=raise_on_truncation,
+ )
+ except dns.message.Truncated as e:
+ # If we got Truncated and not FORMERR, we at least got the header with TC
+ # set, and very likely the question section, so we'll re-raise if the
+ # message seems to be a response as we need to know when truncation happens.
+ # We need to check that it seems to be a response as we don't want a random
+ # injected message with TC set to cause us to bail out.
+ if (
+ ignore_errors
+ and query is not None
+ and not query.is_response(e.message())
+ ):
+ continue
+ else:
+ raise
+ except Exception:
+ if ignore_errors:
+ continue
+ else:
+ raise
+ if ignore_errors and query is not None and not query.is_response(r):
+ continue
+ if destination:
+ return (r, received_time)
+ else:
+ return (r, received_time, from_address)
+
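+# Example (illustrative sketch; 192.0.2.1 is a placeholder server): driving
+# send_udp()/receive_udp() by hand instead of using the udp() wrapper:
+#
+#     import socket
+#     import time
+#     import dns.message
+#     import dns.query
+#     q = dns.message.make_query("example.com", "A")
+#     expiration = time.time() + 2.0
+#     with dns.query.make_socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
+#         dns.query.send_udp(s, q, ("192.0.2.1", 53), expiration)
+#         (r, when) = dns.query.receive_udp(s, ("192.0.2.1", 53), expiration)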
+
+def udp(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 53,
+ source: str | None = None,
+ source_port: int = 0,
+ ignore_unexpected: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ raise_on_truncation: bool = False,
+ sock: Any | None = None,
+ ignore_errors: bool = False,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via UDP.
+
+ *q*, a ``dns.message.Message``, the query to send
+
+ *where*, a ``str`` containing an IPv4 or IPv6 address, where
+ to send the message.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the
+ query times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from
+ unexpected sources.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+ *raise_on_truncation*, a ``bool``. If ``True``, raise an exception if
+ the TC bit is set.
+
+ *sock*, a ``socket.socket``, or ``None``, the socket to use for the
+ query. If ``None``, the default, a socket is created. Note that
+ if a socket is provided, it must be a nonblocking datagram socket,
+ and the *source* and *source_port* are ignored.
+
+ *ignore_errors*, a ``bool``. If various format errors or response
+ mismatches occur, ignore them and keep listening for a valid response.
+ The default is ``False``.
+
+ Returns a ``dns.message.Message``.
+ """
+
+ wire = q.to_wire()
+ (af, destination, source) = _destination_and_source(
+ where, port, source, source_port, True
+ )
+ (begin_time, expiration) = _compute_times(timeout)
+ if sock:
+ cm: contextlib.AbstractContextManager = contextlib.nullcontext(sock)
+ else:
+ assert af is not None
+ cm = make_socket(af, socket.SOCK_DGRAM, source)
+ with cm as s:
+ send_udp(s, wire, destination, expiration)
+ (r, received_time) = receive_udp(
+ s,
+ destination,
+ expiration,
+ ignore_unexpected,
+ one_rr_per_rrset,
+ q.keyring,
+ q.mac,
+ ignore_trailing,
+ raise_on_truncation,
+ ignore_errors,
+ q,
+ )
+ r.time = received_time - begin_time
+ # We don't need to check q.is_response() if we are in ignore_errors mode
+ # as receive_udp() will have checked it.
+ if not (ignore_errors or q.is_response(r)):
+ raise BadResponse
+ return r
+ assert (
+ False # help mypy figure out we can't get here lgtm[py/unreachable-statement]
+ )
+
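+# Example (illustrative sketch; 192.0.2.1 is a placeholder server): the usual
+# high-level UDP query:
+#
+#     import dns.message
+#     import dns.query
+#     q = dns.message.make_query("example.com", "A")
+#     r = dns.query.udp(q, "192.0.2.1", timeout=2.0)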
+
+def udp_with_fallback(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 53,
+ source: str | None = None,
+ source_port: int = 0,
+ ignore_unexpected: bool = False,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ udp_sock: Any | None = None,
+ tcp_sock: Any | None = None,
+ ignore_errors: bool = False,
+) -> Tuple[dns.message.Message, bool]:
+ """Return the response to the query, trying UDP first and falling back
+ to TCP if UDP results in a truncated response.
+
+ *q*, a ``dns.message.Message``, the query to send
+
+ *where*, a ``str`` containing an IPv4 or IPv6 address, where to send the message.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query
+ times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying the source
+ address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message. The default is
+ 0.
+
+ *ignore_unexpected*, a ``bool``. If ``True``, ignore responses from unexpected
+ sources.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing junk at end of the
+ received message.
+
+ *udp_sock*, a ``socket.socket``, or ``None``, the socket to use for the UDP query.
+ If ``None``, the default, a socket is created. Note that if a socket is provided,
+ it must be a nonblocking datagram socket, and the *source* and *source_port* are
+ ignored for the UDP query.
+
+ *tcp_sock*, a ``socket.socket``, or ``None``, the connected socket to use for the
+ TCP query. If ``None``, the default, a socket is created. Note that if a socket is
+ provided, it must be a nonblocking connected stream socket, and *where*, *source*
+ and *source_port* are ignored for the TCP query.
+
+ *ignore_errors*, a ``bool``. If various format errors or response mismatches occur
+ while listening for UDP, ignore them and keep listening for a valid response. The
+ default is ``False``.
+
+ Returns a (``dns.message.Message``, tcp) tuple where tcp is ``True`` if and only if
+ TCP was used.
+ """
+ try:
+ response = udp(
+ q,
+ where,
+ timeout,
+ port,
+ source,
+ source_port,
+ ignore_unexpected,
+ one_rr_per_rrset,
+ ignore_trailing,
+ True,
+ udp_sock,
+ ignore_errors,
+ )
+ return (response, False)
+ except dns.message.Truncated:
+ response = tcp(
+ q,
+ where,
+ timeout,
+ port,
+ source,
+ source_port,
+ one_rr_per_rrset,
+ ignore_trailing,
+ tcp_sock,
+ )
+ return (response, True)
+
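+# Example (illustrative sketch, reusing q from above): udp_with_fallback()
+# also reports whether the answer arrived over TCP:
+#
+#     (r, used_tcp) = dns.query.udp_with_fallback(q, "192.0.2.1", timeout=2.0)
+#     if used_tcp:
+#         ...  # the UDP answer was truncated, so TCP was used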
+
+def _net_read(sock, count, expiration):
+ """Read the specified number of bytes from sock. Keep trying until we
+ either get the desired amount, or we hit EOF.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ s = b""
+ while count > 0:
+ try:
+ n = sock.recv(count)
+ if n == b"":
+ raise EOFError("EOF")
+ count -= len(n)
+ s += n
+ except (BlockingIOError, ssl.SSLWantReadError):
+ _wait_for_readable(sock, expiration)
+ except ssl.SSLWantWriteError: # pragma: no cover
+ _wait_for_writable(sock, expiration)
+ return s
+
+
+def _net_write(sock, data, expiration):
+ """Write the specified data to the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ current = 0
+ l = len(data)
+ while current < l:
+ try:
+ current += sock.send(data[current:])
+ except (BlockingIOError, ssl.SSLWantWriteError):
+ _wait_for_writable(sock, expiration)
+ except ssl.SSLWantReadError: # pragma: no cover
+ _wait_for_readable(sock, expiration)
+
+
+def send_tcp(
+ sock: Any,
+ what: dns.message.Message | bytes,
+ expiration: float | None = None,
+) -> Tuple[int, float]:
+ """Send a DNS message to the specified TCP socket.
+
+ *sock*, a ``socket``.
+
+ *what*, a ``bytes`` or ``dns.message.Message``, the message to send.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ Returns an ``(int, float)`` tuple of bytes sent and the sent time.
+ """
+
+ if isinstance(what, dns.message.Message):
+ tcpmsg = what.to_wire(prepend_length=True)
+ else:
+ # copying the wire into tcpmsg is inefficient, but lets us
+ # avoid writev() or doing a short write that would get pushed
+ # onto the net
+ tcpmsg = len(what).to_bytes(2, "big") + what
+ sent_time = time.time()
+ _net_write(sock, tcpmsg, expiration)
+ return (len(tcpmsg), sent_time)
+
+
+def receive_tcp(
+ sock: Any,
+ expiration: float | None = None,
+ one_rr_per_rrset: bool = False,
+ keyring: Dict[dns.name.Name, dns.tsig.Key] | None = None,
+ request_mac: bytes | None = b"",
+ ignore_trailing: bool = False,
+) -> Tuple[dns.message.Message, float]:
+ """Read a DNS message from a TCP socket.
+
+ *sock*, a ``socket``.
+
+ *expiration*, a ``float`` or ``None``, the absolute time at which
+ a timeout exception should be raised. If ``None``, no timeout will
+ occur.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *keyring*, a ``dict``, the keyring to use for TSIG.
+
+ *request_mac*, a ``bytes`` or ``None``, the MAC of the request (for TSIG).
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+    Raises if the message is malformed, if network errors occur, or if
+    there is a timeout.
+
+ Returns a ``(dns.message.Message, float)`` tuple of the received message
+ and the received time.
+ """
+
+ ldata = _net_read(sock, 2, expiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = _net_read(sock, l, expiration)
+ received_time = time.time()
+ r = dns.message.from_wire(
+ wire,
+ keyring=keyring,
+ request_mac=request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ return (r, received_time)
+
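+# Example (illustrative sketch; assumes s is a connected nonblocking stream
+# socket and q is a dns.message.Message): the low-level TCP send/receive pair:
+#
+#     import time
+#     expiration = time.time() + 5.0
+#     dns.query.send_tcp(s, q, expiration)
+#     (r, when) = dns.query.receive_tcp(s, expiration)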
+
+def _connect(s, address, expiration):
+ err = s.connect_ex(address)
+ if err == 0:
+ return
+ if err in (errno.EINPROGRESS, errno.EWOULDBLOCK, errno.EALREADY):
+ _wait_for_writable(s, expiration)
+ err = s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err != 0:
+ raise OSError(err, os.strerror(err))
+
+
+def tcp(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 53,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ sock: Any | None = None,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via TCP.
+
+ *q*, a ``dns.message.Message``, the query to send
+
+ *where*, a ``str`` containing an IPv4 or IPv6 address, where
+ to send the message.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the
+ query times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+ *sock*, a ``socket.socket``, or ``None``, the connected socket to use for the
+ query. If ``None``, the default, a socket is created. Note that
+ if a socket is provided, it must be a nonblocking connected stream
+ socket, and *where*, *port*, *source* and *source_port* are ignored.
+
+ Returns a ``dns.message.Message``.
+ """
+
+ wire = q.to_wire()
+ (begin_time, expiration) = _compute_times(timeout)
+ if sock:
+ cm: contextlib.AbstractContextManager = contextlib.nullcontext(sock)
+ else:
+ (af, destination, source) = _destination_and_source(
+ where, port, source, source_port, True
+ )
+ assert af is not None
+ cm = make_socket(af, socket.SOCK_STREAM, source)
+ with cm as s:
+ if not sock:
+ # pylint: disable=possibly-used-before-assignment
+ _connect(s, destination, expiration) # pyright: ignore
+ send_tcp(s, wire, expiration)
+ (r, received_time) = receive_tcp(
+ s, expiration, one_rr_per_rrset, q.keyring, q.mac, ignore_trailing
+ )
+ r.time = received_time - begin_time
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+ assert (
+ False # help mypy figure out we can't get here lgtm[py/unreachable-statement]
+ )
+
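+# Example (illustrative sketch; 192.0.2.1 is a placeholder server): a plain
+# TCP query, useful when a response is expected to exceed UDP size limits:
+#
+#     r = dns.query.tcp(q, "192.0.2.1", timeout=5.0)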
+
+def _tls_handshake(s, expiration):
+ while True:
+ try:
+ s.do_handshake()
+ return
+ except ssl.SSLWantReadError:
+ _wait_for_readable(s, expiration)
+ except ssl.SSLWantWriteError: # pragma: no cover
+ _wait_for_writable(s, expiration)
+
+
+def make_ssl_context(
+ verify: bool | str = True,
+ check_hostname: bool = True,
+ alpns: list[str] | None = None,
+) -> ssl.SSLContext:
+ """Make an SSL context
+
+ If *verify* is ``True``, the default, then certificate verification will occur using
+ the standard CA roots. If *verify* is ``False``, then certificate verification will
+ be disabled. If *verify* is a string which is a valid pathname, then if the
+ pathname is a regular file, the CA roots will be taken from the file, otherwise if
+ the pathname is a directory roots will be taken from the directory.
+
+ If *check_hostname* is ``True``, the default, then the hostname of the server must
+ be specified when connecting and the server's certificate must authorize the
+ hostname. If ``False``, then hostname checking is disabled.
+
+    *alpns* is ``None`` or a list of TLS ALPN (Application Layer Protocol Negotiation)
+    strings to use in negotiation. For DNS-over-TLS, the right value is ``["dot"]``.
+ """
+ cafile, capath = dns._tls_util.convert_verify_to_cafile_and_capath(verify)
+ ssl_context = ssl.create_default_context(cafile=cafile, capath=capath)
+ # the pyright ignores below are because it gets confused between the
+ # _no_ssl compatibility types and the real ones.
+ ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2 # type: ignore
+ ssl_context.check_hostname = check_hostname
+ if verify is False:
+ ssl_context.verify_mode = ssl.CERT_NONE # type: ignore
+ if alpns is not None:
+ ssl_context.set_alpn_protocols(alpns)
+ return ssl_context # type: ignore
+
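+# Example (illustrative sketch): a DoT-ready context that skips hostname
+# checking, e.g. when only the server's IP address is known:
+#
+#     ctx = dns.query.make_ssl_context(check_hostname=False, alpns=["dot"])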
+
+# for backwards compatibility
+def _make_dot_ssl_context(
+ server_hostname: str | None, verify: bool | str
+) -> ssl.SSLContext:
+ return make_ssl_context(verify, server_hostname is not None, ["dot"])
+
+
+def tls(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 853,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ sock: ssl.SSLSocket | None = None,
+ ssl_context: ssl.SSLContext | None = None,
+ server_hostname: str | None = None,
+ verify: bool | str = True,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via TLS.
+
+ *q*, a ``dns.message.Message``, the query to send
+
+ *where*, a ``str`` containing an IPv4 or IPv6 address, where
+ to send the message.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the
+ query times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 853.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own
+ RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing
+ junk at end of the received message.
+
+ *sock*, an ``ssl.SSLSocket``, or ``None``, the socket to use for
+ the query. If ``None``, the default, a socket is created. Note
+ that if a socket is provided, it must be a nonblocking connected
+ SSL stream socket, and *where*, *port*, *source*, *source_port*,
+ and *ssl_context* are ignored.
+
+ *ssl_context*, an ``ssl.SSLContext``, the context to use when establishing
+ a TLS connection. If ``None``, the default, creates one with the default
+ configuration.
+
+ *server_hostname*, a ``str`` containing the server's hostname. The
+ default is ``None``, which means that no hostname is known, and if an
+ SSL context is created, hostname checking will be disabled.
+
+    *verify*, a ``bool`` or ``str``. If ``True``, then TLS certificate verification
+    of the server is done using the default CA bundle; if ``False``, then no
+    verification is done; if a ``str``, then it specifies the path to a certificate
+    file or directory which will be used for verification.
+
+ Returns a ``dns.message.Message``.
+
+ """
+
+ if sock:
+ #
+ # If a socket was provided, there's no special TLS handling needed.
+ #
+ return tcp(
+ q,
+ where,
+ timeout,
+ port,
+ source,
+ source_port,
+ one_rr_per_rrset,
+ ignore_trailing,
+ sock,
+ )
+
+ wire = q.to_wire()
+ (begin_time, expiration) = _compute_times(timeout)
+ (af, destination, source) = _destination_and_source(
+ where, port, source, source_port, True
+ )
+ assert af is not None # where must be an address
+ if ssl_context is None:
+ ssl_context = make_ssl_context(verify, server_hostname is not None, ["dot"])
+
+ with make_ssl_socket(
+ af,
+ socket.SOCK_STREAM,
+ ssl_context=ssl_context,
+ server_hostname=server_hostname,
+ source=source,
+ ) as s:
+ _connect(s, destination, expiration)
+ _tls_handshake(s, expiration)
+ send_tcp(s, wire, expiration)
+ (r, received_time) = receive_tcp(
+ s, expiration, one_rr_per_rrset, q.keyring, q.mac, ignore_trailing
+ )
+ r.time = received_time - begin_time
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+ assert (
+ False # help mypy figure out we can't get here lgtm[py/unreachable-statement]
+ )
+
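+# Example (illustrative sketch; the address and hostname are placeholders): a
+# DNS-over-TLS query with certificate and hostname verification:
+#
+#     r = dns.query.tls(q, "192.0.2.1", server_hostname="dns.example")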
+
+def quic(
+ q: dns.message.Message,
+ where: str,
+ timeout: float | None = None,
+ port: int = 853,
+ source: str | None = None,
+ source_port: int = 0,
+ one_rr_per_rrset: bool = False,
+ ignore_trailing: bool = False,
+ connection: dns.quic.SyncQuicConnection | None = None,
+ verify: bool | str = True,
+ hostname: str | None = None,
+ server_hostname: str | None = None,
+) -> dns.message.Message:
+ """Return the response obtained after sending a query via DNS-over-QUIC.
+
+ *q*, a ``dns.message.Message``, the query to send.
+
+ *where*, a ``str``, the nameserver IP address.
+
+ *timeout*, a ``float`` or ``None``, the number of seconds to wait before the query
+ times out. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the query to. The default is 853.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying the source
+ address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message. The default is
+ 0.
+
+ *one_rr_per_rrset*, a ``bool``. If ``True``, put each RR into its own RRset.
+
+ *ignore_trailing*, a ``bool``. If ``True``, ignore trailing junk at end of the
+ received message.
+
+ *connection*, a ``dns.quic.SyncQuicConnection``. If provided, the connection to use
+ to send the query.
+
+    *verify*, a ``bool`` or ``str``. If ``True``, then TLS certificate verification
+    of the server is done using the default CA bundle; if ``False``, then no
+    verification is done; if a ``str``, then it specifies the path to a certificate
+    file or directory which will be used for verification.
+
+    *hostname*, a ``str`` containing the server's hostname or ``None``. The default is
+    ``None``, which means that no hostname is known, and if an SSL context is created,
+    hostname checking will be disabled.
+
+ *server_hostname*, a ``str`` or ``None``. This item is for backwards compatibility
+ only, and has the same meaning as *hostname*.
+
+ Returns a ``dns.message.Message``.
+ """
+
+ if not dns.quic.have_quic:
+ raise NoDOQ("DNS-over-QUIC is not available.") # pragma: no cover
+
+ if server_hostname is not None and hostname is None:
+ hostname = server_hostname
+
+ q.id = 0
+ wire = q.to_wire()
+ the_connection: dns.quic.SyncQuicConnection
+ the_manager: dns.quic.SyncQuicManager
+ if connection:
+ manager: contextlib.AbstractContextManager = contextlib.nullcontext(None)
+ the_connection = connection
+ else:
+ manager = dns.quic.SyncQuicManager(
+ verify_mode=verify, server_name=hostname # pyright: ignore
+ )
+ the_manager = manager # for type checking happiness
+
+ with manager:
+ if not connection:
+ the_connection = the_manager.connect( # pyright: ignore
+ where, port, source, source_port
+ )
+ (start, expiration) = _compute_times(timeout)
+ with the_connection.make_stream(timeout) as stream: # pyright: ignore
+ stream.send(wire, True)
+ wire = stream.receive(_remaining(expiration))
+ finish = time.time()
+ r = dns.message.from_wire(
+ wire,
+ keyring=q.keyring,
+ request_mac=q.request_mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ )
+ r.time = max(finish - start, 0.0)
+ if not q.is_response(r):
+ raise BadResponse
+ return r
+
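+# Example (illustrative sketch; the address and hostname are placeholders;
+# requires the aioquic-based "doq" extra): a DNS-over-QUIC query:
+#
+#     r = dns.query.quic(q, "192.0.2.1", hostname="dns.example")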
+
+class UDPMode(enum.IntEnum):
+ """How should UDP be used in an IXFR from :py:func:`inbound_xfr()`?
+
+ NEVER means "never use UDP; always use TCP"
+ TRY_FIRST means "try to use UDP but fall back to TCP if needed"
+ ONLY means "raise ``dns.xfr.UseTCP`` if trying UDP does not succeed"
+ """
+
+ NEVER = 0
+ TRY_FIRST = 1
+ ONLY = 2
+
+
+def _inbound_xfr(
+ txn_manager: dns.transaction.TransactionManager,
+ s: socket.socket | ssl.SSLSocket,
+ query: dns.message.Message,
+ serial: int | None,
+ timeout: float | None,
+ expiration: float | None,
+) -> Any:
+ """Given a socket, does the zone transfer."""
+ rdtype = query.question[0].rdtype
+ is_ixfr = rdtype == dns.rdatatype.IXFR
+ origin = txn_manager.from_wire_origin()
+ wire = query.to_wire()
+ is_udp = isinstance(s, socket.socket) and s.type == socket.SOCK_DGRAM
+ if is_udp:
+ _udp_send(s, wire, None, expiration)
+ else:
+ tcpmsg = struct.pack("!H", len(wire)) + wire
+ _net_write(s, tcpmsg, expiration)
+ with dns.xfr.Inbound(txn_manager, rdtype, serial, is_udp) as inbound:
+ done = False
+ tsig_ctx = None
+ r: dns.message.Message | None = None
+ while not done:
+ (_, mexpiration) = _compute_times(timeout)
+ if mexpiration is None or (
+ expiration is not None and mexpiration > expiration
+ ):
+ mexpiration = expiration
+ if is_udp:
+ (rwire, _) = _udp_recv(s, 65535, mexpiration)
+ else:
+ ldata = _net_read(s, 2, mexpiration)
+ (l,) = struct.unpack("!H", ldata)
+ rwire = _net_read(s, l, mexpiration)
+ r = dns.message.from_wire(
+ rwire,
+ keyring=query.keyring,
+ request_mac=query.mac,
+ xfr=True,
+ origin=origin,
+ tsig_ctx=tsig_ctx,
+ multi=(not is_udp),
+ one_rr_per_rrset=is_ixfr,
+ )
+ done = inbound.process_message(r)
+ yield r
+ tsig_ctx = r.tsig_ctx
+ if query.keyring and r is not None and not r.had_tsig:
+ raise dns.exception.FormError("missing TSIG")
+
+
+def xfr(
+ where: str,
+ zone: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.AXFR,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ timeout: float | None = None,
+ port: int = 53,
+ keyring: Dict[dns.name.Name, dns.tsig.Key] | None = None,
+ keyname: dns.name.Name | str | None = None,
+ relativize: bool = True,
+ lifetime: float | None = None,
+ source: str | None = None,
+ source_port: int = 0,
+ serial: int = 0,
+ use_udp: bool = False,
+ keyalgorithm: dns.name.Name | str = dns.tsig.default_algorithm,
+) -> Any:
+ """Return a generator for the responses to a zone transfer.
+
+ *where*, a ``str`` containing an IPv4 or IPv6 address, where
+ to send the message.
+
+ *zone*, a ``dns.name.Name`` or ``str``, the name of the zone to transfer.
+
+ *rdtype*, an ``int`` or ``str``, the type of zone transfer. The
+ default is ``dns.rdatatype.AXFR``. ``dns.rdatatype.IXFR`` can be
+ used to do an incremental transfer instead.
+
+ *rdclass*, an ``int`` or ``str``, the class of the zone transfer.
+ The default is ``dns.rdataclass.IN``.
+
+ *timeout*, a ``float``, the number of seconds to wait for each
+    response message. If ``None``, the default, wait forever.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *keyring*, a ``dict``, the keyring to use for TSIG.
+
+ *keyname*, a ``dns.name.Name`` or ``str``, the name of the TSIG
+ key to use.
+
+ *relativize*, a ``bool``. If ``True``, all names in the zone will be
+ relativized to the zone origin. It is essential that the
+ relativize setting matches the one specified to
+ ``dns.zone.from_xfr()`` if using this generator to make a zone.
+
+ *lifetime*, a ``float``, the total number of seconds to spend
+ doing the transfer. If ``None``, the default, then there is no
+ limit on the time the transfer may take.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *serial*, an ``int``, the SOA serial number to use as the base for
+ an IXFR diff sequence (only meaningful if *rdtype* is
+ ``dns.rdatatype.IXFR``).
+
+ *use_udp*, a ``bool``. If ``True``, use UDP (only meaningful for IXFR).
+
+ *keyalgorithm*, a ``dns.name.Name`` or ``str``, the TSIG algorithm to use.
+
+ Raises on errors, and so does the generator.
+
+ Returns a generator of ``dns.message.Message`` objects.
+ """
+
+ class DummyTransactionManager(dns.transaction.TransactionManager):
+ def __init__(self, origin, relativize):
+ self.info = (origin, relativize, dns.name.empty if relativize else origin)
+
+ def origin_information(self):
+ return self.info
+
+ def get_class(self) -> dns.rdataclass.RdataClass:
+ raise NotImplementedError # pragma: no cover
+
+ def reader(self):
+ raise NotImplementedError # pragma: no cover
+
+ def writer(self, replacement: bool = False) -> dns.transaction.Transaction:
+ class DummyTransaction:
+ def nop(self, *args, **kw):
+ pass
+
+ def __getattr__(self, _):
+ return self.nop
+
+ return cast(dns.transaction.Transaction, DummyTransaction())
+
+ if isinstance(zone, str):
+ zone = dns.name.from_text(zone)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ q = dns.message.make_query(zone, rdtype, rdclass)
+ if rdtype == dns.rdatatype.IXFR:
+ rrset = q.find_rrset(
+ q.authority, zone, dns.rdataclass.IN, dns.rdatatype.SOA, create=True
+ )
+ soa = dns.rdata.from_text("IN", "SOA", f". . {serial} 0 0 0 0")
+ rrset.add(soa, 0)
+ if keyring is not None:
+ q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+ (af, destination, source) = _destination_and_source(
+ where, port, source, source_port, True
+ )
+ assert af is not None
+ (_, expiration) = _compute_times(lifetime)
+ tm = DummyTransactionManager(zone, relativize)
+ if use_udp and rdtype != dns.rdatatype.IXFR:
+ raise ValueError("cannot do a UDP AXFR")
+ sock_type = socket.SOCK_DGRAM if use_udp else socket.SOCK_STREAM
+ with make_socket(af, sock_type, source) as s:
+ _connect(s, destination, expiration)
+ yield from _inbound_xfr(tm, s, q, serial, timeout, expiration)
+
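+# Example (illustrative sketch; 192.0.2.1 is a placeholder primary): building
+# a zone from the generator with dns.zone.from_xfr():
+#
+#     import dns.query
+#     import dns.zone
+#     z = dns.zone.from_xfr(dns.query.xfr("192.0.2.1", "example.com"))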
+
+def inbound_xfr(
+ where: str,
+ txn_manager: dns.transaction.TransactionManager,
+ query: dns.message.Message | None = None,
+ port: int = 53,
+ timeout: float | None = None,
+ lifetime: float | None = None,
+ source: str | None = None,
+ source_port: int = 0,
+ udp_mode: UDPMode = UDPMode.NEVER,
+) -> None:
+ """Conduct an inbound transfer and apply it via a transaction from the
+ txn_manager.
+
+ *where*, a ``str`` containing an IPv4 or IPv6 address, where
+ to send the message.
+
+ *txn_manager*, a ``dns.transaction.TransactionManager``, the txn_manager
+ for this transfer (typically a ``dns.zone.Zone``).
+
+ *query*, the query to send. If not supplied, a default query is
+ constructed using information from the *txn_manager*.
+
+    *port*, an ``int``, the port to send the message to. The default is 53.
+
+ *timeout*, a ``float``, the number of seconds to wait for each
+    response message. If ``None``, the default, wait forever.
+
+ *lifetime*, a ``float``, the total number of seconds to spend
+ doing the transfer. If ``None``, the default, then there is no
+ limit on the time the transfer may take.
+
+ *source*, a ``str`` containing an IPv4 or IPv6 address, specifying
+ the source address. The default is the wildcard address.
+
+ *source_port*, an ``int``, the port from which to send the message.
+ The default is 0.
+
+ *udp_mode*, a ``dns.query.UDPMode``, determines how UDP is used
+ for IXFRs. The default is ``dns.query.UDPMode.NEVER``, i.e. only use
+ TCP. Other possibilities are ``dns.query.UDPMode.TRY_FIRST``, which
+ means "try UDP but fallback to TCP if needed", and
+ ``dns.query.UDPMode.ONLY``, which means "try UDP and raise
+ ``dns.xfr.UseTCP`` if it does not succeed.
+
+ Raises on errors.
+ """
+ if query is None:
+ (query, serial) = dns.xfr.make_query(txn_manager)
+ else:
+ serial = dns.xfr.extract_serial_from_query(query)
+
+ (af, destination, source) = _destination_and_source(
+ where, port, source, source_port, True
+ )
+ assert af is not None
+ (_, expiration) = _compute_times(lifetime)
+ if query.question[0].rdtype == dns.rdatatype.IXFR and udp_mode != UDPMode.NEVER:
+ with make_socket(af, socket.SOCK_DGRAM, source) as s:
+ _connect(s, destination, expiration)
+ try:
+ for _ in _inbound_xfr(
+ txn_manager, s, query, serial, timeout, expiration
+ ):
+ pass
+ return
+ except dns.xfr.UseTCP:
+ if udp_mode == UDPMode.ONLY:
+ raise
+
+ with make_socket(af, socket.SOCK_STREAM, source) as s:
+ _connect(s, destination, expiration)
+ for _ in _inbound_xfr(txn_manager, s, query, serial, timeout, expiration):
+ pass
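+
+# Example (illustrative sketch; 192.0.2.1 is a placeholder primary): applying
+# a transfer into a zone, which acts as the transaction manager:
+#
+#     import dns.query
+#     import dns.zone
+#     z = dns.zone.Zone("example.com")
+#     dns.query.inbound_xfr("192.0.2.1", z, udp_mode=dns.query.UDPMode.TRY_FIRST)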
diff --git a/tapdown/lib/python3.11/site-packages/dns/quic/__init__.py b/tapdown/lib/python3.11/site-packages/dns/quic/__init__.py
new file mode 100644
index 0000000..7c2a699
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/quic/__init__.py
@@ -0,0 +1,78 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+from typing import Any, Dict, List, Tuple
+
+import dns._features
+import dns.asyncbackend
+
+if dns._features.have("doq"):
+ from dns._asyncbackend import NullContext
+ from dns.quic._asyncio import AsyncioQuicConnection as AsyncioQuicConnection
+ from dns.quic._asyncio import AsyncioQuicManager
+ from dns.quic._asyncio import AsyncioQuicStream as AsyncioQuicStream
+ from dns.quic._common import AsyncQuicConnection # pyright: ignore
+ from dns.quic._common import AsyncQuicManager as AsyncQuicManager
+ from dns.quic._sync import SyncQuicConnection # pyright: ignore
+ from dns.quic._sync import SyncQuicStream # pyright: ignore
+ from dns.quic._sync import SyncQuicManager as SyncQuicManager
+
+ have_quic = True
+
+ def null_factory(
+ *args, # pylint: disable=unused-argument
+ **kwargs, # pylint: disable=unused-argument
+ ):
+ return NullContext(None)
+
+ def _asyncio_manager_factory(
+ context, *args, **kwargs # pylint: disable=unused-argument
+ ):
+ return AsyncioQuicManager(*args, **kwargs)
+
+    # We have both a context factory and a manager factory because for trio we
+    # need a nursery.
+
+ _async_factories: Dict[str, Tuple[Any, Any]] = {
+ "asyncio": (null_factory, _asyncio_manager_factory)
+ }
+
+ if dns._features.have("trio"):
+ import trio
+
+ # pylint: disable=ungrouped-imports
+ from dns.quic._trio import TrioQuicConnection as TrioQuicConnection
+ from dns.quic._trio import TrioQuicManager
+ from dns.quic._trio import TrioQuicStream as TrioQuicStream
+
+ def _trio_context_factory():
+ return trio.open_nursery()
+
+ def _trio_manager_factory(context, *args, **kwargs):
+ return TrioQuicManager(context, *args, **kwargs)
+
+ _async_factories["trio"] = (_trio_context_factory, _trio_manager_factory)
+
+ def factories_for_backend(backend=None):
+ if backend is None:
+ backend = dns.asyncbackend.get_default_backend()
+ return _async_factories[backend.name()]
+
+else: # pragma: no cover
+ have_quic = False
+
+ class AsyncQuicStream: # type: ignore
+ pass
+
+ class AsyncQuicConnection: # type: ignore
+ async def make_stream(self) -> Any:
+ raise NotImplementedError
+
+ class SyncQuicStream: # type: ignore
+ pass
+
+ class SyncQuicConnection: # type: ignore
+ def make_stream(self) -> Any:
+ raise NotImplementedError
+
+
+Headers = List[Tuple[bytes, bytes]]
diff --git a/tapdown/lib/python3.11/site-packages/dns/quic/_asyncio.py b/tapdown/lib/python3.11/site-packages/dns/quic/_asyncio.py
new file mode 100644
index 0000000..0a177b6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/quic/_asyncio.py
@@ -0,0 +1,276 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import asyncio
+import socket
+import ssl
+import struct
+import time
+
+import aioquic.h3.connection # type: ignore
+import aioquic.h3.events # type: ignore
+import aioquic.quic.configuration # type: ignore
+import aioquic.quic.connection # type: ignore
+import aioquic.quic.events # type: ignore
+
+import dns.asyncbackend
+import dns.exception
+import dns.inet
+from dns.quic._common import (
+ QUIC_MAX_DATAGRAM,
+ AsyncQuicConnection,
+ AsyncQuicManager,
+ BaseQuicStream,
+ UnexpectedEOF,
+)
+
+
+class AsyncioQuicStream(BaseQuicStream):
+ def __init__(self, connection, stream_id):
+ super().__init__(connection, stream_id)
+ self._wake_up = asyncio.Condition()
+
+ async def _wait_for_wake_up(self):
+ async with self._wake_up:
+ await self._wake_up.wait()
+
+ async def wait_for(self, amount, expiration):
+ while True:
+ timeout = self._timeout_from_expiration(expiration)
+ if self._buffer.have(amount):
+ return
+ self._expecting = amount
+ try:
+ await asyncio.wait_for(self._wait_for_wake_up(), timeout)
+ except TimeoutError:
+ raise dns.exception.Timeout
+ self._expecting = 0
+
+ async def wait_for_end(self, expiration):
+ while True:
+ timeout = self._timeout_from_expiration(expiration)
+ if self._buffer.seen_end():
+ return
+ try:
+ await asyncio.wait_for(self._wait_for_wake_up(), timeout)
+ except TimeoutError:
+ raise dns.exception.Timeout
+
+ async def receive(self, timeout=None):
+ expiration = self._expiration_from_timeout(timeout)
+ if self._connection.is_h3():
+ await self.wait_for_end(expiration)
+ return self._buffer.get_all()
+ else:
+ await self.wait_for(2, expiration)
+ (size,) = struct.unpack("!H", self._buffer.get(2))
+ await self.wait_for(size, expiration)
+ return self._buffer.get(size)
+
+ async def send(self, datagram, is_end=False):
+ data = self._encapsulate(datagram)
+ await self._connection.write(self._stream_id, data, is_end)
+
+ async def _add_input(self, data, is_end):
+ if self._common_add_input(data, is_end):
+ async with self._wake_up:
+ self._wake_up.notify()
+
+ async def close(self):
+ self._close()
+
+ # Streams are async context managers
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.close()
+ async with self._wake_up:
+ self._wake_up.notify()
+ return False
+
+
+class AsyncioQuicConnection(AsyncQuicConnection):
+ def __init__(self, connection, address, port, source, source_port, manager=None):
+ super().__init__(connection, address, port, source, source_port, manager)
+ self._socket = None
+ self._handshake_complete = asyncio.Event()
+ self._socket_created = asyncio.Event()
+ self._wake_timer = asyncio.Condition()
+ self._receiver_task = None
+ self._sender_task = None
+ self._wake_pending = False
+
+ async def _receiver(self):
+ try:
+ af = dns.inet.af_for_address(self._address)
+ backend = dns.asyncbackend.get_backend("asyncio")
+ # Note that peer is a low-level address tuple, but make_socket() wants
+ # a high-level address tuple, so we convert.
+ self._socket = await backend.make_socket(
+ af, socket.SOCK_DGRAM, 0, self._source, (self._peer[0], self._peer[1])
+ )
+ self._socket_created.set()
+ async with self._socket:
+ while not self._done:
+ (datagram, address) = await self._socket.recvfrom(
+ QUIC_MAX_DATAGRAM, None
+ )
+ if address[0] != self._peer[0] or address[1] != self._peer[1]:
+ continue
+ self._connection.receive_datagram(datagram, address, time.time())
+ # Wake up the timer in case the sender is sleeping, as there may be
+ # stuff to send now.
+ await self._wakeup()
+ except Exception:
+ pass
+ finally:
+ self._done = True
+ await self._wakeup()
+ self._handshake_complete.set()
+
+ async def _wakeup(self):
+ self._wake_pending = True
+ async with self._wake_timer:
+ self._wake_timer.notify_all()
+
+ async def _wait_for_wake_timer(self):
+ async with self._wake_timer:
+ if not self._wake_pending:
+ await self._wake_timer.wait()
+ self._wake_pending = False
+
+ async def _sender(self):
+ await self._socket_created.wait()
+ while not self._done:
+ datagrams = self._connection.datagrams_to_send(time.time())
+ for datagram, address in datagrams:
+ assert address == self._peer
+ assert self._socket is not None
+ await self._socket.sendto(datagram, self._peer, None)
+ (expiration, interval) = self._get_timer_values()
+ try:
+ await asyncio.wait_for(self._wait_for_wake_timer(), interval)
+ except Exception:
+ pass
+ self._handle_timer(expiration)
+ await self._handle_events()
+
+ async def _handle_events(self):
+ count = 0
+ while True:
+ event = self._connection.next_event()
+ if event is None:
+ return
+ if isinstance(event, aioquic.quic.events.StreamDataReceived):
+ if self.is_h3():
+ assert self._h3_conn is not None
+ h3_events = self._h3_conn.handle_event(event)
+ for h3_event in h3_events:
+ if isinstance(h3_event, aioquic.h3.events.HeadersReceived):
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ if stream._headers is None:
+ stream._headers = h3_event.headers
+ elif stream._trailers is None:
+ stream._trailers = h3_event.headers
+ if h3_event.stream_ended:
+ await stream._add_input(b"", True)
+ elif isinstance(h3_event, aioquic.h3.events.DataReceived):
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ await stream._add_input(
+ h3_event.data, h3_event.stream_ended
+ )
+ else:
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ await stream._add_input(event.data, event.end_stream)
+ elif isinstance(event, aioquic.quic.events.HandshakeCompleted):
+ self._handshake_complete.set()
+ elif isinstance(event, aioquic.quic.events.ConnectionTerminated):
+ self._done = True
+ if self._receiver_task is not None:
+ self._receiver_task.cancel()
+ elif isinstance(event, aioquic.quic.events.StreamReset):
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ await stream._add_input(b"", True)
+
+ count += 1
+ if count > 10:
+ # yield
+ count = 0
+ await asyncio.sleep(0)
+
+ async def write(self, stream, data, is_end=False):
+ self._connection.send_stream_data(stream, data, is_end)
+ await self._wakeup()
+
+ def run(self):
+ if self._closed:
+ return
+ self._receiver_task = asyncio.Task(self._receiver())
+ self._sender_task = asyncio.Task(self._sender())
+
+ async def make_stream(self, timeout=None):
+ try:
+ await asyncio.wait_for(self._handshake_complete.wait(), timeout)
+ except TimeoutError:
+ raise dns.exception.Timeout
+ if self._done:
+ raise UnexpectedEOF
+ stream_id = self._connection.get_next_available_stream_id(False)
+ stream = AsyncioQuicStream(self, stream_id)
+ self._streams[stream_id] = stream
+ return stream
+
+ async def close(self):
+ if not self._closed:
+ if self._manager is not None:
+ self._manager.closed(self._peer[0], self._peer[1])
+ self._closed = True
+ self._connection.close()
+ # sender might be blocked on this, so set it
+ self._socket_created.set()
+ await self._wakeup()
+ try:
+ if self._receiver_task is not None:
+ await self._receiver_task
+ except asyncio.CancelledError:
+ pass
+ try:
+ if self._sender_task is not None:
+ await self._sender_task
+ except asyncio.CancelledError:
+ pass
+ if self._socket is not None:
+ await self._socket.close()
+
+
+class AsyncioQuicManager(AsyncQuicManager):
+ def __init__(
+ self, conf=None, verify_mode=ssl.CERT_REQUIRED, server_name=None, h3=False
+ ):
+ super().__init__(conf, verify_mode, AsyncioQuicConnection, server_name, h3)
+
+ def connect(
+ self, address, port=853, source=None, source_port=0, want_session_ticket=True
+ ):
+ (connection, start) = self._connect(
+ address, port, source, source_port, want_session_ticket
+ )
+ if start:
+ connection.run()
+ return connection
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+        # Copy the connections into a list, as closing them mutates the
+        # connections table.
+ connections = list(self._connections.values())
+ for connection in connections:
+ await connection.close()
+ return False
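+
+
+# Illustrative usage sketch (an assumption for documentation purposes, not
+# part of the original module; "10.0.0.1" is a hypothetical DoQ server):
+#
+#   async def query(wire: bytes) -> bytes:
+#       async with AsyncioQuicManager() as manager:
+#           connection = manager.connect("10.0.0.1")
+#           async with await connection.make_stream() as stream:
+#               await stream.send(wire, True)
+#               return await stream.receive()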
diff --git a/tapdown/lib/python3.11/site-packages/dns/quic/_common.py b/tapdown/lib/python3.11/site-packages/dns/quic/_common.py
new file mode 100644
index 0000000..ba9d245
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/quic/_common.py
@@ -0,0 +1,344 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import base64
+import copy
+import functools
+import socket
+import struct
+import time
+import urllib.parse
+from typing import Any
+
+import aioquic.h3.connection # type: ignore
+import aioquic.quic.configuration # type: ignore
+import aioquic.quic.connection # type: ignore
+
+import dns._tls_util
+import dns.inet
+
+QUIC_MAX_DATAGRAM = 2048
+MAX_SESSION_TICKETS = 8
+# If we hit the max sessions limit we will delete this many of the oldest connections.
+# The value must be an integer > 0 and <= MAX_SESSION_TICKETS.
+SESSIONS_TO_DELETE = MAX_SESSION_TICKETS // 4
+
+
+class UnexpectedEOF(Exception):
+ pass
+
+
+class Buffer:
+ def __init__(self):
+ self._buffer = b""
+ self._seen_end = False
+
+ def put(self, data, is_end):
+ if self._seen_end:
+ return
+ self._buffer += data
+ if is_end:
+ self._seen_end = True
+
+ def have(self, amount):
+ if len(self._buffer) >= amount:
+ return True
+ if self._seen_end:
+ raise UnexpectedEOF
+ return False
+
+ def seen_end(self):
+ return self._seen_end
+
+ def get(self, amount):
+ assert self.have(amount)
+ data = self._buffer[:amount]
+ self._buffer = self._buffer[amount:]
+ return data
+
+ def get_all(self):
+ assert self.seen_end()
+ data = self._buffer
+ self._buffer = b""
+ return data
+
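+# Illustrative behavior of Buffer (a sketch, not in the original source):
+# data accumulates until the peer signals end-of-stream, and have() raises
+# UnexpectedEOF if the stream ended before enough data arrived.
+#
+#   buf = Buffer()
+#   buf.put(b"ab", False)
+#   buf.have(2)         # True
+#   buf.put(b"", True)  # EOF
+#   buf.get(2)          # b"ab"
+#   buf.have(1)         # raises UnexpectedEOF
+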
+
+class BaseQuicStream:
+ def __init__(self, connection, stream_id):
+ self._connection = connection
+ self._stream_id = stream_id
+ self._buffer = Buffer()
+ self._expecting = 0
+ self._headers = None
+ self._trailers = None
+
+ def id(self):
+ return self._stream_id
+
+ def headers(self):
+ return self._headers
+
+ def trailers(self):
+ return self._trailers
+
+ def _expiration_from_timeout(self, timeout):
+ if timeout is not None:
+ expiration = time.time() + timeout
+ else:
+ expiration = None
+ return expiration
+
+ def _timeout_from_expiration(self, expiration):
+ if expiration is not None:
+ timeout = max(expiration - time.time(), 0.0)
+ else:
+ timeout = None
+ return timeout
+
+    # Subclasses must implement receive(), sync or async as appropriate,
+    # which returns a message or raises an exception.
+
+    # Subclasses must implement send(), sync or async as appropriate,
+    # which takes a message and an EOF indicator.
+
+ def send_h3(self, url, datagram, post=True):
+ if not self._connection.is_h3():
+ raise SyntaxError("cannot send H3 to a non-H3 connection")
+ url_parts = urllib.parse.urlparse(url)
+ path = url_parts.path.encode()
+ if post:
+ method = b"POST"
+ else:
+ method = b"GET"
+ path += b"?dns=" + base64.urlsafe_b64encode(datagram).rstrip(b"=")
+ headers = [
+ (b":method", method),
+ (b":scheme", url_parts.scheme.encode()),
+ (b":authority", url_parts.netloc.encode()),
+ (b":path", path),
+ (b"accept", b"application/dns-message"),
+ ]
+ if post:
+ headers.extend(
+ [
+ (b"content-type", b"application/dns-message"),
+ (b"content-length", str(len(datagram)).encode()),
+ ]
+ )
+ self._connection.send_headers(self._stream_id, headers, not post)
+ if post:
+ self._connection.send_data(self._stream_id, datagram, True)
+
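+    # Illustrative note (not in the original source): for a GET, send_h3()
+    # carries the DNS message in the "dns" query parameter as unpadded
+    # base64url, per RFC 8484, e.g.:
+    #
+    #   base64.urlsafe_b64encode(b"\x12\x34").rstrip(b"=")  # -> b"EjQ"
+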
+ def _encapsulate(self, datagram):
+ if self._connection.is_h3():
+ return datagram
+        length = len(datagram)
+        return struct.pack("!H", length) + datagram
+
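+    # Illustrative note (not in the original source): for plain DoQ the
+    # framing above is a two-byte big-endian length prefix, per RFC 9250:
+    #
+    #   framed = struct.pack("!H", len(msg)) + msg
+    #   (size,) = struct.unpack("!H", framed[:2])  # size == len(msg)
+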
+ def _common_add_input(self, data, is_end):
+ self._buffer.put(data, is_end)
+ try:
+ return (
+ self._expecting > 0 and self._buffer.have(self._expecting)
+            ) or self._buffer.seen_end()
+ except UnexpectedEOF:
+ return True
+
+ def _close(self):
+ self._connection.close_stream(self._stream_id)
+ self._buffer.put(b"", True) # send EOF in case we haven't seen it.
+
+
+class BaseQuicConnection:
+ def __init__(
+ self,
+ connection,
+ address,
+ port,
+ source=None,
+ source_port=0,
+ manager=None,
+ ):
+ self._done = False
+ self._connection = connection
+ self._address = address
+ self._port = port
+ self._closed = False
+ self._manager = manager
+ self._streams = {}
+ if manager is not None and manager.is_h3():
+ self._h3_conn = aioquic.h3.connection.H3Connection(connection, False)
+ else:
+ self._h3_conn = None
+ self._af = dns.inet.af_for_address(address)
+ self._peer = dns.inet.low_level_address_tuple((address, port))
+ if source is None and source_port != 0:
+ if self._af == socket.AF_INET:
+ source = "0.0.0.0"
+ elif self._af == socket.AF_INET6:
+ source = "::"
+ else:
+ raise NotImplementedError
+ if source:
+ self._source = (source, source_port)
+ else:
+ self._source = None
+
+ def is_h3(self):
+ return self._h3_conn is not None
+
+ def close_stream(self, stream_id):
+ del self._streams[stream_id]
+
+ def send_headers(self, stream_id, headers, is_end=False):
+ assert self._h3_conn is not None
+ self._h3_conn.send_headers(stream_id, headers, is_end)
+
+ def send_data(self, stream_id, data, is_end=False):
+ assert self._h3_conn is not None
+ self._h3_conn.send_data(stream_id, data, is_end)
+
+ def _get_timer_values(self, closed_is_special=True):
+ now = time.time()
+ expiration = self._connection.get_timer()
+ if expiration is None:
+ expiration = now + 3600 # arbitrary "big" value
+ interval = max(expiration - now, 0)
+ if self._closed and closed_is_special:
+ # lower sleep interval to avoid a race in the closing process
+ # which can lead to higher latency closing due to sleeping when
+ # we have events.
+ interval = min(interval, 0.05)
+ return (expiration, interval)
+
+ def _handle_timer(self, expiration):
+ now = time.time()
+ if expiration <= now:
+ self._connection.handle_timer(now)
+
+
+class AsyncQuicConnection(BaseQuicConnection):
+    async def make_stream(self, timeout: float | None = None) -> Any:
+        raise NotImplementedError  # pragma: no cover
+
+
+class BaseQuicManager:
+ def __init__(
+ self, conf, verify_mode, connection_factory, server_name=None, h3=False
+ ):
+ self._connections = {}
+ self._connection_factory = connection_factory
+ self._session_tickets = {}
+ self._tokens = {}
+ self._h3 = h3
+ if conf is None:
+ verify_path = None
+ if isinstance(verify_mode, str):
+ verify_path = verify_mode
+ verify_mode = True
+ if h3:
+ alpn_protocols = ["h3"]
+ else:
+ alpn_protocols = ["doq", "doq-i03"]
+ conf = aioquic.quic.configuration.QuicConfiguration(
+ alpn_protocols=alpn_protocols,
+ verify_mode=verify_mode,
+ server_name=server_name,
+ )
+ if verify_path is not None:
+ cafile, capath = dns._tls_util.convert_verify_to_cafile_and_capath(
+ verify_path
+ )
+ conf.load_verify_locations(cafile=cafile, capath=capath)
+ self._conf = conf
+
+ def _connect(
+ self,
+ address,
+ port=853,
+ source=None,
+ source_port=0,
+ want_session_ticket=True,
+ want_token=True,
+ ):
+ connection = self._connections.get((address, port))
+ if connection is not None:
+ return (connection, False)
+ conf = self._conf
+ if want_session_ticket:
+ try:
+ session_ticket = self._session_tickets.pop((address, port))
+ # We found a session ticket, so make a configuration that uses it.
+ conf = copy.copy(conf)
+ conf.session_ticket = session_ticket
+ except KeyError:
+ # No session ticket.
+ pass
+ # Whether or not we found a session ticket, we want a handler to save
+ # one.
+ session_ticket_handler = functools.partial(
+ self.save_session_ticket, address, port
+ )
+ else:
+ session_ticket_handler = None
+ if want_token:
+ try:
+ token = self._tokens.pop((address, port))
+ # We found a token, so make a configuration that uses it.
+ conf = copy.copy(conf)
+ conf.token = token
+ except KeyError:
+ # No token
+ pass
+            # Whether or not we found a token, we want a handler to save one.
+ token_handler = functools.partial(self.save_token, address, port)
+ else:
+ token_handler = None
+
+ qconn = aioquic.quic.connection.QuicConnection(
+ configuration=conf,
+ session_ticket_handler=session_ticket_handler,
+ token_handler=token_handler,
+ )
+ lladdress = dns.inet.low_level_address_tuple((address, port))
+ qconn.connect(lladdress, time.time())
+ connection = self._connection_factory(
+ qconn, address, port, source, source_port, self
+ )
+ self._connections[(address, port)] = connection
+ return (connection, True)
+
+ def closed(self, address, port):
+ try:
+ del self._connections[(address, port)]
+ except KeyError:
+ pass
+
+ def is_h3(self):
+ return self._h3
+
+ def save_session_ticket(self, address, port, ticket):
+ # We rely on dictionaries keys() being in insertion order here. We
+ # can't just popitem() as that would be LIFO which is the opposite of
+ # what we want.
+        count = len(self._session_tickets)
+        if count >= MAX_SESSION_TICKETS:
+ keys_to_delete = list(self._session_tickets.keys())[0:SESSIONS_TO_DELETE]
+ for key in keys_to_delete:
+ del self._session_tickets[key]
+ self._session_tickets[(address, port)] = ticket
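+
+    # FIFO eviction sketch (illustrative, not in the original source):
+    # dicts preserve insertion order, so deleting the first
+    # SESSIONS_TO_DELETE keys evicts the oldest entries, whereas
+    # popitem() would evict the newest:
+    #
+    #   d = {"a": 1, "b": 2, "c": 3}
+    #   list(d.keys())[0:2]  # ["a", "b"]  (oldest insertions)
+    #   d.popitem()          # ("c", 3)    (newest, hence unsuitable)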
+
+ def save_token(self, address, port, token):
+ # We rely on dictionaries keys() being in insertion order here. We
+ # can't just popitem() as that would be LIFO which is the opposite of
+ # what we want.
+        count = len(self._tokens)
+        if count >= MAX_SESSION_TICKETS:
+ keys_to_delete = list(self._tokens.keys())[0:SESSIONS_TO_DELETE]
+ for key in keys_to_delete:
+ del self._tokens[key]
+ self._tokens[(address, port)] = token
+
+
+class AsyncQuicManager(BaseQuicManager):
+ def connect(self, address, port=853, source=None, source_port=0):
+ raise NotImplementedError
diff --git a/tapdown/lib/python3.11/site-packages/dns/quic/_sync.py b/tapdown/lib/python3.11/site-packages/dns/quic/_sync.py
new file mode 100644
index 0000000..18f9d05
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/quic/_sync.py
@@ -0,0 +1,306 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import selectors
+import socket
+import ssl
+import struct
+import threading
+import time
+
+import aioquic.h3.connection # type: ignore
+import aioquic.h3.events # type: ignore
+import aioquic.quic.configuration # type: ignore
+import aioquic.quic.connection # type: ignore
+import aioquic.quic.events # type: ignore
+
+import dns.exception
+import dns.inet
+from dns.quic._common import (
+ QUIC_MAX_DATAGRAM,
+ BaseQuicConnection,
+ BaseQuicManager,
+ BaseQuicStream,
+ UnexpectedEOF,
+)
+
+# Function used to create a socket. Can be overridden if needed in special
+# situations.
+socket_factory = socket.socket
+
+
+class SyncQuicStream(BaseQuicStream):
+ def __init__(self, connection, stream_id):
+ super().__init__(connection, stream_id)
+ self._wake_up = threading.Condition()
+ self._lock = threading.Lock()
+
+ def wait_for(self, amount, expiration):
+ while True:
+ timeout = self._timeout_from_expiration(expiration)
+ with self._lock:
+ if self._buffer.have(amount):
+ return
+ self._expecting = amount
+ with self._wake_up:
+ if not self._wake_up.wait(timeout):
+ raise dns.exception.Timeout
+ self._expecting = 0
+
+ def wait_for_end(self, expiration):
+ while True:
+ timeout = self._timeout_from_expiration(expiration)
+ with self._lock:
+ if self._buffer.seen_end():
+ return
+ with self._wake_up:
+ if not self._wake_up.wait(timeout):
+ raise dns.exception.Timeout
+
+ def receive(self, timeout=None):
+ expiration = self._expiration_from_timeout(timeout)
+ if self._connection.is_h3():
+ self.wait_for_end(expiration)
+ with self._lock:
+ return self._buffer.get_all()
+ else:
+ self.wait_for(2, expiration)
+ with self._lock:
+ (size,) = struct.unpack("!H", self._buffer.get(2))
+ self.wait_for(size, expiration)
+ with self._lock:
+ return self._buffer.get(size)
+
+ def send(self, datagram, is_end=False):
+ data = self._encapsulate(datagram)
+ self._connection.write(self._stream_id, data, is_end)
+
+ def _add_input(self, data, is_end):
+ if self._common_add_input(data, is_end):
+ with self._wake_up:
+ self._wake_up.notify()
+
+ def close(self):
+ with self._lock:
+ self._close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ with self._wake_up:
+ self._wake_up.notify()
+ return False
+
+
+class SyncQuicConnection(BaseQuicConnection):
+ def __init__(self, connection, address, port, source, source_port, manager):
+ super().__init__(connection, address, port, source, source_port, manager)
+ self._socket = socket_factory(self._af, socket.SOCK_DGRAM, 0)
+ if self._source is not None:
+ try:
+ self._socket.bind(
+ dns.inet.low_level_address_tuple(self._source, self._af)
+ )
+ except Exception:
+ self._socket.close()
+ raise
+ self._socket.connect(self._peer)
+ (self._send_wakeup, self._receive_wakeup) = socket.socketpair()
+ self._receive_wakeup.setblocking(False)
+ self._socket.setblocking(False)
+ self._handshake_complete = threading.Event()
+ self._worker_thread = None
+ self._lock = threading.Lock()
+
+ def _read(self):
+ count = 0
+ while count < 10:
+ count += 1
+ try:
+ datagram = self._socket.recv(QUIC_MAX_DATAGRAM)
+ except BlockingIOError:
+ return
+ with self._lock:
+ self._connection.receive_datagram(datagram, self._peer, time.time())
+
+ def _drain_wakeup(self):
+ while True:
+ try:
+ self._receive_wakeup.recv(32)
+ except BlockingIOError:
+ return
+
+ def _worker(self):
+ try:
+ with selectors.DefaultSelector() as sel:
+ sel.register(self._socket, selectors.EVENT_READ, self._read)
+ sel.register(
+ self._receive_wakeup, selectors.EVENT_READ, self._drain_wakeup
+ )
+ while not self._done:
+ (expiration, interval) = self._get_timer_values(False)
+ items = sel.select(interval)
+ for key, _ in items:
+ key.data()
+ with self._lock:
+ self._handle_timer(expiration)
+ self._handle_events()
+ with self._lock:
+ datagrams = self._connection.datagrams_to_send(time.time())
+ for datagram, _ in datagrams:
+ try:
+ self._socket.send(datagram)
+ except BlockingIOError:
+ # we let QUIC handle any lossage
+ pass
+ except Exception:
+ # Eat all exceptions as we have no way to pass them back to the
+ # caller currently. It might be nice to fix this in the future.
+ pass
+ finally:
+ with self._lock:
+ self._done = True
+ self._socket.close()
+ # Ensure anyone waiting for this gets woken up.
+ self._handshake_complete.set()
+
+ def _handle_events(self):
+ while True:
+ with self._lock:
+ event = self._connection.next_event()
+ if event is None:
+ return
+ if isinstance(event, aioquic.quic.events.StreamDataReceived):
+ if self.is_h3():
+ assert self._h3_conn is not None
+ h3_events = self._h3_conn.handle_event(event)
+ for h3_event in h3_events:
+ if isinstance(h3_event, aioquic.h3.events.HeadersReceived):
+ with self._lock:
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ if stream._headers is None:
+ stream._headers = h3_event.headers
+ elif stream._trailers is None:
+ stream._trailers = h3_event.headers
+ if h3_event.stream_ended:
+ stream._add_input(b"", True)
+ elif isinstance(h3_event, aioquic.h3.events.DataReceived):
+ with self._lock:
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ stream._add_input(h3_event.data, h3_event.stream_ended)
+ else:
+ with self._lock:
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ stream._add_input(event.data, event.end_stream)
+ elif isinstance(event, aioquic.quic.events.HandshakeCompleted):
+ self._handshake_complete.set()
+ elif isinstance(event, aioquic.quic.events.ConnectionTerminated):
+ with self._lock:
+ self._done = True
+ elif isinstance(event, aioquic.quic.events.StreamReset):
+ with self._lock:
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ stream._add_input(b"", True)
+
+ def write(self, stream, data, is_end=False):
+ with self._lock:
+ self._connection.send_stream_data(stream, data, is_end)
+ self._send_wakeup.send(b"\x01")
+
+ def send_headers(self, stream_id, headers, is_end=False):
+ with self._lock:
+ super().send_headers(stream_id, headers, is_end)
+ if is_end:
+ self._send_wakeup.send(b"\x01")
+
+ def send_data(self, stream_id, data, is_end=False):
+ with self._lock:
+ super().send_data(stream_id, data, is_end)
+ if is_end:
+ self._send_wakeup.send(b"\x01")
+
+ def run(self):
+ if self._closed:
+ return
+ self._worker_thread = threading.Thread(target=self._worker)
+ self._worker_thread.start()
+
+ def make_stream(self, timeout=None):
+ if not self._handshake_complete.wait(timeout):
+ raise dns.exception.Timeout
+ with self._lock:
+ if self._done:
+ raise UnexpectedEOF
+ stream_id = self._connection.get_next_available_stream_id(False)
+ stream = SyncQuicStream(self, stream_id)
+ self._streams[stream_id] = stream
+ return stream
+
+ def close_stream(self, stream_id):
+ with self._lock:
+ super().close_stream(stream_id)
+
+ def close(self):
+ with self._lock:
+ if self._closed:
+ return
+ if self._manager is not None:
+ self._manager.closed(self._peer[0], self._peer[1])
+ self._closed = True
+ self._connection.close()
+ self._send_wakeup.send(b"\x01")
+ if self._worker_thread is not None:
+ self._worker_thread.join()
+
+
+class SyncQuicManager(BaseQuicManager):
+ def __init__(
+ self, conf=None, verify_mode=ssl.CERT_REQUIRED, server_name=None, h3=False
+ ):
+ super().__init__(conf, verify_mode, SyncQuicConnection, server_name, h3)
+ self._lock = threading.Lock()
+
+ def connect(
+ self,
+ address,
+ port=853,
+ source=None,
+ source_port=0,
+ want_session_ticket=True,
+ want_token=True,
+ ):
+ with self._lock:
+ (connection, start) = self._connect(
+ address, port, source, source_port, want_session_ticket, want_token
+ )
+ if start:
+ connection.run()
+ return connection
+
+ def closed(self, address, port):
+ with self._lock:
+ super().closed(address, port)
+
+ def save_session_ticket(self, address, port, ticket):
+ with self._lock:
+ super().save_session_ticket(address, port, ticket)
+
+ def save_token(self, address, port, token):
+ with self._lock:
+ super().save_token(address, port, token)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+        # Copy the connections into a list, as closing them mutates the
+        # connections table.
+ connections = list(self._connections.values())
+ for connection in connections:
+ connection.close()
+ return False
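+
+
+# Illustrative usage sketch (an assumption for documentation purposes, not
+# part of the original module; "10.0.0.1" is a hypothetical DoQ server):
+#
+#   def query(wire: bytes) -> bytes:
+#       with SyncQuicManager() as manager:
+#           connection = manager.connect("10.0.0.1")
+#           with connection.make_stream() as stream:
+#               stream.send(wire, True)
+#               return stream.receive()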
diff --git a/tapdown/lib/python3.11/site-packages/dns/quic/_trio.py b/tapdown/lib/python3.11/site-packages/dns/quic/_trio.py
new file mode 100644
index 0000000..046e6aa
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/quic/_trio.py
@@ -0,0 +1,250 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import socket
+import ssl
+import struct
+import time
+
+import aioquic.h3.connection # type: ignore
+import aioquic.h3.events # type: ignore
+import aioquic.quic.configuration # type: ignore
+import aioquic.quic.connection # type: ignore
+import aioquic.quic.events # type: ignore
+import trio
+
+import dns.exception
+import dns.inet
+from dns._asyncbackend import NullContext
+from dns.quic._common import (
+ QUIC_MAX_DATAGRAM,
+ AsyncQuicConnection,
+ AsyncQuicManager,
+ BaseQuicStream,
+ UnexpectedEOF,
+)
+
+
+class TrioQuicStream(BaseQuicStream):
+ def __init__(self, connection, stream_id):
+ super().__init__(connection, stream_id)
+ self._wake_up = trio.Condition()
+
+ async def wait_for(self, amount):
+ while True:
+ if self._buffer.have(amount):
+ return
+ self._expecting = amount
+ async with self._wake_up:
+ await self._wake_up.wait()
+ self._expecting = 0
+
+ async def wait_for_end(self):
+ while True:
+ if self._buffer.seen_end():
+ return
+ async with self._wake_up:
+ await self._wake_up.wait()
+
+ async def receive(self, timeout=None):
+ if timeout is None:
+ context = NullContext(None)
+ else:
+ context = trio.move_on_after(timeout)
+ with context:
+ if self._connection.is_h3():
+ await self.wait_for_end()
+ return self._buffer.get_all()
+ else:
+ await self.wait_for(2)
+ (size,) = struct.unpack("!H", self._buffer.get(2))
+ await self.wait_for(size)
+ return self._buffer.get(size)
+ raise dns.exception.Timeout
+
+ async def send(self, datagram, is_end=False):
+ data = self._encapsulate(datagram)
+ await self._connection.write(self._stream_id, data, is_end)
+
+ async def _add_input(self, data, is_end):
+ if self._common_add_input(data, is_end):
+ async with self._wake_up:
+ self._wake_up.notify()
+
+ async def close(self):
+ self._close()
+
+ # Streams are async context managers
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+ await self.close()
+ async with self._wake_up:
+ self._wake_up.notify()
+ return False
+
+
+class TrioQuicConnection(AsyncQuicConnection):
+ def __init__(self, connection, address, port, source, source_port, manager=None):
+ super().__init__(connection, address, port, source, source_port, manager)
+ self._socket = trio.socket.socket(self._af, socket.SOCK_DGRAM, 0)
+ self._handshake_complete = trio.Event()
+ self._run_done = trio.Event()
+ self._worker_scope = None
+ self._send_pending = False
+
+ async def _worker(self):
+ try:
+ if self._source:
+ await self._socket.bind(
+ dns.inet.low_level_address_tuple(self._source, self._af)
+ )
+ await self._socket.connect(self._peer)
+ while not self._done:
+ (expiration, interval) = self._get_timer_values(False)
+ if self._send_pending:
+ # Do not block forever if sends are pending. Even though we
+ # have a wake-up mechanism if we've already started the blocking
+ # read, the possibility of context switching in send means that
+ # more writes can happen while we have no wake up context, so
+ # we need self._send_pending to avoid (effectively) a "lost wakeup"
+ # race.
+ interval = 0.0
+ with trio.CancelScope(
+ deadline=trio.current_time() + interval # pyright: ignore
+ ) as self._worker_scope:
+ datagram = await self._socket.recv(QUIC_MAX_DATAGRAM)
+ self._connection.receive_datagram(datagram, self._peer, time.time())
+ self._worker_scope = None
+ self._handle_timer(expiration)
+ await self._handle_events()
+ # We clear this now, before sending anything, as sending can cause
+ # context switches that do more sends. We want to know if that
+ # happens so we don't block a long time on the recv() above.
+ self._send_pending = False
+ datagrams = self._connection.datagrams_to_send(time.time())
+ for datagram, _ in datagrams:
+ await self._socket.send(datagram)
+ finally:
+ self._done = True
+ self._socket.close()
+ self._handshake_complete.set()
+
+ async def _handle_events(self):
+ count = 0
+ while True:
+ event = self._connection.next_event()
+ if event is None:
+ return
+ if isinstance(event, aioquic.quic.events.StreamDataReceived):
+ if self.is_h3():
+ assert self._h3_conn is not None
+ h3_events = self._h3_conn.handle_event(event)
+ for h3_event in h3_events:
+ if isinstance(h3_event, aioquic.h3.events.HeadersReceived):
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ if stream._headers is None:
+ stream._headers = h3_event.headers
+ elif stream._trailers is None:
+ stream._trailers = h3_event.headers
+ if h3_event.stream_ended:
+ await stream._add_input(b"", True)
+ elif isinstance(h3_event, aioquic.h3.events.DataReceived):
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ await stream._add_input(
+ h3_event.data, h3_event.stream_ended
+ )
+ else:
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ await stream._add_input(event.data, event.end_stream)
+ elif isinstance(event, aioquic.quic.events.HandshakeCompleted):
+ self._handshake_complete.set()
+ elif isinstance(event, aioquic.quic.events.ConnectionTerminated):
+ self._done = True
+ self._socket.close()
+ elif isinstance(event, aioquic.quic.events.StreamReset):
+ stream = self._streams.get(event.stream_id)
+ if stream:
+ await stream._add_input(b"", True)
+ count += 1
+ if count > 10:
+ # yield
+ count = 0
+ await trio.sleep(0)
+
+ async def write(self, stream, data, is_end=False):
+ self._connection.send_stream_data(stream, data, is_end)
+ self._send_pending = True
+ if self._worker_scope is not None:
+ self._worker_scope.cancel()
+
+ async def run(self):
+ if self._closed:
+ return
+ async with trio.open_nursery() as nursery:
+ nursery.start_soon(self._worker)
+ self._run_done.set()
+
+ async def make_stream(self, timeout=None):
+ if timeout is None:
+ context = NullContext(None)
+ else:
+ context = trio.move_on_after(timeout)
+ with context:
+ await self._handshake_complete.wait()
+ if self._done:
+ raise UnexpectedEOF
+ stream_id = self._connection.get_next_available_stream_id(False)
+ stream = TrioQuicStream(self, stream_id)
+ self._streams[stream_id] = stream
+ return stream
+ raise dns.exception.Timeout
+
+ async def close(self):
+ if not self._closed:
+ if self._manager is not None:
+ self._manager.closed(self._peer[0], self._peer[1])
+ self._closed = True
+ self._connection.close()
+ self._send_pending = True
+ if self._worker_scope is not None:
+ self._worker_scope.cancel()
+ await self._run_done.wait()
+
+
+class TrioQuicManager(AsyncQuicManager):
+ def __init__(
+ self,
+ nursery,
+ conf=None,
+ verify_mode=ssl.CERT_REQUIRED,
+ server_name=None,
+ h3=False,
+ ):
+ super().__init__(conf, verify_mode, TrioQuicConnection, server_name, h3)
+ self._nursery = nursery
+
+ def connect(
+ self, address, port=853, source=None, source_port=0, want_session_ticket=True
+ ):
+ (connection, start) = self._connect(
+ address, port, source, source_port, want_session_ticket
+ )
+ if start:
+ self._nursery.start_soon(connection.run)
+ return connection
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
+        # Copy the connections into a list, as closing them mutates the
+        # connections table.
+ connections = list(self._connections.values())
+ for connection in connections:
+ await connection.close()
+ return False
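+
+
+# Illustrative usage sketch (an assumption for documentation purposes, not
+# part of the original module; "10.0.0.1" is a hypothetical DoQ server):
+#
+#   async def query(wire: bytes) -> bytes:
+#       async with trio.open_nursery() as nursery:
+#           async with TrioQuicManager(nursery) as manager:
+#               connection = manager.connect("10.0.0.1")
+#               async with await connection.make_stream() as stream:
+#                   await stream.send(wire, True)
+#                   return await stream.receive()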
diff --git a/tapdown/lib/python3.11/site-packages/dns/rcode.py b/tapdown/lib/python3.11/site-packages/dns/rcode.py
new file mode 100644
index 0000000..7bb8467
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rcode.py
@@ -0,0 +1,168 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Result Codes."""
+
+from typing import Tuple, Type
+
+import dns.enum
+import dns.exception
+
+
+class Rcode(dns.enum.IntEnum):
+ #: No error
+ NOERROR = 0
+ #: Format error
+ FORMERR = 1
+ #: Server failure
+ SERVFAIL = 2
+    #: Name does not exist ("Name Error" in RFC 1035 terminology).
+ NXDOMAIN = 3
+ #: Not implemented
+ NOTIMP = 4
+ #: Refused
+ REFUSED = 5
+ #: Name exists.
+ YXDOMAIN = 6
+ #: RRset exists.
+ YXRRSET = 7
+ #: RRset does not exist.
+ NXRRSET = 8
+ #: Not authoritative.
+ NOTAUTH = 9
+ #: Name not in zone.
+ NOTZONE = 10
+ #: DSO-TYPE Not Implemented
+ DSOTYPENI = 11
+ #: Bad EDNS version.
+ BADVERS = 16
+ #: TSIG Signature Failure
+ BADSIG = 16
+ #: Key not recognized.
+ BADKEY = 17
+ #: Signature out of time window.
+ BADTIME = 18
+ #: Bad TKEY Mode.
+ BADMODE = 19
+ #: Duplicate key name.
+ BADNAME = 20
+ #: Algorithm not supported.
+ BADALG = 21
+ #: Bad Truncation
+ BADTRUNC = 22
+ #: Bad/missing Server Cookie
+ BADCOOKIE = 23
+
+ @classmethod
+ def _maximum(cls):
+ return 4095
+
+ @classmethod
+ def _unknown_exception_class(cls) -> Type[Exception]:
+ return UnknownRcode
+
+
+class UnknownRcode(dns.exception.DNSException):
+ """A DNS rcode is unknown."""
+
+
+def from_text(text: str) -> Rcode:
+ """Convert text into an rcode.
+
+ *text*, a ``str``, the textual rcode or an integer in textual form.
+
+ Raises ``dns.rcode.UnknownRcode`` if the rcode mnemonic is unknown.
+
+ Returns a ``dns.rcode.Rcode``.
+ """
+
+ return Rcode.from_text(text)
+
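+# Illustrative usage (a sketch, not part of the original module):
+#
+#   >>> from_text("NXDOMAIN")
+#   <Rcode.NXDOMAIN: 3>
+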
+
+def from_flags(flags: int, ednsflags: int) -> Rcode:
+ """Return the rcode value encoded by flags and ednsflags.
+
+ *flags*, an ``int``, the DNS flags field.
+
+ *ednsflags*, an ``int``, the EDNS flags field.
+
+    Raises ``ValueError`` if rcode is < 0 or > 4095.
+
+ Returns a ``dns.rcode.Rcode``.
+ """
+
+ value = (flags & 0x000F) | ((ednsflags >> 20) & 0xFF0)
+ return Rcode.make(value)
+
+
+def to_flags(value: Rcode) -> Tuple[int, int]:
+ """Return a (flags, ednsflags) tuple which encodes the rcode.
+
+ *value*, a ``dns.rcode.Rcode``, the rcode.
+
+ Raises ``ValueError`` if rcode is < 0 or > 4095.
+
+ Returns an ``(int, int)`` tuple.
+ """
+
+ if value < 0 or value > 4095:
+ raise ValueError("rcode must be >= 0 and <= 4095")
+ v = value & 0xF
+ ev = (value & 0xFF0) << 20
+ return (v, ev)
+
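+# Worked example (illustrative, not part of the original module): the low
+# 4 bits of an rcode travel in the DNS flags field and the upper 8 bits in
+# the EDNS flags field, so for BADVERS (16):
+#
+#   >>> to_flags(Rcode.BADVERS)
+#   (0, 16777216)
+#   >>> from_flags(0, 16777216)
+#   <Rcode.BADVERS: 16>
+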
+
+def to_text(value: Rcode, tsig: bool = False) -> str:
+ """Convert rcode into text.
+
+ *value*, a ``dns.rcode.Rcode``, the rcode.
+
+ Raises ``ValueError`` if rcode is < 0 or > 4095.
+
+ Returns a ``str``.
+ """
+
+ if tsig and value == Rcode.BADVERS:
+ return "BADSIG"
+ return Rcode.to_text(value)
+
+
+### BEGIN generated Rcode constants
+
+NOERROR = Rcode.NOERROR
+FORMERR = Rcode.FORMERR
+SERVFAIL = Rcode.SERVFAIL
+NXDOMAIN = Rcode.NXDOMAIN
+NOTIMP = Rcode.NOTIMP
+REFUSED = Rcode.REFUSED
+YXDOMAIN = Rcode.YXDOMAIN
+YXRRSET = Rcode.YXRRSET
+NXRRSET = Rcode.NXRRSET
+NOTAUTH = Rcode.NOTAUTH
+NOTZONE = Rcode.NOTZONE
+DSOTYPENI = Rcode.DSOTYPENI
+BADVERS = Rcode.BADVERS
+BADSIG = Rcode.BADSIG
+BADKEY = Rcode.BADKEY
+BADTIME = Rcode.BADTIME
+BADMODE = Rcode.BADMODE
+BADNAME = Rcode.BADNAME
+BADALG = Rcode.BADALG
+BADTRUNC = Rcode.BADTRUNC
+BADCOOKIE = Rcode.BADCOOKIE
+
+### END generated Rcode constants
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdata.py b/tapdown/lib/python3.11/site-packages/dns/rdata.py
new file mode 100644
index 0000000..c4522e6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdata.py
@@ -0,0 +1,935 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata."""
+
+import base64
+import binascii
+import inspect
+import io
+import ipaddress
+import itertools
+import random
+from importlib import import_module
+from typing import Any, Dict, Tuple
+
+import dns.exception
+import dns.immutable
+import dns.ipv4
+import dns.ipv6
+import dns.name
+import dns.rdataclass
+import dns.rdatatype
+import dns.tokenizer
+import dns.ttl
+import dns.wire
+
+_chunksize = 32
+
+# We currently allow comparisons for rdata with relative names for backwards
+# compatibility, but in the future we will not, as these kinds of comparisons
+# can lead to subtle bugs if code is not carefully written.
+#
+# This switch allows the future behavior to be turned on so code can be
+# tested with it.
+_allow_relative_comparisons = True
+
+
+class NoRelativeRdataOrdering(dns.exception.DNSException):
+ """An attempt was made to do an ordered comparison of one or more
+ rdata with relative names. The only reliable way of sorting rdata
+ is to use non-relativized rdata.
+
+ """
+
+
+def _wordbreak(data, chunksize=_chunksize, separator=b" "):
+    """Break a binary string into chunks of chunksize characters joined by
+    a separator (a space by default).
+ """
+
+ if not chunksize:
+ return data.decode()
+ return separator.join(
+ [data[i : i + chunksize] for i in range(0, len(data), chunksize)]
+ ).decode()
+
+
+# pylint: disable=unused-argument
+
+
+def _hexify(data, chunksize=_chunksize, separator=b" ", **kw):
+ """Convert a binary string into its hex encoding, broken up into chunks
+ of chunksize characters separated by a separator.
+ """
+
+ return _wordbreak(binascii.hexlify(data), chunksize, separator)
+
+
+def _base64ify(data, chunksize=_chunksize, separator=b" ", **kw):
+ """Convert a binary string into its base64 encoding, broken up into chunks
+ of chunksize characters separated by a separator.
+ """
+
+ return _wordbreak(base64.b64encode(data), chunksize, separator)
+
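+# Illustrative behavior (not in the original source):
+#
+#   >>> _hexify(b"\xde\xad\xbe\xef", chunksize=4)
+#   'dead beef'
+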
+
+# pylint: enable=unused-argument
+
+__escaped = b'"\\'
+
+
+def _escapify(qstring):
+ """Escape the characters in a quoted string which need it."""
+
+ if isinstance(qstring, str):
+ qstring = qstring.encode()
+ if not isinstance(qstring, bytearray):
+ qstring = bytearray(qstring)
+
+ text = ""
+ for c in qstring:
+ if c in __escaped:
+ text += "\\" + chr(c)
+ elif c >= 0x20 and c < 0x7F:
+ text += chr(c)
+ else:
+ text += f"\\{c:03d}"
+ return text
+
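+# Illustrative behavior (not in the original source): printable ASCII
+# passes through, quotes and backslashes gain a backslash escape, and
+# everything else becomes a \DDD decimal escape:
+#
+#   >>> _escapify(b'a"\xff')
+#   'a\\"\\255'
+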
+
+def _truncate_bitmap(what):
+    """Determine the index of the greatest byte that isn't zero, and
+    return the bitmap truncated to end at that byte.
+ """
+
+ for i in range(len(what) - 1, -1, -1):
+ if what[i] != 0:
+ return what[0 : i + 1]
+ return what[0:1]
+
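+# Illustrative behavior (not in the original source):
+#
+#   >>> _truncate_bitmap(b"\x01\x00\x02\x00\x00")
+#   b'\x01\x00\x02'
+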
+
+# So we don't have to edit all the rdata classes...
+_constify = dns.immutable.constify
+
+
+@dns.immutable.immutable
+class Rdata:
+ """Base class for all DNS rdata types."""
+
+ __slots__ = ["rdclass", "rdtype", "rdcomment"]
+
+ def __init__(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ ) -> None:
+ """Initialize an rdata.
+
+ *rdclass*, an ``int`` is the rdataclass of the Rdata.
+
+ *rdtype*, an ``int`` is the rdatatype of the Rdata.
+ """
+
+ self.rdclass = self._as_rdataclass(rdclass)
+ self.rdtype = self._as_rdatatype(rdtype)
+ self.rdcomment = None
+
+ def _get_all_slots(self):
+ return itertools.chain.from_iterable(
+ getattr(cls, "__slots__", []) for cls in self.__class__.__mro__
+ )
+
+ def __getstate__(self):
+ # We used to try to do a tuple of all slots here, but it
+ # doesn't work as self._all_slots isn't available at
+ # __setstate__() time. Before that we tried to store a tuple
+ # of __slots__, but that didn't work as it didn't store the
+ # slots defined by ancestors. This older way didn't fail
+ # outright, but ended up with partially broken objects, e.g.
+ # if you unpickled an A RR it wouldn't have rdclass and rdtype
+ # attributes, and would compare badly.
+ state = {}
+ for slot in self._get_all_slots():
+ state[slot] = getattr(self, slot)
+ return state
+
+ def __setstate__(self, state):
+ for slot, val in state.items():
+ object.__setattr__(self, slot, val)
+ if not hasattr(self, "rdcomment"):
+ # Pickled rdata from 2.0.x might not have a rdcomment, so add
+ # it if needed.
+ object.__setattr__(self, "rdcomment", None)
+
+ def covers(self) -> dns.rdatatype.RdataType:
+ """Return the type a Rdata covers.
+
+ DNS SIG/RRSIG rdatas apply to a specific type; this type is
+ returned by the covers() function. If the rdata type is not
+ SIG or RRSIG, dns.rdatatype.NONE is returned. This is useful when
+ creating rdatasets, allowing the rdataset to contain only RRSIGs
+ of a particular type, e.g. RRSIG(NS).
+
+ Returns a ``dns.rdatatype.RdataType``.
+ """
+
+ return dns.rdatatype.NONE
+
+ def extended_rdatatype(self) -> int:
+ """Return a 32-bit type value, the least significant 16 bits of
+ which are the ordinary DNS type, and the upper 16 bits of which are
+ the "covered" type, if any.
+
+ Returns an ``int``.
+ """
+
+ return self.covers() << 16 | self.rdtype
+
+ def to_text(
+ self,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ **kw: Dict[str, Any],
+ ) -> str:
+ """Convert an rdata to text format.
+
+ Returns a ``str``.
+ """
+
+ raise NotImplementedError # pragma: no cover
+
+ def _to_wire(
+ self,
+ file: Any,
+ compress: dns.name.CompressType | None = None,
+ origin: dns.name.Name | None = None,
+ canonicalize: bool = False,
+ ) -> None:
+ raise NotImplementedError # pragma: no cover
+
+ def to_wire(
+ self,
+ file: Any | None = None,
+ compress: dns.name.CompressType | None = None,
+ origin: dns.name.Name | None = None,
+ canonicalize: bool = False,
+ ) -> bytes | None:
+ """Convert an rdata to wire format.
+
+ Returns a ``bytes`` if no output file was specified, or ``None`` otherwise.
+ """
+
+ if file:
+ # We call _to_wire() and then return None explicitly instead of
+            # just returning the None from _to_wire(), as mypy's func-returns-value
+ # unhelpfully errors out with "error: "_to_wire" of "Rdata" does not return
+ # a value (it only ever returns None)"
+ self._to_wire(file, compress, origin, canonicalize)
+ return None
+ else:
+ f = io.BytesIO()
+ self._to_wire(f, compress, origin, canonicalize)
+ return f.getvalue()
+
+ def to_generic(self, origin: dns.name.Name | None = None) -> "GenericRdata":
+ """Creates a dns.rdata.GenericRdata equivalent of this rdata.
+
+ Returns a ``dns.rdata.GenericRdata``.
+ """
+ wire = self.to_wire(origin=origin)
+ assert wire is not None # for type checkers
+ return GenericRdata(self.rdclass, self.rdtype, wire)
+
+ def to_digestable(self, origin: dns.name.Name | None = None) -> bytes:
+ """Convert rdata to a format suitable for digesting in hashes. This
+ is also the DNSSEC canonical form.
+
+ Returns a ``bytes``.
+ """
+ wire = self.to_wire(origin=origin, canonicalize=True)
+ assert wire is not None # for mypy
+ return wire
+
+ def __repr__(self):
+ covers = self.covers()
+ if covers == dns.rdatatype.NONE:
+ ctext = ""
+ else:
+ ctext = "(" + dns.rdatatype.to_text(covers) + ")"
+        return (
+            "<DNS "
+            + dns.rdataclass.to_text(self.rdclass)
+            + " "
+            + dns.rdatatype.to_text(self.rdtype)
+            + ctext
+            + " rdata: "
+            + str(self)
+            + ">"
+        )
+
+ def __str__(self):
+ return self.to_text()
+
+ def _cmp(self, other):
+ """Compare an rdata with another rdata of the same rdtype and
+ rdclass.
+
+ For rdata with only absolute names:
+ Return < 0 if self < other in the DNSSEC ordering, 0 if self
+ == other, and > 0 if self > other.
+        For rdata with at least one relative name:
+ The rdata sorts before any rdata with only absolute names.
+ When compared with another relative rdata, all names are
+ made absolute as if they were relative to the root, as the
+ proper origin is not available. While this creates a stable
+ ordering, it is NOT guaranteed to be the DNSSEC ordering.
+ In the future, all ordering comparisons for rdata with
+ relative names will be disallowed.
+ """
+ # the next two lines are for type checkers, so they are bound
+ our = b""
+ their = b""
+ try:
+ our = self.to_digestable()
+ our_relative = False
+ except dns.name.NeedAbsoluteNameOrOrigin:
+ if _allow_relative_comparisons:
+ our = self.to_digestable(dns.name.root)
+ our_relative = True
+ try:
+ their = other.to_digestable()
+ their_relative = False
+ except dns.name.NeedAbsoluteNameOrOrigin:
+ if _allow_relative_comparisons:
+ their = other.to_digestable(dns.name.root)
+ their_relative = True
+ if _allow_relative_comparisons:
+ if our_relative != their_relative:
+ # For the purpose of comparison, all rdata with at least one
+ # relative name is less than an rdata with only absolute names.
+ if our_relative:
+ return -1
+ else:
+ return 1
+ elif our_relative or their_relative:
+ raise NoRelativeRdataOrdering
+ if our == their:
+ return 0
+ elif our > their:
+ return 1
+ else:
+ return -1
+
+ def __eq__(self, other):
+ if not isinstance(other, Rdata):
+ return False
+ if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+ return False
+ our_relative = False
+ their_relative = False
+ try:
+ our = self.to_digestable()
+ except dns.name.NeedAbsoluteNameOrOrigin:
+ our = self.to_digestable(dns.name.root)
+ our_relative = True
+ try:
+ their = other.to_digestable()
+ except dns.name.NeedAbsoluteNameOrOrigin:
+ their = other.to_digestable(dns.name.root)
+ their_relative = True
+ if our_relative != their_relative:
+ return False
+ return our == their
+
+ def __ne__(self, other):
+ if not isinstance(other, Rdata):
+ return True
+ if self.rdclass != other.rdclass or self.rdtype != other.rdtype:
+ return True
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ if (
+ not isinstance(other, Rdata)
+ or self.rdclass != other.rdclass
+ or self.rdtype != other.rdtype
+ ):
+ return NotImplemented
+ return self._cmp(other) < 0
+
+ def __le__(self, other):
+ if (
+ not isinstance(other, Rdata)
+ or self.rdclass != other.rdclass
+ or self.rdtype != other.rdtype
+ ):
+ return NotImplemented
+ return self._cmp(other) <= 0
+
+ def __ge__(self, other):
+ if (
+ not isinstance(other, Rdata)
+ or self.rdclass != other.rdclass
+ or self.rdtype != other.rdtype
+ ):
+ return NotImplemented
+ return self._cmp(other) >= 0
+
+ def __gt__(self, other):
+ if (
+ not isinstance(other, Rdata)
+ or self.rdclass != other.rdclass
+ or self.rdtype != other.rdtype
+ ):
+ return NotImplemented
+ return self._cmp(other) > 0
+
+ def __hash__(self):
+ return hash(self.to_digestable(dns.name.root))
+
+ @classmethod
+ def from_text(
+ cls,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ tok: dns.tokenizer.Tokenizer,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ relativize_to: dns.name.Name | None = None,
+ ) -> "Rdata":
+ raise NotImplementedError # pragma: no cover
+
+ @classmethod
+ def from_wire_parser(
+ cls,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ parser: dns.wire.Parser,
+ origin: dns.name.Name | None = None,
+ ) -> "Rdata":
+ raise NotImplementedError # pragma: no cover
+
+ def replace(self, **kwargs: Any) -> "Rdata":
+ """
+ Create a new Rdata instance based on the instance replace was
+ invoked on. It is possible to pass different parameters to
+ override the corresponding properties of the base Rdata.
+
+ Any field specific to the Rdata type can be replaced, but the
+ *rdtype* and *rdclass* fields cannot.
+
+ Returns an instance of the same Rdata subclass as *self*.
+ """
+
+ # Get the constructor parameters.
+ parameters = inspect.signature(self.__init__).parameters # type: ignore
+
+ # Ensure that all of the arguments correspond to valid fields.
+ # Don't allow rdclass or rdtype to be changed, though.
+ for key in kwargs:
+ if key == "rdcomment":
+ continue
+ if key not in parameters:
+ raise AttributeError(
+ f"'{self.__class__.__name__}' object has no attribute '{key}'"
+ )
+ if key in ("rdclass", "rdtype"):
+ raise AttributeError(
+ f"Cannot overwrite '{self.__class__.__name__}' attribute '{key}'"
+ )
+
+ # Construct the parameter list. For each field, use the value in
+ # kwargs if present, and the current value otherwise.
+ args = (kwargs.get(key, getattr(self, key)) for key in parameters)
+
+ # Create, validate, and return the new object.
+ rd = self.__class__(*args)
+ # The comment is not set in the constructor, so give it special
+ # handling.
+ rdcomment = kwargs.get("rdcomment", self.rdcomment)
+ if rdcomment is not None:
+ object.__setattr__(rd, "rdcomment", rdcomment)
+ return rd
+
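+    # Illustrative use of replace() (a sketch with hypothetical values, not
+    # from the original source), e.g. for an IN/A rdata:
+    #
+    #   >>> rd = dns.rdata.from_text("IN", "A", "10.0.0.1")
+    #   >>> rd.replace(address="10.0.0.2").to_text()
+    #   '10.0.0.2'
+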
+ # Type checking and conversion helpers. These are class methods as
+ # they don't touch object state and may be useful to others.
+
+ @classmethod
+ def _as_rdataclass(cls, value):
+ return dns.rdataclass.RdataClass.make(value)
+
+ @classmethod
+ def _as_rdatatype(cls, value):
+ return dns.rdatatype.RdataType.make(value)
+
+ @classmethod
+ def _as_bytes(
+ cls,
+ value: Any,
+ encode: bool = False,
+ max_length: int | None = None,
+ empty_ok: bool = True,
+ ) -> bytes:
+ if encode and isinstance(value, str):
+ bvalue = value.encode()
+ elif isinstance(value, bytearray):
+ bvalue = bytes(value)
+ elif isinstance(value, bytes):
+ bvalue = value
+ else:
+ raise ValueError("not bytes")
+ if max_length is not None and len(bvalue) > max_length:
+ raise ValueError("too long")
+ if not empty_ok and len(bvalue) == 0:
+ raise ValueError("empty bytes not allowed")
+ return bvalue
+
+ @classmethod
+ def _as_name(cls, value):
+ # Note that proper name conversion (e.g. with origin and IDNA
+ # awareness) is expected to be done via from_text. This is just
+ # a simple thing for people invoking the constructor directly.
+ if isinstance(value, str):
+ return dns.name.from_text(value)
+ elif not isinstance(value, dns.name.Name):
+ raise ValueError("not a name")
+ return value
+
+ @classmethod
+ def _as_uint8(cls, value):
+ if not isinstance(value, int):
+ raise ValueError("not an integer")
+ if value < 0 or value > 255:
+ raise ValueError("not a uint8")
+ return value
+
+ @classmethod
+ def _as_uint16(cls, value):
+ if not isinstance(value, int):
+ raise ValueError("not an integer")
+ if value < 0 or value > 65535:
+ raise ValueError("not a uint16")
+ return value
+
+ @classmethod
+ def _as_uint32(cls, value):
+ if not isinstance(value, int):
+ raise ValueError("not an integer")
+ if value < 0 or value > 4294967295:
+ raise ValueError("not a uint32")
+ return value
+
+ @classmethod
+ def _as_uint48(cls, value):
+ if not isinstance(value, int):
+ raise ValueError("not an integer")
+ if value < 0 or value > 281474976710655:
+ raise ValueError("not a uint48")
+ return value
+
+ @classmethod
+ def _as_int(cls, value, low=None, high=None):
+ if not isinstance(value, int):
+ raise ValueError("not an integer")
+ if low is not None and value < low:
+ raise ValueError("value too small")
+ if high is not None and value > high:
+ raise ValueError("value too large")
+ return value
+
+ @classmethod
+ def _as_ipv4_address(cls, value):
+ if isinstance(value, str):
+ return dns.ipv4.canonicalize(value)
+ elif isinstance(value, bytes):
+ return dns.ipv4.inet_ntoa(value)
+ elif isinstance(value, ipaddress.IPv4Address):
+ return dns.ipv4.inet_ntoa(value.packed)
+ else:
+ raise ValueError("not an IPv4 address")
+
+ @classmethod
+ def _as_ipv6_address(cls, value):
+ if isinstance(value, str):
+ return dns.ipv6.canonicalize(value)
+ elif isinstance(value, bytes):
+ return dns.ipv6.inet_ntoa(value)
+ elif isinstance(value, ipaddress.IPv6Address):
+ return dns.ipv6.inet_ntoa(value.packed)
+ else:
+ raise ValueError("not an IPv6 address")
+
+ @classmethod
+ def _as_bool(cls, value):
+ if isinstance(value, bool):
+ return value
+ else:
+ raise ValueError("not a boolean")
+
+ @classmethod
+ def _as_ttl(cls, value):
+ if isinstance(value, int):
+ return cls._as_int(value, 0, dns.ttl.MAX_TTL)
+ elif isinstance(value, str):
+ return dns.ttl.from_text(value)
+ else:
+ raise ValueError("not a TTL")
+
+ @classmethod
+ def _as_tuple(cls, value, as_value):
+ try:
+ # For user convenience, if value is a singleton of the list
+ # element type, wrap it in a tuple.
+ return (as_value(value),)
+ except Exception:
+ # Otherwise, check each element of the iterable *value*
+ # against *as_value*.
+ return tuple(as_value(v) for v in value)
+
+ # Processing order
+
+ @classmethod
+ def _processing_order(cls, iterable):
+ items = list(iterable)
+ random.shuffle(items)
+ return items
+
+
+@dns.immutable.immutable
+class GenericRdata(Rdata):
+ """Generic Rdata Class
+
+ This class is used for rdata types for which we have no better
+ implementation. It implements the DNS "unknown RRs" scheme.
+ """
+
+ __slots__ = ["data"]
+
+ def __init__(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ data: bytes,
+ ) -> None:
+ super().__init__(rdclass, rdtype)
+ self.data = data
+
+ def to_text(
+ self,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ **kw: Dict[str, Any],
+ ) -> str:
+ return rf"\# {len(self.data)} " + _hexify(self.data, **kw) # pyright: ignore
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ token = tok.get()
+ if not token.is_identifier() or token.value != r"\#":
+ raise dns.exception.SyntaxError(r"generic rdata does not start with \#")
+ length = tok.get_int()
+ hex = tok.concatenate_remaining_identifiers(True).encode()
+ data = binascii.unhexlify(hex)
+ if len(data) != length:
+ raise dns.exception.SyntaxError("generic rdata hex data has wrong length")
+ return cls(rdclass, rdtype, data)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(self.data)
+
+ def to_generic(self, origin: dns.name.Name | None = None) -> "GenericRdata":
+ return self
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ return cls(rdclass, rdtype, parser.get_remaining())
+
+
+_rdata_classes: Dict[Tuple[dns.rdataclass.RdataClass, dns.rdatatype.RdataType], Any] = (
+ {}
+)
+_module_prefix = "dns.rdtypes"
+_dynamic_load_allowed = True
+
+
+def get_rdata_class(rdclass, rdtype, use_generic=True):
+ cls = _rdata_classes.get((rdclass, rdtype))
+ if not cls:
+ cls = _rdata_classes.get((dns.rdataclass.ANY, rdtype))
+ if not cls and _dynamic_load_allowed:
+ rdclass_text = dns.rdataclass.to_text(rdclass)
+ rdtype_text = dns.rdatatype.to_text(rdtype)
+ rdtype_text = rdtype_text.replace("-", "_")
+ try:
+ mod = import_module(
+ ".".join([_module_prefix, rdclass_text, rdtype_text])
+ )
+ cls = getattr(mod, rdtype_text)
+ _rdata_classes[(rdclass, rdtype)] = cls
+ except ImportError:
+ try:
+ mod = import_module(".".join([_module_prefix, "ANY", rdtype_text]))
+ cls = getattr(mod, rdtype_text)
+ _rdata_classes[(dns.rdataclass.ANY, rdtype)] = cls
+ _rdata_classes[(rdclass, rdtype)] = cls
+ except ImportError:
+ pass
+ if not cls and use_generic:
+ cls = GenericRdata
+ _rdata_classes[(rdclass, rdtype)] = cls
+ return cls
+
+
+def load_all_types(disable_dynamic_load=True):
+ """Load all rdata types for which dnspython has a non-generic implementation.
+
+ Normally dnspython loads DNS rdatatype implementations on demand, but in some
+ specialized cases loading all types at an application-controlled time is preferred.
+
+ If *disable_dynamic_load*, a ``bool``, is ``True`` then dnspython will not attempt
+ to use its dynamic loading mechanism if an unknown type is subsequently encountered,
+ and will simply use the ``GenericRdata`` class.
+ """
+ # Load class IN and ANY types.
+ for rdtype in dns.rdatatype.RdataType:
+ get_rdata_class(dns.rdataclass.IN, rdtype, False)
+ # Load the one non-ANY implementation we have in CH. Everything
+ # else in CH is an ANY type, and we'll discover those on demand but won't
+ # have to import anything.
+ get_rdata_class(dns.rdataclass.CH, dns.rdatatype.A, False)
+ if disable_dynamic_load:
+ # Now disable dynamic loading so any subsequent unknown type immediately becomes
+ # GenericRdata without a load attempt.
+ global _dynamic_load_allowed
+ _dynamic_load_allowed = False
+
+
+def from_text(
+ rdclass: dns.rdataclass.RdataClass | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ tok: dns.tokenizer.Tokenizer | str,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ relativize_to: dns.name.Name | None = None,
+ idna_codec: dns.name.IDNACodec | None = None,
+) -> Rdata:
+ """Build an rdata object from text format.
+
+ This function attempts to dynamically load a class which
+ implements the specified rdata class and type. If there is no
+ class-and-type-specific implementation, the GenericRdata class
+ is used.
+
+ Once a class is chosen, its from_text() class method is called
+ with the parameters to this function.
+
+ If *tok* is a ``str``, then a tokenizer is created and the string
+ is used as its input.
+
+ *rdclass*, a ``dns.rdataclass.RdataClass`` or ``str``, the rdataclass.
+
+ *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdatatype.
+
+ *tok*, a ``dns.tokenizer.Tokenizer`` or a ``str``.
+
+ *origin*, a ``dns.name.Name`` (or ``None``), the
+ origin to use for relative names.
+
+    *relativize*, a ``bool``. If true, names will be relativized.
+
+ *relativize_to*, a ``dns.name.Name`` (or ``None``), the origin to use
+ when relativizing names. If not set, the *origin* value will be used.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder to use if a tokenizer needs to be created. If
+ ``None``, the default IDNA 2003 encoder/decoder is used. If a
+ tokenizer is not created, then the codec associated with the tokenizer
+ is the one that is used.
+
+ Returns an instance of the chosen Rdata subclass.
+
+ """
+ if isinstance(tok, str):
+ tok = dns.tokenizer.Tokenizer(tok, idna_codec=idna_codec)
+ if not isinstance(tok, dns.tokenizer.Tokenizer):
+ raise ValueError("tok must be a string or a Tokenizer")
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ cls = get_rdata_class(rdclass, rdtype)
+ assert cls is not None # for type checkers
+ with dns.exception.ExceptionWrapper(dns.exception.SyntaxError):
+ rdata = None
+ if cls != GenericRdata:
+ # peek at first token
+ token = tok.get()
+ tok.unget(token)
+ if token.is_identifier() and token.value == r"\#":
+ #
+ # Known type using the generic syntax. Extract the
+ # wire form from the generic syntax, and then run
+ # from_wire on it.
+ #
+ grdata = GenericRdata.from_text(
+ rdclass, rdtype, tok, origin, relativize, relativize_to
+ )
+ rdata = from_wire(
+ rdclass, rdtype, grdata.data, 0, len(grdata.data), origin
+ )
+ #
+ # If this comparison isn't equal, then there must have been
+ # compressed names in the wire format, which is an error,
+ # there being no reasonable context to decompress with.
+ #
+ rwire = rdata.to_wire()
+ if rwire != grdata.data:
+ raise dns.exception.SyntaxError(
+ "compressed data in "
+ "generic syntax form "
+ "of known rdatatype"
+ )
+ if rdata is None:
+ rdata = cls.from_text(
+ rdclass, rdtype, tok, origin, relativize, relativize_to
+ )
+ token = tok.get_eol_as_token()
+ if token.comment is not None:
+ object.__setattr__(rdata, "rdcomment", token.comment)
+ return rdata
+
+
+def from_wire_parser(
+ rdclass: dns.rdataclass.RdataClass | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ parser: dns.wire.Parser,
+ origin: dns.name.Name | None = None,
+) -> Rdata:
+ """Build an rdata object from wire format
+
+ This function attempts to dynamically load a class which
+ implements the specified rdata class and type. If there is no
+ class-and-type-specific implementation, the GenericRdata class
+ is used.
+
+ Once a class is chosen, its from_wire() class method is called
+ with the parameters to this function.
+
+ *rdclass*, a ``dns.rdataclass.RdataClass`` or ``str``, the rdataclass.
+
+ *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdatatype.
+
+ *parser*, a ``dns.wire.Parser``, the parser, which should be
+ restricted to the rdata length.
+
+ *origin*, a ``dns.name.Name`` (or ``None``). If not ``None``,
+ then names will be relativized to this origin.
+
+ Returns an instance of the chosen Rdata subclass.
+ """
+
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ cls = get_rdata_class(rdclass, rdtype)
+ assert cls is not None # for type checkers
+ with dns.exception.ExceptionWrapper(dns.exception.FormError):
+ return cls.from_wire_parser(rdclass, rdtype, parser, origin)
+
+
+def from_wire(
+ rdclass: dns.rdataclass.RdataClass | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ wire: bytes,
+ current: int,
+ rdlen: int,
+ origin: dns.name.Name | None = None,
+) -> Rdata:
+ """Build an rdata object from wire format
+
+ This function attempts to dynamically load a class which
+ implements the specified rdata class and type. If there is no
+ class-and-type-specific implementation, the GenericRdata class
+ is used.
+
+ Once a class is chosen, its from_wire() class method is called
+ with the parameters to this function.
+
+ *rdclass*, an ``int``, the rdataclass.
+
+ *rdtype*, an ``int``, the rdatatype.
+
+ *wire*, a ``bytes``, the wire-format message.
+
+ *current*, an ``int``, the offset in wire of the beginning of
+ the rdata.
+
+    *rdlen*, an ``int``, the length of the wire-format rdata.
+
+ *origin*, a ``dns.name.Name`` (or ``None``). If not ``None``,
+ then names will be relativized to this origin.
+
+ Returns an instance of the chosen Rdata subclass.
+ """
+ parser = dns.wire.Parser(wire, current)
+ with parser.restrict_to(rdlen):
+ return from_wire_parser(rdclass, rdtype, parser, origin)
+
+
+class RdatatypeExists(dns.exception.DNSException):
+ """DNS rdatatype already exists."""
+
+ supp_kwargs = {"rdclass", "rdtype"}
+ fmt = (
+ "The rdata type with class {rdclass:d} and rdtype {rdtype:d} "
+ + "already exists."
+ )
+
+
+def register_type(
+ implementation: Any,
+ rdtype: int,
+ rdtype_text: str,
+ is_singleton: bool = False,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+) -> None:
+ """Dynamically register a module to handle an rdatatype.
+
+ *implementation*, a subclass of ``dns.rdata.Rdata`` implementing the type,
+ or a module containing such a class named by its text form.
+
+ *rdtype*, an ``int``, the rdatatype to register.
+
+ *rdtype_text*, a ``str``, the textual form of the rdatatype.
+
+    *is_singleton*, a ``bool``, indicating if the type is a singleton (i.e.
+    RRsets of the type can have only one member).
+
+ *rdclass*, the rdataclass of the type, or ``dns.rdataclass.ANY`` if
+ it applies to all classes.
+ """
+
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ existing_cls = get_rdata_class(rdclass, rdtype)
+ if existing_cls != GenericRdata or dns.rdatatype.is_metatype(rdtype):
+ raise RdatatypeExists(rdclass=rdclass, rdtype=rdtype)
+ if isinstance(implementation, type) and issubclass(implementation, Rdata):
+ impclass = implementation
+ else:
+ impclass = getattr(implementation, rdtype_text.replace("-", "_"))
+ _rdata_classes[(rdclass, rdtype)] = impclass
+ dns.rdatatype.register_type(rdtype, rdtype_text, is_singleton)
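
A short usage sketch of the entry points defined above (`from_text`, the RFC 3597 generic-syntax fallback, and `load_all_types`), assuming dnspython is installed; the address and the TYPE64000 number are illustrative:

```python
import dns.rdata
import dns.rdataclass
import dns.rdatatype

# Known type: dns.rdtypes.IN.A is imported on demand and cached.
rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.A, "10.0.0.1")
assert rd.to_wire() == b"\x0a\x00\x00\x01"

# Unknown type: falls back to GenericRdata and the RFC 3597
# generic syntax, \# <length> <hex-data>.
grd = dns.rdata.from_text("IN", "TYPE64000", r"\# 4 0a000001")
assert grd.to_text() == r"\# 4 0a000001"

# Optionally pre-load every built-in implementation and disable
# further dynamic imports (see load_all_types above).
dns.rdata.load_all_types()
```
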
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdataclass.py b/tapdown/lib/python3.11/site-packages/dns/rdataclass.py
new file mode 100644
index 0000000..89b85a7
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdataclass.py
@@ -0,0 +1,118 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Classes."""
+
+import dns.enum
+import dns.exception
+
+
+class RdataClass(dns.enum.IntEnum):
+ """DNS Rdata Class"""
+
+ RESERVED0 = 0
+ IN = 1
+ INTERNET = IN
+ CH = 3
+ CHAOS = CH
+ HS = 4
+ HESIOD = HS
+ NONE = 254
+ ANY = 255
+
+ @classmethod
+ def _maximum(cls):
+ return 65535
+
+ @classmethod
+ def _short_name(cls):
+ return "class"
+
+ @classmethod
+ def _prefix(cls):
+ return "CLASS"
+
+ @classmethod
+ def _unknown_exception_class(cls):
+ return UnknownRdataclass
+
+
+_metaclasses = {RdataClass.NONE, RdataClass.ANY}
+
+
+class UnknownRdataclass(dns.exception.DNSException):
+ """A DNS class is unknown."""
+
+
+def from_text(text: str) -> RdataClass:
+ """Convert text into a DNS rdata class value.
+
+ The input text can be a defined DNS RR class mnemonic or
+ instance of the DNS generic class syntax.
+
+ For example, "IN" and "CLASS1" will both result in a value of 1.
+
+    Raises ``dns.rdataclass.UnknownRdataclass`` if the class is unknown.
+
+ Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535.
+
+ Returns a ``dns.rdataclass.RdataClass``.
+ """
+
+ return RdataClass.from_text(text)
+
+
+def to_text(value: RdataClass) -> str:
+ """Convert a DNS rdata class value to text.
+
+ If the value has a known mnemonic, it will be used, otherwise the
+ DNS generic class syntax will be used.
+
+ Raises ``ValueError`` if the rdata class value is not >= 0 and <= 65535.
+
+ Returns a ``str``.
+ """
+
+ return RdataClass.to_text(value)
+
+
+def is_metaclass(rdclass: RdataClass) -> bool:
+ """True if the specified class is a metaclass.
+
+ The currently defined metaclasses are ANY and NONE.
+
+ *rdclass* is a ``dns.rdataclass.RdataClass``.
+ """
+
+ if rdclass in _metaclasses:
+ return True
+ return False
+
+
+### BEGIN generated RdataClass constants
+
+RESERVED0 = RdataClass.RESERVED0
+IN = RdataClass.IN
+INTERNET = RdataClass.INTERNET
+CH = RdataClass.CH
+CHAOS = RdataClass.CHAOS
+HS = RdataClass.HS
+HESIOD = RdataClass.HESIOD
+NONE = RdataClass.NONE
+ANY = RdataClass.ANY
+
+### END generated RdataClass constants
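
The conversion helpers above in a nutshell; a sketch assuming dnspython is importable, where 42 is just an arbitrary unassigned class number:

```python
import dns.rdataclass

# Mnemonics and the RFC 3597 generic syntax parse to the same value.
assert dns.rdataclass.from_text("IN") == dns.rdataclass.IN == 1
assert dns.rdataclass.from_text("CLASS1") == dns.rdataclass.IN

# Known values render as mnemonics, unknown ones generically.
assert dns.rdataclass.to_text(dns.rdataclass.HS) == "HS"
assert dns.rdataclass.to_text(42) == "CLASS42"

assert dns.rdataclass.is_metaclass(dns.rdataclass.ANY)
assert not dns.rdataclass.is_metaclass(dns.rdataclass.IN)
```
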
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdataset.py b/tapdown/lib/python3.11/site-packages/dns/rdataset.py
new file mode 100644
index 0000000..1edf67d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdataset.py
@@ -0,0 +1,508 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdatasets (an rdataset is a set of rdatas of a given type and class)"""
+
+import io
+import random
+import struct
+from typing import Any, Collection, Dict, List, cast
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.renderer
+import dns.set
+import dns.ttl
+
+# define SimpleSet here for backwards compatibility
+SimpleSet = dns.set.Set
+
+
+class DifferingCovers(dns.exception.DNSException):
+ """An attempt was made to add a DNS SIG/RRSIG whose covered type
+ is not the same as that of the other rdatas in the rdataset."""
+
+
+class IncompatibleTypes(dns.exception.DNSException):
+ """An attempt was made to add DNS RR data of an incompatible type."""
+
+
+class Rdataset(dns.set.Set):
+ """A DNS rdataset."""
+
+ __slots__ = ["rdclass", "rdtype", "covers", "ttl"]
+
+ def __init__(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ ttl: int = 0,
+ ):
+ """Create a new rdataset of the specified class and type.
+
+ *rdclass*, a ``dns.rdataclass.RdataClass``, the rdataclass.
+
+        *rdtype*, a ``dns.rdatatype.RdataType``, the rdatatype.
+
+        *covers*, a ``dns.rdatatype.RdataType``, the covered rdatatype.
+
+ *ttl*, an ``int``, the TTL.
+ """
+
+ super().__init__()
+ self.rdclass = rdclass
+ self.rdtype: dns.rdatatype.RdataType = rdtype
+ self.covers: dns.rdatatype.RdataType = covers
+ self.ttl = ttl
+
+ def _clone(self):
+ obj = cast(Rdataset, super()._clone())
+ obj.rdclass = self.rdclass
+ obj.rdtype = self.rdtype
+ obj.covers = self.covers
+ obj.ttl = self.ttl
+ return obj
+
+ def update_ttl(self, ttl: int) -> None:
+ """Perform TTL minimization.
+
+ Set the TTL of the rdataset to be the lesser of the set's current
+ TTL or the specified TTL. If the set contains no rdatas, set the TTL
+ to the specified TTL.
+
+ *ttl*, an ``int`` or ``str``.
+ """
+ ttl = dns.ttl.make(ttl)
+ if len(self) == 0:
+ self.ttl = ttl
+ elif ttl < self.ttl:
+ self.ttl = ttl
+
+ # pylint: disable=arguments-differ,arguments-renamed
+ def add( # pyright: ignore
+ self, rd: dns.rdata.Rdata, ttl: int | None = None
+ ) -> None:
+ """Add the specified rdata to the rdataset.
+
+ If the optional *ttl* parameter is supplied, then
+ ``self.update_ttl(ttl)`` will be called prior to adding the rdata.
+
+ *rd*, a ``dns.rdata.Rdata``, the rdata
+
+ *ttl*, an ``int``, the TTL.
+
+ Raises ``dns.rdataset.IncompatibleTypes`` if the type and class
+ do not match the type and class of the rdataset.
+
+ Raises ``dns.rdataset.DifferingCovers`` if the type is a signature
+ type and the covered type does not match that of the rdataset.
+ """
+
+ #
+ # If we're adding a signature, do some special handling to
+ # check that the signature covers the same type as the
+ # other rdatas in this rdataset. If this is the first rdata
+ # in the set, initialize the covers field.
+ #
+ if self.rdclass != rd.rdclass or self.rdtype != rd.rdtype:
+ raise IncompatibleTypes
+ if ttl is not None:
+ self.update_ttl(ttl)
+ if self.rdtype == dns.rdatatype.RRSIG or self.rdtype == dns.rdatatype.SIG:
+ covers = rd.covers()
+ if len(self) == 0 and self.covers == dns.rdatatype.NONE:
+ self.covers = covers
+ elif self.covers != covers:
+ raise DifferingCovers
+ if dns.rdatatype.is_singleton(rd.rdtype) and len(self) > 0:
+ self.clear()
+ super().add(rd)
+
+ def union_update(self, other):
+ self.update_ttl(other.ttl)
+ super().union_update(other)
+
+ def intersection_update(self, other):
+ self.update_ttl(other.ttl)
+ super().intersection_update(other)
+
+ def update(self, other):
+ """Add all rdatas in other to self.
+
+ *other*, a ``dns.rdataset.Rdataset``, the rdataset from which
+ to update.
+ """
+
+ self.update_ttl(other.ttl)
+ super().update(other)
+
+ def _rdata_repr(self):
+ def maybe_truncate(s):
+ if len(s) > 100:
+ return s[:100] + "..."
+ return s
+
+ return "[" + ", ".join(f"<{maybe_truncate(str(rr))}>" for rr in self) + "]"
+
+ def __repr__(self):
+ if self.covers == 0:
+ ctext = ""
+ else:
+ ctext = "(" + dns.rdatatype.to_text(self.covers) + ")"
+        return (
+            "<DNS "
+            + dns.rdataclass.to_text(self.rdclass)
+            + " "
+            + dns.rdatatype.to_text(self.rdtype)
+            + ctext
+            + " rdataset: "
+            + self._rdata_repr()
+            + ">"
+        )
+
+ def __str__(self):
+ return self.to_text()
+
+ def __eq__(self, other):
+ if not isinstance(other, Rdataset):
+ return False
+ if (
+ self.rdclass != other.rdclass
+ or self.rdtype != other.rdtype
+ or self.covers != other.covers
+ ):
+ return False
+ return super().__eq__(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def to_text(
+ self,
+ name: dns.name.Name | None = None,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ override_rdclass: dns.rdataclass.RdataClass | None = None,
+ want_comments: bool = False,
+ **kw: Dict[str, Any],
+ ) -> str:
+ """Convert the rdataset into DNS zone file format.
+
+ See ``dns.name.Name.choose_relativity`` for more information
+ on how *origin* and *relativize* determine the way names
+ are emitted.
+
+ Any additional keyword arguments are passed on to the rdata
+ ``to_text()`` method.
+
+ *name*, a ``dns.name.Name``. If name is not ``None``, emit RRs with
+ *name* as the owner name.
+
+ *origin*, a ``dns.name.Name`` or ``None``, the origin for relative
+ names.
+
+ *relativize*, a ``bool``. If ``True``, names will be relativized
+ to *origin*.
+
+ *override_rdclass*, a ``dns.rdataclass.RdataClass`` or ``None``.
+ If not ``None``, use this class instead of the Rdataset's class.
+
+ *want_comments*, a ``bool``. If ``True``, emit comments for rdata
+ which have them. The default is ``False``.
+ """
+
+ if name is not None:
+ name = name.choose_relativity(origin, relativize)
+ ntext = str(name)
+ pad = " "
+ else:
+ ntext = ""
+ pad = ""
+ s = io.StringIO()
+ if override_rdclass is not None:
+ rdclass = override_rdclass
+ else:
+ rdclass = self.rdclass
+ if len(self) == 0:
+ #
+ # Empty rdatasets are used for the question section, and in
+ # some dynamic updates, so we don't need to print out the TTL
+ # (which is meaningless anyway).
+ #
+ s.write(
+ f"{ntext}{pad}{dns.rdataclass.to_text(rdclass)} "
+ f"{dns.rdatatype.to_text(self.rdtype)}\n"
+ )
+ else:
+ for rd in self:
+ extra = ""
+ if want_comments:
+ if rd.rdcomment:
+ extra = f" ;{rd.rdcomment}"
+ s.write(
+ f"{ntext}{pad}{self.ttl} "
+ f"{dns.rdataclass.to_text(rdclass)} "
+ f"{dns.rdatatype.to_text(self.rdtype)} "
+ f"{rd.to_text(origin=origin, relativize=relativize, **kw)}"
+ f"{extra}\n"
+ )
+ #
+ # We strip off the final \n for the caller's convenience in printing
+ #
+ return s.getvalue()[:-1]
+
+ def to_wire(
+ self,
+ name: dns.name.Name,
+ file: Any,
+ compress: dns.name.CompressType | None = None,
+ origin: dns.name.Name | None = None,
+ override_rdclass: dns.rdataclass.RdataClass | None = None,
+ want_shuffle: bool = True,
+ ) -> int:
+ """Convert the rdataset to wire format.
+
+ *name*, a ``dns.name.Name`` is the owner name to use.
+
+ *file* is the file where the name is emitted (typically a
+ BytesIO file).
+
+ *compress*, a ``dict``, is the compression table to use. If
+ ``None`` (the default), names will not be compressed.
+
+ *origin* is a ``dns.name.Name`` or ``None``. If the name is
+ relative and origin is not ``None``, then *origin* will be appended
+ to it.
+
+ *override_rdclass*, an ``int``, is used as the class instead of the
+ class of the rdataset. This is useful when rendering rdatasets
+ associated with dynamic updates.
+
+ *want_shuffle*, a ``bool``. If ``True``, then the order of the
+ Rdatas within the Rdataset will be shuffled before rendering.
+
+ Returns an ``int``, the number of records emitted.
+ """
+
+ if override_rdclass is not None:
+ rdclass = override_rdclass
+ want_shuffle = False
+ else:
+ rdclass = self.rdclass
+ if len(self) == 0:
+ name.to_wire(file, compress, origin)
+ file.write(struct.pack("!HHIH", self.rdtype, rdclass, 0, 0))
+ return 1
+ else:
+ l: Rdataset | List[dns.rdata.Rdata]
+ if want_shuffle:
+ l = list(self)
+ random.shuffle(l)
+ else:
+ l = self
+ for rd in l:
+ name.to_wire(file, compress, origin)
+ file.write(struct.pack("!HHI", self.rdtype, rdclass, self.ttl))
+ with dns.renderer.prefixed_length(file, 2):
+ rd.to_wire(file, compress, origin)
+ return len(self)
+
+ def match(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType,
+ ) -> bool:
+ """Returns ``True`` if this rdataset matches the specified class,
+ type, and covers.
+ """
+ if self.rdclass == rdclass and self.rdtype == rdtype and self.covers == covers:
+ return True
+ return False
+
+ def processing_order(self) -> List[dns.rdata.Rdata]:
+ """Return rdatas in a valid processing order according to the type's
+ specification. For example, MX records are in preference order from
+ lowest to highest preferences, with items of the same preference
+ shuffled.
+
+ For types that do not define a processing order, the rdatas are
+ simply shuffled.
+ """
+ if len(self) == 0:
+ return []
+ else:
+ return self[0]._processing_order(iter(self)) # pyright: ignore
+
+
+@dns.immutable.immutable
+class ImmutableRdataset(Rdataset): # lgtm[py/missing-equals]
+ """An immutable DNS rdataset."""
+
+ _clone_class = Rdataset
+
+ def __init__(self, rdataset: Rdataset):
+ """Create an immutable rdataset from the specified rdataset."""
+
+ super().__init__(
+ rdataset.rdclass, rdataset.rdtype, rdataset.covers, rdataset.ttl
+ )
+ self.items = dns.immutable.Dict(rdataset.items)
+
+ def update_ttl(self, ttl):
+ raise TypeError("immutable")
+
+ def add(self, rd, ttl=None):
+ raise TypeError("immutable")
+
+ def union_update(self, other):
+ raise TypeError("immutable")
+
+ def intersection_update(self, other):
+ raise TypeError("immutable")
+
+ def update(self, other):
+ raise TypeError("immutable")
+
+ def __delitem__(self, i):
+ raise TypeError("immutable")
+
+ # lgtm complains about these not raising ArithmeticError, but there is
+ # precedent for overrides of these methods in other classes to raise
+ # TypeError, and it seems like the better exception.
+
+ def __ior__(self, other): # lgtm[py/unexpected-raise-in-special-method]
+ raise TypeError("immutable")
+
+ def __iand__(self, other): # lgtm[py/unexpected-raise-in-special-method]
+ raise TypeError("immutable")
+
+ def __iadd__(self, other): # lgtm[py/unexpected-raise-in-special-method]
+ raise TypeError("immutable")
+
+ def __isub__(self, other): # lgtm[py/unexpected-raise-in-special-method]
+ raise TypeError("immutable")
+
+ def clear(self):
+ raise TypeError("immutable")
+
+ def __copy__(self):
+ return ImmutableRdataset(super().copy()) # pyright: ignore
+
+ def copy(self):
+ return ImmutableRdataset(super().copy()) # pyright: ignore
+
+ def union(self, other):
+ return ImmutableRdataset(super().union(other)) # pyright: ignore
+
+ def intersection(self, other):
+ return ImmutableRdataset(super().intersection(other)) # pyright: ignore
+
+ def difference(self, other):
+ return ImmutableRdataset(super().difference(other)) # pyright: ignore
+
+ def symmetric_difference(self, other):
+ return ImmutableRdataset(super().symmetric_difference(other)) # pyright: ignore
+
+
+def from_text_list(
+ rdclass: dns.rdataclass.RdataClass | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ ttl: int,
+ text_rdatas: Collection[str],
+ idna_codec: dns.name.IDNACodec | None = None,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ relativize_to: dns.name.Name | None = None,
+) -> Rdataset:
+ """Create an rdataset with the specified class, type, and TTL, and with
+ the specified list of rdatas in text format.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder to use; if ``None``, the default IDNA 2003
+ encoder/decoder is used.
+
+ *origin*, a ``dns.name.Name`` (or ``None``), the
+ origin to use for relative names.
+
+    *relativize*, a ``bool``. If true, names will be relativized.
+
+ *relativize_to*, a ``dns.name.Name`` (or ``None``), the origin to use
+ when relativizing names. If not set, the *origin* value will be used.
+
+ Returns a ``dns.rdataset.Rdataset`` object.
+ """
+
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ r = Rdataset(rdclass, rdtype)
+ r.update_ttl(ttl)
+ for t in text_rdatas:
+ rd = dns.rdata.from_text(
+ r.rdclass, r.rdtype, t, origin, relativize, relativize_to, idna_codec
+ )
+ r.add(rd)
+ return r
+
+
+def from_text(
+ rdclass: dns.rdataclass.RdataClass | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ ttl: int,
+ *text_rdatas: Any,
+) -> Rdataset:
+ """Create an rdataset with the specified class, type, and TTL, and with
+ the specified rdatas in text format.
+
+ Returns a ``dns.rdataset.Rdataset`` object.
+ """
+
+ return from_text_list(rdclass, rdtype, ttl, cast(Collection[str], text_rdatas))
+
+
+def from_rdata_list(ttl: int, rdatas: Collection[dns.rdata.Rdata]) -> Rdataset:
+ """Create an rdataset with the specified TTL, and with
+ the specified list of rdata objects.
+
+ Returns a ``dns.rdataset.Rdataset`` object.
+ """
+
+ if len(rdatas) == 0:
+ raise ValueError("rdata list must not be empty")
+ r = None
+ for rd in rdatas:
+ if r is None:
+ r = Rdataset(rd.rdclass, rd.rdtype)
+ r.update_ttl(ttl)
+ r.add(rd)
+ assert r is not None
+ return r
+
+
+def from_rdata(ttl: int, *rdatas: Any) -> Rdataset:
+ """Create an rdataset with the specified TTL, and with
+ the specified rdata objects.
+
+ Returns a ``dns.rdataset.Rdataset`` object.
+ """
+
+ return from_rdata_list(ttl, cast(Collection[dns.rdata.Rdata], rdatas))
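
A sketch of typical rdataset construction and the TTL-minimization behavior documented above, with illustrative addresses; assumes dnspython is importable:

```python
import dns.rdataclass
import dns.rdataset
import dns.rdatatype

rds = dns.rdataset.from_text("IN", "A", 300, "10.0.0.1", "10.0.0.2")
assert len(rds) == 2 and rds.ttl == 300

# update_ttl() keeps the lesser of the current and the new TTL.
rds.update_ttl(60)
assert rds.ttl == 60

assert rds.match(dns.rdataclass.IN, dns.rdatatype.A, dns.rdatatype.NONE)

# The immutable wrapper rejects every mutating operation.
frozen = dns.rdataset.ImmutableRdataset(rds)
try:
    frozen.update_ttl(30)
except TypeError:
    pass
```
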
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdatatype.py b/tapdown/lib/python3.11/site-packages/dns/rdatatype.py
new file mode 100644
index 0000000..211d810
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdatatype.py
@@ -0,0 +1,338 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Rdata Types."""
+
+from typing import Dict
+
+import dns.enum
+import dns.exception
+
+
+class RdataType(dns.enum.IntEnum):
+ """DNS Rdata Type"""
+
+ TYPE0 = 0
+ NONE = 0
+ A = 1
+ NS = 2
+ MD = 3
+ MF = 4
+ CNAME = 5
+ SOA = 6
+ MB = 7
+ MG = 8
+ MR = 9
+ NULL = 10
+ WKS = 11
+ PTR = 12
+ HINFO = 13
+ MINFO = 14
+ MX = 15
+ TXT = 16
+ RP = 17
+ AFSDB = 18
+ X25 = 19
+ ISDN = 20
+ RT = 21
+ NSAP = 22
+ NSAP_PTR = 23
+ SIG = 24
+ KEY = 25
+ PX = 26
+ GPOS = 27
+ AAAA = 28
+ LOC = 29
+ NXT = 30
+ SRV = 33
+ NAPTR = 35
+ KX = 36
+ CERT = 37
+ A6 = 38
+ DNAME = 39
+ OPT = 41
+ APL = 42
+ DS = 43
+ SSHFP = 44
+ IPSECKEY = 45
+ RRSIG = 46
+ NSEC = 47
+ DNSKEY = 48
+ DHCID = 49
+ NSEC3 = 50
+ NSEC3PARAM = 51
+ TLSA = 52
+ SMIMEA = 53
+ HIP = 55
+ NINFO = 56
+ CDS = 59
+ CDNSKEY = 60
+ OPENPGPKEY = 61
+ CSYNC = 62
+ ZONEMD = 63
+ SVCB = 64
+ HTTPS = 65
+ DSYNC = 66
+ SPF = 99
+ UNSPEC = 103
+ NID = 104
+ L32 = 105
+ L64 = 106
+ LP = 107
+ EUI48 = 108
+ EUI64 = 109
+ TKEY = 249
+ TSIG = 250
+ IXFR = 251
+ AXFR = 252
+ MAILB = 253
+ MAILA = 254
+ ANY = 255
+ URI = 256
+ CAA = 257
+ AVC = 258
+ AMTRELAY = 260
+ RESINFO = 261
+ WALLET = 262
+ TA = 32768
+ DLV = 32769
+
+ @classmethod
+ def _maximum(cls):
+ return 65535
+
+ @classmethod
+ def _short_name(cls):
+ return "type"
+
+ @classmethod
+ def _prefix(cls):
+ return "TYPE"
+
+ @classmethod
+ def _extra_from_text(cls, text):
+ if text.find("-") >= 0:
+ try:
+ return cls[text.replace("-", "_")]
+ except KeyError: # pragma: no cover
+ pass
+ return _registered_by_text.get(text)
+
+ @classmethod
+ def _extra_to_text(cls, value, current_text):
+ if current_text is None:
+ return _registered_by_value.get(value)
+ if current_text.find("_") >= 0:
+ return current_text.replace("_", "-")
+ return current_text
+
+ @classmethod
+ def _unknown_exception_class(cls):
+ return UnknownRdatatype
+
+
+_registered_by_text: Dict[str, RdataType] = {}
+_registered_by_value: Dict[RdataType, str] = {}
+
+_metatypes = {RdataType.OPT}
+
+_singletons = {
+ RdataType.SOA,
+ RdataType.NXT,
+ RdataType.DNAME,
+ RdataType.NSEC,
+ RdataType.CNAME,
+}
+
+
+class UnknownRdatatype(dns.exception.DNSException):
+ """DNS resource record type is unknown."""
+
+
+def from_text(text: str) -> RdataType:
+ """Convert text into a DNS rdata type value.
+
+ The input text can be a defined DNS RR type mnemonic or
+ instance of the DNS generic type syntax.
+
+ For example, "NS" and "TYPE2" will both result in a value of 2.
+
+ Raises ``dns.rdatatype.UnknownRdatatype`` if the type is unknown.
+
+ Raises ``ValueError`` if the rdata type value is not >= 0 and <= 65535.
+
+ Returns a ``dns.rdatatype.RdataType``.
+ """
+
+ return RdataType.from_text(text)
+
+
+def to_text(value: RdataType) -> str:
+ """Convert a DNS rdata type value to text.
+
+ If the value has a known mnemonic, it will be used, otherwise the
+ DNS generic type syntax will be used.
+
+ Raises ``ValueError`` if the rdata type value is not >= 0 and <= 65535.
+
+ Returns a ``str``.
+ """
+
+ return RdataType.to_text(value)
+
+
+def is_metatype(rdtype: RdataType) -> bool:
+ """True if the specified type is a metatype.
+
+ *rdtype* is a ``dns.rdatatype.RdataType``.
+
+ The currently defined metatypes are TKEY, TSIG, IXFR, AXFR, MAILA,
+ MAILB, ANY, and OPT.
+
+ Returns a ``bool``.
+ """
+
+ return (256 > rdtype >= 128) or rdtype in _metatypes
+
+
+def is_singleton(rdtype: RdataType) -> bool:
+ """Is the specified type a singleton type?
+
+ Singleton types can only have a single rdata in an rdataset, or a single
+ RR in an RRset.
+
+ The currently defined singleton types are CNAME, DNAME, NSEC, NXT, and
+ SOA.
+
+ *rdtype* is an ``int``.
+
+ Returns a ``bool``.
+ """
+
+ if rdtype in _singletons:
+ return True
+ return False
+
+
+# pylint: disable=redefined-outer-name
+def register_type(
+ rdtype: RdataType, rdtype_text: str, is_singleton: bool = False
+) -> None:
+ """Dynamically register an rdatatype.
+
+ *rdtype*, a ``dns.rdatatype.RdataType``, the rdatatype to register.
+
+ *rdtype_text*, a ``str``, the textual form of the rdatatype.
+
+    *is_singleton*, a ``bool``, indicating if the type is a singleton (i.e.
+    RRsets of the type can have only one member).
+ """
+
+ _registered_by_text[rdtype_text] = rdtype
+ _registered_by_value[rdtype] = rdtype_text
+ if is_singleton:
+ _singletons.add(rdtype)
+
+
+### BEGIN generated RdataType constants
+
+TYPE0 = RdataType.TYPE0
+NONE = RdataType.NONE
+A = RdataType.A
+NS = RdataType.NS
+MD = RdataType.MD
+MF = RdataType.MF
+CNAME = RdataType.CNAME
+SOA = RdataType.SOA
+MB = RdataType.MB
+MG = RdataType.MG
+MR = RdataType.MR
+NULL = RdataType.NULL
+WKS = RdataType.WKS
+PTR = RdataType.PTR
+HINFO = RdataType.HINFO
+MINFO = RdataType.MINFO
+MX = RdataType.MX
+TXT = RdataType.TXT
+RP = RdataType.RP
+AFSDB = RdataType.AFSDB
+X25 = RdataType.X25
+ISDN = RdataType.ISDN
+RT = RdataType.RT
+NSAP = RdataType.NSAP
+NSAP_PTR = RdataType.NSAP_PTR
+SIG = RdataType.SIG
+KEY = RdataType.KEY
+PX = RdataType.PX
+GPOS = RdataType.GPOS
+AAAA = RdataType.AAAA
+LOC = RdataType.LOC
+NXT = RdataType.NXT
+SRV = RdataType.SRV
+NAPTR = RdataType.NAPTR
+KX = RdataType.KX
+CERT = RdataType.CERT
+A6 = RdataType.A6
+DNAME = RdataType.DNAME
+OPT = RdataType.OPT
+APL = RdataType.APL
+DS = RdataType.DS
+SSHFP = RdataType.SSHFP
+IPSECKEY = RdataType.IPSECKEY
+RRSIG = RdataType.RRSIG
+NSEC = RdataType.NSEC
+DNSKEY = RdataType.DNSKEY
+DHCID = RdataType.DHCID
+NSEC3 = RdataType.NSEC3
+NSEC3PARAM = RdataType.NSEC3PARAM
+TLSA = RdataType.TLSA
+SMIMEA = RdataType.SMIMEA
+HIP = RdataType.HIP
+NINFO = RdataType.NINFO
+CDS = RdataType.CDS
+CDNSKEY = RdataType.CDNSKEY
+OPENPGPKEY = RdataType.OPENPGPKEY
+CSYNC = RdataType.CSYNC
+ZONEMD = RdataType.ZONEMD
+SVCB = RdataType.SVCB
+HTTPS = RdataType.HTTPS
+DSYNC = RdataType.DSYNC
+SPF = RdataType.SPF
+UNSPEC = RdataType.UNSPEC
+NID = RdataType.NID
+L32 = RdataType.L32
+L64 = RdataType.L64
+LP = RdataType.LP
+EUI48 = RdataType.EUI48
+EUI64 = RdataType.EUI64
+TKEY = RdataType.TKEY
+TSIG = RdataType.TSIG
+IXFR = RdataType.IXFR
+AXFR = RdataType.AXFR
+MAILB = RdataType.MAILB
+MAILA = RdataType.MAILA
+ANY = RdataType.ANY
+URI = RdataType.URI
+CAA = RdataType.CAA
+AVC = RdataType.AVC
+AMTRELAY = RdataType.AMTRELAY
+RESINFO = RdataType.RESINFO
+WALLET = RdataType.WALLET
+TA = RdataType.TA
+DLV = RdataType.DLV
+
+### END generated RdataType constants
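
The text/value conversions and predicates above, sketched; assumes dnspython is importable:

```python
import dns.rdatatype

assert dns.rdatatype.from_text("NS") == dns.rdatatype.NS == 2
assert dns.rdatatype.from_text("TYPE2") == dns.rdatatype.NS

# Hyphenated mnemonics round-trip through the _extra_* hooks above.
assert dns.rdatatype.from_text("NSAP-PTR") == dns.rdatatype.NSAP_PTR
assert dns.rdatatype.to_text(dns.rdatatype.NSAP_PTR) == "NSAP-PTR"

assert dns.rdatatype.is_singleton(dns.rdatatype.CNAME)
assert dns.rdatatype.is_metatype(dns.rdatatype.OPT)
assert not dns.rdatatype.is_metatype(dns.rdatatype.A)
```
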
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AFSDB.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AFSDB.py
new file mode 100644
index 0000000..06a3b97
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AFSDB.py
@@ -0,0 +1,45 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.mxbase
+
+
+@dns.immutable.immutable
+class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+ """AFSDB record"""
+
+ # Use the property mechanism to make "subtype" an alias for the
+ # "preference" attribute, and "hostname" an alias for the "exchange"
+ # attribute.
+ #
+ # This lets us inherit the UncompressedMX implementation but lets
+ # the caller use appropriate attribute names for the rdata type.
+ #
+ # We probably lose some performance vs. a cut-and-paste
+ # implementation, but this way we don't copy code, and that's
+ # good.
+
+ @property
+ def subtype(self):
+ "the AFSDB subtype"
+ return self.preference
+
+ @property
+ def hostname(self):
+ "the AFSDB hostname"
+ return self.exchange
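
A tiny sketch of the attribute aliasing described in the comment above (the hostname is illustrative):

```python
import dns.rdata

rd = dns.rdata.from_text("IN", "AFSDB", "1 afsdb.example.")
# subtype/hostname are read-only aliases of the inherited MX fields.
assert rd.subtype == rd.preference == 1
assert rd.hostname is rd.exchange
```
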
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AMTRELAY.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AMTRELAY.py
new file mode 100644
index 0000000..dc9fa87
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AMTRELAY.py
@@ -0,0 +1,89 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.rdtypes.util
+
+
+class Relay(dns.rdtypes.util.Gateway):
+ name = "AMTRELAY relay"
+
+ @property
+ def relay(self):
+ return self.gateway
+
+
+@dns.immutable.immutable
+class AMTRELAY(dns.rdata.Rdata):
+ """AMTRELAY record"""
+
+ # see: RFC 8777
+
+ __slots__ = ["precedence", "discovery_optional", "relay_type", "relay"]
+
+ def __init__(
+ self, rdclass, rdtype, precedence, discovery_optional, relay_type, relay
+ ):
+ super().__init__(rdclass, rdtype)
+ relay = Relay(relay_type, relay)
+ self.precedence = self._as_uint8(precedence)
+ self.discovery_optional = self._as_bool(discovery_optional)
+ self.relay_type = relay.type
+ self.relay = relay.relay
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ relay = Relay(self.relay_type, self.relay).to_text(origin, relativize)
+ return (
+ f"{self.precedence} {self.discovery_optional:d} {self.relay_type} {relay}"
+ )
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ precedence = tok.get_uint8()
+ discovery_optional = tok.get_uint8()
+ if discovery_optional > 1:
+ raise dns.exception.SyntaxError("expecting 0 or 1")
+ discovery_optional = bool(discovery_optional)
+ relay_type = tok.get_uint8()
+ if relay_type > 0x7F:
+ raise dns.exception.SyntaxError("expecting an integer <= 127")
+ relay = Relay.from_text(relay_type, tok, origin, relativize, relativize_to)
+ return cls(
+ rdclass, rdtype, precedence, discovery_optional, relay_type, relay.relay
+ )
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ relay_type = self.relay_type | (self.discovery_optional << 7)
+ header = struct.pack("!BB", self.precedence, relay_type)
+ file.write(header)
+ Relay(self.relay_type, self.relay).to_wire(file, compress, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (precedence, relay_type) = parser.get_struct("!BB")
+ discovery_optional = bool(relay_type >> 7)
+ relay_type &= 0x7F
+ relay = Relay.from_wire_parser(relay_type, parser, origin)
+ return cls(
+ rdclass, rdtype, precedence, discovery_optional, relay_type, relay.relay
+ )
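
A sketch of the bit packing in `_to_wire` above: the discovery-optional flag occupies the high bit of the relay-type byte, and relay type 3 means a domain-name relay per RFC 8777 (the name is illustrative):

```python
import dns.rdata

rd = dns.rdata.from_text("IN", "AMTRELAY", "10 1 3 relay.example.")
wire = rd.to_wire()
assert wire[0] == 10             # precedence
assert wire[1] == (1 << 7) | 3   # D bit ORed with the 7-bit relay type
```
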
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AVC.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AVC.py
new file mode 100644
index 0000000..a27ae2d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/AVC.py
@@ -0,0 +1,26 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2016 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.txtbase
+
+
+@dns.immutable.immutable
+class AVC(dns.rdtypes.txtbase.TXTBase):
+ """AVC record"""
+
+ # See: IANA dns parameters for AVC
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CAA.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CAA.py
new file mode 100644
index 0000000..8c62e62
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CAA.py
@@ -0,0 +1,67 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class CAA(dns.rdata.Rdata):
+ """CAA (Certification Authority Authorization) record"""
+
+ # see: RFC 6844
+
+ __slots__ = ["flags", "tag", "value"]
+
+ def __init__(self, rdclass, rdtype, flags, tag, value):
+ super().__init__(rdclass, rdtype)
+ self.flags = self._as_uint8(flags)
+ self.tag = self._as_bytes(tag, True, 255)
+ if not tag.isalnum():
+ raise ValueError("tag is not alphanumeric")
+ self.value = self._as_bytes(value)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f'{self.flags} {dns.rdata._escapify(self.tag)} "{dns.rdata._escapify(self.value)}"'
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ flags = tok.get_uint8()
+ tag = tok.get_string().encode()
+ value = tok.get_string().encode()
+ return cls(rdclass, rdtype, flags, tag, value)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(struct.pack("!B", self.flags))
+ l = len(self.tag)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.tag)
+ file.write(self.value)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ flags = parser.get_uint8()
+ tag = parser.get_counted_bytes()
+ value = parser.get_remaining()
+ return cls(rdclass, rdtype, flags, tag, value)
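
A parsing sketch for the RFC 6844 presentation format (the CA domain is illustrative):

```python
import dns.rdata

rd = dns.rdata.from_text("IN", "CAA", '0 issue "ca.example.net"')
assert rd.flags == 0
assert rd.tag == b"issue"
assert rd.value == b"ca.example.net"
assert rd.to_text() == '0 issue "ca.example.net"'
```
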
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CDNSKEY.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CDNSKEY.py
new file mode 100644
index 0000000..b613409
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CDNSKEY.py
@@ -0,0 +1,33 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.dnskeybase # lgtm[py/import-and-import-from]
+
+# pylint: disable=unused-import
+from dns.rdtypes.dnskeybase import ( # noqa: F401 lgtm[py/unused-import]
+ REVOKE,
+ SEP,
+ ZONE,
+)
+
+# pylint: enable=unused-import
+
+
+@dns.immutable.immutable
+class CDNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):
+ """CDNSKEY record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CDS.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CDS.py
new file mode 100644
index 0000000..8312b97
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CDS.py
@@ -0,0 +1,29 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.dsbase
+
+
+@dns.immutable.immutable
+class CDS(dns.rdtypes.dsbase.DSBase):
+ """CDS record"""
+
+ _digest_length_by_type = {
+ **dns.rdtypes.dsbase.DSBase._digest_length_by_type,
+ 0: 1, # delete, RFC 8078 Sec. 4 (including Errata ID 5049)
+ }
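
The extra `0: 1` digest-length entry above admits the RFC 8078 delete form, sketched here:

```python
import dns.rdata

# "Delete CDS": key tag 0, algorithm 0, digest type 0, one zero byte.
rd = dns.rdata.from_text("IN", "CDS", "0 0 0 00")
assert rd.digest == b"\x00"
```
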
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CERT.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CERT.py
new file mode 100644
index 0000000..4d5e5bd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CERT.py
@@ -0,0 +1,113 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import struct
+
+import dns.dnssectypes
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+_ctype_by_value = {
+ 1: "PKIX",
+ 2: "SPKI",
+ 3: "PGP",
+ 4: "IPKIX",
+ 5: "ISPKI",
+ 6: "IPGP",
+ 7: "ACPKIX",
+ 8: "IACPKIX",
+ 253: "URI",
+ 254: "OID",
+}
+
+_ctype_by_name = {
+ "PKIX": 1,
+ "SPKI": 2,
+ "PGP": 3,
+ "IPKIX": 4,
+ "ISPKI": 5,
+ "IPGP": 6,
+ "ACPKIX": 7,
+ "IACPKIX": 8,
+ "URI": 253,
+ "OID": 254,
+}
+
+
+def _ctype_from_text(what):
+ v = _ctype_by_name.get(what)
+ if v is not None:
+ return v
+ return int(what)
+
+
+def _ctype_to_text(what):
+ v = _ctype_by_value.get(what)
+ if v is not None:
+ return v
+ return str(what)
+
+
+@dns.immutable.immutable
+class CERT(dns.rdata.Rdata):
+ """CERT record"""
+
+ # see RFC 4398
+
+ __slots__ = ["certificate_type", "key_tag", "algorithm", "certificate"]
+
+ def __init__(
+ self, rdclass, rdtype, certificate_type, key_tag, algorithm, certificate
+ ):
+ super().__init__(rdclass, rdtype)
+ self.certificate_type = self._as_uint16(certificate_type)
+ self.key_tag = self._as_uint16(key_tag)
+ self.algorithm = self._as_uint8(algorithm)
+ self.certificate = self._as_bytes(certificate)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ certificate_type = _ctype_to_text(self.certificate_type)
+ algorithm = dns.dnssectypes.Algorithm.to_text(self.algorithm)
+ certificate = dns.rdata._base64ify(self.certificate, **kw) # pyright: ignore
+ return f"{certificate_type} {self.key_tag} {algorithm} {certificate}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ certificate_type = _ctype_from_text(tok.get_string())
+ key_tag = tok.get_uint16()
+ algorithm = dns.dnssectypes.Algorithm.from_text(tok.get_string())
+ b64 = tok.concatenate_remaining_identifiers().encode()
+ certificate = base64.b64decode(b64)
+ return cls(rdclass, rdtype, certificate_type, key_tag, algorithm, certificate)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ prefix = struct.pack(
+ "!HHB", self.certificate_type, self.key_tag, self.algorithm
+ )
+ file.write(prefix)
+ file.write(self.certificate)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (certificate_type, key_tag, algorithm) = parser.get_struct("!HHB")
+ certificate = parser.get_remaining()
+ return cls(rdclass, rdtype, certificate_type, key_tag, algorithm, certificate)
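
A round-trip sketch for the mnemonic tables above (the key tag and base64 payload are placeholders):

```python
import dns.rdata

rd = dns.rdata.from_text("IN", "CERT", "PKIX 12345 RSASHA256 aGVsbG8=")
assert rd.certificate_type == 1      # "PKIX" -> 1 via _ctype_by_name
assert rd.certificate == b"hello"    # base64-decoded payload
assert rd.to_text().startswith("PKIX 12345 RSASHA256")
```
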
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CNAME.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CNAME.py
new file mode 100644
index 0000000..665e407
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CNAME.py
@@ -0,0 +1,28 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.nsbase
+
+
+@dns.immutable.immutable
+class CNAME(dns.rdtypes.nsbase.NSBase):
+ """CNAME record
+
+ Note: although CNAME is officially a singleton type, dnspython allows
+ non-singleton CNAME rdatasets because such sets have been commonly
+ used by BIND and other nameservers for load balancing."""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CSYNC.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CSYNC.py
new file mode 100644
index 0000000..103486d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/CSYNC.py
@@ -0,0 +1,68 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011, 2016 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdatatype
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class Bitmap(dns.rdtypes.util.Bitmap):
+ type_name = "CSYNC"
+
+
+@dns.immutable.immutable
+class CSYNC(dns.rdata.Rdata):
+ """CSYNC record"""
+
+ __slots__ = ["serial", "flags", "windows"]
+
+ def __init__(self, rdclass, rdtype, serial, flags, windows):
+ super().__init__(rdclass, rdtype)
+ self.serial = self._as_uint32(serial)
+ self.flags = self._as_uint16(flags)
+ if not isinstance(windows, Bitmap):
+ windows = Bitmap(windows)
+ self.windows = tuple(windows.windows)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ text = Bitmap(self.windows).to_text()
+ return f"{self.serial} {self.flags}{text}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ serial = tok.get_uint32()
+ flags = tok.get_uint16()
+ bitmap = Bitmap.from_text(tok)
+ return cls(rdclass, rdtype, serial, flags, bitmap)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(struct.pack("!IH", self.serial, self.flags))
+ Bitmap(self.windows).to_wire(file)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (serial, flags) = parser.get_struct("!IH")
+ bitmap = Bitmap.from_wire_parser(parser)
+ return cls(rdclass, rdtype, serial, flags, bitmap)
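
A parsing sketch using the example values from RFC 7477 (serial 66; flags 3 = immediate | soaminimum):

```python
import dns.rdata

rd = dns.rdata.from_text("IN", "CSYNC", "66 3 A NS AAAA")
assert rd.serial == 66 and rd.flags == 3
assert rd.to_text() == "66 3 A NS AAAA"
```
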
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DLV.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DLV.py
new file mode 100644
index 0000000..6c134f1
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DLV.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.dsbase
+
+
+@dns.immutable.immutable
+class DLV(dns.rdtypes.dsbase.DSBase):
+ """DLV record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DNAME.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DNAME.py
new file mode 100644
index 0000000..bbf9186
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DNAME.py
@@ -0,0 +1,27 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.nsbase
+
+
+@dns.immutable.immutable
+class DNAME(dns.rdtypes.nsbase.UncompressedNS):
+ """DNAME record"""
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.target.to_wire(file, None, origin, canonicalize)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DNSKEY.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DNSKEY.py
new file mode 100644
index 0000000..6d961a9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DNSKEY.py
@@ -0,0 +1,33 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.dnskeybase # lgtm[py/import-and-import-from]
+
+# pylint: disable=unused-import
+from dns.rdtypes.dnskeybase import ( # noqa: F401 lgtm[py/unused-import]
+ REVOKE,
+ SEP,
+ ZONE,
+)
+
+# pylint: enable=unused-import
+
+
+@dns.immutable.immutable
+class DNSKEY(dns.rdtypes.dnskeybase.DNSKEYBase):
+ """DNSKEY record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DS.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DS.py
new file mode 100644
index 0000000..58b3108
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DS.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.dsbase
+
+
+@dns.immutable.immutable
+class DS(dns.rdtypes.dsbase.DSBase):
+ """DS record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DSYNC.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DSYNC.py
new file mode 100644
index 0000000..e8d1394
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/DSYNC.py
@@ -0,0 +1,72 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import struct
+
+import dns.enum
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.rdatatype
+import dns.rdtypes.util
+
+
+class UnknownScheme(dns.exception.DNSException):
+ """Unknown DSYNC scheme"""
+
+
+class Scheme(dns.enum.IntEnum):
+ """DSYNC SCHEME"""
+
+ NOTIFY = 1
+
+ @classmethod
+ def _maximum(cls):
+ return 255
+
+ @classmethod
+ def _unknown_exception_class(cls):
+ return UnknownScheme
+
+
+@dns.immutable.immutable
+class DSYNC(dns.rdata.Rdata):
+ """DSYNC record"""
+
+ # see: draft-ietf-dnsop-generalized-notify
+
+ __slots__ = ["rrtype", "scheme", "port", "target"]
+
+ def __init__(self, rdclass, rdtype, rrtype, scheme, port, target):
+ super().__init__(rdclass, rdtype)
+ self.rrtype = self._as_rdatatype(rrtype)
+ self.scheme = Scheme.make(scheme)
+ self.port = self._as_uint16(port)
+ self.target = self._as_name(target)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ target = self.target.choose_relativity(origin, relativize)
+ return (
+ f"{dns.rdatatype.to_text(self.rrtype)} {Scheme.to_text(self.scheme)} "
+ f"{self.port} {target}"
+ )
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ rrtype = dns.rdatatype.from_text(tok.get_string())
+ scheme = Scheme.make(tok.get_string())
+ port = tok.get_uint16()
+ target = tok.get_name(origin, relativize, relativize_to)
+ return cls(rdclass, rdtype, rrtype, scheme, port, target)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ three_ints = struct.pack("!HBH", self.rrtype, self.scheme, self.port)
+ file.write(three_ints)
+ self.target.to_wire(file, None, origin, False)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (rrtype, scheme, port) = parser.get_struct("!HBH")
+ target = parser.get_name(origin)
+ return cls(rdclass, rdtype, rrtype, scheme, port, target)
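
DSYNC packs three fixed-width integers (`!HBH`: rrtype, scheme, port) followed by an uncompressed target name. A hedged round-trip sketch, assuming this dnspython build also registers the DSYNC type code (it ships the class, so that is likely but not verified here); the target name is made up.

```python
# Sketch of a text/wire round trip; "scanner.example." is a placeholder.
import dns.rdata

rd = dns.rdata.from_text("IN", "DSYNC", "CDS NOTIFY 5359 scanner.example.")
assert rd.scheme == 1 and rd.port == 5359    # Scheme.NOTIFY == 1
wire = rd.to_wire()                          # !HBH header + uncompressed name
rd2 = dns.rdata.from_wire("IN", "DSYNC", wire, 0, len(wire))
assert rd == rd2
```
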
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/EUI48.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/EUI48.py
new file mode 100644
index 0000000..c843be5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/EUI48.py
@@ -0,0 +1,30 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.euibase
+
+
+@dns.immutable.immutable
+class EUI48(dns.rdtypes.euibase.EUIBase):
+ """EUI48 record"""
+
+ # see: rfc7043.txt
+
+ byte_len = 6 # 0123456789ab (in hex)
+ text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab
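
The base class derives everything from `byte_len`: the wire form is the raw bytes, and the text form is two hex digits plus a dash per byte, minus the trailing dash (hence `text_len = byte_len * 3 - 1`). A small sketch under that assumption:

```python
# Sketch: 6 raw bytes on the wire, dash-separated hex in presentation form.
import dns.rdata

rd = dns.rdata.from_text("IN", "EUI48", "01-23-45-67-89-ab")
assert rd.to_wire() == bytes.fromhex("0123456789ab")   # byte_len == 6
assert len(rd.to_text()) == 6 * 3 - 1                  # text_len == 17
```
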
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/EUI64.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/EUI64.py
new file mode 100644
index 0000000..f6d7e25
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/EUI64.py
@@ -0,0 +1,30 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.euibase
+
+
+@dns.immutable.immutable
+class EUI64(dns.rdtypes.euibase.EUIBase):
+ """EUI64 record"""
+
+ # see: rfc7043.txt
+
+ byte_len = 8 # 0123456789abcdef (in hex)
+ text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab-cd-ef
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/GPOS.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/GPOS.py
new file mode 100644
index 0000000..d79f4a0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/GPOS.py
@@ -0,0 +1,126 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+
+def _validate_float_string(what):
+ if len(what) == 0:
+ raise dns.exception.FormError
+ if what[0] == b"-"[0] or what[0] == b"+"[0]:
+ what = what[1:]
+ if what.isdigit():
+ return
+ try:
+ (left, right) = what.split(b".")
+ except ValueError:
+ raise dns.exception.FormError
+ if left == b"" and right == b"":
+ raise dns.exception.FormError
+ if not left == b"" and not left.decode().isdigit():
+ raise dns.exception.FormError
+ if not right == b"" and not right.decode().isdigit():
+ raise dns.exception.FormError
+
+
+@dns.immutable.immutable
+class GPOS(dns.rdata.Rdata):
+ """GPOS record"""
+
+ # see: RFC 1712
+
+ __slots__ = ["latitude", "longitude", "altitude"]
+
+ def __init__(self, rdclass, rdtype, latitude, longitude, altitude):
+ super().__init__(rdclass, rdtype)
+ if isinstance(latitude, float) or isinstance(latitude, int):
+ latitude = str(latitude)
+ if isinstance(longitude, float) or isinstance(longitude, int):
+ longitude = str(longitude)
+ if isinstance(altitude, float) or isinstance(altitude, int):
+ altitude = str(altitude)
+ latitude = self._as_bytes(latitude, True, 255)
+ longitude = self._as_bytes(longitude, True, 255)
+ altitude = self._as_bytes(altitude, True, 255)
+ _validate_float_string(latitude)
+ _validate_float_string(longitude)
+ _validate_float_string(altitude)
+ self.latitude = latitude
+ self.longitude = longitude
+ self.altitude = altitude
+ flat = self.float_latitude
+ if flat < -90.0 or flat > 90.0:
+ raise dns.exception.FormError("bad latitude")
+ flong = self.float_longitude
+ if flong < -180.0 or flong > 180.0:
+ raise dns.exception.FormError("bad longitude")
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return (
+ f"{self.latitude.decode()} {self.longitude.decode()} "
+ f"{self.altitude.decode()}"
+ )
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ latitude = tok.get_string()
+ longitude = tok.get_string()
+ altitude = tok.get_string()
+ return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ l = len(self.latitude)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.latitude)
+ l = len(self.longitude)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.longitude)
+ l = len(self.altitude)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.altitude)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ latitude = parser.get_counted_bytes()
+ longitude = parser.get_counted_bytes()
+ altitude = parser.get_counted_bytes()
+ return cls(rdclass, rdtype, latitude, longitude, altitude)
+
+ @property
+ def float_latitude(self):
+ "latitude as a floating point value"
+ return float(self.latitude)
+
+ @property
+ def float_longitude(self):
+ "longitude as a floating point value"
+ return float(self.longitude)
+
+ @property
+ def float_altitude(self):
+ "altitude as a floating point value"
+ return float(self.altitude)
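
GPOS keeps the three coordinates as the original validated byte strings and only converts to float on demand through the properties, so the presentation form round-trips losslessly. A short sketch of that behavior, assuming the stock `dns.rdata.from_text()` entry point:

```python
# Sketch: coordinates stay as byte strings; the floats are derived views.
import dns.rdata

rd = dns.rdata.from_text("IN", "GPOS", "-32.6882 116.8652 10.0")
assert rd.latitude == b"-32.6882"                 # stored verbatim
assert abs(rd.float_latitude + 32.6882) < 1e-9    # property applies float()
assert rd.to_text() == "-32.6882 116.8652 10.0"   # lossless round trip
```
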
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/HINFO.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/HINFO.py
new file mode 100644
index 0000000..06ad348
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/HINFO.py
@@ -0,0 +1,64 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class HINFO(dns.rdata.Rdata):
+ """HINFO record"""
+
+ # see: RFC 1035
+
+ __slots__ = ["cpu", "os"]
+
+ def __init__(self, rdclass, rdtype, cpu, os):
+ super().__init__(rdclass, rdtype)
+ self.cpu = self._as_bytes(cpu, True, 255)
+ self.os = self._as_bytes(os, True, 255)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f'"{dns.rdata._escapify(self.cpu)}" "{dns.rdata._escapify(self.os)}"'
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ cpu = tok.get_string(max_length=255)
+ os = tok.get_string(max_length=255)
+ return cls(rdclass, rdtype, cpu, os)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ l = len(self.cpu)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.cpu)
+ l = len(self.os)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.os)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ cpu = parser.get_counted_bytes()
+ os = parser.get_counted_bytes()
+ return cls(rdclass, rdtype, cpu, os)
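
HINFO is simply two counted character-strings, each at most 255 bytes, with a one-byte length prefix on the wire. A minimal sketch with illustrative values:

```python
# Sketch: each field is a <=255-byte counted string on the wire.
import dns.rdata

rd = dns.rdata.from_text("IN", "HINFO", '"ARMV8" "Linux"')
assert rd.cpu == b"ARMV8" and rd.os == b"Linux"
wire = rd.to_wire()
assert wire[0] == 5 and wire[6] == 5    # one-byte length prefixes
```
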
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/HIP.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/HIP.py
new file mode 100644
index 0000000..dc7948a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/HIP.py
@@ -0,0 +1,85 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2010, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import binascii
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.rdatatype
+
+
+@dns.immutable.immutable
+class HIP(dns.rdata.Rdata):
+ """HIP record"""
+
+ # see: RFC 5205
+
+ __slots__ = ["hit", "algorithm", "key", "servers"]
+
+ def __init__(self, rdclass, rdtype, hit, algorithm, key, servers):
+ super().__init__(rdclass, rdtype)
+ self.hit = self._as_bytes(hit, True, 255)
+ self.algorithm = self._as_uint8(algorithm)
+ self.key = self._as_bytes(key, True)
+ self.servers = self._as_tuple(servers, self._as_name)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ hit = binascii.hexlify(self.hit).decode()
+ key = base64.b64encode(self.key).replace(b"\n", b"").decode()
+ text = ""
+ servers = []
+ for server in self.servers:
+ servers.append(server.choose_relativity(origin, relativize))
+ if len(servers) > 0:
+ text += " " + " ".join(x.to_unicode() for x in servers)
+ return f"{self.algorithm} {hit} {key}{text}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ algorithm = tok.get_uint8()
+ hit = binascii.unhexlify(tok.get_string().encode())
+ key = base64.b64decode(tok.get_string().encode())
+ servers = []
+ for token in tok.get_remaining():
+ server = tok.as_name(token, origin, relativize, relativize_to)
+ servers.append(server)
+ return cls(rdclass, rdtype, hit, algorithm, key, servers)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ lh = len(self.hit)
+ lk = len(self.key)
+ file.write(struct.pack("!BBH", lh, self.algorithm, lk))
+ file.write(self.hit)
+ file.write(self.key)
+ for server in self.servers:
+ server.to_wire(file, None, origin, False)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (lh, algorithm, lk) = parser.get_struct("!BBH")
+ hit = parser.get_bytes(lh)
+ key = parser.get_bytes(lk)
+ servers = []
+ while parser.remaining() > 0:
+ server = parser.get_name(origin)
+ servers.append(server)
+ return cls(rdclass, rdtype, hit, algorithm, key, servers)
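
HIP mixes three encodings in one rdata: a hex HIT, a base64 public key, and zero or more rendezvous server names written uncompressed. A hedged sketch with dummy HIT/key values and a placeholder server name:

```python
# Sketch with dummy HIT and key material; "rvs.example." is a placeholder.
import dns.rdata

rd = dns.rdata.from_text("IN", "HIP", "2 00112233 AAAAAAAAAAAAAAAA rvs.example.")
assert rd.algorithm == 2
assert rd.hit == bytes.fromhex("00112233")   # HIT is presented as hex
assert len(rd.key) == 12                     # 16 base64 chars -> 12 bytes
assert str(rd.servers[0]) == "rvs.example."
```
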
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/ISDN.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/ISDN.py
new file mode 100644
index 0000000..6428a0a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/ISDN.py
@@ -0,0 +1,78 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class ISDN(dns.rdata.Rdata):
+ """ISDN record"""
+
+ # see: RFC 1183
+
+ __slots__ = ["address", "subaddress"]
+
+ def __init__(self, rdclass, rdtype, address, subaddress):
+ super().__init__(rdclass, rdtype)
+ self.address = self._as_bytes(address, True, 255)
+ self.subaddress = self._as_bytes(subaddress, True, 255)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.subaddress:
+ return (
+ f'"{dns.rdata._escapify(self.address)}" '
+ f'"{dns.rdata._escapify(self.subaddress)}"'
+ )
+ else:
+ return f'"{dns.rdata._escapify(self.address)}"'
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ address = tok.get_string()
+ tokens = tok.get_remaining(max_tokens=1)
+ if len(tokens) >= 1:
+ subaddress = tokens[0].unescape().value
+ else:
+ subaddress = ""
+ return cls(rdclass, rdtype, address, subaddress)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ l = len(self.address)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.address)
+ l = len(self.subaddress)
+ if l > 0:
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.subaddress)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ address = parser.get_counted_bytes()
+ if parser.remaining() > 0:
+ subaddress = parser.get_counted_bytes()
+ else:
+ subaddress = b""
+ return cls(rdclass, rdtype, address, subaddress)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/L32.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/L32.py
new file mode 100644
index 0000000..f51e5c7
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/L32.py
@@ -0,0 +1,42 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import struct
+
+import dns.immutable
+import dns.ipv4
+import dns.rdata
+
+
+@dns.immutable.immutable
+class L32(dns.rdata.Rdata):
+ """L32 record"""
+
+ # see: rfc6742.txt
+
+ __slots__ = ["preference", "locator32"]
+
+ def __init__(self, rdclass, rdtype, preference, locator32):
+ super().__init__(rdclass, rdtype)
+ self.preference = self._as_uint16(preference)
+ self.locator32 = self._as_ipv4_address(locator32)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f"{self.preference} {self.locator32}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ preference = tok.get_uint16()
+        locator32 = tok.get_identifier()
+        return cls(rdclass, rdtype, preference, locator32)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(struct.pack("!H", self.preference))
+ file.write(dns.ipv4.inet_aton(self.locator32))
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ preference = parser.get_uint16()
+ locator32 = parser.get_remaining()
+ return cls(rdclass, rdtype, preference, locator32)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/L64.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/L64.py
new file mode 100644
index 0000000..a47da19
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/L64.py
@@ -0,0 +1,48 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import struct
+
+import dns.immutable
+import dns.rdata
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class L64(dns.rdata.Rdata):
+ """L64 record"""
+
+ # see: rfc6742.txt
+
+ __slots__ = ["preference", "locator64"]
+
+ def __init__(self, rdclass, rdtype, preference, locator64):
+ super().__init__(rdclass, rdtype)
+ self.preference = self._as_uint16(preference)
+ if isinstance(locator64, bytes):
+ if len(locator64) != 8:
+ raise ValueError("invalid locator64")
+ self.locator64 = dns.rdata._hexify(locator64, 4, b":")
+ else:
+ dns.rdtypes.util.parse_formatted_hex(locator64, 4, 4, ":")
+ self.locator64 = locator64
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f"{self.preference} {self.locator64}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ preference = tok.get_uint16()
+ locator64 = tok.get_identifier()
+ return cls(rdclass, rdtype, preference, locator64)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(struct.pack("!H", self.preference))
+ file.write(dns.rdtypes.util.parse_formatted_hex(self.locator64, 4, 4, ":"))
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ preference = parser.get_uint16()
+ locator64 = parser.get_remaining()
+ return cls(rdclass, rdtype, preference, locator64)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/LOC.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/LOC.py
new file mode 100644
index 0000000..6c7fe5e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/LOC.py
@@ -0,0 +1,347 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+
+_pows = tuple(10**i for i in range(0, 11))
+
+# default values are in centimeters
+_default_size = 100.0
+_default_hprec = 1000000.0
+_default_vprec = 1000.0
+
+# for use by from_wire()
+_MAX_LATITUDE = 0x80000000 + 90 * 3600000
+_MIN_LATITUDE = 0x80000000 - 90 * 3600000
+_MAX_LONGITUDE = 0x80000000 + 180 * 3600000
+_MIN_LONGITUDE = 0x80000000 - 180 * 3600000
+
+
+def _exponent_of(what, desc):
+ if what == 0:
+ return 0
+ exp = None
+ for i, pow in enumerate(_pows):
+ if what < pow:
+ exp = i - 1
+ break
+ if exp is None or exp < 0:
+ raise dns.exception.SyntaxError(f"{desc} value out of bounds")
+ return exp
+
+
+def _float_to_tuple(what):
+ if what < 0:
+ sign = -1
+ what *= -1
+ else:
+ sign = 1
+ what = round(what * 3600000)
+ degrees = int(what // 3600000)
+ what -= degrees * 3600000
+ minutes = int(what // 60000)
+ what -= minutes * 60000
+ seconds = int(what // 1000)
+ what -= int(seconds * 1000)
+ what = int(what)
+ return (degrees, minutes, seconds, what, sign)
+
+
+def _tuple_to_float(what):
+ value = float(what[0])
+ value += float(what[1]) / 60.0
+ value += float(what[2]) / 3600.0
+ value += float(what[3]) / 3600000.0
+ return float(what[4]) * value
+
+
+def _encode_size(what, desc):
+ what = int(what)
+ exponent = _exponent_of(what, desc) & 0xF
+ base = what // pow(10, exponent) & 0xF
+ return base * 16 + exponent
+
+
+def _decode_size(what, desc):
+ exponent = what & 0x0F
+ if exponent > 9:
+ raise dns.exception.FormError(f"bad {desc} exponent")
+ base = (what & 0xF0) >> 4
+ if base > 9:
+ raise dns.exception.FormError(f"bad {desc} base")
+ return base * pow(10, exponent)
+
+
+def _check_coordinate_list(value, low, high):
+ if value[0] < low or value[0] > high:
+ raise ValueError(f"not in range [{low}, {high}]")
+ if value[1] < 0 or value[1] > 59:
+ raise ValueError("bad minutes value")
+ if value[2] < 0 or value[2] > 59:
+ raise ValueError("bad seconds value")
+ if value[3] < 0 or value[3] > 999:
+ raise ValueError("bad milliseconds value")
+ if value[4] != 1 and value[4] != -1:
+ raise ValueError("bad hemisphere value")
+
+
+@dns.immutable.immutable
+class LOC(dns.rdata.Rdata):
+ """LOC record"""
+
+ # see: RFC 1876
+
+ __slots__ = [
+ "latitude",
+ "longitude",
+ "altitude",
+ "size",
+ "horizontal_precision",
+ "vertical_precision",
+ ]
+
+ def __init__(
+ self,
+ rdclass,
+ rdtype,
+ latitude,
+ longitude,
+ altitude,
+ size=_default_size,
+ hprec=_default_hprec,
+ vprec=_default_vprec,
+ ):
+ """Initialize a LOC record instance.
+
+ The parameters I{latitude} and I{longitude} may be either a 4-tuple
+ of integers specifying (degrees, minutes, seconds, milliseconds),
+ or they may be floating point values specifying the number of
+ degrees. The other parameters are floats. Size, horizontal precision,
+ and vertical precision are specified in centimeters."""
+
+ super().__init__(rdclass, rdtype)
+ if isinstance(latitude, int):
+ latitude = float(latitude)
+ if isinstance(latitude, float):
+ latitude = _float_to_tuple(latitude)
+ _check_coordinate_list(latitude, -90, 90)
+ self.latitude = tuple(latitude) # pyright: ignore
+ if isinstance(longitude, int):
+ longitude = float(longitude)
+ if isinstance(longitude, float):
+ longitude = _float_to_tuple(longitude)
+ _check_coordinate_list(longitude, -180, 180)
+ self.longitude = tuple(longitude) # pyright: ignore
+ self.altitude = float(altitude)
+ self.size = float(size)
+ self.horizontal_precision = float(hprec)
+ self.vertical_precision = float(vprec)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.latitude[4] > 0:
+ lat_hemisphere = "N"
+ else:
+ lat_hemisphere = "S"
+ if self.longitude[4] > 0:
+ long_hemisphere = "E"
+ else:
+ long_hemisphere = "W"
+ text = (
+ f"{self.latitude[0]} {self.latitude[1]} "
+ f"{self.latitude[2]}.{self.latitude[3]:03d} {lat_hemisphere} "
+ f"{self.longitude[0]} {self.longitude[1]} "
+ f"{self.longitude[2]}.{self.longitude[3]:03d} {long_hemisphere} "
+ f"{(self.altitude / 100.0):0.2f}m"
+ )
+
+ # do not print default values
+ if (
+ self.size != _default_size
+ or self.horizontal_precision != _default_hprec
+ or self.vertical_precision != _default_vprec
+ ):
+ text += (
+ f" {self.size / 100.0:0.2f}m {self.horizontal_precision / 100.0:0.2f}m"
+ f" {self.vertical_precision / 100.0:0.2f}m"
+ )
+ return text
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ latitude = [0, 0, 0, 0, 1]
+ longitude = [0, 0, 0, 0, 1]
+ size = _default_size
+ hprec = _default_hprec
+ vprec = _default_vprec
+
+ latitude[0] = tok.get_int()
+ t = tok.get_string()
+ if t.isdigit():
+ latitude[1] = int(t)
+ t = tok.get_string()
+ if "." in t:
+ (seconds, milliseconds) = t.split(".")
+ if not seconds.isdigit():
+ raise dns.exception.SyntaxError("bad latitude seconds value")
+ latitude[2] = int(seconds)
+ l = len(milliseconds)
+ if l == 0 or l > 3 or not milliseconds.isdigit():
+ raise dns.exception.SyntaxError("bad latitude milliseconds value")
+ if l == 1:
+ m = 100
+ elif l == 2:
+ m = 10
+ else:
+ m = 1
+ latitude[3] = m * int(milliseconds)
+ t = tok.get_string()
+ elif t.isdigit():
+ latitude[2] = int(t)
+ t = tok.get_string()
+ if t == "S":
+ latitude[4] = -1
+ elif t != "N":
+ raise dns.exception.SyntaxError("bad latitude hemisphere value")
+
+ longitude[0] = tok.get_int()
+ t = tok.get_string()
+ if t.isdigit():
+ longitude[1] = int(t)
+ t = tok.get_string()
+ if "." in t:
+ (seconds, milliseconds) = t.split(".")
+ if not seconds.isdigit():
+ raise dns.exception.SyntaxError("bad longitude seconds value")
+ longitude[2] = int(seconds)
+ l = len(milliseconds)
+ if l == 0 or l > 3 or not milliseconds.isdigit():
+ raise dns.exception.SyntaxError("bad longitude milliseconds value")
+ if l == 1:
+ m = 100
+ elif l == 2:
+ m = 10
+ else:
+ m = 1
+ longitude[3] = m * int(milliseconds)
+ t = tok.get_string()
+ elif t.isdigit():
+ longitude[2] = int(t)
+ t = tok.get_string()
+ if t == "W":
+ longitude[4] = -1
+ elif t != "E":
+ raise dns.exception.SyntaxError("bad longitude hemisphere value")
+
+ t = tok.get_string()
+ if t[-1] == "m":
+ t = t[0:-1]
+ altitude = float(t) * 100.0 # m -> cm
+
+ tokens = tok.get_remaining(max_tokens=3)
+ if len(tokens) >= 1:
+ value = tokens[0].unescape().value
+ if value[-1] == "m":
+ value = value[0:-1]
+ size = float(value) * 100.0 # m -> cm
+ if len(tokens) >= 2:
+ value = tokens[1].unescape().value
+ if value[-1] == "m":
+ value = value[0:-1]
+ hprec = float(value) * 100.0 # m -> cm
+ if len(tokens) >= 3:
+ value = tokens[2].unescape().value
+ if value[-1] == "m":
+ value = value[0:-1]
+ vprec = float(value) * 100.0 # m -> cm
+
+ # Try encoding these now so we raise if they are bad
+ _encode_size(size, "size")
+ _encode_size(hprec, "horizontal precision")
+ _encode_size(vprec, "vertical precision")
+
+ return cls(rdclass, rdtype, latitude, longitude, altitude, size, hprec, vprec)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ milliseconds = (
+ self.latitude[0] * 3600000
+ + self.latitude[1] * 60000
+ + self.latitude[2] * 1000
+ + self.latitude[3]
+ ) * self.latitude[4]
+ latitude = 0x80000000 + milliseconds
+ milliseconds = (
+ self.longitude[0] * 3600000
+ + self.longitude[1] * 60000
+ + self.longitude[2] * 1000
+ + self.longitude[3]
+ ) * self.longitude[4]
+ longitude = 0x80000000 + milliseconds
+ altitude = int(self.altitude) + 10000000
+ size = _encode_size(self.size, "size")
+ hprec = _encode_size(self.horizontal_precision, "horizontal precision")
+ vprec = _encode_size(self.vertical_precision, "vertical precision")
+ wire = struct.pack(
+ "!BBBBIII", 0, size, hprec, vprec, latitude, longitude, altitude
+ )
+ file.write(wire)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (
+ version,
+ size,
+ hprec,
+ vprec,
+ latitude,
+ longitude,
+ altitude,
+ ) = parser.get_struct("!BBBBIII")
+ if version != 0:
+ raise dns.exception.FormError("LOC version not zero")
+ if latitude < _MIN_LATITUDE or latitude > _MAX_LATITUDE:
+ raise dns.exception.FormError("bad latitude")
+ if latitude > 0x80000000:
+ latitude = (latitude - 0x80000000) / 3600000
+ else:
+ latitude = -1 * (0x80000000 - latitude) / 3600000
+ if longitude < _MIN_LONGITUDE or longitude > _MAX_LONGITUDE:
+ raise dns.exception.FormError("bad longitude")
+ if longitude > 0x80000000:
+ longitude = (longitude - 0x80000000) / 3600000
+ else:
+ longitude = -1 * (0x80000000 - longitude) / 3600000
+ altitude = float(altitude) - 10000000.0
+ size = _decode_size(size, "size")
+ hprec = _decode_size(hprec, "horizontal precision")
+ vprec = _decode_size(vprec, "vertical precision")
+ return cls(rdclass, rdtype, latitude, longitude, altitude, size, hprec, vprec)
+
+ @property
+ def float_latitude(self):
+ "latitude as a floating point value"
+ return _tuple_to_float(self.latitude)
+
+ @property
+ def float_longitude(self):
+ "longitude as a floating point value"
+ return _tuple_to_float(self.longitude)
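
LOC accepts coordinates either as (degrees, minutes, seconds, milliseconds, hemisphere) tuples or as plain float degrees, and packs size/precision as base-times-power-of-ten nibble pairs. A hedged round-trip sketch, assuming the stock `from_text()`/`from_wire()` entry points:

```python
# Sketch: parse, check the derived float view, and round-trip the wire form.
import dns.rdata

rd = dns.rdata.from_text(
    "IN", "LOC", "60 9 0.510 N 24 39 0.000 E 10.00m 20m 2000m 20m"
)
assert abs(rd.float_latitude - (60 + 9 / 60 + 0.510 / 3600)) < 1e-9
wire = rd.to_wire()            # version byte, 3 size octets, 3 32-bit fields
rd2 = dns.rdata.from_wire("IN", "LOC", wire, 0, len(wire))
assert rd == rd2
```
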
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/LP.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/LP.py
new file mode 100644
index 0000000..379c862
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/LP.py
@@ -0,0 +1,42 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import struct
+
+import dns.immutable
+import dns.rdata
+
+
+@dns.immutable.immutable
+class LP(dns.rdata.Rdata):
+ """LP record"""
+
+ # see: rfc6742.txt
+
+ __slots__ = ["preference", "fqdn"]
+
+ def __init__(self, rdclass, rdtype, preference, fqdn):
+ super().__init__(rdclass, rdtype)
+ self.preference = self._as_uint16(preference)
+ self.fqdn = self._as_name(fqdn)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ fqdn = self.fqdn.choose_relativity(origin, relativize)
+ return f"{self.preference} {fqdn}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ preference = tok.get_uint16()
+ fqdn = tok.get_name(origin, relativize, relativize_to)
+ return cls(rdclass, rdtype, preference, fqdn)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(struct.pack("!H", self.preference))
+ self.fqdn.to_wire(file, compress, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ preference = parser.get_uint16()
+ fqdn = parser.get_name(origin)
+ return cls(rdclass, rdtype, preference, fqdn)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/MX.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/MX.py
new file mode 100644
index 0000000..0c300c5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/MX.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.mxbase
+
+
+@dns.immutable.immutable
+class MX(dns.rdtypes.mxbase.MXBase):
+ """MX record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NID.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NID.py
new file mode 100644
index 0000000..fa0dad5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NID.py
@@ -0,0 +1,48 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import struct
+
+import dns.immutable
+import dns.rdata
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class NID(dns.rdata.Rdata):
+ """NID record"""
+
+ # see: rfc6742.txt
+
+ __slots__ = ["preference", "nodeid"]
+
+ def __init__(self, rdclass, rdtype, preference, nodeid):
+ super().__init__(rdclass, rdtype)
+ self.preference = self._as_uint16(preference)
+ if isinstance(nodeid, bytes):
+ if len(nodeid) != 8:
+ raise ValueError("invalid nodeid")
+ self.nodeid = dns.rdata._hexify(nodeid, 4, b":")
+ else:
+ dns.rdtypes.util.parse_formatted_hex(nodeid, 4, 4, ":")
+ self.nodeid = nodeid
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f"{self.preference} {self.nodeid}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ preference = tok.get_uint16()
+ nodeid = tok.get_identifier()
+ return cls(rdclass, rdtype, preference, nodeid)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(struct.pack("!H", self.preference))
+ file.write(dns.rdtypes.util.parse_formatted_hex(self.nodeid, 4, 4, ":"))
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ preference = parser.get_uint16()
+ nodeid = parser.get_remaining()
+ return cls(rdclass, rdtype, preference, nodeid)
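
NID (like L64 above) keeps the node identifier in its colon-grouped hex presentation form and re-parses it with `parse_formatted_hex` when writing wire format. A short sketch of that split:

```python
# Sketch: nodeid is stored as text and re-encoded to 8 raw bytes on the wire.
import dns.rdata

rd = dns.rdata.from_text("IN", "NID", "10 0014:4fff:ff20:ee64")
assert rd.nodeid == "0014:4fff:ff20:ee64"
wire = rd.to_wire()
assert wire[2:] == bytes.fromhex("00144fffff20ee64")  # after !H preference
```
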
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NINFO.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NINFO.py
new file mode 100644
index 0000000..b177bdd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NINFO.py
@@ -0,0 +1,26 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.txtbase
+
+
+@dns.immutable.immutable
+class NINFO(dns.rdtypes.txtbase.TXTBase):
+ """NINFO record"""
+
+ # see: draft-reid-dnsext-zs-01
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NS.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NS.py
new file mode 100644
index 0000000..c3f34ce
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NS.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.nsbase
+
+
+@dns.immutable.immutable
+class NS(dns.rdtypes.nsbase.NSBase):
+ """NS record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC.py
new file mode 100644
index 0000000..3c78b72
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC.py
@@ -0,0 +1,67 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdatatype
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class Bitmap(dns.rdtypes.util.Bitmap):
+ type_name = "NSEC"
+
+
+@dns.immutable.immutable
+class NSEC(dns.rdata.Rdata):
+ """NSEC record"""
+
+ __slots__ = ["next", "windows"]
+
+ def __init__(self, rdclass, rdtype, next, windows):
+ super().__init__(rdclass, rdtype)
+ self.next = self._as_name(next)
+ if not isinstance(windows, Bitmap):
+ windows = Bitmap(windows)
+ self.windows = tuple(windows.windows)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = self.next.choose_relativity(origin, relativize)
+ text = Bitmap(self.windows).to_text()
+ return f"{next}{text}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ next = tok.get_name(origin, relativize, relativize_to)
+ windows = Bitmap.from_text(tok)
+ return cls(rdclass, rdtype, next, windows)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ # Note that NSEC downcasing, originally mandated by RFC 4034
+ # section 6.2 was removed by RFC 6840 section 5.1.
+ self.next.to_wire(file, None, origin, False)
+ Bitmap(self.windows).to_wire(file)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ next = parser.get_name(origin)
+ bitmap = Bitmap.from_wire_parser(parser)
+ return cls(rdclass, rdtype, next, bitmap)
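
The covered-type bitmap is delegated to the shared window/bitmap helper in `dns.rdtypes.util`, so NSEC itself only handles the next-owner name. A minimal sketch, assuming the stock `dns.rdata.from_text()` entry point:

```python
# Sketch: types are stored in window/bitmap form and rendered back sorted.
import dns.rdata

rd = dns.rdata.from_text("IN", "NSEC", "host.example. A MX RRSIG NSEC")
assert str(rd.next) == "host.example."
assert rd.to_text() == "host.example. A MX RRSIG NSEC"
```
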
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC3.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC3.py
new file mode 100644
index 0000000..6899418
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC3.py
@@ -0,0 +1,120 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import binascii
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdatatype
+import dns.rdtypes.util
+
+b32_hex_to_normal = bytes.maketrans(
+ b"0123456789ABCDEFGHIJKLMNOPQRSTUV", b"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
+)
+b32_normal_to_hex = bytes.maketrans(
+ b"ABCDEFGHIJKLMNOPQRSTUVWXYZ234567", b"0123456789ABCDEFGHIJKLMNOPQRSTUV"
+)
+
+# hash algorithm constants
+SHA1 = 1
+
+# flag constants
+OPTOUT = 1
+
+
+@dns.immutable.immutable
+class Bitmap(dns.rdtypes.util.Bitmap):
+ type_name = "NSEC3"
+
+
+@dns.immutable.immutable
+class NSEC3(dns.rdata.Rdata):
+ """NSEC3 record"""
+
+ __slots__ = ["algorithm", "flags", "iterations", "salt", "next", "windows"]
+
+ def __init__(
+ self, rdclass, rdtype, algorithm, flags, iterations, salt, next, windows
+ ):
+ super().__init__(rdclass, rdtype)
+ self.algorithm = self._as_uint8(algorithm)
+ self.flags = self._as_uint8(flags)
+ self.iterations = self._as_uint16(iterations)
+ self.salt = self._as_bytes(salt, True, 255)
+ self.next = self._as_bytes(next, True, 255)
+ if not isinstance(windows, Bitmap):
+ windows = Bitmap(windows)
+ self.windows = tuple(windows.windows)
+
+ def _next_text(self):
+ next = base64.b32encode(self.next).translate(b32_normal_to_hex).lower().decode()
+ next = next.rstrip("=")
+ return next
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ next = self._next_text()
+ if self.salt == b"":
+ salt = "-"
+ else:
+ salt = binascii.hexlify(self.salt).decode()
+ text = Bitmap(self.windows).to_text()
+ return f"{self.algorithm} {self.flags} {self.iterations} {salt} {next}{text}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == "-":
+ salt = b""
+ else:
+ salt = binascii.unhexlify(salt.encode("ascii"))
+ next = tok.get_string().encode("ascii").upper().translate(b32_hex_to_normal)
+ if next.endswith(b"="):
+ raise binascii.Error("Incorrect padding")
+ if len(next) % 8 != 0:
+ next += b"=" * (8 - len(next) % 8)
+ next = base64.b32decode(next)
+ bitmap = Bitmap.from_text(tok)
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, bitmap)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags, self.iterations, l))
+ file.write(self.salt)
+ l = len(self.next)
+ file.write(struct.pack("!B", l))
+ file.write(self.next)
+ Bitmap(self.windows).to_wire(file)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (algorithm, flags, iterations) = parser.get_struct("!BBH")
+ salt = parser.get_counted_bytes()
+ next = parser.get_counted_bytes()
+ bitmap = Bitmap.from_wire_parser(parser)
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt, next, bitmap)
+
+ def next_name(self, origin=None):
+ return dns.name.from_text(self._next_text(), origin)
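
The next-hashed-owner field is stored as raw bytes and rendered in base32hex via the translation tables above; `next_name()` turns it back into a `dns.name.Name`. A sketch using the example hash value from RFC 5155:

```python
# Sketch: salt is hex ("-" when empty); next owner is base32hex
# (the hash below is the RFC 5155 example value, not computed here).
import dns.rdata
from dns.rdtypes.ANY.NSEC3 import OPTOUT

rd = dns.rdata.from_text(
    "IN", "NSEC3", "1 1 0 aabbccdd 2vptu5timamqttgl4luu9kg21e0aor3s A RRSIG"
)
assert rd.flags & OPTOUT and rd.iterations == 0
assert len(rd.next) == 20        # a SHA-1 digest decodes to 20 bytes
print(rd.next_name())            # relative Name in base32hex form
```
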
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC3PARAM.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC3PARAM.py
new file mode 100644
index 0000000..e867872
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/NSEC3PARAM.py
@@ -0,0 +1,69 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+
+
+@dns.immutable.immutable
+class NSEC3PARAM(dns.rdata.Rdata):
+ """NSEC3PARAM record"""
+
+ __slots__ = ["algorithm", "flags", "iterations", "salt"]
+
+ def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
+ super().__init__(rdclass, rdtype)
+ self.algorithm = self._as_uint8(algorithm)
+ self.flags = self._as_uint8(flags)
+ self.iterations = self._as_uint16(iterations)
+ self.salt = self._as_bytes(salt, True, 255)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ if self.salt == b"":
+ salt = "-"
+ else:
+ salt = binascii.hexlify(self.salt).decode()
+ return f"{self.algorithm} {self.flags} {self.iterations} {salt}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ algorithm = tok.get_uint8()
+ flags = tok.get_uint8()
+ iterations = tok.get_uint16()
+ salt = tok.get_string()
+ if salt == "-":
+ salt = ""
+ else:
+ salt = binascii.unhexlify(salt.encode())
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ l = len(self.salt)
+ file.write(struct.pack("!BBHB", self.algorithm, self.flags, self.iterations, l))
+ file.write(self.salt)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (algorithm, flags, iterations) = parser.get_struct("!BBH")
+ salt = parser.get_counted_bytes()
+ return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/OPENPGPKEY.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/OPENPGPKEY.py
new file mode 100644
index 0000000..ac1841c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/OPENPGPKEY.py
@@ -0,0 +1,53 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2016 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class OPENPGPKEY(dns.rdata.Rdata):
+ """OPENPGPKEY record"""
+
+ # see: RFC 7929
+
+ def __init__(self, rdclass, rdtype, key):
+ super().__init__(rdclass, rdtype)
+ self.key = self._as_bytes(key)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._base64ify(self.key, chunksize=None, **kw) # pyright: ignore
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ b64 = tok.concatenate_remaining_identifiers().encode()
+ key = base64.b64decode(b64)
+ return cls(rdclass, rdtype, key)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(self.key)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ key = parser.get_remaining()
+ return cls(rdclass, rdtype, key)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/OPT.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/OPT.py
new file mode 100644
index 0000000..d343dfa
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/OPT.py
@@ -0,0 +1,77 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.edns
+import dns.exception
+import dns.immutable
+import dns.rdata
+
+# We don't implement from_text, and that's ok.
+# pylint: disable=abstract-method
+
+
+@dns.immutable.immutable
+class OPT(dns.rdata.Rdata):
+ """OPT record"""
+
+ __slots__ = ["options"]
+
+ def __init__(self, rdclass, rdtype, options):
+ """Initialize an OPT rdata.
+
+        *rdclass*, an ``int``, is the rdataclass of the Rdata,
+        which for OPT doubles as the EDNS payload size.
+
+        *rdtype*, an ``int``, is the rdatatype of the Rdata.
+
+        *options*, a tuple of ``dns.edns.Option`` instances.
+ """
+
+ super().__init__(rdclass, rdtype)
+
+ def as_option(option):
+ if not isinstance(option, dns.edns.Option):
+                raise ValueError("option is not a dns.edns.Option")
+ return option
+
+ self.options = self._as_tuple(options, as_option)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ for opt in self.options:
+ owire = opt.to_wire()
+ file.write(struct.pack("!HH", opt.otype, len(owire)))
+ file.write(owire)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return " ".join(opt.to_text() for opt in self.options)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ options = []
+ while parser.remaining() > 0:
+ (otype, olen) = parser.get_struct("!HH")
+ with parser.restrict_to(olen):
+ opt = dns.edns.option_from_wire_parser(otype, parser)
+ options.append(opt)
+ return cls(rdclass, rdtype, options)
+
+ @property
+ def payload(self):
+ "payload size"
+ return self.rdclass
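
OPT deliberately has no `from_text` (it never appears in zone files), and its rdclass field doubles as the EDNS payload size, surfaced through the `payload` property. A hedged construction sketch, assuming `dns.edns.ECSOption` behaves as in stock dnspython:

```python
# Sketch: rdclass carries the EDNS payload size; options must be
# dns.edns.Option instances (here an ECS option as an illustration).
import dns.edns
import dns.rdatatype
from dns.rdtypes.ANY.OPT import OPT

opt = OPT(4096, dns.rdatatype.OPT, (dns.edns.ECSOption("192.0.2.0", 24),))
assert opt.payload == 4096
print(opt.to_text())    # e.g. "ECS 192.0.2.0/24 scope/0"
```
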
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/PTR.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/PTR.py
new file mode 100644
index 0000000..98c3616
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/PTR.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.nsbase
+
+
+@dns.immutable.immutable
+class PTR(dns.rdtypes.nsbase.NSBase):
+ """PTR record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RESINFO.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RESINFO.py
new file mode 100644
index 0000000..76c8ea2
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RESINFO.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.txtbase
+
+
+@dns.immutable.immutable
+class RESINFO(dns.rdtypes.txtbase.TXTBase):
+ """RESINFO record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RP.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RP.py
new file mode 100644
index 0000000..a66cfc5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RP.py
@@ -0,0 +1,58 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+
+
+@dns.immutable.immutable
+class RP(dns.rdata.Rdata):
+ """RP record"""
+
+ # see: RFC 1183
+
+ __slots__ = ["mbox", "txt"]
+
+ def __init__(self, rdclass, rdtype, mbox, txt):
+ super().__init__(rdclass, rdtype)
+ self.mbox = self._as_name(mbox)
+ self.txt = self._as_name(txt)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mbox = self.mbox.choose_relativity(origin, relativize)
+ txt = self.txt.choose_relativity(origin, relativize)
+ return f"{str(mbox)} {str(txt)}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ mbox = tok.get_name(origin, relativize, relativize_to)
+ txt = tok.get_name(origin, relativize, relativize_to)
+ return cls(rdclass, rdtype, mbox, txt)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.mbox.to_wire(file, None, origin, canonicalize)
+ self.txt.to_wire(file, None, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ mbox = parser.get_name(origin)
+ txt = parser.get_name(origin)
+ return cls(rdclass, rdtype, mbox, txt)
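A quick sketch of the RP text form, parsed through the generic `dns.rdata.from_text` entry point (the names here are placeholders):

```python
import dns.rdata

# RP pairs a responsible-person mailbox (encoded as a name) with a
# pointer to a TXT record holding further details (RFC 1183).
rp = dns.rdata.from_text("IN", "RP", "admin.example.com. info.example.com.")
print(rp.mbox)  # admin.example.com.
print(rp.txt)   # info.example.com.
```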
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RRSIG.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RRSIG.py
new file mode 100644
index 0000000..5556cba
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RRSIG.py
@@ -0,0 +1,155 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import calendar
+import struct
+import time
+
+import dns.dnssectypes
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.rdatatype
+
+
+class BadSigTime(dns.exception.DNSException):
+ """Time in DNS SIG or RRSIG resource record cannot be parsed."""
+
+
+def sigtime_to_posixtime(what):
+ if len(what) <= 10 and what.isdigit():
+ return int(what)
+ if len(what) != 14:
+ raise BadSigTime
+ year = int(what[0:4])
+ month = int(what[4:6])
+ day = int(what[6:8])
+ hour = int(what[8:10])
+ minute = int(what[10:12])
+ second = int(what[12:14])
+ return calendar.timegm((year, month, day, hour, minute, second, 0, 0, 0))
+
+
+def posixtime_to_sigtime(what):
+ return time.strftime("%Y%m%d%H%M%S", time.gmtime(what))
+
+
+@dns.immutable.immutable
+class RRSIG(dns.rdata.Rdata):
+ """RRSIG record"""
+
+ __slots__ = [
+ "type_covered",
+ "algorithm",
+ "labels",
+ "original_ttl",
+ "expiration",
+ "inception",
+ "key_tag",
+ "signer",
+ "signature",
+ ]
+
+ def __init__(
+ self,
+ rdclass,
+ rdtype,
+ type_covered,
+ algorithm,
+ labels,
+ original_ttl,
+ expiration,
+ inception,
+ key_tag,
+ signer,
+ signature,
+ ):
+ super().__init__(rdclass, rdtype)
+ self.type_covered = self._as_rdatatype(type_covered)
+ self.algorithm = dns.dnssectypes.Algorithm.make(algorithm)
+ self.labels = self._as_uint8(labels)
+ self.original_ttl = self._as_ttl(original_ttl)
+ self.expiration = self._as_uint32(expiration)
+ self.inception = self._as_uint32(inception)
+ self.key_tag = self._as_uint16(key_tag)
+ self.signer = self._as_name(signer)
+ self.signature = self._as_bytes(signature)
+
+ def covers(self):
+ return self.type_covered
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return (
+ f"{dns.rdatatype.to_text(self.type_covered)} "
+ f"{self.algorithm} {self.labels} {self.original_ttl} "
+ f"{posixtime_to_sigtime(self.expiration)} "
+ f"{posixtime_to_sigtime(self.inception)} "
+ f"{self.key_tag} "
+ f"{self.signer.choose_relativity(origin, relativize)} "
+ f"{dns.rdata._base64ify(self.signature, **kw)}" # pyright: ignore
+ )
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ type_covered = dns.rdatatype.from_text(tok.get_string())
+ algorithm = dns.dnssectypes.Algorithm.from_text(tok.get_string())
+ labels = tok.get_int()
+ original_ttl = tok.get_ttl()
+ expiration = sigtime_to_posixtime(tok.get_string())
+ inception = sigtime_to_posixtime(tok.get_string())
+ key_tag = tok.get_int()
+ signer = tok.get_name(origin, relativize, relativize_to)
+ b64 = tok.concatenate_remaining_identifiers().encode()
+ signature = base64.b64decode(b64)
+ return cls(
+ rdclass,
+ rdtype,
+ type_covered,
+ algorithm,
+ labels,
+ original_ttl,
+ expiration,
+ inception,
+ key_tag,
+ signer,
+ signature,
+ )
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ header = struct.pack(
+ "!HBBIIIH",
+ self.type_covered,
+ self.algorithm,
+ self.labels,
+ self.original_ttl,
+ self.expiration,
+ self.inception,
+ self.key_tag,
+ )
+ file.write(header)
+ self.signer.to_wire(file, None, origin, canonicalize)
+ file.write(self.signature)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ header = parser.get_struct("!HBBIIIH")
+ signer = parser.get_name(origin)
+ signature = parser.get_remaining()
+ return cls(rdclass, rdtype, *header, signer, signature) # pyright: ignore
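The two time helpers above accept either POSIX seconds (ten or fewer digits) or the RFC 4034 `YYYYMMDDHHMMSS` form. A round-trip sketch:

```python
from dns.rdtypes.ANY.RRSIG import posixtime_to_sigtime, sigtime_to_posixtime

assert posixtime_to_sigtime(0) == "19700101000000"
assert sigtime_to_posixtime("19700101000000") == 0
# Strings of ten or fewer digits are treated as POSIX times directly.
assert sigtime_to_posixtime("1700000000") == 1700000000
```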
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RT.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RT.py
new file mode 100644
index 0000000..5a4d45c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/RT.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.mxbase
+
+
+@dns.immutable.immutable
+class RT(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+ """RT record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SMIMEA.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SMIMEA.py
new file mode 100644
index 0000000..55d87bf
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SMIMEA.py
@@ -0,0 +1,9 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import dns.immutable
+import dns.rdtypes.tlsabase
+
+
+@dns.immutable.immutable
+class SMIMEA(dns.rdtypes.tlsabase.TLSABase):
+ """SMIMEA record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SOA.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SOA.py
new file mode 100644
index 0000000..3c7cd8c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SOA.py
@@ -0,0 +1,78 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+
+
+@dns.immutable.immutable
+class SOA(dns.rdata.Rdata):
+ """SOA record"""
+
+ # see: RFC 1035
+
+ __slots__ = ["mname", "rname", "serial", "refresh", "retry", "expire", "minimum"]
+
+ def __init__(
+ self, rdclass, rdtype, mname, rname, serial, refresh, retry, expire, minimum
+ ):
+ super().__init__(rdclass, rdtype)
+ self.mname = self._as_name(mname)
+ self.rname = self._as_name(rname)
+ self.serial = self._as_uint32(serial)
+ self.refresh = self._as_ttl(refresh)
+ self.retry = self._as_ttl(retry)
+ self.expire = self._as_ttl(expire)
+ self.minimum = self._as_ttl(minimum)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ mname = self.mname.choose_relativity(origin, relativize)
+ rname = self.rname.choose_relativity(origin, relativize)
+ return f"{mname} {rname} {self.serial} {self.refresh} {self.retry} {self.expire} {self.minimum}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ mname = tok.get_name(origin, relativize, relativize_to)
+ rname = tok.get_name(origin, relativize, relativize_to)
+ serial = tok.get_uint32()
+ refresh = tok.get_ttl()
+ retry = tok.get_ttl()
+ expire = tok.get_ttl()
+ minimum = tok.get_ttl()
+ return cls(
+ rdclass, rdtype, mname, rname, serial, refresh, retry, expire, minimum
+ )
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.mname.to_wire(file, compress, origin, canonicalize)
+ self.rname.to_wire(file, compress, origin, canonicalize)
+ five_ints = struct.pack(
+ "!IIIII", self.serial, self.refresh, self.retry, self.expire, self.minimum
+ )
+ file.write(five_ints)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ mname = parser.get_name(origin)
+ rname = parser.get_name(origin)
+ return cls(rdclass, rdtype, mname, rname, *parser.get_struct("!IIIII"))
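A sketch of parsing an SOA rdata from its zone-file form (all values below are illustrative):

```python
import dns.rdata

soa = dns.rdata.from_text(
    "IN", "SOA",
    "ns1.example.com. hostmaster.example.com. 2024010101 7200 900 1209600 300",
)
print(soa.serial)   # 2024010101
print(soa.minimum)  # 300, conventionally used as the negative-caching TTL
```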
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SPF.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SPF.py
new file mode 100644
index 0000000..1df3b70
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SPF.py
@@ -0,0 +1,26 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.txtbase
+
+
+@dns.immutable.immutable
+class SPF(dns.rdtypes.txtbase.TXTBase):
+ """SPF record"""
+
+ # see: RFC 4408
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SSHFP.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SSHFP.py
new file mode 100644
index 0000000..3f08f3a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/SSHFP.py
@@ -0,0 +1,67 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+import struct
+
+import dns.immutable
+import dns.rdata
+import dns.rdatatype
+
+
+@dns.immutable.immutable
+class SSHFP(dns.rdata.Rdata):
+ """SSHFP record"""
+
+ # See RFC 4255
+
+ __slots__ = ["algorithm", "fp_type", "fingerprint"]
+
+ def __init__(self, rdclass, rdtype, algorithm, fp_type, fingerprint):
+ super().__init__(rdclass, rdtype)
+ self.algorithm = self._as_uint8(algorithm)
+ self.fp_type = self._as_uint8(fp_type)
+ self.fingerprint = self._as_bytes(fingerprint, True)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ kw = kw.copy()
+ chunksize = kw.pop("chunksize", 128)
+ fingerprint = dns.rdata._hexify(
+ self.fingerprint, chunksize=chunksize, **kw # pyright: ignore
+ )
+ return f"{self.algorithm} {self.fp_type} {fingerprint}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ algorithm = tok.get_uint8()
+ fp_type = tok.get_uint8()
+ fingerprint = tok.concatenate_remaining_identifiers().encode()
+ fingerprint = binascii.unhexlify(fingerprint)
+ return cls(rdclass, rdtype, algorithm, fp_type, fingerprint)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ header = struct.pack("!BB", self.algorithm, self.fp_type)
+ file.write(header)
+ file.write(self.fingerprint)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ header = parser.get_struct("BB")
+ fingerprint = parser.get_remaining()
+ return cls(rdclass, rdtype, header[0], header[1], fingerprint)
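A sketch of building an SSHFP rdata from text; the hex string below is a dummy 32-byte fingerprint, and the algorithm/fp_type codes follow RFC 4255 and RFC 7479:

```python
import dns.rdata

# algorithm 4 = Ed25519, fp_type 2 = SHA-256; the fingerprint is hex in text form.
sshfp = dns.rdata.from_text("IN", "SSHFP", "4 2 " + "ab" * 32)
print(sshfp.algorithm, sshfp.fp_type, len(sshfp.fingerprint))  # 4 2 32
```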
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TKEY.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TKEY.py
new file mode 100644
index 0000000..f9189b1
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TKEY.py
@@ -0,0 +1,135 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+
+
+@dns.immutable.immutable
+class TKEY(dns.rdata.Rdata):
+ """TKEY Record"""
+
+ __slots__ = [
+ "algorithm",
+ "inception",
+ "expiration",
+ "mode",
+ "error",
+ "key",
+ "other",
+ ]
+
+ def __init__(
+ self,
+ rdclass,
+ rdtype,
+ algorithm,
+ inception,
+ expiration,
+ mode,
+ error,
+ key,
+ other=b"",
+ ):
+ super().__init__(rdclass, rdtype)
+ self.algorithm = self._as_name(algorithm)
+ self.inception = self._as_uint32(inception)
+ self.expiration = self._as_uint32(expiration)
+ self.mode = self._as_uint16(mode)
+ self.error = self._as_uint16(error)
+ self.key = self._as_bytes(key)
+ self.other = self._as_bytes(other)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ _algorithm = self.algorithm.choose_relativity(origin, relativize)
+ key = dns.rdata._base64ify(self.key, 0)
+ other = ""
+ if len(self.other) > 0:
+ other = " " + dns.rdata._base64ify(self.other, 0)
+ return f"{_algorithm} {self.inception} {self.expiration} {self.mode} {self.error} {key}{other}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ algorithm = tok.get_name(relativize=False)
+ inception = tok.get_uint32()
+ expiration = tok.get_uint32()
+ mode = tok.get_uint16()
+ error = tok.get_uint16()
+ key_b64 = tok.get_string().encode()
+ key = base64.b64decode(key_b64)
+ other_b64 = tok.concatenate_remaining_identifiers(True).encode()
+ other = base64.b64decode(other_b64)
+
+ return cls(
+ rdclass, rdtype, algorithm, inception, expiration, mode, error, key, other
+ )
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.algorithm.to_wire(file, compress, origin)
+ file.write(
+ struct.pack("!IIHH", self.inception, self.expiration, self.mode, self.error)
+ )
+ file.write(struct.pack("!H", len(self.key)))
+ file.write(self.key)
+ file.write(struct.pack("!H", len(self.other)))
+ if len(self.other) > 0:
+ file.write(self.other)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ algorithm = parser.get_name(origin)
+ inception, expiration, mode, error = parser.get_struct("!IIHH")
+ key = parser.get_counted_bytes(2)
+ other = parser.get_counted_bytes(2)
+
+ return cls(
+ rdclass, rdtype, algorithm, inception, expiration, mode, error, key, other
+ )
+
+ # Constants for the mode field - from RFC 2930:
+ # 2.5 The Mode Field
+ #
+ # The mode field specifies the general scheme for key agreement or
+ # the purpose of the TKEY DNS message. Servers and resolvers
+ # supporting this specification MUST implement the Diffie-Hellman key
+ # agreement mode and the key deletion mode for queries. All other
+ # modes are OPTIONAL. A server supporting TKEY that receives a TKEY
+ # request with a mode it does not support returns the BADMODE error.
+ # The following values of the Mode octet are defined, available, or
+ # reserved:
+ #
+ # Value Description
+ # ----- -----------
+ # 0 - reserved, see section 7
+ # 1 server assignment
+ # 2 Diffie-Hellman exchange
+ # 3 GSS-API negotiation
+ # 4 resolver assignment
+ # 5 key deletion
+ # 6-65534 - available, see section 7
+ # 65535 - reserved, see section 7
+ SERVER_ASSIGNMENT = 1
+ DIFFIE_HELLMAN_EXCHANGE = 2
+ GSSAPI_NEGOTIATION = 3
+ RESOLVER_ASSIGNMENT = 4
+ KEY_DELETION = 5
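A sketch of constructing a TKEY rdata directly, using the GSS-API mode constant defined above (the algorithm name, timestamps, and key bytes are placeholders):

```python
import dns.name
import dns.rdataclass
import dns.rdatatype
from dns.rdtypes.ANY.TKEY import TKEY

tkey = TKEY(
    dns.rdataclass.ANY,
    dns.rdatatype.TKEY,
    dns.name.from_text("gss-tsig."),
    1700000000,               # inception (POSIX seconds)
    1700003600,               # expiration
    TKEY.GSSAPI_NEGOTIATION,  # mode 3, per the RFC 2930 table above
    0,                        # error (NOERROR)
    b"opaque-key-material",
)
print(tkey.to_text())
```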
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TLSA.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TLSA.py
new file mode 100644
index 0000000..4dffc55
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TLSA.py
@@ -0,0 +1,9 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import dns.immutable
+import dns.rdtypes.tlsabase
+
+
+@dns.immutable.immutable
+class TLSA(dns.rdtypes.tlsabase.TLSABase):
+ """TLSA record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TSIG.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TSIG.py
new file mode 100644
index 0000000..7942382
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TSIG.py
@@ -0,0 +1,160 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rcode
+import dns.rdata
+
+
+@dns.immutable.immutable
+class TSIG(dns.rdata.Rdata):
+ """TSIG record"""
+
+ __slots__ = [
+ "algorithm",
+ "time_signed",
+ "fudge",
+ "mac",
+ "original_id",
+ "error",
+ "other",
+ ]
+
+ def __init__(
+ self,
+ rdclass,
+ rdtype,
+ algorithm,
+ time_signed,
+ fudge,
+ mac,
+ original_id,
+ error,
+ other,
+ ):
+ """Initialize a TSIG rdata.
+
+ *rdclass*, an ``int``, is the rdataclass of the Rdata.
+
+ *rdtype*, an ``int``, is the rdatatype of the Rdata.
+
+ *algorithm*, a ``dns.name.Name``.
+
+ *time_signed*, an ``int``.
+
+ *fudge*, an ``int``.
+
+ *mac*, a ``bytes``.
+
+ *original_id*, an ``int``.
+
+ *error*, an ``int``.
+
+ *other*, a ``bytes``.
+ """
+
+ super().__init__(rdclass, rdtype)
+ self.algorithm = self._as_name(algorithm)
+ self.time_signed = self._as_uint48(time_signed)
+ self.fudge = self._as_uint16(fudge)
+ self.mac = self._as_bytes(mac)
+ self.original_id = self._as_uint16(original_id)
+ self.error = dns.rcode.Rcode.make(error)
+ self.other = self._as_bytes(other)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ algorithm = self.algorithm.choose_relativity(origin, relativize)
+ error = dns.rcode.to_text(self.error, True)
+ text = (
+ f"{algorithm} {self.time_signed} {self.fudge} "
+ + f"{len(self.mac)} {dns.rdata._base64ify(self.mac, 0)} "
+ + f"{self.original_id} {error} {len(self.other)}"
+ )
+ if self.other:
+ text += f" {dns.rdata._base64ify(self.other, 0)}"
+ return text
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ algorithm = tok.get_name(relativize=False)
+ time_signed = tok.get_uint48()
+ fudge = tok.get_uint16()
+ mac_len = tok.get_uint16()
+ mac = base64.b64decode(tok.get_string())
+ if len(mac) != mac_len:
+ raise SyntaxError("invalid MAC")
+ original_id = tok.get_uint16()
+ error = dns.rcode.from_text(tok.get_string())
+ other_len = tok.get_uint16()
+ if other_len > 0:
+ other = base64.b64decode(tok.get_string())
+ if len(other) != other_len:
+ raise SyntaxError("invalid other data")
+ else:
+ other = b""
+ return cls(
+ rdclass,
+ rdtype,
+ algorithm,
+ time_signed,
+ fudge,
+ mac,
+ original_id,
+ error,
+ other,
+ )
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.algorithm.to_wire(file, None, origin, False)
+ file.write(
+ struct.pack(
+ "!HIHH",
+ (self.time_signed >> 32) & 0xFFFF,
+ self.time_signed & 0xFFFFFFFF,
+ self.fudge,
+ len(self.mac),
+ )
+ )
+ file.write(self.mac)
+ file.write(struct.pack("!HHH", self.original_id, self.error, len(self.other)))
+ file.write(self.other)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ algorithm = parser.get_name()
+ time_signed = parser.get_uint48()
+ fudge = parser.get_uint16()
+ mac = parser.get_counted_bytes(2)
+ (original_id, error) = parser.get_struct("!HH")
+ other = parser.get_counted_bytes(2)
+ return cls(
+ rdclass,
+ rdtype,
+ algorithm,
+ time_signed,
+ fudge,
+ mac,
+ original_id,
+ error,
+ other,
+ )
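One detail of `_to_wire` above worth spelling out: `time_signed` is a 48-bit quantity, packed as a 16-bit high half followed by a 32-bit low half. A standalone sketch of the same split:

```python
import struct

time_signed = 1_700_000_000
hi = (time_signed >> 32) & 0xFFFF  # upper 16 bits
lo = time_signed & 0xFFFFFFFF      # lower 32 bits
wire = struct.pack("!HI", hi, lo)  # 6 bytes on the wire
assert int.from_bytes(wire, "big") == time_signed
```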
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TXT.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TXT.py
new file mode 100644
index 0000000..6d4dae2
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/TXT.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.txtbase
+
+
+@dns.immutable.immutable
+class TXT(dns.rdtypes.txtbase.TXTBase):
+ """TXT record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/URI.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/URI.py
new file mode 100644
index 0000000..021391d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/URI.py
@@ -0,0 +1,79 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+# Copyright (C) 2015 Red Hat, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class URI(dns.rdata.Rdata):
+ """URI record"""
+
+ # see RFC 7553
+
+ __slots__ = ["priority", "weight", "target"]
+
+ def __init__(self, rdclass, rdtype, priority, weight, target):
+ super().__init__(rdclass, rdtype)
+ self.priority = self._as_uint16(priority)
+ self.weight = self._as_uint16(weight)
+ self.target = self._as_bytes(target, True)
+ if len(self.target) == 0:
+ raise dns.exception.SyntaxError("URI target cannot be empty")
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f'{self.priority} {self.weight} "{self.target.decode()}"'
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ priority = tok.get_uint16()
+ weight = tok.get_uint16()
+ target = tok.get().unescape()
+ if not (target.is_quoted_string() or target.is_identifier()):
+ raise dns.exception.SyntaxError("URI target must be a string")
+ return cls(rdclass, rdtype, priority, weight, target.value)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ two_ints = struct.pack("!HH", self.priority, self.weight)
+ file.write(two_ints)
+ file.write(self.target)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (priority, weight) = parser.get_struct("!HH")
+ target = parser.get_remaining()
+ if len(target) == 0:
+ raise dns.exception.FormError("URI target may not be empty")
+ return cls(rdclass, rdtype, priority, weight, target)
+
+ def _processing_priority(self):
+ return self.priority
+
+ def _processing_weight(self):
+ return self.weight
+
+ @classmethod
+ def _processing_order(cls, iterable):
+ return dns.rdtypes.util.weighted_processing_order(iterable)
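A sketch of the URI text form; priority and weight drive the weighted processing order hook above (RFC 7553):

```python
import dns.rdata

uri = dns.rdata.from_text("IN", "URI", '10 1 "https://example.com/"')
print(uri.priority, uri.weight)  # 10 1
print(uri.target)                # b'https://example.com/'
```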
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/WALLET.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/WALLET.py
new file mode 100644
index 0000000..ff46476
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/WALLET.py
@@ -0,0 +1,9 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import dns.immutable
+import dns.rdtypes.txtbase
+
+
+@dns.immutable.immutable
+class WALLET(dns.rdtypes.txtbase.TXTBase):
+ """WALLET record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/X25.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/X25.py
new file mode 100644
index 0000000..2436ddb
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/X25.py
@@ -0,0 +1,57 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class X25(dns.rdata.Rdata):
+ """X25 record"""
+
+ # see RFC 1183
+
+ __slots__ = ["address"]
+
+ def __init__(self, rdclass, rdtype, address):
+ super().__init__(rdclass, rdtype)
+ self.address = self._as_bytes(address, True, 255)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f'"{dns.rdata._escapify(self.address)}"'
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ address = tok.get_string()
+ return cls(rdclass, rdtype, address)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ l = len(self.address)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(self.address)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ address = parser.get_counted_bytes()
+ return cls(rdclass, rdtype, address)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/ZONEMD.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/ZONEMD.py
new file mode 100644
index 0000000..acef4f2
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/ZONEMD.py
@@ -0,0 +1,64 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import binascii
+import struct
+
+import dns.immutable
+import dns.rdata
+import dns.rdatatype
+import dns.zonetypes
+
+
+@dns.immutable.immutable
+class ZONEMD(dns.rdata.Rdata):
+ """ZONEMD record"""
+
+ # See RFC 8976
+
+ __slots__ = ["serial", "scheme", "hash_algorithm", "digest"]
+
+ def __init__(self, rdclass, rdtype, serial, scheme, hash_algorithm, digest):
+ super().__init__(rdclass, rdtype)
+ self.serial = self._as_uint32(serial)
+ self.scheme = dns.zonetypes.DigestScheme.make(scheme)
+ self.hash_algorithm = dns.zonetypes.DigestHashAlgorithm.make(hash_algorithm)
+ self.digest = self._as_bytes(digest)
+
+ if self.scheme == 0: # reserved, RFC 8976 Sec. 5.2
+ raise ValueError("scheme 0 is reserved")
+ if self.hash_algorithm == 0: # reserved, RFC 8976 Sec. 5.3
+ raise ValueError("hash_algorithm 0 is reserved")
+
+ hasher = dns.zonetypes._digest_hashers.get(self.hash_algorithm)
+ if hasher and hasher().digest_size != len(self.digest):
+ raise ValueError("digest length inconsistent with hash algorithm")
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ kw = kw.copy()
+ chunksize = kw.pop("chunksize", 128)
+ digest = dns.rdata._hexify(
+ self.digest, chunksize=chunksize, **kw # pyright: ignore
+ )
+ return f"{self.serial} {self.scheme} {self.hash_algorithm} {digest}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ serial = tok.get_uint32()
+ scheme = tok.get_uint8()
+ hash_algorithm = tok.get_uint8()
+ digest = tok.concatenate_remaining_identifiers().encode()
+ digest = binascii.unhexlify(digest)
+ return cls(rdclass, rdtype, serial, scheme, hash_algorithm, digest)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ header = struct.pack("!IBB", self.serial, self.scheme, self.hash_algorithm)
+ file.write(header)
+ file.write(self.digest)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ header = parser.get_struct("!IBB")
+ digest = parser.get_remaining()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], digest)
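A sketch of the ZONEMD text form; the digest-length check above means a SHA-384 digest (hash algorithm 1) must be exactly 48 bytes, so the placeholder below is 96 hex characters:

```python
import dns.rdata

# scheme 1 = SIMPLE, hash algorithm 1 = SHA-384 (RFC 8976)
zonemd = dns.rdata.from_text("IN", "ZONEMD", "2024010101 1 1 " + "00" * 48)
print(zonemd.serial)  # 2024010101

# A digest whose length does not match the hash algorithm raises ValueError.
```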
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/__init__.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/__init__.py
new file mode 100644
index 0000000..cc39f86
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/ANY/__init__.py
@@ -0,0 +1,71 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class ANY (generic) rdata type classes."""
+
+__all__ = [
+ "AFSDB",
+ "AMTRELAY",
+ "AVC",
+ "CAA",
+ "CDNSKEY",
+ "CDS",
+ "CERT",
+ "CNAME",
+ "CSYNC",
+ "DLV",
+ "DNAME",
+ "DNSKEY",
+ "DS",
+ "DSYNC",
+ "EUI48",
+ "EUI64",
+ "GPOS",
+ "HINFO",
+ "HIP",
+ "ISDN",
+ "L32",
+ "L64",
+ "LOC",
+ "LP",
+ "MX",
+ "NID",
+ "NINFO",
+ "NS",
+ "NSEC",
+ "NSEC3",
+ "NSEC3PARAM",
+ "OPENPGPKEY",
+ "OPT",
+ "PTR",
+ "RESINFO",
+ "RP",
+ "RRSIG",
+ "RT",
+ "SMIMEA",
+ "SOA",
+ "SPF",
+ "SSHFP",
+ "TKEY",
+ "TLSA",
+ "TSIG",
+ "TXT",
+ "URI",
+ "WALLET",
+ "X25",
+ "ZONEMD",
+]
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/CH/A.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/CH/A.py
new file mode 100644
index 0000000..e3e0752
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/CH/A.py
@@ -0,0 +1,60 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.immutable
+import dns.rdata
+import dns.rdtypes.mxbase
+
+
+@dns.immutable.immutable
+class A(dns.rdata.Rdata):
+ """A record for Chaosnet"""
+
+ # domain: the domain of the address
+ # address: the 16-bit address
+
+ __slots__ = ["domain", "address"]
+
+ def __init__(self, rdclass, rdtype, domain, address):
+ super().__init__(rdclass, rdtype)
+ self.domain = self._as_name(domain)
+ self.address = self._as_uint16(address)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ domain = self.domain.choose_relativity(origin, relativize)
+ return f"{domain} {self.address:o}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ domain = tok.get_name(origin, relativize, relativize_to)
+ address = tok.get_uint16(base=8)
+ return cls(rdclass, rdtype, domain, address)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.domain.to_wire(file, compress, origin, canonicalize)
+ pref = struct.pack("!H", self.address)
+ file.write(pref)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ domain = parser.get_name(origin)
+ address = parser.get_uint16()
+ return cls(rdclass, rdtype, domain, address)
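Chaosnet addresses are 16-bit values conventionally written in octal, hence the `base=8` parse and the `:o` format above. A sketch with a placeholder domain:

```python
import dns.rdata

a = dns.rdata.from_text("CH", "A", "ch-addr.example. 3600")
print(a.address)    # 1920 (0o3600)
print(a.to_text())  # ch-addr.example. 3600
```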
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/CH/__init__.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/CH/__init__.py
new file mode 100644
index 0000000..0760c26
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/CH/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class CH rdata type classes."""
+
+__all__ = [
+ "A",
+]
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/A.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/A.py
new file mode 100644
index 0000000..e09d611
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/A.py
@@ -0,0 +1,51 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.immutable
+import dns.ipv4
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class A(dns.rdata.Rdata):
+ """A record."""
+
+ __slots__ = ["address"]
+
+ def __init__(self, rdclass, rdtype, address):
+ super().__init__(rdclass, rdtype)
+ self.address = self._as_ipv4_address(address)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ address = tok.get_identifier()
+ return cls(rdclass, rdtype, address)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(dns.ipv4.inet_aton(self.address))
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ address = parser.get_remaining()
+ return cls(rdclass, rdtype, address)
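A sketch of the text/wire round trip for an A record (the address is from the RFC 5737 documentation range):

```python
import dns.rdata

a = dns.rdata.from_text("IN", "A", "192.0.2.1")
print(a.to_wire())  # b'\xc0\x00\x02\x01' -- the four raw address bytes
print(a.to_text())  # 192.0.2.1
```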
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/AAAA.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/AAAA.py
new file mode 100644
index 0000000..0cd139e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/AAAA.py
@@ -0,0 +1,51 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.exception
+import dns.immutable
+import dns.ipv6
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class AAAA(dns.rdata.Rdata):
+ """AAAA record."""
+
+ __slots__ = ["address"]
+
+ def __init__(self, rdclass, rdtype, address):
+ super().__init__(rdclass, rdtype)
+ self.address = self._as_ipv6_address(address)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return self.address
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ address = tok.get_identifier()
+ return cls(rdclass, rdtype, address)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(dns.ipv6.inet_aton(self.address))
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ address = parser.get_remaining()
+ return cls(rdclass, rdtype, address)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/APL.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/APL.py
new file mode 100644
index 0000000..c4ce6e4
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/APL.py
@@ -0,0 +1,150 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+import codecs
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.ipv4
+import dns.ipv6
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class APLItem:
+ """An APL list item."""
+
+ __slots__ = ["family", "negation", "address", "prefix"]
+
+ def __init__(self, family, negation, address, prefix):
+ self.family = dns.rdata.Rdata._as_uint16(family)
+ self.negation = dns.rdata.Rdata._as_bool(negation)
+ if self.family == 1:
+ self.address = dns.rdata.Rdata._as_ipv4_address(address)
+ self.prefix = dns.rdata.Rdata._as_int(prefix, 0, 32)
+ elif self.family == 2:
+ self.address = dns.rdata.Rdata._as_ipv6_address(address)
+ self.prefix = dns.rdata.Rdata._as_int(prefix, 0, 128)
+ else:
+ self.address = dns.rdata.Rdata._as_bytes(address, max_length=127)
+ self.prefix = dns.rdata.Rdata._as_uint8(prefix)
+
+ def __str__(self):
+ if self.negation:
+ return f"!{self.family}:{self.address}/{self.prefix}"
+ else:
+ return f"{self.family}:{self.address}/{self.prefix}"
+
+ def to_wire(self, file):
+ if self.family == 1:
+ address = dns.ipv4.inet_aton(self.address)
+ elif self.family == 2:
+ address = dns.ipv6.inet_aton(self.address)
+ else:
+ address = binascii.unhexlify(self.address)
+ #
+ # Truncate least significant zero bytes.
+ #
+ last = 0
+ for i in range(len(address) - 1, -1, -1):
+ if address[i] != 0:
+ last = i + 1
+ break
+ address = address[0:last]
+ l = len(address)
+ assert l < 128
+ if self.negation:
+ l |= 0x80
+ header = struct.pack("!HBB", self.family, self.prefix, l)
+ file.write(header)
+ file.write(address)
+
+
+@dns.immutable.immutable
+class APL(dns.rdata.Rdata):
+ """APL record."""
+
+ # see: RFC 3123
+
+ __slots__ = ["items"]
+
+ def __init__(self, rdclass, rdtype, items):
+ super().__init__(rdclass, rdtype)
+ for item in items:
+ if not isinstance(item, APLItem):
+ raise ValueError("item not an APLItem")
+ self.items = tuple(items)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return " ".join(map(str, self.items))
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ items = []
+ for token in tok.get_remaining():
+ item = token.unescape().value
+ if item[0] == "!":
+ negation = True
+ item = item[1:]
+ else:
+ negation = False
+ (family, rest) = item.split(":", 1)
+ family = int(family)
+ (address, prefix) = rest.split("/", 1)
+ prefix = int(prefix)
+ item = APLItem(family, negation, address, prefix)
+ items.append(item)
+
+ return cls(rdclass, rdtype, items)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ for item in self.items:
+ item.to_wire(file)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ items = []
+ while parser.remaining() > 0:
+ header = parser.get_struct("!HBB")
+ afdlen = header[2]
+ if afdlen > 127:
+ negation = True
+ afdlen -= 128
+ else:
+ negation = False
+ address = parser.get_bytes(afdlen)
+ l = len(address)
+ if header[0] == 1:
+ if l < 4:
+ address += b"\x00" * (4 - l)
+ elif header[0] == 2:
+ if l < 16:
+ address += b"\x00" * (16 - l)
+ else:
+ #
+ # This isn't really right according to the RFC, but it
+ # seems better than throwing an exception
+ #
+ address = codecs.encode(address, "hex_codec")
+ item = APLItem(header[0], negation, address, header[1])
+ items.append(item)
+ return cls(rdclass, rdtype, items)
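A sketch of APL's text form; family 1 is IPv4, family 2 is IPv6, and a leading `!` marks a negated item (RFC 3123):

```python
import dns.rdata

apl = dns.rdata.from_text("IN", "APL", "1:192.0.2.0/24 !2:2001:db8::/32")
print(apl.to_text())          # 1:192.0.2.0/24 !2:2001:db8::/32
print(apl.items[1].negation)  # True
```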
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/DHCID.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/DHCID.py
new file mode 100644
index 0000000..8de8cdf
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/DHCID.py
@@ -0,0 +1,54 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+
+
+@dns.immutable.immutable
+class DHCID(dns.rdata.Rdata):
+ """DHCID record"""
+
+ # see: RFC 4701
+
+ __slots__ = ["data"]
+
+ def __init__(self, rdclass, rdtype, data):
+ super().__init__(rdclass, rdtype)
+ self.data = self._as_bytes(data)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._base64ify(self.data, **kw) # pyright: ignore
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ b64 = tok.concatenate_remaining_identifiers().encode()
+ data = base64.b64decode(b64)
+ return cls(rdclass, rdtype, data)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(self.data)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ data = parser.get_remaining()
+ return cls(rdclass, rdtype, data)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/HTTPS.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/HTTPS.py
new file mode 100644
index 0000000..15464cb
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/HTTPS.py
@@ -0,0 +1,9 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import dns.immutable
+import dns.rdtypes.svcbbase
+
+
+@dns.immutable.immutable
+class HTTPS(dns.rdtypes.svcbbase.SVCBBase):
+ """HTTPS record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/IPSECKEY.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/IPSECKEY.py
new file mode 100644
index 0000000..aef93ae
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/IPSECKEY.py
@@ -0,0 +1,87 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.rdtypes.util
+
+
+class Gateway(dns.rdtypes.util.Gateway):
+ name = "IPSECKEY gateway"
+
+
+@dns.immutable.immutable
+class IPSECKEY(dns.rdata.Rdata):
+ """IPSECKEY record"""
+
+ # see: RFC 4025
+
+ __slots__ = ["precedence", "gateway_type", "algorithm", "gateway", "key"]
+
+ def __init__(
+ self, rdclass, rdtype, precedence, gateway_type, algorithm, gateway, key
+ ):
+ super().__init__(rdclass, rdtype)
+ gateway = Gateway(gateway_type, gateway)
+ self.precedence = self._as_uint8(precedence)
+ self.gateway_type = gateway.type
+ self.algorithm = self._as_uint8(algorithm)
+ self.gateway = gateway.gateway
+ self.key = self._as_bytes(key)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ gateway = Gateway(self.gateway_type, self.gateway).to_text(origin, relativize)
+ key = dns.rdata._base64ify(self.key, **kw) # pyright: ignore
+ return f"{self.precedence} {self.gateway_type} {self.algorithm} {gateway} {key}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ precedence = tok.get_uint8()
+ gateway_type = tok.get_uint8()
+ algorithm = tok.get_uint8()
+ gateway = Gateway.from_text(
+ gateway_type, tok, origin, relativize, relativize_to
+ )
+ b64 = tok.concatenate_remaining_identifiers().encode()
+ key = base64.b64decode(b64)
+ return cls(
+ rdclass, rdtype, precedence, gateway_type, algorithm, gateway.gateway, key
+ )
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ header = struct.pack("!BBB", self.precedence, self.gateway_type, self.algorithm)
+ file.write(header)
+ Gateway(self.gateway_type, self.gateway).to_wire(
+ file, compress, origin, canonicalize
+ )
+ file.write(self.key)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ header = parser.get_struct("!BBB")
+ gateway_type = header[1]
+ gateway = Gateway.from_wire_parser(gateway_type, parser, origin)
+ key = parser.get_remaining()
+ return cls(
+ rdclass, rdtype, header[0], gateway_type, header[2], gateway.gateway, key
+ )
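A sketch of the IPSECKEY text form; gateway type 3 means the gateway is a domain name, and the base64 blob is placeholder key material (RFC 4025):

```python
import dns.rdata

ipseckey = dns.rdata.from_text(
    "IN", "IPSECKEY", "10 3 2 gateway.example.com. aGVsbG8="
)
print(ipseckey.gateway_type)  # 3
print(ipseckey.gateway)       # gateway.example.com.
```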
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/KX.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/KX.py
new file mode 100644
index 0000000..6073df4
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/KX.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.mxbase
+
+
+@dns.immutable.immutable
+class KX(dns.rdtypes.mxbase.UncompressedDowncasingMX):
+ """KX record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NAPTR.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NAPTR.py
new file mode 100644
index 0000000..98bbf4a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NAPTR.py
@@ -0,0 +1,109 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdtypes.util
+
+
+def _write_string(file, s):
+ l = len(s)
+ assert l < 256
+ file.write(struct.pack("!B", l))
+ file.write(s)
+
+
+@dns.immutable.immutable
+class NAPTR(dns.rdata.Rdata):
+ """NAPTR record"""
+
+ # see: RFC 3403
+
+ __slots__ = ["order", "preference", "flags", "service", "regexp", "replacement"]
+
+ def __init__(
+ self, rdclass, rdtype, order, preference, flags, service, regexp, replacement
+ ):
+ super().__init__(rdclass, rdtype)
+ self.flags = self._as_bytes(flags, True, 255)
+ self.service = self._as_bytes(service, True, 255)
+ self.regexp = self._as_bytes(regexp, True, 255)
+ self.order = self._as_uint16(order)
+ self.preference = self._as_uint16(preference)
+ self.replacement = self._as_name(replacement)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ replacement = self.replacement.choose_relativity(origin, relativize)
+ return (
+ f"{self.order} {self.preference} "
+ f'"{dns.rdata._escapify(self.flags)}" '
+ f'"{dns.rdata._escapify(self.service)}" '
+ f'"{dns.rdata._escapify(self.regexp)}" '
+ f"{replacement}"
+ )
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ order = tok.get_uint16()
+ preference = tok.get_uint16()
+ flags = tok.get_string()
+ service = tok.get_string()
+ regexp = tok.get_string()
+ replacement = tok.get_name(origin, relativize, relativize_to)
+ return cls(
+ rdclass, rdtype, order, preference, flags, service, regexp, replacement
+ )
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ two_ints = struct.pack("!HH", self.order, self.preference)
+ file.write(two_ints)
+ _write_string(file, self.flags)
+ _write_string(file, self.service)
+ _write_string(file, self.regexp)
+ self.replacement.to_wire(file, compress, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (order, preference) = parser.get_struct("!HH")
+ strings = []
+ for _ in range(3):
+ s = parser.get_counted_bytes()
+ strings.append(s)
+ replacement = parser.get_name(origin)
+ return cls(
+ rdclass,
+ rdtype,
+ order,
+ preference,
+ strings[0],
+ strings[1],
+ strings[2],
+ replacement,
+ )
+
+ def _processing_priority(self):
+ return (self.order, self.preference)
+
+ @classmethod
+ def _processing_order(cls, iterable):
+ return dns.rdtypes.util.priority_processing_order(iterable)
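+
+
+# Usage sketch (illustrative; assumes dnspython's generic
+# dns.rdata.from_text() entry point): an RFC 3403-style SIP rewrite rule.
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN,
+        dns.rdatatype.NAPTR,
+        '100 10 "S" "SIP+D2U" "" _sip._udp.example.com.',
+    )
+    # NAPTR rdatas sort by (order, preference); see _processing_priority().
+    print(rd.order, rd.preference, rd.replacement)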
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NSAP.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NSAP.py
new file mode 100644
index 0000000..d55edb7
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NSAP.py
@@ -0,0 +1,60 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class NSAP(dns.rdata.Rdata):
+ """NSAP record."""
+
+ # see: RFC 1706
+
+ __slots__ = ["address"]
+
+ def __init__(self, rdclass, rdtype, address):
+ super().__init__(rdclass, rdtype)
+ self.address = self._as_bytes(address)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return f"0x{binascii.hexlify(self.address).decode()}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ address = tok.get_string()
+ if address[0:2] != "0x":
+ raise dns.exception.SyntaxError("string does not start with 0x")
+ address = address[2:].replace(".", "")
+ if len(address) % 2 != 0:
+ raise dns.exception.SyntaxError("hexstring has odd length")
+ address = binascii.unhexlify(address.encode())
+ return cls(rdclass, rdtype, address)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(self.address)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ address = parser.get_remaining()
+ return cls(rdclass, rdtype, address)
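+
+
+# Usage sketch (illustrative; assumes dnspython's generic
+# dns.rdata.from_text() entry point). Dots in the 0x... form are ignored,
+# since from_text() above strips them before hex-decoding.
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN,
+        dns.rdatatype.NSAP,
+        "0x47.0005.80.005a00.0000.0001.e133.ffffff000161.00",
+    )
+    print(rd.to_text())  # 0x47000580005a0000000001e133ffffff00016100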
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NSAP_PTR.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NSAP_PTR.py
new file mode 100644
index 0000000..ce1c663
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/NSAP_PTR.py
@@ -0,0 +1,24 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import dns.immutable
+import dns.rdtypes.nsbase
+
+
+@dns.immutable.immutable
+class NSAP_PTR(dns.rdtypes.nsbase.UncompressedNS):
+ """NSAP-PTR record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/PX.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/PX.py
new file mode 100644
index 0000000..20143bf
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/PX.py
@@ -0,0 +1,73 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class PX(dns.rdata.Rdata):
+ """PX record."""
+
+ # see: RFC 2163
+
+ __slots__ = ["preference", "map822", "mapx400"]
+
+ def __init__(self, rdclass, rdtype, preference, map822, mapx400):
+ super().__init__(rdclass, rdtype)
+ self.preference = self._as_uint16(preference)
+ self.map822 = self._as_name(map822)
+ self.mapx400 = self._as_name(mapx400)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ map822 = self.map822.choose_relativity(origin, relativize)
+ mapx400 = self.mapx400.choose_relativity(origin, relativize)
+ return f"{self.preference} {map822} {mapx400}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ preference = tok.get_uint16()
+ map822 = tok.get_name(origin, relativize, relativize_to)
+ mapx400 = tok.get_name(origin, relativize, relativize_to)
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ pref = struct.pack("!H", self.preference)
+ file.write(pref)
+ self.map822.to_wire(file, None, origin, canonicalize)
+ self.mapx400.to_wire(file, None, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ preference = parser.get_uint16()
+ map822 = parser.get_name(origin)
+ mapx400 = parser.get_name(origin)
+ return cls(rdclass, rdtype, preference, map822, mapx400)
+
+ def _processing_priority(self):
+ return self.preference
+
+ @classmethod
+ def _processing_order(cls, iterable):
+ return dns.rdtypes.util.priority_processing_order(iterable)
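+
+
+# Usage sketch (illustrative names; assumes dnspython's generic
+# dns.rdata.from_text() entry point).
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN, dns.rdatatype.PX, "10 map822.example. mapx400.example."
+    )
+    # Like MX, PX rdatas order by preference (lower is tried first).
+    print(rd.preference, rd.map822, rd.mapx400)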
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/SRV.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/SRV.py
new file mode 100644
index 0000000..044c10e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/SRV.py
@@ -0,0 +1,75 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class SRV(dns.rdata.Rdata):
+ """SRV record"""
+
+ # see: RFC 2782
+
+ __slots__ = ["priority", "weight", "port", "target"]
+
+ def __init__(self, rdclass, rdtype, priority, weight, port, target):
+ super().__init__(rdclass, rdtype)
+ self.priority = self._as_uint16(priority)
+ self.weight = self._as_uint16(weight)
+ self.port = self._as_uint16(port)
+ self.target = self._as_name(target)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ target = self.target.choose_relativity(origin, relativize)
+ return f"{self.priority} {self.weight} {self.port} {target}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ priority = tok.get_uint16()
+ weight = tok.get_uint16()
+ port = tok.get_uint16()
+ target = tok.get_name(origin, relativize, relativize_to)
+ return cls(rdclass, rdtype, priority, weight, port, target)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ three_ints = struct.pack("!HHH", self.priority, self.weight, self.port)
+ file.write(three_ints)
+ self.target.to_wire(file, compress, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ (priority, weight, port) = parser.get_struct("!HHH")
+ target = parser.get_name(origin)
+ return cls(rdclass, rdtype, priority, weight, port, target)
+
+ def _processing_priority(self):
+ return self.priority
+
+ def _processing_weight(self):
+ return self.weight
+
+ @classmethod
+ def _processing_order(cls, iterable):
+ return dns.rdtypes.util.weighted_processing_order(iterable)
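+
+
+# Usage sketch (illustrative name; assumes dnspython's generic
+# dns.rdata.from_text() entry point).
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN, dns.rdatatype.SRV, "0 5 5060 sip.example."
+    )
+    # SRV uses weighted selection: records of equal priority are shuffled
+    # in proportion to their weights (see _processing_order above).
+    print(rd.priority, rd.weight, rd.port, rd.target)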
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/SVCB.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/SVCB.py
new file mode 100644
index 0000000..ff3e932
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/SVCB.py
@@ -0,0 +1,9 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import dns.immutable
+import dns.rdtypes.svcbbase
+
+
+@dns.immutable.immutable
+class SVCB(dns.rdtypes.svcbbase.SVCBBase):
+ """SVCB record"""
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/WKS.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/WKS.py
new file mode 100644
index 0000000..cc6c373
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/WKS.py
@@ -0,0 +1,100 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+
+import dns.immutable
+import dns.ipv4
+import dns.rdata
+
+try:
+ _proto_tcp = socket.getprotobyname("tcp")
+ _proto_udp = socket.getprotobyname("udp")
+except OSError:
+ # Fall back to defaults in case /etc/protocols is unavailable.
+ _proto_tcp = 6
+ _proto_udp = 17
+
+
+@dns.immutable.immutable
+class WKS(dns.rdata.Rdata):
+ """WKS record"""
+
+ # see: RFC 1035
+
+ __slots__ = ["address", "protocol", "bitmap"]
+
+ def __init__(self, rdclass, rdtype, address, protocol, bitmap):
+ super().__init__(rdclass, rdtype)
+ self.address = self._as_ipv4_address(address)
+ self.protocol = self._as_uint8(protocol)
+ self.bitmap = self._as_bytes(bitmap)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ bits = []
+ for i, byte in enumerate(self.bitmap):
+ for j in range(0, 8):
+ if byte & (0x80 >> j):
+ bits.append(str(i * 8 + j))
+ text = " ".join(bits)
+ return f"{self.address} {self.protocol} {text}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ address = tok.get_string()
+ protocol = tok.get_string()
+ if protocol.isdigit():
+ protocol = int(protocol)
+ else:
+ protocol = socket.getprotobyname(protocol)
+ bitmap = bytearray()
+ for token in tok.get_remaining():
+ value = token.unescape().value
+ if value.isdigit():
+ serv = int(value)
+ else:
+ if protocol != _proto_udp and protocol != _proto_tcp:
+ raise NotImplementedError("protocol must be TCP or UDP")
+ if protocol == _proto_udp:
+ protocol_text = "udp"
+ else:
+ protocol_text = "tcp"
+ serv = socket.getservbyname(value, protocol_text)
+ i = serv // 8
+ l = len(bitmap)
+ if l < i + 1:
+ for _ in range(l, i + 1):
+ bitmap.append(0)
+ bitmap[i] = bitmap[i] | (0x80 >> (serv % 8))
+ bitmap = dns.rdata._truncate_bitmap(bitmap)
+ return cls(rdclass, rdtype, address, protocol, bitmap)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(dns.ipv4.inet_aton(self.address))
+ protocol = struct.pack("!B", self.protocol)
+ file.write(protocol)
+ file.write(self.bitmap)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ address = parser.get_bytes(4)
+ protocol = parser.get_uint8()
+ bitmap = parser.get_remaining()
+ return cls(rdclass, rdtype, address, protocol, bitmap)
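+
+
+# Usage sketch (illustrative; assumes dnspython's generic
+# dns.rdata.from_text() entry point). Ports may be given numerically, or by
+# service name when the protocol is TCP or UDP.
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN, dns.rdatatype.WKS, "192.0.2.1 6 25 80"
+    )
+    # Bits 25 and 80 of the port bitmap are set; protocol 6 is TCP.
+    print(rd.to_text())  # 192.0.2.1 6 25 80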
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/__init__.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/__init__.py
new file mode 100644
index 0000000..dcec4dd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/IN/__init__.py
@@ -0,0 +1,35 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Class IN rdata type classes."""
+
+__all__ = [
+ "A",
+ "AAAA",
+ "APL",
+ "DHCID",
+ "HTTPS",
+ "IPSECKEY",
+ "KX",
+ "NAPTR",
+ "NSAP",
+ "NSAP_PTR",
+ "PX",
+ "SRV",
+ "SVCB",
+ "WKS",
+]
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/__init__.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/__init__.py
new file mode 100644
index 0000000..3997f84
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/__init__.py
@@ -0,0 +1,33 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS rdata type classes"""
+
+__all__ = [
+ "ANY",
+ "IN",
+ "CH",
+ "dnskeybase",
+ "dsbase",
+ "euibase",
+ "mxbase",
+ "nsbase",
+ "svcbbase",
+ "tlsabase",
+ "txtbase",
+ "util",
+]
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/dnskeybase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/dnskeybase.py
new file mode 100644
index 0000000..fb49f92
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/dnskeybase.py
@@ -0,0 +1,83 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import base64
+import enum
+import struct
+
+import dns.dnssectypes
+import dns.exception
+import dns.immutable
+import dns.rdata
+
+# wildcard import
+__all__ = ["SEP", "REVOKE", "ZONE"] # noqa: F822
+
+
+class Flag(enum.IntFlag):
+ SEP = 0x0001
+ REVOKE = 0x0080
+ ZONE = 0x0100
+
+
+@dns.immutable.immutable
+class DNSKEYBase(dns.rdata.Rdata):
+ """Base class for rdata that is like a DNSKEY record"""
+
+ __slots__ = ["flags", "protocol", "algorithm", "key"]
+
+ def __init__(self, rdclass, rdtype, flags, protocol, algorithm, key):
+ super().__init__(rdclass, rdtype)
+ self.flags = Flag(self._as_uint16(flags))
+ self.protocol = self._as_uint8(protocol)
+ self.algorithm = dns.dnssectypes.Algorithm.make(algorithm)
+ self.key = self._as_bytes(key)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ key = dns.rdata._base64ify(self.key, **kw) # pyright: ignore
+ return f"{self.flags} {self.protocol} {self.algorithm} {key}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ flags = tok.get_uint16()
+ protocol = tok.get_uint8()
+ algorithm = tok.get_string()
+ b64 = tok.concatenate_remaining_identifiers().encode()
+ key = base64.b64decode(b64)
+ return cls(rdclass, rdtype, flags, protocol, algorithm, key)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ header = struct.pack("!HBB", self.flags, self.protocol, self.algorithm)
+ file.write(header)
+ file.write(self.key)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ header = parser.get_struct("!HBB")
+ key = parser.get_remaining()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], key)
+
+
+### BEGIN generated Flag constants
+
+SEP = Flag.SEP
+REVOKE = Flag.REVOKE
+ZONE = Flag.ZONE
+
+### END generated Flag constants
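+
+
+# Sanity sketch: the flag constants combine as ordinary IntFlag values.
+# 257 (ZONE | SEP) is the conventional flags field of a key-signing key;
+# 256 (ZONE) that of a zone-signing key.
+if __name__ == "__main__":
+    assert int(Flag.ZONE | Flag.SEP) == 257
+    assert Flag(256) == Flag.ZONE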
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/dsbase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/dsbase.py
new file mode 100644
index 0000000..8e05c2a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/dsbase.py
@@ -0,0 +1,83 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2010, 2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+import struct
+
+import dns.dnssectypes
+import dns.immutable
+import dns.rdata
+import dns.rdatatype
+
+
+@dns.immutable.immutable
+class DSBase(dns.rdata.Rdata):
+ """Base class for rdata that is like a DS record"""
+
+ __slots__ = ["key_tag", "algorithm", "digest_type", "digest"]
+
+ # Digest types registry:
+ # https://www.iana.org/assignments/ds-rr-types/ds-rr-types.xhtml
+ _digest_length_by_type = {
+ 1: 20, # SHA-1, RFC 3658 Sec. 2.4
+ 2: 32, # SHA-256, RFC 4509 Sec. 2.2
+ 3: 32, # GOST R 34.11-94, RFC 5933 Sec. 4 in conjunction with RFC 4490 Sec. 2.1
+ 4: 48, # SHA-384, RFC 6605 Sec. 2
+ }
+
+ def __init__(self, rdclass, rdtype, key_tag, algorithm, digest_type, digest):
+ super().__init__(rdclass, rdtype)
+ self.key_tag = self._as_uint16(key_tag)
+ self.algorithm = dns.dnssectypes.Algorithm.make(algorithm)
+ self.digest_type = dns.dnssectypes.DSDigest.make(self._as_uint8(digest_type))
+ self.digest = self._as_bytes(digest)
+ try:
+ if len(self.digest) != self._digest_length_by_type[self.digest_type]:
+ raise ValueError("digest length inconsistent with digest type")
+ except KeyError:
+ if self.digest_type == 0: # reserved, RFC 3658 Sec. 2.4
+ raise ValueError("digest type 0 is reserved")
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ kw = kw.copy()
+ chunksize = kw.pop("chunksize", 128)
+ digest = dns.rdata._hexify(
+ self.digest, chunksize=chunksize, **kw # pyright: ignore
+ )
+ return f"{self.key_tag} {self.algorithm} {self.digest_type} {digest}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ key_tag = tok.get_uint16()
+ algorithm = tok.get_string()
+ digest_type = tok.get_uint8()
+ digest = tok.concatenate_remaining_identifiers().encode()
+ digest = binascii.unhexlify(digest)
+ return cls(rdclass, rdtype, key_tag, algorithm, digest_type, digest)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ header = struct.pack("!HBB", self.key_tag, self.algorithm, self.digest_type)
+ file.write(header)
+ file.write(self.digest)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ header = parser.get_struct("!HBB")
+ digest = parser.get_remaining()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], digest)
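+
+
+# Usage sketch (key tag and digest are the RFC 4509 example; assumes
+# dnspython's generic dns.rdata.from_text() entry point).
+if __name__ == "__main__":
+    import dns.rdataclass
+
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN,
+        dns.rdatatype.DS,
+        "60485 5 2 "
+        "D4B7D520E7BB5F0F67674A0CCEB1E3E0614B93C4F9E99B8383F6A1E4469DA50A",
+    )
+    # Digest type 2 (SHA-256) must carry exactly 32 octets, per the table above.
+    assert len(rd.digest) == 32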
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/euibase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/euibase.py
new file mode 100644
index 0000000..4eb82eb
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/euibase.py
@@ -0,0 +1,73 @@
+# Copyright (C) 2015 Red Hat, Inc.
+# Author: Petr Spacek
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED 'AS IS' AND RED HAT DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+
+import dns.exception
+import dns.immutable
+import dns.rdata
+
+
+@dns.immutable.immutable
+class EUIBase(dns.rdata.Rdata):
+ """EUIxx record"""
+
+ # see: rfc7043.txt
+
+ __slots__ = ["eui"]
+ # redefine these in subclasses
+ byte_len = 0
+ text_len = 0
+ # byte_len = 6 # 0123456789ab (in hex)
+ # text_len = byte_len * 3 - 1 # 01-23-45-67-89-ab
+
+ def __init__(self, rdclass, rdtype, eui):
+ super().__init__(rdclass, rdtype)
+ self.eui = self._as_bytes(eui)
+ if len(self.eui) != self.byte_len:
+ raise dns.exception.FormError(
+ f"EUI{self.byte_len * 8} rdata has to have {self.byte_len} bytes"
+ )
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ return dns.rdata._hexify(self.eui, chunksize=2, separator=b"-", **kw)
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ text = tok.get_string()
+ if len(text) != cls.text_len:
+ raise dns.exception.SyntaxError(
+ f"Input text must have {cls.text_len} characters"
+ )
+ for i in range(2, cls.byte_len * 3 - 1, 3):
+ if text[i] != "-":
+ raise dns.exception.SyntaxError(f"Dash expected at position {i}")
+ text = text.replace("-", "")
+ try:
+ data = binascii.unhexlify(text.encode())
+ except (ValueError, TypeError) as ex:
+ raise dns.exception.SyntaxError(f"Hex decoding error: {str(ex)}")
+ return cls(rdclass, rdtype, data)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(self.eui)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ eui = parser.get_bytes(cls.byte_len)
+ return cls(rdclass, rdtype, eui)
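+
+
+# Subclass sketch: a concrete EUI type only fills in the two class
+# attributes (this mirrors how dnspython's EUI48 type subclasses EUIBase;
+# the demo class and values are illustrative).
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    @dns.immutable.immutable
+    class _DemoEUI48(EUIBase):
+        byte_len = 6  # 0123456789ab (in hex)
+        text_len = byte_len * 3 - 1  # 01-23-45-67-89-ab
+
+    rd = _DemoEUI48(
+        dns.rdataclass.IN, dns.rdatatype.EUI48, bytes.fromhex("0123456789ab")
+    )
+    print(rd.to_text())  # 01-23-45-67-89-ab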
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/mxbase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/mxbase.py
new file mode 100644
index 0000000..5d33e61
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/mxbase.py
@@ -0,0 +1,87 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""MX-like base classes."""
+
+import struct
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdtypes.util
+
+
+@dns.immutable.immutable
+class MXBase(dns.rdata.Rdata):
+ """Base class for rdata that is like an MX record."""
+
+ __slots__ = ["preference", "exchange"]
+
+ def __init__(self, rdclass, rdtype, preference, exchange):
+ super().__init__(rdclass, rdtype)
+ self.preference = self._as_uint16(preference)
+ self.exchange = self._as_name(exchange)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ exchange = self.exchange.choose_relativity(origin, relativize)
+ return f"{self.preference} {exchange}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ preference = tok.get_uint16()
+ exchange = tok.get_name(origin, relativize, relativize_to)
+ return cls(rdclass, rdtype, preference, exchange)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ pref = struct.pack("!H", self.preference)
+ file.write(pref)
+ self.exchange.to_wire(file, compress, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ preference = parser.get_uint16()
+ exchange = parser.get_name(origin)
+ return cls(rdclass, rdtype, preference, exchange)
+
+ def _processing_priority(self):
+ return self.preference
+
+ @classmethod
+ def _processing_order(cls, iterable):
+ return dns.rdtypes.util.priority_processing_order(iterable)
+
+
+@dns.immutable.immutable
+class UncompressedMX(MXBase):
+ """Base class for rdata that is like an MX record, but whose name
+ is not compressed when converted to DNS wire format, and whose
+    digestible form is not downcased."""
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ super()._to_wire(file, None, origin, False)
+
+
+@dns.immutable.immutable
+class UncompressedDowncasingMX(MXBase):
+ """Base class for rdata that is like an MX record, but whose name
+    is not compressed when converted to DNS wire format."""
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ super()._to_wire(file, None, origin, canonicalize)
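+
+
+# Usage sketch (illustrative name; assumes dnspython's generic
+# dns.rdata.from_text() entry point).
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.MX, "10 mail.example.")
+    # MXBase._processing_priority() sorts by preference, lowest first.
+    print(rd.preference, rd.exchange)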
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/nsbase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/nsbase.py
new file mode 100644
index 0000000..904224f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/nsbase.py
@@ -0,0 +1,63 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""NS-like base classes."""
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+
+
+@dns.immutable.immutable
+class NSBase(dns.rdata.Rdata):
+ """Base class for rdata that is like an NS record."""
+
+ __slots__ = ["target"]
+
+ def __init__(self, rdclass, rdtype, target):
+ super().__init__(rdclass, rdtype)
+ self.target = self._as_name(target)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ target = self.target.choose_relativity(origin, relativize)
+ return str(target)
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ target = tok.get_name(origin, relativize, relativize_to)
+ return cls(rdclass, rdtype, target)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.target.to_wire(file, compress, origin, canonicalize)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ target = parser.get_name(origin)
+ return cls(rdclass, rdtype, target)
+
+
+@dns.immutable.immutable
+class UncompressedNS(NSBase):
+ """Base class for rdata that is like an NS record, but whose name
+    is not compressed when converted to DNS wire format, and whose
+    digestible form is not downcased."""
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ self.target.to_wire(file, None, origin, False)
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/svcbbase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/svcbbase.py
new file mode 100644
index 0000000..7338b66
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/svcbbase.py
@@ -0,0 +1,587 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import base64
+import enum
+import struct
+from typing import Any, Dict
+
+import dns.enum
+import dns.exception
+import dns.immutable
+import dns.ipv4
+import dns.ipv6
+import dns.name
+import dns.rdata
+import dns.rdtypes.util
+import dns.renderer
+import dns.tokenizer
+import dns.wire
+
+# Until there is an RFC, this module is experimental and may be changed in
+# incompatible ways.
+
+
+class UnknownParamKey(dns.exception.DNSException):
+ """Unknown SVCB ParamKey"""
+
+
+class ParamKey(dns.enum.IntEnum):
+ """SVCB ParamKey"""
+
+ MANDATORY = 0
+ ALPN = 1
+ NO_DEFAULT_ALPN = 2
+ PORT = 3
+ IPV4HINT = 4
+ ECH = 5
+ IPV6HINT = 6
+ DOHPATH = 7
+ OHTTP = 8
+
+ @classmethod
+ def _maximum(cls):
+ return 65535
+
+ @classmethod
+ def _short_name(cls):
+ return "SVCBParamKey"
+
+ @classmethod
+ def _prefix(cls):
+ return "KEY"
+
+ @classmethod
+ def _unknown_exception_class(cls):
+ return UnknownParamKey
+
+
+class Emptiness(enum.IntEnum):
+ NEVER = 0
+ ALWAYS = 1
+ ALLOWED = 2
+
+
+def _validate_key(key):
+ force_generic = False
+ if isinstance(key, bytes):
+ # We decode to latin-1 so we get 0-255 as valid and do NOT interpret
+ # UTF-8 sequences
+ key = key.decode("latin-1")
+ if isinstance(key, str):
+ if key.lower().startswith("key"):
+ force_generic = True
+ if key[3:].startswith("0") and len(key) != 4:
+ # key has leading zeros
+ raise ValueError("leading zeros in key")
+ key = key.replace("-", "_")
+ return (ParamKey.make(key), force_generic)
+
+
+def key_to_text(key):
+ return ParamKey.to_text(key).replace("_", "-").lower()
+
+
+# Like rdata escapify, but escapes ',' too.
+
+_escaped = b'",\\'
+
+
+def _escapify(qstring):
+ text = ""
+ for c in qstring:
+ if c in _escaped:
+ text += "\\" + chr(c)
+ elif c >= 0x20 and c < 0x7F:
+ text += chr(c)
+ else:
+ text += f"\\{c:03d}"
+ return text
+
+
+def _unescape(value: str) -> bytes:
+ if value == "":
+ return b""
+ unescaped = b""
+ l = len(value)
+ i = 0
+ while i < l:
+ c = value[i]
+ i += 1
+ if c == "\\":
+ if i >= l: # pragma: no cover (can't happen via tokenizer get())
+ raise dns.exception.UnexpectedEnd
+ c = value[i]
+ i += 1
+ if c.isdigit():
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c2 = value[i]
+ i += 1
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c3 = value[i]
+ i += 1
+ if not (c2.isdigit() and c3.isdigit()):
+ raise dns.exception.SyntaxError
+ codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
+ if codepoint > 255:
+ raise dns.exception.SyntaxError
+ unescaped += b"%c" % (codepoint)
+ continue
+ unescaped += c.encode()
+ return unescaped
+
+
+def _split(value):
+ l = len(value)
+ i = 0
+ items = []
+ unescaped = b""
+ while i < l:
+ c = value[i]
+ i += 1
+ if c == ord("\\"):
+ if i >= l: # pragma: no cover (can't happen via tokenizer get())
+ raise dns.exception.UnexpectedEnd
+ c = value[i]
+ i += 1
+ unescaped += b"%c" % (c)
+ elif c == ord(","):
+ items.append(unescaped)
+ unescaped = b""
+ else:
+ unescaped += b"%c" % (c)
+ items.append(unescaped)
+ return items
+
+
+@dns.immutable.immutable
+class Param:
+ """Abstract base class for SVCB parameters"""
+
+ @classmethod
+ def emptiness(cls) -> Emptiness:
+ return Emptiness.NEVER
+
+
+@dns.immutable.immutable
+class GenericParam(Param):
+ """Generic SVCB parameter"""
+
+ def __init__(self, value):
+ self.value = dns.rdata.Rdata._as_bytes(value, True)
+
+ @classmethod
+ def emptiness(cls):
+ return Emptiness.ALLOWED
+
+ @classmethod
+ def from_value(cls, value):
+ if value is None or len(value) == 0:
+ return None
+ else:
+ return cls(_unescape(value))
+
+ def to_text(self):
+ return '"' + dns.rdata._escapify(self.value) + '"'
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ value = parser.get_bytes(parser.remaining())
+ if len(value) == 0:
+ return None
+ else:
+ return cls(value)
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ file.write(self.value)
+
+
+@dns.immutable.immutable
+class MandatoryParam(Param):
+ def __init__(self, keys):
+ # check for duplicates
+ keys = sorted([_validate_key(key)[0] for key in keys])
+ prior_k = None
+ for k in keys:
+ if k == prior_k:
+ raise ValueError(f"duplicate key {k:d}")
+ prior_k = k
+ if k == ParamKey.MANDATORY:
+ raise ValueError("listed the mandatory key as mandatory")
+ self.keys = tuple(keys)
+
+ @classmethod
+ def from_value(cls, value):
+ keys = [k.encode() for k in value.split(",")]
+ return cls(keys)
+
+ def to_text(self):
+ return '"' + ",".join([key_to_text(key) for key in self.keys]) + '"'
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ keys = []
+ last_key = -1
+ while parser.remaining() > 0:
+ key = parser.get_uint16()
+ if key < last_key:
+ raise dns.exception.FormError("manadatory keys not ascending")
+ last_key = key
+ keys.append(key)
+ return cls(keys)
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ for key in self.keys:
+ file.write(struct.pack("!H", key))
+
+
+@dns.immutable.immutable
+class ALPNParam(Param):
+ def __init__(self, ids):
+ self.ids = dns.rdata.Rdata._as_tuple(
+ ids, lambda x: dns.rdata.Rdata._as_bytes(x, True, 255, False)
+ )
+
+ @classmethod
+ def from_value(cls, value):
+ return cls(_split(_unescape(value)))
+
+ def to_text(self):
+ value = ",".join([_escapify(id) for id in self.ids])
+ return '"' + dns.rdata._escapify(value.encode()) + '"'
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ ids = []
+ while parser.remaining() > 0:
+ id = parser.get_counted_bytes()
+ ids.append(id)
+ return cls(ids)
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ for id in self.ids:
+ file.write(struct.pack("!B", len(id)))
+ file.write(id)
+
+
+@dns.immutable.immutable
+class NoDefaultALPNParam(Param):
+ # We don't ever expect to instantiate this class, but we need
+ # a from_value() and a from_wire_parser(), so we just return None
+ # from the class methods when things are OK.
+
+ @classmethod
+ def emptiness(cls):
+ return Emptiness.ALWAYS
+
+ @classmethod
+ def from_value(cls, value):
+ if value is None or value == "":
+ return None
+ else:
+ raise ValueError("no-default-alpn with non-empty value")
+
+ def to_text(self):
+ raise NotImplementedError # pragma: no cover
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ if parser.remaining() != 0:
+ raise dns.exception.FormError
+ return None
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ raise NotImplementedError # pragma: no cover
+
+
+@dns.immutable.immutable
+class PortParam(Param):
+ def __init__(self, port):
+ self.port = dns.rdata.Rdata._as_uint16(port)
+
+ @classmethod
+ def from_value(cls, value):
+ value = int(value)
+ return cls(value)
+
+ def to_text(self):
+ return f'"{self.port}"'
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ port = parser.get_uint16()
+ return cls(port)
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ file.write(struct.pack("!H", self.port))
+
+
+@dns.immutable.immutable
+class IPv4HintParam(Param):
+ def __init__(self, addresses):
+ self.addresses = dns.rdata.Rdata._as_tuple(
+ addresses, dns.rdata.Rdata._as_ipv4_address
+ )
+
+ @classmethod
+ def from_value(cls, value):
+ addresses = value.split(",")
+ return cls(addresses)
+
+ def to_text(self):
+ return '"' + ",".join(self.addresses) + '"'
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ addresses = []
+ while parser.remaining() > 0:
+ ip = parser.get_bytes(4)
+ addresses.append(dns.ipv4.inet_ntoa(ip))
+ return cls(addresses)
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ for address in self.addresses:
+ file.write(dns.ipv4.inet_aton(address))
+
+
+@dns.immutable.immutable
+class IPv6HintParam(Param):
+ def __init__(self, addresses):
+ self.addresses = dns.rdata.Rdata._as_tuple(
+ addresses, dns.rdata.Rdata._as_ipv6_address
+ )
+
+ @classmethod
+ def from_value(cls, value):
+ addresses = value.split(",")
+ return cls(addresses)
+
+ def to_text(self):
+ return '"' + ",".join(self.addresses) + '"'
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ addresses = []
+ while parser.remaining() > 0:
+ ip = parser.get_bytes(16)
+ addresses.append(dns.ipv6.inet_ntoa(ip))
+ return cls(addresses)
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ for address in self.addresses:
+ file.write(dns.ipv6.inet_aton(address))
+
+
+@dns.immutable.immutable
+class ECHParam(Param):
+ def __init__(self, ech):
+ self.ech = dns.rdata.Rdata._as_bytes(ech, True)
+
+ @classmethod
+ def from_value(cls, value):
+ if "\\" in value:
+ raise ValueError("escape in ECH value")
+ value = base64.b64decode(value.encode())
+ return cls(value)
+
+ def to_text(self):
+ b64 = base64.b64encode(self.ech).decode("ascii")
+ return f'"{b64}"'
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ value = parser.get_bytes(parser.remaining())
+ return cls(value)
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ file.write(self.ech)
+
+
+@dns.immutable.immutable
+class OHTTPParam(Param):
+ # We don't ever expect to instantiate this class, but we need
+ # a from_value() and a from_wire_parser(), so we just return None
+ # from the class methods when things are OK.
+
+ @classmethod
+ def emptiness(cls):
+ return Emptiness.ALWAYS
+
+ @classmethod
+ def from_value(cls, value):
+ if value is None or value == "":
+ return None
+ else:
+ raise ValueError("ohttp with non-empty value")
+
+ def to_text(self):
+ raise NotImplementedError # pragma: no cover
+
+ @classmethod
+ def from_wire_parser(cls, parser, origin=None): # pylint: disable=W0613
+ if parser.remaining() != 0:
+ raise dns.exception.FormError
+ return None
+
+ def to_wire(self, file, origin=None): # pylint: disable=W0613
+ raise NotImplementedError # pragma: no cover
+
+
+_class_for_key: Dict[ParamKey, Any] = {
+ ParamKey.MANDATORY: MandatoryParam,
+ ParamKey.ALPN: ALPNParam,
+ ParamKey.NO_DEFAULT_ALPN: NoDefaultALPNParam,
+ ParamKey.PORT: PortParam,
+ ParamKey.IPV4HINT: IPv4HintParam,
+ ParamKey.ECH: ECHParam,
+ ParamKey.IPV6HINT: IPv6HintParam,
+ ParamKey.OHTTP: OHTTPParam,
+}
+
+
+def _validate_and_define(params, key, value):
+ (key, force_generic) = _validate_key(_unescape(key))
+ if key in params:
+ raise SyntaxError(f'duplicate key "{key:d}"')
+ cls = _class_for_key.get(key, GenericParam)
+ emptiness = cls.emptiness()
+ if value is None:
+ if emptiness == Emptiness.NEVER:
+ raise SyntaxError("value cannot be empty")
+ value = cls.from_value(value)
+ else:
+ if force_generic:
+ value = cls.from_wire_parser(dns.wire.Parser(_unescape(value)))
+ else:
+ value = cls.from_value(value)
+ params[key] = value
+
+
+@dns.immutable.immutable
+class SVCBBase(dns.rdata.Rdata):
+ """Base class for SVCB-like records"""
+
+ # see: draft-ietf-dnsop-svcb-https-11
+
+ __slots__ = ["priority", "target", "params"]
+
+ def __init__(self, rdclass, rdtype, priority, target, params):
+ super().__init__(rdclass, rdtype)
+ self.priority = self._as_uint16(priority)
+ self.target = self._as_name(target)
+ for k, v in params.items():
+ k = ParamKey.make(k)
+ if not isinstance(v, Param) and v is not None:
+ raise ValueError(f"{k:d} not a Param")
+ self.params = dns.immutable.Dict(params)
+ # Make sure any parameter listed as mandatory is present in the
+ # record.
+ mandatory = params.get(ParamKey.MANDATORY)
+ if mandatory:
+ for key in mandatory.keys:
+ # Note we have to say "not in" as we have None as a value
+ # so a get() and a not None test would be wrong.
+ if key not in params:
+ raise ValueError(f"key {key:d} declared mandatory but not present")
+ # The no-default-alpn parameter requires the alpn parameter.
+ if ParamKey.NO_DEFAULT_ALPN in params:
+ if ParamKey.ALPN not in params:
+ raise ValueError("no-default-alpn present, but alpn missing")
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ target = self.target.choose_relativity(origin, relativize)
+ params = []
+ for key in sorted(self.params.keys()):
+ value = self.params[key]
+ if value is None:
+ params.append(key_to_text(key))
+ else:
+ kv = key_to_text(key) + "=" + value.to_text()
+ params.append(kv)
+ if len(params) > 0:
+ space = " "
+ else:
+ space = ""
+ return f"{self.priority} {target}{space}{' '.join(params)}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ priority = tok.get_uint16()
+ target = tok.get_name(origin, relativize, relativize_to)
+ if priority == 0:
+ token = tok.get()
+ if not token.is_eol_or_eof():
+ raise SyntaxError("parameters in AliasMode")
+ tok.unget(token)
+ params = {}
+ while True:
+ token = tok.get()
+ if token.is_eol_or_eof():
+ tok.unget(token)
+ break
+ if token.ttype != dns.tokenizer.IDENTIFIER:
+ raise SyntaxError("parameter is not an identifier")
+ equals = token.value.find("=")
+ if equals == len(token.value) - 1:
+ # 'key=', so next token should be a quoted string without
+ # any intervening whitespace.
+ key = token.value[:-1]
+ token = tok.get(want_leading=True)
+ if token.ttype != dns.tokenizer.QUOTED_STRING:
+ raise SyntaxError("whitespace after =")
+ value = token.value
+ elif equals > 0:
+ # key=value
+ key = token.value[:equals]
+ value = token.value[equals + 1 :]
+ elif equals == 0:
+ # =key
+ raise SyntaxError('parameter cannot start with "="')
+ else:
+ # key
+ key = token.value
+ value = None
+ _validate_and_define(params, key, value)
+ return cls(rdclass, rdtype, priority, target, params)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ file.write(struct.pack("!H", self.priority))
+ self.target.to_wire(file, None, origin, False)
+ for key in sorted(self.params):
+ file.write(struct.pack("!H", key))
+ value = self.params[key]
+ with dns.renderer.prefixed_length(file, 2):
+ # Note that we're still writing a length of zero if the value is None
+ if value is not None:
+ value.to_wire(file, origin)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ priority = parser.get_uint16()
+ target = parser.get_name(origin)
+ if priority == 0 and parser.remaining() != 0:
+ raise dns.exception.FormError("parameters in AliasMode")
+ params = {}
+ prior_key = -1
+ while parser.remaining() > 0:
+ key = parser.get_uint16()
+ if key < prior_key:
+ raise dns.exception.FormError("keys not in order")
+ prior_key = key
+ vlen = parser.get_uint16()
+ pkey = ParamKey.make(key)
+ pcls = _class_for_key.get(pkey, GenericParam)
+ with parser.restrict_to(vlen):
+ value = pcls.from_wire_parser(parser, origin)
+ params[pkey] = value
+ return cls(rdclass, rdtype, priority, target, params)
+
+ def _processing_priority(self):
+ return self.priority
+
+ @classmethod
+ def _processing_order(cls, iterable):
+ return dns.rdtypes.util.priority_processing_order(iterable)
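+
+
+# Usage sketch (illustrative names and parameters; assumes dnspython's
+# generic dns.rdata.from_text() entry point).
+if __name__ == "__main__":
+    import dns.rdataclass
+    import dns.rdatatype
+
+    # ServiceMode: priority > 0, parameters allowed.
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN, dns.rdatatype.SVCB, "1 svc.example. alpn=h2 port=8443"
+    )
+    print(rd.to_text())  # 1 svc.example. alpn="h2" port="8443"
+    # AliasMode: priority 0, parameters forbidden.
+    alias = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.SVCB, "0 svc.example.")
+    print(dict(alias.params))  # {}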
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/tlsabase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/tlsabase.py
new file mode 100644
index 0000000..ddc196f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/tlsabase.py
@@ -0,0 +1,69 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2005-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import binascii
+import struct
+
+import dns.immutable
+import dns.rdata
+import dns.rdatatype
+
+
+@dns.immutable.immutable
+class TLSABase(dns.rdata.Rdata):
+ """Base class for TLSA and SMIMEA records"""
+
+ # see: RFC 6698
+
+ __slots__ = ["usage", "selector", "mtype", "cert"]
+
+ def __init__(self, rdclass, rdtype, usage, selector, mtype, cert):
+ super().__init__(rdclass, rdtype)
+ self.usage = self._as_uint8(usage)
+ self.selector = self._as_uint8(selector)
+ self.mtype = self._as_uint8(mtype)
+ self.cert = self._as_bytes(cert)
+
+ def to_text(self, origin=None, relativize=True, **kw):
+ kw = kw.copy()
+ chunksize = kw.pop("chunksize", 128)
+ cert = dns.rdata._hexify(
+ self.cert, chunksize=chunksize, **kw # pyright: ignore
+ )
+ return f"{self.usage} {self.selector} {self.mtype} {cert}"
+
+ @classmethod
+ def from_text(
+ cls, rdclass, rdtype, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ usage = tok.get_uint8()
+ selector = tok.get_uint8()
+ mtype = tok.get_uint8()
+ cert = tok.concatenate_remaining_identifiers().encode()
+ cert = binascii.unhexlify(cert)
+ return cls(rdclass, rdtype, usage, selector, mtype, cert)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ header = struct.pack("!BBB", self.usage, self.selector, self.mtype)
+ file.write(header)
+ file.write(self.cert)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ header = parser.get_struct("BBB")
+ cert = parser.get_remaining()
+ return cls(rdclass, rdtype, header[0], header[1], header[2], cert)
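+
+
+# Usage sketch (a DANE-EE/SPKI/SHA-256 association with an illustrative
+# digest; assumes dnspython's generic dns.rdata.from_text() entry point).
+if __name__ == "__main__":
+    import dns.rdataclass
+
+    rd = dns.rdata.from_text(
+        dns.rdataclass.IN,
+        dns.rdatatype.TLSA,
+        "3 1 1 "
+        "D2ABDE240D7CD3EE6B4B28C54DF034B97983A1D16E8A410E4561CB106618E971",
+    )
+    print(rd.usage, rd.selector, rd.mtype)  # 3 1 1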
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/txtbase.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/txtbase.py
new file mode 100644
index 0000000..5e5b24f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/txtbase.py
@@ -0,0 +1,109 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""TXT-like base class."""
+
+from typing import Any, Dict, Iterable, Tuple
+
+import dns.exception
+import dns.immutable
+import dns.name
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.renderer
+import dns.tokenizer
+
+
+@dns.immutable.immutable
+class TXTBase(dns.rdata.Rdata):
+ """Base class for rdata that is like a TXT record (see RFC 1035)."""
+
+ __slots__ = ["strings"]
+
+ def __init__(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ strings: Iterable[bytes | str],
+ ):
+ """Initialize a TXT-like rdata.
+
+        *rdclass*, an ``int``, is the rdataclass of the Rdata.
+
+        *rdtype*, an ``int``, is the rdatatype of the Rdata.
+
+ *strings*, a tuple of ``bytes``
+ """
+ super().__init__(rdclass, rdtype)
+        self.strings: Tuple[bytes, ...] = self._as_tuple(
+ strings, lambda x: self._as_bytes(x, True, 255)
+ )
+ if len(self.strings) == 0:
+ raise ValueError("the list of strings must not be empty")
+
+ def to_text(
+ self,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ **kw: Dict[str, Any],
+ ) -> str:
+ txt = ""
+ prefix = ""
+ for s in self.strings:
+ txt += f'{prefix}"{dns.rdata._escapify(s)}"'
+ prefix = " "
+ return txt
+
+ @classmethod
+ def from_text(
+ cls,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ tok: dns.tokenizer.Tokenizer,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ relativize_to: dns.name.Name | None = None,
+ ) -> dns.rdata.Rdata:
+ strings = []
+ for token in tok.get_remaining():
+ token = token.unescape_to_bytes()
+ # The 'if' below is always true in the current code, but we
+            # are leaving this check in, in case things change some day.
+ if not (
+ token.is_quoted_string() or token.is_identifier()
+ ): # pragma: no cover
+ raise dns.exception.SyntaxError("expected a string")
+ if len(token.value) > 255:
+ raise dns.exception.SyntaxError("string too long")
+ strings.append(token.value)
+ if len(strings) == 0:
+ raise dns.exception.UnexpectedEnd
+ return cls(rdclass, rdtype, strings)
+
+ def _to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ for s in self.strings:
+ with dns.renderer.prefixed_length(file, 1):
+ file.write(s)
+
+ @classmethod
+ def from_wire_parser(cls, rdclass, rdtype, parser, origin=None):
+ strings = []
+ while parser.remaining() > 0:
+ s = parser.get_counted_bytes()
+ strings.append(s)
+ return cls(rdclass, rdtype, strings)
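+
+
+# Usage sketch (illustrative strings; assumes dnspython's generic
+# dns.rdata.from_text() entry point). Each string is stored as bytes and
+# capped at 255 octets, per the checks above.
+if __name__ == "__main__":
+    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.TXT, '"Hello," " world!"')
+    print(rd.strings)  # (b'Hello,', b' world!')
+    print(rd.to_text())  # "Hello," " world!"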
diff --git a/tapdown/lib/python3.11/site-packages/dns/rdtypes/util.py b/tapdown/lib/python3.11/site-packages/dns/rdtypes/util.py
new file mode 100644
index 0000000..c17b154
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rdtypes/util.py
@@ -0,0 +1,269 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import collections
+import random
+import struct
+from typing import Any, Iterable, List, Tuple
+
+import dns.exception
+import dns.ipv4
+import dns.ipv6
+import dns.name
+import dns.rdata
+import dns.rdatatype
+import dns.tokenizer
+import dns.wire
+
+
+class Gateway:
+ """A helper class for the IPSECKEY gateway and AMTRELAY relay fields"""
+
+ name = ""
+
+ def __init__(self, type: Any, gateway: str | dns.name.Name | None = None):
+ self.type = dns.rdata.Rdata._as_uint8(type)
+ self.gateway = gateway
+ self._check()
+
+ @classmethod
+ def _invalid_type(cls, gateway_type):
+ return f"invalid {cls.name} type: {gateway_type}"
+
+ def _check(self):
+ if self.type == 0:
+ if self.gateway not in (".", None):
+ raise SyntaxError(f"invalid {self.name} for type 0")
+ self.gateway = None
+ elif self.type == 1:
+ # check that it's OK
+ assert isinstance(self.gateway, str)
+ dns.ipv4.inet_aton(self.gateway)
+ elif self.type == 2:
+ # check that it's OK
+ assert isinstance(self.gateway, str)
+ dns.ipv6.inet_aton(self.gateway)
+ elif self.type == 3:
+ if not isinstance(self.gateway, dns.name.Name):
+ raise SyntaxError(f"invalid {self.name}; not a name")
+ else:
+ raise SyntaxError(self._invalid_type(self.type))
+
+ def to_text(self, origin=None, relativize=True):
+ if self.type == 0:
+ return "."
+ elif self.type in (1, 2):
+ return self.gateway
+ elif self.type == 3:
+ assert isinstance(self.gateway, dns.name.Name)
+ return str(self.gateway.choose_relativity(origin, relativize))
+ else:
+ raise ValueError(self._invalid_type(self.type)) # pragma: no cover
+
+ @classmethod
+ def from_text(
+ cls, gateway_type, tok, origin=None, relativize=True, relativize_to=None
+ ):
+ if gateway_type in (0, 1, 2):
+ gateway = tok.get_string()
+ elif gateway_type == 3:
+ gateway = tok.get_name(origin, relativize, relativize_to)
+ else:
+ raise dns.exception.SyntaxError(
+ cls._invalid_type(gateway_type)
+ ) # pragma: no cover
+ return cls(gateway_type, gateway)
+
+ # pylint: disable=unused-argument
+ def to_wire(self, file, compress=None, origin=None, canonicalize=False):
+ if self.type == 0:
+ pass
+ elif self.type == 1:
+ assert isinstance(self.gateway, str)
+ file.write(dns.ipv4.inet_aton(self.gateway))
+ elif self.type == 2:
+ assert isinstance(self.gateway, str)
+ file.write(dns.ipv6.inet_aton(self.gateway))
+ elif self.type == 3:
+ assert isinstance(self.gateway, dns.name.Name)
+ self.gateway.to_wire(file, None, origin, False)
+ else:
+ raise ValueError(self._invalid_type(self.type)) # pragma: no cover
+
+ # pylint: enable=unused-argument
+
+ @classmethod
+ def from_wire_parser(cls, gateway_type, parser, origin=None):
+ if gateway_type == 0:
+ gateway = None
+ elif gateway_type == 1:
+ gateway = dns.ipv4.inet_ntoa(parser.get_bytes(4))
+ elif gateway_type == 2:
+ gateway = dns.ipv6.inet_ntoa(parser.get_bytes(16))
+ elif gateway_type == 3:
+ gateway = parser.get_name(origin)
+ else:
+ raise dns.exception.FormError(cls._invalid_type(gateway_type))
+ return cls(gateway_type, gateway)
+
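+# A minimal usage sketch (the addresses and the name below are
+# placeholders): the four gateway encodings normalize as follows.
+#
+#     Gateway(0)                                     # no gateway
+#     Gateway(1, "192.0.2.1")                        # IPv4 gateway
+#     Gateway(2, "2001:db8::1")                      # IPv6 gateway
+#     Gateway(3, dns.name.from_text("gw.example."))  # domain-name gateway
+#
+# to_text() renders type 0 as ".", types 1 and 2 as the address text, and
+# type 3 as the (possibly relativized) name.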
+
+class Bitmap:
+ """A helper class for the NSEC/NSEC3/CSYNC type bitmaps"""
+
+ type_name = ""
+
+ def __init__(self, windows: Iterable[Tuple[int, bytes]] | None = None):
+ last_window = -1
+ if windows is None:
+ windows = []
+ self.windows = windows
+ for window, bitmap in self.windows:
+ if not isinstance(window, int):
+ raise ValueError(f"bad {self.type_name} window type")
+ if window <= last_window:
+ raise ValueError(f"bad {self.type_name} window order")
+ if window > 256:
+ raise ValueError(f"bad {self.type_name} window number")
+ last_window = window
+ if not isinstance(bitmap, bytes):
+ raise ValueError(f"bad {self.type_name} octets type")
+ if len(bitmap) == 0 or len(bitmap) > 32:
+ raise ValueError(f"bad {self.type_name} octets")
+
+ def to_text(self) -> str:
+ text = ""
+ for window, bitmap in self.windows:
+ bits = []
+ for i, byte in enumerate(bitmap):
+ for j in range(0, 8):
+ if byte & (0x80 >> j):
+ rdtype = dns.rdatatype.RdataType.make(window * 256 + i * 8 + j)
+ bits.append(dns.rdatatype.to_text(rdtype))
+ text += " " + " ".join(bits)
+ return text
+
+ @classmethod
+ def from_text(cls, tok: "dns.tokenizer.Tokenizer") -> "Bitmap":
+ rdtypes = []
+ for token in tok.get_remaining():
+ rdtype = dns.rdatatype.from_text(token.unescape().value)
+ if rdtype == 0:
+ raise dns.exception.SyntaxError(f"{cls.type_name} with bit 0")
+ rdtypes.append(rdtype)
+ return cls.from_rdtypes(rdtypes)
+
+ @classmethod
+ def from_rdtypes(cls, rdtypes: List[dns.rdatatype.RdataType]) -> "Bitmap":
+ rdtypes = sorted(rdtypes)
+ window = 0
+ octets = 0
+ prior_rdtype = 0
+ bitmap = bytearray(b"\0" * 32)
+ windows = []
+ for rdtype in rdtypes:
+ if rdtype == prior_rdtype:
+ continue
+ prior_rdtype = rdtype
+ new_window = rdtype // 256
+ if new_window != window:
+ if octets != 0:
+ windows.append((window, bytes(bitmap[0:octets])))
+ bitmap = bytearray(b"\0" * 32)
+ window = new_window
+ offset = rdtype % 256
+ byte = offset // 8
+ bit = offset % 8
+ octets = byte + 1
+ bitmap[byte] = bitmap[byte] | (0x80 >> bit)
+ if octets != 0:
+ windows.append((window, bytes(bitmap[0:octets])))
+ return cls(windows)
+
+ def to_wire(self, file: Any) -> None:
+ for window, bitmap in self.windows:
+ file.write(struct.pack("!BB", window, len(bitmap)))
+ file.write(bitmap)
+
+ @classmethod
+ def from_wire_parser(cls, parser: "dns.wire.Parser") -> "Bitmap":
+ windows = []
+ while parser.remaining() > 0:
+ window = parser.get_uint8()
+ bitmap = parser.get_counted_bytes()
+ windows.append((window, bitmap))
+ return cls(windows)
+
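+# A minimal sketch of the windowed encoding (the types are illustrative):
+# window n covers rdata types n*256 .. n*256+255, and each window keeps
+# only the octets up to its highest set bit.
+#
+#     b = Bitmap.from_rdtypes(
+#         [dns.rdatatype.A, dns.rdatatype.MX, dns.rdatatype.RRSIG]
+#     )
+#     b.windows   # [(0, b'\x40\x01\x00\x00\x00\x02')]  A=1, MX=15, RRSIG=46
+#     b.to_text() # ' A MX RRSIG'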
+
+def _priority_table(items):
+ by_priority = collections.defaultdict(list)
+ for rdata in items:
+ by_priority[rdata._processing_priority()].append(rdata)
+ return by_priority
+
+
+def priority_processing_order(iterable):
+ items = list(iterable)
+ if len(items) == 1:
+ return items
+ by_priority = _priority_table(items)
+ ordered = []
+ for k in sorted(by_priority.keys()):
+ rdatas = by_priority[k]
+ random.shuffle(rdatas)
+ ordered.extend(rdatas)
+ return ordered
+
+
+_no_weight = 0.1
+
+
+def weighted_processing_order(iterable):
+ items = list(iterable)
+ if len(items) == 1:
+ return items
+ by_priority = _priority_table(items)
+ ordered = []
+ for k in sorted(by_priority.keys()):
+ rdatas = by_priority[k]
+ total = sum(rdata._processing_weight() or _no_weight for rdata in rdatas)
+ while len(rdatas) > 1:
+ r = random.uniform(0, total)
+ for n, rdata in enumerate(rdatas): # noqa: B007
+ weight = rdata._processing_weight() or _no_weight
+ if weight > r:
+ break
+ r -= weight
+ total -= weight # pyright: ignore[reportPossiblyUnboundVariable]
+ # pylint: disable=undefined-loop-variable
+ ordered.append(rdata) # pyright: ignore[reportPossiblyUnboundVariable]
+ del rdatas[n] # pyright: ignore[reportPossiblyUnboundVariable]
+ ordered.append(rdatas[0])
+ return ordered
+
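+# A sketch of the interface these functions expect (the stub class below is
+# hypothetical, not part of dnspython): any object exposing
+# _processing_priority() and _processing_weight() can be ordered. Lower
+# priorities come first; within a priority, weighted_processing_order()
+# picks entries with probability proportional to weight, in the style of
+# RFC 2782 SRV selection.
+#
+#     class FakeSrv:
+#         def __init__(self, priority, weight):
+#             self.priority, self.weight = priority, weight
+#         def _processing_priority(self):
+#             return self.priority
+#         def _processing_weight(self):
+#             return self.weight
+#
+#     weighted_processing_order(
+#         [FakeSrv(10, 60), FakeSrv(10, 40), FakeSrv(20, 5)]
+#     )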
+
+def parse_formatted_hex(formatted, num_chunks, chunk_size, separator):
+ if len(formatted) != num_chunks * (chunk_size + 1) - 1:
+ raise ValueError("invalid formatted hex string")
+ value = b""
+ for _ in range(num_chunks):
+ chunk = formatted[0:chunk_size]
+ value += int(chunk, 16).to_bytes(chunk_size // 2, "big")
+ formatted = formatted[chunk_size:]
+ if len(formatted) > 0 and formatted[0] != separator:
+ raise ValueError("invalid formatted hex string")
+ formatted = formatted[1:]
+ return value
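+
+
+# For example (a hedged sketch; the input is a placeholder): an EUI-48
+# style string of six two-hex-digit chunks separated by "-" parses to six
+# raw bytes.
+#
+#     parse_formatted_hex("01-23-45-67-89-ab", 6, 2, "-")
+#     # -> b'\x01\x23\x45\x67\x89\xab'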
diff --git a/tapdown/lib/python3.11/site-packages/dns/renderer.py b/tapdown/lib/python3.11/site-packages/dns/renderer.py
new file mode 100644
index 0000000..cc912b2
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/renderer.py
@@ -0,0 +1,355 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Help for building DNS wire format messages"""
+
+import contextlib
+import io
+import random
+import struct
+import time
+
+import dns.edns
+import dns.exception
+import dns.rdataclass
+import dns.rdatatype
+import dns.tsig
+
+# Note we can't import dns.message for circularity reasons.
+
+QUESTION = 0
+ANSWER = 1
+AUTHORITY = 2
+ADDITIONAL = 3
+
+
+@contextlib.contextmanager
+def prefixed_length(output, length_length):
+ output.write(b"\00" * length_length)
+ start = output.tell()
+ yield
+ end = output.tell()
+ length = end - start
+ if length > 0:
+ try:
+ output.seek(start - length_length)
+ try:
+ output.write(length.to_bytes(length_length, "big"))
+ except OverflowError:
+ raise dns.exception.FormError
+ finally:
+ output.seek(end)
+
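+# A minimal sketch of the back-patching behavior (assuming this module's
+# imports): the placeholder length octets written up front are overwritten
+# once the body size is known.
+#
+#     buf = io.BytesIO()
+#     with prefixed_length(buf, 2):
+#         buf.write(b"hello")
+#     buf.getvalue()  # b'\x00\x05hello'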
+
+class Renderer:
+ """Helper class for building DNS wire-format messages.
+
+ Most applications can use the higher-level ``dns.message.Message``
+ class and its to_wire() method to generate wire-format messages.
+ This class is for those applications which need finer control
+ over the generation of messages.
+
+ Typical use::
+
+ r = dns.renderer.Renderer(id=1, flags=0x80, max_size=512)
+ r.add_question(qname, qtype, qclass)
+ r.add_rrset(dns.renderer.ANSWER, rrset_1)
+ r.add_rrset(dns.renderer.ANSWER, rrset_2)
+ r.add_rrset(dns.renderer.AUTHORITY, ns_rrset)
+ r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_1)
+ r.add_rrset(dns.renderer.ADDITIONAL, ad_rrset_2)
+ r.add_edns(0, 0, 4096)
+ r.write_header()
+ r.add_tsig(keyname, secret, 300, 1, 0, '', request_mac)
+ wire = r.get_wire()
+
+ If padding is going to be used, then the OPT record MUST be
+ written after everything else in the additional section except for
+ the TSIG (if any).
+
+ output: an io.BytesIO, where rendering is written
+
+ id: the message id
+
+ flags: the message flags
+
+ max_size: the maximum size of the message
+
+ origin: the origin to use when rendering relative names
+
+ compress: the compression table
+
+ section: an int, the section currently being rendered
+
+ counts: list of the number of RRs in each section
+
+ mac: the MAC of the rendered message (if TSIG was used)
+ """
+
+ def __init__(self, id=None, flags=0, max_size=65535, origin=None):
+ """Initialize a new renderer."""
+
+ self.output = io.BytesIO()
+ if id is None:
+ self.id = random.randint(0, 65535)
+ else:
+ self.id = id
+ self.flags = flags
+ self.max_size = max_size
+ self.origin = origin
+ self.compress = {}
+ self.section = QUESTION
+ self.counts = [0, 0, 0, 0]
+ self.output.write(b"\x00" * 12)
+ self.mac = ""
+ self.reserved = 0
+ self.was_padded = False
+
+ def _rollback(self, where):
+ """Truncate the output buffer at offset *where*, and remove any
+ compression table entries that pointed beyond the truncation
+ point.
+ """
+
+ self.output.seek(where)
+ self.output.truncate()
+ keys_to_delete = []
+ for k, v in self.compress.items():
+ if v >= where:
+ keys_to_delete.append(k)
+ for k in keys_to_delete:
+ del self.compress[k]
+
+ def _set_section(self, section):
+ """Set the renderer's current section.
+
+ Sections must be rendered in order: QUESTION, ANSWER, AUTHORITY,
+ ADDITIONAL. Sections may be empty.
+
+ Raises dns.exception.FormError if an attempt was made to set
+ a section value less than the current section.
+ """
+
+ if self.section != section:
+ if self.section > section:
+ raise dns.exception.FormError
+ self.section = section
+
+ @contextlib.contextmanager
+ def _track_size(self):
+ start = self.output.tell()
+ yield start
+ if self.output.tell() > self.max_size:
+ self._rollback(start)
+ raise dns.exception.TooBig
+
+ @contextlib.contextmanager
+ def _temporarily_seek_to(self, where):
+ current = self.output.tell()
+ try:
+ self.output.seek(where)
+ yield
+ finally:
+ self.output.seek(current)
+
+ def add_question(self, qname, rdtype, rdclass=dns.rdataclass.IN):
+ """Add a question to the message."""
+
+ self._set_section(QUESTION)
+ with self._track_size():
+ qname.to_wire(self.output, self.compress, self.origin)
+ self.output.write(struct.pack("!HH", rdtype, rdclass))
+ self.counts[QUESTION] += 1
+
+ def add_rrset(self, section, rrset, **kw):
+ """Add the rrset to the specified section.
+
+ Any keyword arguments are passed on to the rdataset's to_wire()
+ routine.
+ """
+
+ self._set_section(section)
+ with self._track_size():
+ n = rrset.to_wire(self.output, self.compress, self.origin, **kw)
+ self.counts[section] += n
+
+ def add_rdataset(self, section, name, rdataset, **kw):
+ """Add the rdataset to the specified section, using the specified
+ name as the owner name.
+
+ Any keyword arguments are passed on to the rdataset's to_wire()
+ routine.
+ """
+
+ self._set_section(section)
+ with self._track_size():
+ n = rdataset.to_wire(name, self.output, self.compress, self.origin, **kw)
+ self.counts[section] += n
+
+ def add_opt(self, opt, pad=0, opt_size=0, tsig_size=0):
+ """Add *opt* to the additional section, applying padding if desired. The
+ padding will take the specified precomputed OPT size and TSIG size into
+ account.
+
+ Note that we don't have a reliable way of knowing how big a GSS-TSIG digest
+ might be, so we might not get an even multiple of the pad in that case."""
+ if pad:
+ ttl = opt.ttl
+ assert opt_size >= 11
+ opt_rdata = opt[0]
+ size_without_padding = self.output.tell() + opt_size + tsig_size
+ remainder = size_without_padding % pad
+ if remainder:
+ pad = b"\x00" * (pad - remainder)
+ else:
+ pad = b""
+ options = list(opt_rdata.options)
+ options.append(dns.edns.GenericOption(dns.edns.OptionType.PADDING, pad))
+ opt = dns.message.Message._make_opt( # pyright: ignore
+ ttl, opt_rdata.rdclass, options
+ )
+ self.was_padded = True
+ self.add_rrset(ADDITIONAL, opt)
+
+ def add_edns(self, edns, ednsflags, payload, options=None):
+ """Add an EDNS OPT record to the message."""
+
+ # make sure the EDNS version in ednsflags agrees with edns
+ ednsflags &= 0xFF00FFFF
+ ednsflags |= edns << 16
+ opt = dns.message.Message._make_opt( # pyright: ignore
+ ednsflags, payload, options
+ )
+ self.add_opt(opt)
+
+ def add_tsig(
+ self,
+ keyname,
+ secret,
+ fudge,
+ id,
+ tsig_error,
+ other_data,
+ request_mac,
+ algorithm=dns.tsig.default_algorithm,
+ ):
+ """Add a TSIG signature to the message."""
+
+ s = self.output.getvalue()
+
+ if isinstance(secret, dns.tsig.Key):
+ key = secret
+ else:
+ key = dns.tsig.Key(keyname, secret, algorithm)
+ tsig = dns.message.Message._make_tsig( # pyright: ignore
+ keyname, algorithm, 0, fudge, b"", id, tsig_error, other_data
+ )
+ (tsig, _) = dns.tsig.sign(s, key, tsig[0], int(time.time()), request_mac)
+ self._write_tsig(tsig, keyname)
+
+ def add_multi_tsig(
+ self,
+ ctx,
+ keyname,
+ secret,
+ fudge,
+ id,
+ tsig_error,
+ other_data,
+ request_mac,
+ algorithm=dns.tsig.default_algorithm,
+ ):
+ """Add a TSIG signature to the message. Unlike add_tsig(), this can be
+ used for a series of consecutive DNS envelopes, e.g. for a zone
+ transfer over TCP [RFC2845, 4.4].
+
+ For the first message in the sequence, give ctx=None. For each
+ subsequent message, give the ctx that was returned from the
+ add_multi_tsig() call for the previous message."""
+
+ s = self.output.getvalue()
+
+ if isinstance(secret, dns.tsig.Key):
+ key = secret
+ else:
+ key = dns.tsig.Key(keyname, secret, algorithm)
+ tsig = dns.message.Message._make_tsig( # pyright: ignore
+ keyname, algorithm, 0, fudge, b"", id, tsig_error, other_data
+ )
+ (tsig, ctx) = dns.tsig.sign(
+ s, key, tsig[0], int(time.time()), request_mac, ctx, True
+ )
+ self._write_tsig(tsig, keyname)
+ return ctx
+
+ def _write_tsig(self, tsig, keyname):
+ if self.was_padded:
+ compress = None
+ else:
+ compress = self.compress
+ self._set_section(ADDITIONAL)
+ with self._track_size():
+ keyname.to_wire(self.output, compress, self.origin)
+ self.output.write(
+ struct.pack("!HHI", dns.rdatatype.TSIG, dns.rdataclass.ANY, 0)
+ )
+ with prefixed_length(self.output, 2):
+ tsig.to_wire(self.output)
+
+ self.counts[ADDITIONAL] += 1
+ with self._temporarily_seek_to(10):
+ self.output.write(struct.pack("!H", self.counts[ADDITIONAL]))
+
+ def write_header(self):
+ """Write the DNS message header.
+
+ Writing the DNS message header is done after all sections
+ have been rendered, but before the optional TSIG signature
+ is added.
+ """
+
+ with self._temporarily_seek_to(0):
+ self.output.write(
+ struct.pack(
+ "!HHHHHH",
+ self.id,
+ self.flags,
+ self.counts[0],
+ self.counts[1],
+ self.counts[2],
+ self.counts[3],
+ )
+ )
+
+ def get_wire(self):
+ """Return the wire format message."""
+
+ return self.output.getvalue()
+
+ def reserve(self, size: int) -> None:
+ """Reserve *size* bytes."""
+ if size < 0:
+ raise ValueError("reserved amount must be non-negative")
+ if size > self.max_size:
+ raise ValueError("cannot reserve more than the maximum size")
+ self.reserved += size
+ self.max_size -= size
+
+ def release_reserved(self) -> None:
+ """Release the reserved bytes."""
+ self.max_size += self.reserved
+ self.reserved = 0
diff --git a/tapdown/lib/python3.11/site-packages/dns/resolver.py b/tapdown/lib/python3.11/site-packages/dns/resolver.py
new file mode 100644
index 0000000..923bb4b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/resolver.py
@@ -0,0 +1,2068 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS stub resolver."""
+
+import contextlib
+import random
+import socket
+import sys
+import threading
+import time
+import warnings
+from typing import Any, Dict, Iterator, List, Sequence, Tuple, cast
+from urllib.parse import urlparse
+
+import dns._ddr
+import dns.edns
+import dns.exception
+import dns.flags
+import dns.inet
+import dns.ipv4
+import dns.ipv6
+import dns.message
+import dns.name
+import dns.nameserver
+import dns.query
+import dns.rcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rdtypes.ANY.PTR
+import dns.rdtypes.svcbbase
+import dns.reversename
+import dns.tsig
+
+if sys.platform == "win32": # pragma: no cover
+ import dns.win32util
+
+
+class NXDOMAIN(dns.exception.DNSException):
+ """The DNS query name does not exist."""
+
+ supp_kwargs = {"qnames", "responses"}
+ fmt = None # we have our own __str__ implementation
+
+ # pylint: disable=arguments-differ
+
+ # We do this as otherwise mypy complains about unexpected keyword argument
+ # idna_exception
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def _check_kwargs(self, qnames, responses=None): # pyright: ignore
+ if not isinstance(qnames, list | tuple | set):
+ raise AttributeError("qnames must be a list, tuple or set")
+ if len(qnames) == 0:
+ raise AttributeError("qnames must contain at least one element")
+ if responses is None:
+ responses = {}
+ elif not isinstance(responses, dict):
+ raise AttributeError("responses must be a dict(qname=response)")
+ kwargs = dict(qnames=qnames, responses=responses)
+ return kwargs
+
+ def __str__(self) -> str:
+ if "qnames" not in self.kwargs:
+ return super().__str__()
+ qnames = self.kwargs["qnames"]
+ if len(qnames) > 1:
+ msg = "None of DNS query names exist"
+ else:
+ msg = "The DNS query name does not exist"
+ qnames = ", ".join(map(str, qnames))
+ return f"{msg}: {qnames}"
+
+ @property
+ def canonical_name(self):
+ """Return the unresolved canonical name."""
+ if "qnames" not in self.kwargs:
+ raise TypeError("parametrized exception required")
+ for qname in self.kwargs["qnames"]:
+ response = self.kwargs["responses"][qname]
+ try:
+ cname = response.canonical_name()
+ if cname != qname:
+ return cname
+ except Exception: # pragma: no cover
+ # We can just eat this exception as it means there was
+ # something wrong with the response.
+ pass
+ return self.kwargs["qnames"][0]
+
+ def __add__(self, e_nx):
+ """Augment by results from another NXDOMAIN exception."""
+ qnames0 = list(self.kwargs.get("qnames", []))
+ responses0 = dict(self.kwargs.get("responses", {}))
+ responses1 = e_nx.kwargs.get("responses", {})
+ for qname1 in e_nx.kwargs.get("qnames", []):
+ if qname1 not in qnames0:
+ qnames0.append(qname1)
+ if qname1 in responses1:
+ responses0[qname1] = responses1[qname1]
+ return NXDOMAIN(qnames=qnames0, responses=responses0)
+
+ def qnames(self):
+ """All of the names that were tried.
+
+ Returns a list of ``dns.name.Name``.
+ """
+ return self.kwargs["qnames"]
+
+ def responses(self):
+ """A map from queried names to their NXDOMAIN responses.
+
+ Returns a dict mapping a ``dns.name.Name`` to a
+ ``dns.message.Message``.
+ """
+ return self.kwargs["responses"]
+
+ def response(self, qname):
+ """The response for query *qname*.
+
+ Returns a ``dns.message.Message``.
+ """
+ return self.kwargs["responses"][qname]
+
+
+class YXDOMAIN(dns.exception.DNSException):
+ """The DNS query name is too long after DNAME substitution."""
+
+
+ErrorTuple = Tuple[
+ str | None,
+ bool,
+ int,
+ Exception | str,
+ dns.message.Message | None,
+]
+
+
+def _errors_to_text(errors: List[ErrorTuple]) -> List[str]:
+ """Turn a resolution errors trace into a list of text."""
+ texts = []
+ for err in errors:
+ texts.append(f"Server {err[0]} answered {err[3]}")
+ return texts
+
+
+class LifetimeTimeout(dns.exception.Timeout):
+ """The resolution lifetime expired."""
+
+ msg = "The resolution lifetime expired."
+ fmt = f"{msg[:-1]} after {{timeout:.3f}} seconds: {{errors}}"
+ supp_kwargs = {"timeout", "errors"}
+
+ # We do this as otherwise mypy complains about unexpected keyword argument
+ # idna_exception
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def _fmt_kwargs(self, **kwargs):
+ srv_msgs = _errors_to_text(kwargs["errors"])
+ return super()._fmt_kwargs(
+ timeout=kwargs["timeout"], errors="; ".join(srv_msgs)
+ )
+
+
+# We added more detail to resolution timeouts, but they are still
+# subclasses of dns.exception.Timeout for backwards compatibility. We also
+# keep dns.resolver.Timeout defined for backwards compatibility.
+Timeout = LifetimeTimeout
+
+
+class NoAnswer(dns.exception.DNSException):
+ """The DNS response does not contain an answer to the question."""
+
+ fmt = "The DNS response does not contain an answer to the question: {query}"
+ supp_kwargs = {"response"}
+
+ # We do this as otherwise mypy complains about unexpected keyword argument
+ # idna_exception
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def _fmt_kwargs(self, **kwargs):
+ return super()._fmt_kwargs(query=kwargs["response"].question)
+
+ def response(self):
+ return self.kwargs["response"]
+
+
+class NoNameservers(dns.exception.DNSException):
+ """All nameservers failed to answer the query.
+
+ errors: list of servers and their respective errors.
+ The type of errors is
+ [(server IP address, any object convertible to string)].
+ A non-empty errors list will add an explanatory message.
+ """
+
+ msg = "All nameservers failed to answer the query."
+ fmt = f"{msg[:-1]} {{query}}: {{errors}}"
+ supp_kwargs = {"request", "errors"}
+
+ # We do this as otherwise mypy complains about unexpected keyword argument
+ # idna_exception
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ def _fmt_kwargs(self, **kwargs):
+ srv_msgs = _errors_to_text(kwargs["errors"])
+ return super()._fmt_kwargs(
+ query=kwargs["request"].question, errors="; ".join(srv_msgs)
+ )
+
+
+class NotAbsolute(dns.exception.DNSException):
+ """An absolute domain name is required but a relative name was provided."""
+
+
+class NoRootSOA(dns.exception.DNSException):
+ """There is no SOA RR at the DNS root name. This should never happen!"""
+
+
+class NoMetaqueries(dns.exception.DNSException):
+ """DNS metaqueries are not allowed."""
+
+
+class NoResolverConfiguration(dns.exception.DNSException):
+ """Resolver configuration could not be read or specified no nameservers."""
+
+
+class Answer:
+ """DNS stub resolver answer.
+
+ Instances of this class bundle up the result of a successful DNS
+ resolution.
+
+ For convenience, the answer object implements much of the sequence
+ protocol, forwarding to its ``rrset`` attribute. E.g.
+ ``for a in answer`` is equivalent to ``for a in answer.rrset``.
+ ``answer[i]`` is equivalent to ``answer.rrset[i]``, and
+ ``answer[i:j]`` is equivalent to ``answer.rrset[i:j]``.
+
+ Note that CNAMEs or DNAMEs in the response may mean that the answer
+ RRset's name is not the query name.
+ """
+
+ def __init__(
+ self,
+ qname: dns.name.Name,
+ rdtype: dns.rdatatype.RdataType,
+ rdclass: dns.rdataclass.RdataClass,
+ response: dns.message.QueryMessage,
+ nameserver: str | None = None,
+ port: int | None = None,
+ ) -> None:
+ self.qname = qname
+ self.rdtype = rdtype
+ self.rdclass = rdclass
+ self.response = response
+ self.nameserver = nameserver
+ self.port = port
+ self.chaining_result = response.resolve_chaining()
+ # Copy some attributes out of chaining_result for backwards
+ # compatibility and convenience.
+ self.canonical_name = self.chaining_result.canonical_name
+ self.rrset = self.chaining_result.answer
+ self.expiration = time.time() + self.chaining_result.minimum_ttl
+
+ def __getattr__(self, attr): # pragma: no cover
+ if self.rrset is not None:
+ if attr == "name":
+ return self.rrset.name
+ elif attr == "ttl":
+ return self.rrset.ttl
+ elif attr == "covers":
+ return self.rrset.covers
+ elif attr == "rdclass":
+ return self.rrset.rdclass
+ elif attr == "rdtype":
+ return self.rrset.rdtype
+ else:
+ raise AttributeError(attr)
+
+ def __len__(self) -> int:
+ return self.rrset is not None and len(self.rrset) or 0
+
+ def __iter__(self) -> Iterator[Any]:
+ return self.rrset is not None and iter(self.rrset) or iter(tuple())
+
+ def __getitem__(self, i):
+ if self.rrset is None:
+ raise IndexError
+ return self.rrset[i]
+
+ def __delitem__(self, i):
+ if self.rrset is None:
+ raise IndexError
+ del self.rrset[i]
+
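+# A short sketch of the sequence protocol (resolver and query name are
+# placeholders):
+#
+#     answer = resolver.resolve("www.example.com", "A")
+#     len(answer) == len(answer.rrset)
+#     answer[0] is answer.rrset[0]
+#     addresses = [rdata.address for rdata in answer]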
+
+class Answers(dict):
+ """A dict of DNS stub resolver answers, indexed by type."""
+
+
+class EmptyHostAnswers(dns.exception.DNSException):
+ """The HostAnswers has no addresses"""
+
+
+class HostAnswers(Answers):
+ """A dict of DNS stub resolver answers to a host name lookup, indexed by
+ type.
+ """
+
+ @classmethod
+ def make(
+ cls,
+ v6: Answer | None = None,
+ v4: Answer | None = None,
+ add_empty: bool = True,
+ ) -> "HostAnswers":
+ answers = HostAnswers()
+ if v6 is not None and (add_empty or v6.rrset):
+ answers[dns.rdatatype.AAAA] = v6
+ if v4 is not None and (add_empty or v4.rrset):
+ answers[dns.rdatatype.A] = v4
+ return answers
+
+ # Returns pairs of (address, family) from this result, potentially
+ # filtering by address family.
+ def addresses_and_families(
+ self, family: int = socket.AF_UNSPEC
+ ) -> Iterator[Tuple[str, int]]:
+ if family == socket.AF_UNSPEC:
+ yield from self.addresses_and_families(socket.AF_INET6)
+ yield from self.addresses_and_families(socket.AF_INET)
+ return
+ elif family == socket.AF_INET6:
+ answer = self.get(dns.rdatatype.AAAA)
+ elif family == socket.AF_INET:
+ answer = self.get(dns.rdatatype.A)
+ else: # pragma: no cover
+ raise NotImplementedError(f"unknown address family {family}")
+ if answer:
+ for rdata in answer:
+ yield (rdata.address, family)
+
+ # Returns addresses from this result, potentially filtering by
+ # address family.
+ def addresses(self, family: int = socket.AF_UNSPEC) -> Iterator[str]:
+ return (pair[0] for pair in self.addresses_and_families(family))
+
+ # Returns the canonical name from this result.
+ def canonical_name(self) -> dns.name.Name:
+ answer = self.get(dns.rdatatype.AAAA, self.get(dns.rdatatype.A))
+ if answer is None:
+ raise EmptyHostAnswers
+ return answer.canonical_name
+
+
+class CacheStatistics:
+ """Cache Statistics"""
+
+ def __init__(self, hits: int = 0, misses: int = 0) -> None:
+ self.hits = hits
+ self.misses = misses
+
+ def reset(self) -> None:
+ self.hits = 0
+ self.misses = 0
+
+ def clone(self) -> "CacheStatistics":
+ return CacheStatistics(self.hits, self.misses)
+
+
+class CacheBase:
+ def __init__(self) -> None:
+ self.lock = threading.Lock()
+ self.statistics = CacheStatistics()
+
+ def reset_statistics(self) -> None:
+ """Reset all statistics to zero."""
+ with self.lock:
+ self.statistics.reset()
+
+ def hits(self) -> int:
+ """How many hits has the cache had?"""
+ with self.lock:
+ return self.statistics.hits
+
+ def misses(self) -> int:
+ """How many misses has the cache had?"""
+ with self.lock:
+ return self.statistics.misses
+
+ def get_statistics_snapshot(self) -> CacheStatistics:
+ """Return a consistent snapshot of all the statistics.
+
+ If running with multiple threads, it's better to take a
+ snapshot than to call statistics methods such as hits() and
+ misses() individually.
+ """
+ with self.lock:
+ return self.statistics.clone()
+
+
+CacheKey = Tuple[dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass]
+
+
+class Cache(CacheBase):
+ """Simple thread-safe DNS answer cache."""
+
+ def __init__(self, cleaning_interval: float = 300.0) -> None:
+ """*cleaning_interval*, a ``float`` is the number of seconds between
+ periodic cleanings.
+ """
+
+ super().__init__()
+ self.data: Dict[CacheKey, Answer] = {}
+ self.cleaning_interval = cleaning_interval
+ self.next_cleaning: float = time.time() + self.cleaning_interval
+
+ def _maybe_clean(self) -> None:
+ """Clean the cache if it's time to do so."""
+
+ now = time.time()
+ if self.next_cleaning <= now:
+ keys_to_delete = []
+ for k, v in self.data.items():
+ if v.expiration <= now:
+ keys_to_delete.append(k)
+ for k in keys_to_delete:
+ del self.data[k]
+ now = time.time()
+ self.next_cleaning = now + self.cleaning_interval
+
+ def get(self, key: CacheKey) -> Answer | None:
+ """Get the answer associated with *key*.
+
+ Returns None if no answer is cached for the key.
+
+ *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
+ tuple whose values are the query name, rdtype, and rdclass respectively.
+
+ Returns a ``dns.resolver.Answer`` or ``None``.
+ """
+
+ with self.lock:
+ self._maybe_clean()
+ v = self.data.get(key)
+ if v is None or v.expiration <= time.time():
+ self.statistics.misses += 1
+ return None
+ self.statistics.hits += 1
+ return v
+
+ def put(self, key: CacheKey, value: Answer) -> None:
+ """Associate key and value in the cache.
+
+ *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
+ tuple whose values are the query name, rdtype, and rdclass respectively.
+
+ *value*, a ``dns.resolver.Answer``, the answer.
+ """
+
+ with self.lock:
+ self._maybe_clean()
+ self.data[key] = value
+
+ def flush(self, key: CacheKey | None = None) -> None:
+ """Flush the cache.
+
+ If *key* is not ``None``, only that item is flushed. Otherwise the entire cache
+ is flushed.
+
+ *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
+ tuple whose values are the query name, rdtype, and rdclass respectively.
+ """
+
+ with self.lock:
+ if key is not None:
+ if key in self.data:
+ del self.data[key]
+ else:
+ self.data = {}
+ self.next_cleaning = time.time() + self.cleaning_interval
+
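+# A hedged usage sketch: attach a Cache to a resolver, and repeated lookups
+# within the TTL are answered locally (the query name is a placeholder).
+#
+#     resolver = Resolver()
+#     resolver.cache = Cache()
+#     resolver.resolve("www.example.com", "A")  # network query, then cached
+#     resolver.resolve("www.example.com", "A")  # cache hit until expiration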
+
+class LRUCacheNode:
+ """LRUCache node."""
+
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+ self.hits = 0
+ self.prev = self
+ self.next = self
+
+ def link_after(self, node: "LRUCacheNode") -> None:
+ self.prev = node
+ self.next = node.next
+ node.next.prev = self
+ node.next = self
+
+ def unlink(self) -> None:
+ self.next.prev = self.prev
+ self.prev.next = self.next
+
+
+class LRUCache(CacheBase):
+ """Thread-safe, bounded, least-recently-used DNS answer cache.
+
+ This cache is better than the simple cache (above) if you're
+ running a web crawler or other process that does a lot of
+ resolutions. The LRUCache has a maximum number of nodes, and when
+ it is full, the least-recently used node is removed to make space
+ for a new one.
+ """
+
+ def __init__(self, max_size: int = 100000) -> None:
+ """*max_size*, an ``int``, is the maximum number of nodes to cache;
+ it must be greater than 0.
+ """
+
+ super().__init__()
+ self.data: Dict[CacheKey, LRUCacheNode] = {}
+ self.set_max_size(max_size)
+ self.sentinel: LRUCacheNode = LRUCacheNode(None, None)
+ self.sentinel.prev = self.sentinel
+ self.sentinel.next = self.sentinel
+
+ def set_max_size(self, max_size: int) -> None:
+ if max_size < 1:
+ max_size = 1
+ self.max_size = max_size
+
+ def get(self, key: CacheKey) -> Answer | None:
+ """Get the answer associated with *key*.
+
+ Returns None if no answer is cached for the key.
+
+ *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
+ tuple whose values are the query name, rdtype, and rdclass respectively.
+
+ Returns a ``dns.resolver.Answer`` or ``None``.
+ """
+
+ with self.lock:
+ node = self.data.get(key)
+ if node is None:
+ self.statistics.misses += 1
+ return None
+ # Unlink because we're either going to move the node to the front
+ # of the LRU list or we're going to free it.
+ node.unlink()
+ if node.value.expiration <= time.time():
+ del self.data[node.key]
+ self.statistics.misses += 1
+ return None
+ node.link_after(self.sentinel)
+ self.statistics.hits += 1
+ node.hits += 1
+ return node.value
+
+ def get_hits_for_key(self, key: CacheKey) -> int:
+ """Return the number of cache hits associated with the specified key."""
+ with self.lock:
+ node = self.data.get(key)
+ if node is None or node.value.expiration <= time.time():
+ return 0
+ else:
+ return node.hits
+
+ def put(self, key: CacheKey, value: Answer) -> None:
+ """Associate key and value in the cache.
+
+ *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
+ tuple whose values are the query name, rdtype, and rdclass respectively.
+
+ *value*, a ``dns.resolver.Answer``, the answer.
+ """
+
+ with self.lock:
+ node = self.data.get(key)
+ if node is not None:
+ node.unlink()
+ del self.data[node.key]
+ while len(self.data) >= self.max_size:
+ gnode = self.sentinel.prev
+ gnode.unlink()
+ del self.data[gnode.key]
+ node = LRUCacheNode(key, value)
+ node.link_after(self.sentinel)
+ self.data[key] = node
+
+ def flush(self, key: CacheKey | None = None) -> None:
+ """Flush the cache.
+
+ If *key* is not ``None``, only that item is flushed. Otherwise the entire cache
+ is flushed.
+
+ *key*, a ``(dns.name.Name, dns.rdatatype.RdataType, dns.rdataclass.RdataClass)``
+ tuple whose values are the query name, rdtype, and rdclass respectively.
+ """
+
+ with self.lock:
+ if key is not None:
+ node = self.data.get(key)
+ if node is not None:
+ node.unlink()
+ del self.data[node.key]
+ else:
+ gnode = self.sentinel.next
+ while gnode != self.sentinel:
+ next = gnode.next
+ gnode.unlink()
+ gnode = next
+ self.data = {}
+
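+# Sketch: the bounded variant evicts the least-recently-used entry once
+# max_size is reached, so memory use stays flat under heavy query volume.
+#
+#     resolver = Resolver()
+#     resolver.cache = LRUCache(max_size=10000)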
+
+class _Resolution:
+ """Helper class for dns.resolver.Resolver.resolve().
+
+ All of the "business logic" of resolution is encapsulated in this
+ class, allowing us to have multiple resolve() implementations
+ using different I/O schemes without copying all of the
+ complicated logic.
+
+ This class is a "friend" to dns.resolver.Resolver and manipulates
+ resolver data structures directly.
+ """
+
+ def __init__(
+ self,
+ resolver: "BaseResolver",
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ rdclass: dns.rdataclass.RdataClass | str,
+ tcp: bool,
+ raise_on_no_answer: bool,
+ search: bool | None,
+ ) -> None:
+ if isinstance(qname, str):
+ qname = dns.name.from_text(qname, None)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ if dns.rdatatype.is_metatype(rdtype):
+ raise NoMetaqueries
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ if dns.rdataclass.is_metaclass(rdclass):
+ raise NoMetaqueries
+ self.resolver = resolver
+ self.qnames_to_try = resolver._get_qnames_to_try(qname, search)
+ self.qnames = self.qnames_to_try[:]
+ self.rdtype = rdtype
+ self.rdclass = rdclass
+ self.tcp = tcp
+ self.raise_on_no_answer = raise_on_no_answer
+ self.nxdomain_responses: Dict[dns.name.Name, dns.message.QueryMessage] = {}
+ # Initialize other things to help analysis tools
+ self.qname = dns.name.empty
+ self.nameservers: List[dns.nameserver.Nameserver] = []
+ self.current_nameservers: List[dns.nameserver.Nameserver] = []
+ self.errors: List[ErrorTuple] = []
+ self.nameserver: dns.nameserver.Nameserver | None = None
+ self.tcp_attempt = False
+ self.retry_with_tcp = False
+ self.request: dns.message.QueryMessage | None = None
+ self.backoff = 0.0
+
+ def next_request(
+ self,
+ ) -> Tuple[dns.message.QueryMessage | None, Answer | None]:
+ """Get the next request to send, and check the cache.
+
+ Returns a (request, answer) tuple. At most one of request or
+ answer will not be None.
+ """
+
+ # We return a tuple instead of Union[Message,Answer] as it lets
+ # the caller avoid isinstance().
+
+ while len(self.qnames) > 0:
+ self.qname = self.qnames.pop(0)
+
+ # Do we know the answer?
+ if self.resolver.cache:
+ answer = self.resolver.cache.get(
+ (self.qname, self.rdtype, self.rdclass)
+ )
+ if answer is not None:
+ if answer.rrset is None and self.raise_on_no_answer:
+ raise NoAnswer(response=answer.response)
+ else:
+ return (None, answer)
+ answer = self.resolver.cache.get(
+ (self.qname, dns.rdatatype.ANY, self.rdclass)
+ )
+ if answer is not None and answer.response.rcode() == dns.rcode.NXDOMAIN:
+ # cached NXDOMAIN; record it and continue to next
+ # name.
+ self.nxdomain_responses[self.qname] = answer.response
+ continue
+
+ # Build the request
+ request = dns.message.make_query(self.qname, self.rdtype, self.rdclass)
+ if self.resolver.keyname is not None:
+ request.use_tsig(
+ self.resolver.keyring,
+ self.resolver.keyname,
+ algorithm=self.resolver.keyalgorithm,
+ )
+ request.use_edns(
+ self.resolver.edns,
+ self.resolver.ednsflags,
+ self.resolver.payload,
+ options=self.resolver.ednsoptions,
+ )
+ if self.resolver.flags is not None:
+ request.flags = self.resolver.flags
+
+ self.nameservers = self.resolver._enrich_nameservers(
+ self.resolver._nameservers,
+ self.resolver.nameserver_ports,
+ self.resolver.port,
+ )
+ if self.resolver.rotate:
+ random.shuffle(self.nameservers)
+ self.current_nameservers = self.nameservers[:]
+ self.errors = []
+ self.nameserver = None
+ self.tcp_attempt = False
+ self.retry_with_tcp = False
+ self.request = request
+ self.backoff = 0.10
+
+ return (request, None)
+
+ #
+ # We've tried everything and only gotten NXDOMAINs. (We know
+ # it's only NXDOMAINs as anything else would have returned
+ # before now.)
+ #
+ raise NXDOMAIN(qnames=self.qnames_to_try, responses=self.nxdomain_responses)
+
+ def next_nameserver(self) -> Tuple[dns.nameserver.Nameserver, bool, float]:
+ if self.retry_with_tcp:
+ assert self.nameserver is not None
+ assert not self.nameserver.is_always_max_size()
+ self.tcp_attempt = True
+ self.retry_with_tcp = False
+ return (self.nameserver, True, 0)
+
+ backoff = 0.0
+ if not self.current_nameservers:
+ if len(self.nameservers) == 0:
+ # Out of things to try!
+ raise NoNameservers(request=self.request, errors=self.errors)
+ self.current_nameservers = self.nameservers[:]
+ backoff = self.backoff
+ self.backoff = min(self.backoff * 2, 2)
+
+ self.nameserver = self.current_nameservers.pop(0)
+ self.tcp_attempt = self.tcp or self.nameserver.is_always_max_size()
+ return (self.nameserver, self.tcp_attempt, backoff)
+
+ def query_result(
+ self, response: dns.message.Message | None, ex: Exception | None
+ ) -> Tuple[Answer | None, bool]:
+ #
+ # returns an (answer: Answer, end_loop: bool) tuple.
+ #
+ assert self.nameserver is not None
+ if ex:
+ # Exception during I/O or from_wire()
+ assert response is None
+ self.errors.append(
+ (
+ str(self.nameserver),
+ self.tcp_attempt,
+ self.nameserver.answer_port(),
+ ex,
+ response,
+ )
+ )
+ if (
+ isinstance(ex, dns.exception.FormError)
+ or isinstance(ex, EOFError)
+ or isinstance(ex, OSError)
+ or isinstance(ex, NotImplementedError)
+ ):
+ # This nameserver is no good, take it out of the mix.
+ self.nameservers.remove(self.nameserver)
+ elif isinstance(ex, dns.message.Truncated):
+ if self.tcp_attempt:
+ # Truncation with TCP is no good!
+ self.nameservers.remove(self.nameserver)
+ else:
+ self.retry_with_tcp = True
+ return (None, False)
+ # We got an answer!
+ assert response is not None
+ assert isinstance(response, dns.message.QueryMessage)
+ rcode = response.rcode()
+ if rcode == dns.rcode.NOERROR:
+ try:
+ answer = Answer(
+ self.qname,
+ self.rdtype,
+ self.rdclass,
+ response,
+ self.nameserver.answer_nameserver(),
+ self.nameserver.answer_port(),
+ )
+ except Exception as e:
+ self.errors.append(
+ (
+ str(self.nameserver),
+ self.tcp_attempt,
+ self.nameserver.answer_port(),
+ e,
+ response,
+ )
+ )
+ # The nameserver is no good, take it out of the mix.
+ self.nameservers.remove(self.nameserver)
+ return (None, False)
+ if self.resolver.cache:
+ self.resolver.cache.put((self.qname, self.rdtype, self.rdclass), answer)
+ if answer.rrset is None and self.raise_on_no_answer:
+ raise NoAnswer(response=answer.response)
+ return (answer, True)
+ elif rcode == dns.rcode.NXDOMAIN:
+ # Further validate the response by making an Answer, even
+ # if we aren't going to cache it.
+ try:
+ answer = Answer(
+ self.qname, dns.rdatatype.ANY, dns.rdataclass.IN, response
+ )
+ except Exception as e:
+ self.errors.append(
+ (
+ str(self.nameserver),
+ self.tcp_attempt,
+ self.nameserver.answer_port(),
+ e,
+ response,
+ )
+ )
+ # The nameserver is no good, take it out of the mix.
+ self.nameservers.remove(self.nameserver)
+ return (None, False)
+ self.nxdomain_responses[self.qname] = response
+ if self.resolver.cache:
+ self.resolver.cache.put(
+ (self.qname, dns.rdatatype.ANY, self.rdclass), answer
+ )
+ # Return done=True so the caller breaks its inner loop and
+ # calls next_request().
+ return (None, True)
+ elif rcode == dns.rcode.YXDOMAIN:
+ yex = YXDOMAIN()
+ self.errors.append(
+ (
+ str(self.nameserver),
+ self.tcp_attempt,
+ self.nameserver.answer_port(),
+ yex,
+ response,
+ )
+ )
+ raise yex
+ else:
+ #
+ # We got a response, but we're not happy with the
+ # rcode in it.
+ #
+ if rcode != dns.rcode.SERVFAIL or not self.resolver.retry_servfail:
+ self.nameservers.remove(self.nameserver)
+ self.errors.append(
+ (
+ str(self.nameserver),
+ self.tcp_attempt,
+ self.nameserver.answer_port(),
+ dns.rcode.to_text(rcode),
+ response,
+ )
+ )
+ return (None, False)
+
+
+class BaseResolver:
+ """DNS stub resolver."""
+
+ # We initialize in reset()
+ #
+ # pylint: disable=attribute-defined-outside-init
+
+ domain: dns.name.Name
+ nameserver_ports: Dict[str, int]
+ port: int
+ search: List[dns.name.Name]
+ use_search_by_default: bool
+ timeout: float
+ lifetime: float
+ keyring: Any | None
+ keyname: dns.name.Name | str | None
+ keyalgorithm: dns.name.Name | str
+ edns: int
+ ednsflags: int
+ ednsoptions: List[dns.edns.Option] | None
+ payload: int
+ cache: Any
+ flags: int | None
+ retry_servfail: bool
+ rotate: bool
+ ndots: int | None
+ _nameservers: Sequence[str | dns.nameserver.Nameserver]
+
+ def __init__(
+ self, filename: str = "/etc/resolv.conf", configure: bool = True
+ ) -> None:
+ """*filename*, a ``str`` or file object, specifying a file
+ in standard /etc/resolv.conf format. This parameter is meaningful
+ only when *configure* is true and the platform is POSIX.
+
+ *configure*, a ``bool``. If True (the default), the resolver
+ instance is configured in the normal fashion for the operating
+ system the resolver is running on. (I.e. by reading a
+ /etc/resolv.conf file on POSIX systems and from the registry
+ on Windows systems.)
+ """
+
+ self.reset()
+ if configure:
+ if sys.platform == "win32": # pragma: no cover
+ self.read_registry()
+ elif filename:
+ self.read_resolv_conf(filename)
+
+ def reset(self) -> None:
+ """Reset all resolver configuration to the defaults."""
+
+ self.domain = dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
+ if len(self.domain) == 0: # pragma: no cover
+ self.domain = dns.name.root
+ self._nameservers = []
+ self.nameserver_ports = {}
+ self.port = 53
+ self.search = []
+ self.use_search_by_default = False
+ self.timeout = 2.0
+ self.lifetime = 5.0
+ self.keyring = None
+ self.keyname = None
+ self.keyalgorithm = dns.tsig.default_algorithm
+ self.edns = -1
+ self.ednsflags = 0
+ self.ednsoptions = None
+ self.payload = 0
+ self.cache = None
+ self.flags = None
+ self.retry_servfail = False
+ self.rotate = False
+ self.ndots = None
+
+ def read_resolv_conf(self, f: Any) -> None:
+ """Process *f* as a file in the /etc/resolv.conf format. If f is
+ a ``str``, it is used as the name of the file to open; otherwise it
+ is treated as the file itself.
+
+ Interprets the following items:
+
+ - nameserver - name server IP address
+
+ - domain - local domain name
+
+ - search - search list for host-name lookup
+
+ - options - supported options are rotate, timeout, edns0, and ndots
+
+ """
+
+ nameservers = []
+ if isinstance(f, str):
+ try:
+ cm: contextlib.AbstractContextManager = open(f, encoding="utf-8")
+ except OSError:
+ # /etc/resolv.conf doesn't exist, can't be read, etc.
+ raise NoResolverConfiguration(f"cannot open {f}")
+ else:
+ cm = contextlib.nullcontext(f)
+ with cm as f:
+ for l in f:
+ if len(l) == 0 or l[0] == "#" or l[0] == ";":
+ continue
+ tokens = l.split()
+
+ # Any line containing less than 2 tokens is malformed
+ if len(tokens) < 2:
+ continue
+
+ if tokens[0] == "nameserver":
+ nameservers.append(tokens[1])
+ elif tokens[0] == "domain":
+ self.domain = dns.name.from_text(tokens[1])
+ # domain and search are exclusive
+ self.search = []
+ elif tokens[0] == "search":
+ # the last search wins
+ self.search = []
+ for suffix in tokens[1:]:
+ self.search.append(dns.name.from_text(suffix))
+ # We don't set domain as it is not used if
+ # len(self.search) > 0
+ elif tokens[0] == "options":
+ for opt in tokens[1:]:
+ if opt == "rotate":
+ self.rotate = True
+ elif opt == "edns0":
+ self.use_edns()
+ elif "timeout" in opt:
+ try:
+ self.timeout = int(opt.split(":")[1])
+ except (ValueError, IndexError):
+ pass
+ elif "ndots" in opt:
+ try:
+ self.ndots = int(opt.split(":")[1])
+ except (ValueError, IndexError):
+ pass
+ if len(nameservers) == 0:
+ raise NoResolverConfiguration("no nameservers")
+ # Assigning directly instead of appending means we invoke the
+ # setter logic, with additional checking and enrichment.
+ self.nameservers = nameservers
+
+ def read_registry(self) -> None: # pragma: no cover
+ """Extract resolver configuration from the Windows registry."""
+ try:
+ info = dns.win32util.get_dns_info() # type: ignore
+ if info.domain is not None:
+ self.domain = info.domain
+ self.nameservers = info.nameservers
+ self.search = info.search
+ except AttributeError:
+ raise NotImplementedError
+
+ def _compute_timeout(
+ self,
+ start: float,
+ lifetime: float | None = None,
+ errors: List[ErrorTuple] | None = None,
+ ) -> float:
+ lifetime = self.lifetime if lifetime is None else lifetime
+ now = time.time()
+ duration = now - start
+ if errors is None:
+ errors = []
+ if duration < 0:
+ if duration < -1:
+ # Time going backwards is bad. Just give up.
+ raise LifetimeTimeout(timeout=duration, errors=errors)
+ else:
+ # Time went backwards, but only a little. This can
+ # happen, e.g. under vmware with older linux kernels.
+ # Pretend it didn't happen.
+ duration = 0
+ if duration >= lifetime:
+ raise LifetimeTimeout(timeout=duration, errors=errors)
+ return min(lifetime - duration, self.timeout)
+
+ def _get_qnames_to_try(
+ self, qname: dns.name.Name, search: bool | None
+ ) -> List[dns.name.Name]:
+ # This is a separate method so we can unit test the search
+ # rules without requiring the Internet.
+ if search is None:
+ search = self.use_search_by_default
+ qnames_to_try = []
+ if qname.is_absolute():
+ qnames_to_try.append(qname)
+ else:
+ abs_qname = qname.concatenate(dns.name.root)
+ if search:
+ if len(self.search) > 0:
+ # There is a search list, so use it exclusively
+ search_list = self.search[:]
+ elif self.domain != dns.name.root and self.domain is not None:
+ # We have some notion of a domain that isn't the root, so
+ # use it as the search list.
+ search_list = [self.domain]
+ else:
+ search_list = []
+ # Figure out the effective ndots (default is 1)
+ if self.ndots is None:
+ ndots = 1
+ else:
+ ndots = self.ndots
+ for suffix in search_list:
+ qnames_to_try.append(qname + suffix)
+ if len(qname) > ndots:
+ # The name has at least ndots dots, so we should try an
+ # absolute query first.
+ qnames_to_try.insert(0, abs_qname)
+ else:
+ # The name has less than ndots dots, so we should search
+ # first, then try the absolute name.
+ qnames_to_try.append(abs_qname)
+ else:
+ qnames_to_try.append(abs_qname)
+ return qnames_to_try
+
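+ # A sketch of the search rules (hypothetical configuration with
+ # search = [example.com.] and ndots = 1):
+ #
+ #     "www"      -> ["www.example.com.", "www."]
+ #     "www.corp" -> ["www.corp.", "www.corp.example.com."]
+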
+ def use_tsig(
+ self,
+ keyring: Any,
+ keyname: dns.name.Name | str | None = None,
+ algorithm: dns.name.Name | str = dns.tsig.default_algorithm,
+ ) -> None:
+ """Add a TSIG signature to each query.
+
+ The parameters are passed to ``dns.message.Message.use_tsig()``;
+ see its documentation for details.
+ """
+
+ self.keyring = keyring
+ self.keyname = keyname
+ self.keyalgorithm = algorithm
+
+ def use_edns(
+ self,
+ edns: int | bool | None = 0,
+ ednsflags: int = 0,
+ payload: int = dns.message.DEFAULT_EDNS_PAYLOAD,
+ options: List[dns.edns.Option] | None = None,
+ ) -> None:
+ """Configure EDNS behavior.
+
+ *edns*, an ``int``, is the EDNS level to use. Specifying
+ ``None``, ``False``, or ``-1`` means "do not use EDNS", and in this case
+ the other parameters are ignored. Specifying ``True`` is
+ equivalent to specifying 0, i.e. "use EDNS0".
+
+ *ednsflags*, an ``int``, the EDNS flag values.
+
+ *payload*, an ``int``, is the EDNS sender's payload field, which is the
+ maximum size of UDP datagram the sender can handle. I.e. how big
+ a response to this message can be.
+
+ *options*, a list of ``dns.edns.Option`` objects or ``None``, the EDNS
+ options.
+ """
+
+ if edns is None or edns is False:
+ edns = -1
+ elif edns is True:
+ edns = 0
+ self.edns = edns
+ self.ednsflags = ednsflags
+ self.payload = payload
+ self.ednsoptions = options
+
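+ # Sketch: enable EDNS0 and advertise a 1232-byte UDP payload (the size
+ # here is illustrative, not a library default):
+ #
+ #     resolver.use_edns(0, 0, 1232)
+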
+ def set_flags(self, flags: int) -> None:
+ """Overrides the default flags with your own.
+
+ *flags*, an ``int``, the message flags to use.
+ """
+
+ self.flags = flags
+
+ @classmethod
+ def _enrich_nameservers(
+ cls,
+ nameservers: Sequence[str | dns.nameserver.Nameserver],
+ nameserver_ports: Dict[str, int],
+ default_port: int,
+ ) -> List[dns.nameserver.Nameserver]:
+ enriched_nameservers = []
+ if isinstance(nameservers, list | tuple):
+ for nameserver in nameservers:
+ enriched_nameserver: dns.nameserver.Nameserver
+ if isinstance(nameserver, dns.nameserver.Nameserver):
+ enriched_nameserver = nameserver
+ elif dns.inet.is_address(nameserver):
+ port = nameserver_ports.get(nameserver, default_port)
+ enriched_nameserver = dns.nameserver.Do53Nameserver(
+ nameserver, port
+ )
+ else:
+ try:
+ if urlparse(nameserver).scheme != "https":
+ raise NotImplementedError
+ except Exception:
+ raise ValueError(
+ f"nameserver {nameserver} is not a "
+ "dns.nameserver.Nameserver instance or text form, "
+ "IP address, nor a valid https URL"
+ )
+ enriched_nameserver = dns.nameserver.DoHNameserver(nameserver)
+ enriched_nameservers.append(enriched_nameserver)
+ else:
+ raise ValueError(
+ f"nameservers must be a list or tuple (not a {type(nameservers)})"
+ )
+ return enriched_nameservers
+
+ @property
+ def nameservers(
+ self,
+ ) -> Sequence[str | dns.nameserver.Nameserver]:
+ return self._nameservers
+
+ @nameservers.setter
+ def nameservers(
+ self, nameservers: Sequence[str | dns.nameserver.Nameserver]
+ ) -> None:
+ """
+ *nameservers*, a ``list`` or ``tuple`` of nameservers, where a nameserver
+ is either a string (an IP address or an https URL) or a
+ ``dns.nameserver.Nameserver`` instance.
+
+ Raises ``ValueError`` if *nameservers* is not a list of nameservers.
+ """
+ # We just call _enrich_nameservers() for checking
+ self._enrich_nameservers(nameservers, self.nameserver_ports, self.port)
+ self._nameservers = nameservers
+
+
+class Resolver(BaseResolver):
+ """DNS stub resolver."""
+
+ def resolve(
+ self,
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+ search: bool | None = None,
+ ) -> Answer: # pylint: disable=arguments-differ
+ """Query nameservers to find the answer to the question.
+
+ The *qname*, *rdtype*, and *rdclass* parameters may be objects
+ of the appropriate type, or strings that can be converted into objects
+ of the appropriate type.
+
+ *qname*, a ``dns.name.Name`` or ``str``, the query name.
+
+ *rdtype*, an ``int`` or ``str``, the query type.
+
+ *rdclass*, an ``int`` or ``str``, the query class.
+
+ *tcp*, a ``bool``. If ``True``, use TCP to make the query.
+
+ *source*, a ``str`` or ``None``. If not ``None``, bind to this IP
+ address when making queries.
+
+ *raise_on_no_answer*, a ``bool``. If ``True``, raise
+ ``dns.resolver.NoAnswer`` if there's no answer to the question.
+
+ *source_port*, an ``int``, the port from which to send the message.
+
+ *lifetime*, a ``float``, how many seconds a query should run
+ before timing out.
+
+ *search*, a ``bool`` or ``None``, determines whether the
+ search list configured in the system's resolver configuration
+ is used for relative names, and whether the resolver's domain
+ may be added to relative names. The default is ``None``,
+ which causes the value of the resolver's
+ ``use_search_by_default`` attribute to be used.
+
+ Raises ``dns.resolver.LifetimeTimeout`` if no answers could be found
+ in the specified lifetime.
+
+ Raises ``dns.resolver.NXDOMAIN`` if the query name does not exist.
+
+ Raises ``dns.resolver.YXDOMAIN`` if the query name is too long after
+ DNAME substitution.
+
+ Raises ``dns.resolver.NoAnswer`` if *raise_on_no_answer* is
+ ``True`` and the query name exists but has no RRset of the
+ desired type and class.
+
+ Raises ``dns.resolver.NoNameservers`` if no non-broken
+ nameservers are available to answer the question.
+
+ Returns a ``dns.resolver.Answer`` instance.
+
+ """
+
+ resolution = _Resolution(
+ self, qname, rdtype, rdclass, tcp, raise_on_no_answer, search
+ )
+ start = time.time()
+ while True:
+ (request, answer) = resolution.next_request()
+ # Note we need to say "if answer is not None" and not just
+ # "if answer" because answer implements __len__, and python
+ # will call that. We want to return if we have an answer
+ # object, including in cases where its length is 0.
+ if answer is not None:
+ # cache hit!
+ return answer
+ assert request is not None # needed for type checking
+ done = False
+ while not done:
+ (nameserver, tcp, backoff) = resolution.next_nameserver()
+ if backoff:
+ time.sleep(backoff)
+ timeout = self._compute_timeout(start, lifetime, resolution.errors)
+ try:
+ response = nameserver.query(
+ request,
+ timeout=timeout,
+ source=source,
+ source_port=source_port,
+ max_size=tcp,
+ )
+ except Exception as ex:
+ (_, done) = resolution.query_result(None, ex)
+ continue
+ (answer, done) = resolution.query_result(response, None)
+ # Note we need to say "if answer is not None" and not just
+ # "if answer" because answer implements __len__, and python
+ # will call that. We want to return if we have an answer
+ # object, including in cases where its length is 0.
+ if answer is not None:
+ return answer
+
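+ # A short usage sketch (the query name is a placeholder):
+ #
+ #     resolver = Resolver()
+ #     answer = resolver.resolve("www.example.com", "MX")
+ #     for rdata in answer:
+ #         print(rdata.preference, rdata.exchange)
+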
+ def query(
+ self,
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+ ) -> Answer: # pragma: no cover
+ """Query nameservers to find the answer to the question.
+
+ This method calls resolve() with ``search=True``, and is
+ provided for backwards compatibility with prior versions of
+ dnspython. See the documentation for the resolve() method for
+ further details.
+ """
+ warnings.warn(
+ "please use dns.resolver.Resolver.resolve() instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.resolve(
+ qname,
+ rdtype,
+ rdclass,
+ tcp,
+ source,
+ raise_on_no_answer,
+ source_port,
+ lifetime,
+ True,
+ )
+
+ def resolve_address(self, ipaddr: str, *args: Any, **kwargs: Any) -> Answer:
+ """Use a resolver to run a reverse query for PTR records.
+
+ This utilizes the resolve() method to perform a PTR lookup on the
+ specified IP address.
+
+ *ipaddr*, a ``str``, the IPv4 or IPv6 address you want to get
+ the PTR record for.
+
+ All other arguments that can be passed to the resolve() function
+ except for rdtype and rdclass are also supported by this
+ function.
+ """
+ # We make a modified kwargs for type checking happiness, as otherwise
+ # we get a legit warning about possibly having rdtype and rdclass
+ # in the kwargs more than once.
+ modified_kwargs: Dict[str, Any] = {}
+ modified_kwargs.update(kwargs)
+ modified_kwargs["rdtype"] = dns.rdatatype.PTR
+ modified_kwargs["rdclass"] = dns.rdataclass.IN
+ return self.resolve(
+ dns.reversename.from_address(ipaddr), *args, **modified_kwargs
+ )
+
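A short sketch of a reverse lookup built on this helper (the address is illustrative; the call issues a live PTR query):

```python
import dns.resolver

res = dns.resolver.Resolver()
answer = res.resolve_address("192.0.2.1")  # PTR lookup for an IPv4 address
for ptr in answer:
    print(ptr.target)  # the hostname the PTR record points at
```
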
+ def resolve_name(
+ self,
+ name: dns.name.Name | str,
+ family: int = socket.AF_UNSPEC,
+ **kwargs: Any,
+ ) -> HostAnswers:
+ """Use a resolver to query for address records.
+
+ This utilizes the resolve() method to perform A and/or AAAA lookups on
+ the specified name.
+
+        *name*, a ``dns.name.Name`` or ``str``, the name to resolve.
+
+ *family*, an ``int``, the address family. If socket.AF_UNSPEC
+ (the default), both A and AAAA records will be retrieved.
+
+ All other arguments that can be passed to the resolve() function
+ except for rdtype and rdclass are also supported by this
+ function.
+ """
+ # We make a modified kwargs for type checking happiness, as otherwise
+ # we get a legit warning about possibly having rdtype and rdclass
+ # in the kwargs more than once.
+ modified_kwargs: Dict[str, Any] = {}
+ modified_kwargs.update(kwargs)
+ modified_kwargs.pop("rdtype", None)
+ modified_kwargs["rdclass"] = dns.rdataclass.IN
+
+ if family == socket.AF_INET:
+ v4 = self.resolve(name, dns.rdatatype.A, **modified_kwargs)
+ return HostAnswers.make(v4=v4)
+ elif family == socket.AF_INET6:
+ v6 = self.resolve(name, dns.rdatatype.AAAA, **modified_kwargs)
+ return HostAnswers.make(v6=v6)
+ elif family != socket.AF_UNSPEC: # pragma: no cover
+ raise NotImplementedError(f"unknown address family {family}")
+
+ raise_on_no_answer = modified_kwargs.pop("raise_on_no_answer", True)
+ lifetime = modified_kwargs.pop("lifetime", None)
+ start = time.time()
+ v6 = self.resolve(
+ name,
+ dns.rdatatype.AAAA,
+ raise_on_no_answer=False,
+ lifetime=self._compute_timeout(start, lifetime),
+ **modified_kwargs,
+ )
+        # Note that setting name ensures we query the same name
+        # for A as we did for AAAA.  (This is just in case search lists
+        # are active by default in the resolver configuration and
+        # we might be talking to a server that says NXDOMAIN when it
+        # wants to say NOERROR no data.)
+ name = v6.qname
+ v4 = self.resolve(
+ name,
+ dns.rdatatype.A,
+ raise_on_no_answer=False,
+ lifetime=self._compute_timeout(start, lifetime),
+ **modified_kwargs,
+ )
+ answers = HostAnswers.make(v6=v6, v4=v4, add_empty=not raise_on_no_answer)
+ if not answers:
+ raise NoAnswer(response=v6.response)
+ return answers
+
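A sketch of a dual-stack address lookup using this method (the name is illustrative and the call performs live queries):

```python
import dns.resolver

res = dns.resolver.Resolver()
answers = res.resolve_name("www.example.com")  # A and AAAA by default
for address in answers.addresses():  # yields address strings
    print(address)
```
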
+ # pylint: disable=redefined-outer-name
+
+ def canonical_name(self, name: dns.name.Name | str) -> dns.name.Name:
+ """Determine the canonical name of *name*.
+
+ The canonical name is the name the resolver uses for queries
+ after all CNAME and DNAME renamings have been applied.
+
+ *name*, a ``dns.name.Name`` or ``str``, the query name.
+
+ This method can raise any exception that ``resolve()`` can
+ raise, other than ``dns.resolver.NoAnswer`` and
+ ``dns.resolver.NXDOMAIN``.
+
+ Returns a ``dns.name.Name``.
+ """
+ try:
+ answer = self.resolve(name, raise_on_no_answer=False)
+ canonical_name = answer.canonical_name
+ except NXDOMAIN as e:
+ canonical_name = e.canonical_name
+ return canonical_name
+
+ # pylint: enable=redefined-outer-name
+
+ def try_ddr(self, lifetime: float = 5.0) -> None:
+ """Try to update the resolver's nameservers using Discovery of Designated
+ Resolvers (DDR). If successful, the resolver will subsequently use
+ DNS-over-HTTPS or DNS-over-TLS for future queries.
+
+ *lifetime*, a float, is the maximum time to spend attempting DDR. The default
+ is 5 seconds.
+
+ If the SVCB query is successful and results in a non-empty list of nameservers,
+ then the resolver's nameservers are set to the returned servers in priority
+ order.
+
+        The current implementation does not use any address hints from the SVCB record,
+        nor does it resolve addresses for the SVCB target name; rather, it assumes that
+        the bootstrap nameserver will always be one of the addresses and uses it.
+        A future revision to the code may offer fuller support. The code verifies that
+        the bootstrap nameserver is in the Subject Alternative Name field of the
+        TLS certificate.
+ """
+ try:
+ expiration = time.time() + lifetime
+ answer = self.resolve(
+ dns._ddr._local_resolver_name, "SVCB", lifetime=lifetime
+ )
+ timeout = dns.query._remaining(expiration)
+ nameservers = dns._ddr._get_nameservers_sync(answer, timeout)
+ if len(nameservers) > 0:
+ self.nameservers = nameservers
+ except Exception: # pragma: no cover
+ pass
+
+
+#: The default resolver.
+default_resolver: Resolver | None = None
+
+
+def get_default_resolver() -> Resolver:
+ """Get the default resolver, initializing it if necessary."""
+ if default_resolver is None:
+ reset_default_resolver()
+ assert default_resolver is not None
+ return default_resolver
+
+
+def reset_default_resolver() -> None:
+ """Re-initialize default resolver.
+
+ Note that the resolver configuration (i.e. /etc/resolv.conf on UNIX
+ systems) will be re-read immediately.
+ """
+
+ global default_resolver
+ default_resolver = Resolver()
+
+
+def resolve(
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+ search: bool | None = None,
+) -> Answer: # pragma: no cover
+ """Query nameservers to find the answer to the question.
+
+ This is a convenience function that uses the default resolver
+ object to make the query.
+
+ See ``dns.resolver.Resolver.resolve`` for more information on the
+ parameters.
+ """
+
+ return get_default_resolver().resolve(
+ qname,
+ rdtype,
+ rdclass,
+ tcp,
+ source,
+ raise_on_no_answer,
+ source_port,
+ lifetime,
+ search,
+ )
+
+
+def query(
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+) -> Answer: # pragma: no cover
+ """Query nameservers to find the answer to the question.
+
+    This function calls resolve() with ``search=True``, and is
+    provided for backwards compatibility with prior versions of
+    dnspython. See the documentation for the resolve() method for
+ further details.
+ """
+ warnings.warn(
+ "please use dns.resolver.resolve() instead", DeprecationWarning, stacklevel=2
+ )
+ return resolve(
+ qname,
+ rdtype,
+ rdclass,
+ tcp,
+ source,
+ raise_on_no_answer,
+ source_port,
+ lifetime,
+ True,
+ )
+
+
+def resolve_address(ipaddr: str, *args: Any, **kwargs: Any) -> Answer:
+ """Use a resolver to run a reverse query for PTR records.
+
+ See ``dns.resolver.Resolver.resolve_address`` for more information on the
+ parameters.
+ """
+
+ return get_default_resolver().resolve_address(ipaddr, *args, **kwargs)
+
+
+def resolve_name(
+ name: dns.name.Name | str, family: int = socket.AF_UNSPEC, **kwargs: Any
+) -> HostAnswers:
+ """Use a resolver to query for address records.
+
+ See ``dns.resolver.Resolver.resolve_name`` for more information on the
+ parameters.
+ """
+
+ return get_default_resolver().resolve_name(name, family, **kwargs)
+
+
+def canonical_name(name: dns.name.Name | str) -> dns.name.Name:
+ """Determine the canonical name of *name*.
+
+ See ``dns.resolver.Resolver.canonical_name`` for more information on the
+ parameters and possible exceptions.
+ """
+
+ return get_default_resolver().canonical_name(name)
+
+
+def try_ddr(lifetime: float = 5.0) -> None: # pragma: no cover
+ """Try to update the default resolver's nameservers using Discovery of Designated
+ Resolvers (DDR). If successful, the resolver will subsequently use
+ DNS-over-HTTPS or DNS-over-TLS for future queries.
+
+ See :py:func:`dns.resolver.Resolver.try_ddr` for more information.
+ """
+ return get_default_resolver().try_ddr(lifetime)
+
+
+def zone_for_name(
+ name: dns.name.Name | str,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ tcp: bool = False,
+ resolver: Resolver | None = None,
+ lifetime: float | None = None,
+) -> dns.name.Name: # pyright: ignore[reportReturnType]
+ """Find the name of the zone which contains the specified name.
+
+ *name*, an absolute ``dns.name.Name`` or ``str``, the query name.
+
+ *rdclass*, an ``int``, the query class.
+
+ *tcp*, a ``bool``. If ``True``, use TCP to make the query.
+
+ *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use.
+ If ``None``, the default, then the default resolver is used.
+
+ *lifetime*, a ``float``, the total time to allow for the queries needed
+ to determine the zone. If ``None``, the default, then only the individual
+ query limits of the resolver apply.
+
+ Raises ``dns.resolver.NoRootSOA`` if there is no SOA RR at the DNS
+ root. (This is only likely to happen if you're using non-default
+ root servers in your network and they are misconfigured.)
+
+ Raises ``dns.resolver.LifetimeTimeout`` if the answer could not be
+ found in the allotted lifetime.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, dns.name.root)
+ if resolver is None:
+ resolver = get_default_resolver()
+ if not name.is_absolute():
+ raise NotAbsolute(name)
+ start = time.time()
+ expiration: float | None
+ if lifetime is not None:
+ expiration = start + lifetime
+ else:
+ expiration = None
+    while True:
+ try:
+ rlifetime: float | None
+ if expiration is not None:
+ rlifetime = expiration - time.time()
+ if rlifetime <= 0:
+ rlifetime = 0
+ else:
+ rlifetime = None
+ answer = resolver.resolve(
+ name, dns.rdatatype.SOA, rdclass, tcp, lifetime=rlifetime
+ )
+ assert answer.rrset is not None
+ if answer.rrset.name == name:
+ return name
+ # otherwise we were CNAMEd or DNAMEd and need to look higher
+ except (NXDOMAIN, NoAnswer) as e:
+ if isinstance(e, NXDOMAIN):
+ response = e.responses().get(name)
+ else:
+ response = e.response() # pylint: disable=no-value-for-parameter
+ if response:
+ for rrs in response.authority:
+ if rrs.rdtype == dns.rdatatype.SOA and rrs.rdclass == rdclass:
+ (nr, _, _) = rrs.name.fullcompare(name)
+ if nr == dns.name.NAMERELN_SUPERDOMAIN:
+ # We're doing a proper superdomain check as
+ # if the name were equal we ought to have gotten
+ # it in the answer section! We are ignoring the
+ # possibility that the authority is insane and
+ # is including multiple SOA RRs for different
+ # authorities.
+ return rrs.name
+ # we couldn't extract anything useful from the response (e.g. it's
+ # a type 3 NXDOMAIN)
+ try:
+ name = name.parent()
+ except dns.name.NoParent:
+ raise NoRootSOA
+
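A quick sketch of the zone walk this function performs (the name is illustrative; the call issues live SOA queries while ascending toward the root):

```python
import dns.resolver

# Walks upward from the query name, issuing SOA queries until the
# enclosing zone is found.
zone = dns.resolver.zone_for_name("www.example.com")
print(zone)  # expected to print the zone apex, e.g. example.com.
```
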
+
+def make_resolver_at(
+ where: dns.name.Name | str,
+ port: int = 53,
+ family: int = socket.AF_UNSPEC,
+ resolver: Resolver | None = None,
+) -> Resolver:
+ """Make a stub resolver using the specified destination as the full resolver.
+
+    *where*, a ``dns.name.Name`` or ``str``, the domain name or IP address of the
+    full resolver.
+
+ *port*, an ``int``, the port to use. If not specified, the default is 53.
+
+ *family*, an ``int``, the address family to use. This parameter is used if
+ *where* is not an address. The default is ``socket.AF_UNSPEC`` in which case
+ the first address returned by ``resolve_name()`` will be used, otherwise the
+ first address of the specified family will be used.
+
+ *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use for
+ resolution of hostnames. If not specified, the default resolver will be used.
+
+ Returns a ``dns.resolver.Resolver`` or raises an exception.
+ """
+ if resolver is None:
+ resolver = get_default_resolver()
+ nameservers: List[str | dns.nameserver.Nameserver] = []
+ if isinstance(where, str) and dns.inet.is_address(where):
+ nameservers.append(dns.nameserver.Do53Nameserver(where, port))
+ else:
+ for address in resolver.resolve_name(where, family).addresses():
+ nameservers.append(dns.nameserver.Do53Nameserver(address, port))
+ res = Resolver(configure=False)
+ res.nameservers = nameservers
+ return res
+
+
+def resolve_at(
+ where: dns.name.Name | str,
+ qname: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.A,
+ rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ tcp: bool = False,
+ source: str | None = None,
+ raise_on_no_answer: bool = True,
+ source_port: int = 0,
+ lifetime: float | None = None,
+ search: bool | None = None,
+ port: int = 53,
+ family: int = socket.AF_UNSPEC,
+ resolver: Resolver | None = None,
+) -> Answer:
+ """Query nameservers to find the answer to the question.
+
+ This is a convenience function that calls ``dns.resolver.make_resolver_at()`` to
+ make a resolver, and then uses it to resolve the query.
+
+ See ``dns.resolver.Resolver.resolve`` for more information on the resolution
+ parameters, and ``dns.resolver.make_resolver_at`` for information about the resolver
+ parameters *where*, *port*, *family*, and *resolver*.
+
+ If making more than one query, it is more efficient to call
+ ``dns.resolver.make_resolver_at()`` and then use that resolver for the queries
+ instead of calling ``resolve_at()`` multiple times.
+ """
+ return make_resolver_at(where, port, family, resolver).resolve(
+ qname,
+ rdtype,
+ rdclass,
+ tcp,
+ source,
+ raise_on_no_answer,
+ source_port,
+ lifetime,
+ search,
+ )
+
+
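A sketch of both entry points (server address and names are illustrative); as the docstring notes, building the resolver once is cheaper for repeated queries:

```python
import dns.resolver

# One-off query against a specific server.
answer = dns.resolver.resolve_at("192.0.2.53", "example.com", "A")

# For repeated queries, build the stub resolver once and reuse it.
res = dns.resolver.make_resolver_at("192.0.2.53")
a = res.resolve("example.com", "A")
aaaa = res.resolve("example.com", "AAAA")
```
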
+#
+# Support for overriding the system resolver for all python code in the
+# running process.
+#
+
+_protocols_for_socktype: Dict[Any, List[Any]] = {
+ socket.SOCK_DGRAM: [socket.SOL_UDP],
+ socket.SOCK_STREAM: [socket.SOL_TCP],
+}
+
+_resolver: Resolver | None = None
+_original_getaddrinfo = socket.getaddrinfo
+_original_getnameinfo = socket.getnameinfo
+_original_getfqdn = socket.getfqdn
+_original_gethostbyname = socket.gethostbyname
+_original_gethostbyname_ex = socket.gethostbyname_ex
+_original_gethostbyaddr = socket.gethostbyaddr
+
+
+def _getaddrinfo(
+ host=None, service=None, family=socket.AF_UNSPEC, socktype=0, proto=0, flags=0
+):
+ if flags & socket.AI_NUMERICHOST != 0:
+ # Short circuit directly into the system's getaddrinfo(). We're
+ # not adding any value in this case, and this avoids infinite loops
+ # because dns.query.* needs to call getaddrinfo() for IPv6 scoping
+ # reasons. We will also do this short circuit below if we
+ # discover that the host is an address literal.
+ return _original_getaddrinfo(host, service, family, socktype, proto, flags)
+ if flags & (socket.AI_ADDRCONFIG | socket.AI_V4MAPPED) != 0:
+ # Not implemented. We raise a gaierror as opposed to a
+ # NotImplementedError as it helps callers handle errors more
+ # appropriately. [Issue #316]
+ #
+ # We raise EAI_FAIL as opposed to EAI_SYSTEM because there is
+ # no EAI_SYSTEM on Windows [Issue #416]. We didn't go for
+ # EAI_BADFLAGS as the flags aren't bad, we just don't
+ # implement them.
+ raise socket.gaierror(
+ socket.EAI_FAIL, "Non-recoverable failure in name resolution"
+ )
+ if host is None and service is None:
+ raise socket.gaierror(socket.EAI_NONAME, "Name or service not known")
+ addrs = []
+ canonical_name = None # pylint: disable=redefined-outer-name
+ # Is host None or an address literal? If so, use the system's
+ # getaddrinfo().
+ if host is None:
+ return _original_getaddrinfo(host, service, family, socktype, proto, flags)
+ try:
+ # We don't care about the result of af_for_address(), we're just
+ # calling it so it raises an exception if host is not an IPv4 or
+ # IPv6 address.
+ dns.inet.af_for_address(host)
+ return _original_getaddrinfo(host, service, family, socktype, proto, flags)
+ except Exception:
+ pass
+ # Something needs resolution!
+ try:
+ assert _resolver is not None
+ answers = _resolver.resolve_name(host, family)
+ addrs = answers.addresses_and_families()
+ canonical_name = answers.canonical_name().to_text(True)
+ except NXDOMAIN:
+ raise socket.gaierror(socket.EAI_NONAME, "Name or service not known")
+ except Exception:
+ # We raise EAI_AGAIN here as the failure may be temporary
+ # (e.g. a timeout) and EAI_SYSTEM isn't defined on Windows.
+ # [Issue #416]
+ raise socket.gaierror(socket.EAI_AGAIN, "Temporary failure in name resolution")
+ port = None
+ try:
+ # Is it a port literal?
+ if service is None:
+ port = 0
+ else:
+ port = int(service)
+ except Exception:
+ if flags & socket.AI_NUMERICSERV == 0:
+ try:
+ port = socket.getservbyname(service) # pyright: ignore
+ except Exception:
+ pass
+ if port is None:
+ raise socket.gaierror(socket.EAI_NONAME, "Name or service not known")
+ tuples = []
+ if socktype == 0:
+ socktypes = [socket.SOCK_DGRAM, socket.SOCK_STREAM]
+ else:
+ socktypes = [socktype]
+ if flags & socket.AI_CANONNAME != 0:
+ cname = canonical_name
+ else:
+ cname = ""
+ for addr, af in addrs:
+ for socktype in socktypes:
+ for sockproto in _protocols_for_socktype[socktype]:
+ proto = int(sockproto)
+ addr_tuple = dns.inet.low_level_address_tuple((addr, port), af)
+ tuples.append((af, socktype, proto, cname, addr_tuple))
+ if len(tuples) == 0:
+ raise socket.gaierror(socket.EAI_NONAME, "Name or service not known")
+ return tuples
+
+
+def _getnameinfo(sockaddr, flags=0):
+ host = sockaddr[0]
+ port = sockaddr[1]
+ if len(sockaddr) == 4:
+ scope = sockaddr[3]
+ family = socket.AF_INET6
+ else:
+ scope = None
+ family = socket.AF_INET
+ tuples = _getaddrinfo(host, port, family, socket.SOCK_STREAM, socket.SOL_TCP, 0)
+ if len(tuples) > 1:
+ raise OSError("sockaddr resolved to multiple addresses")
+ addr = tuples[0][4][0]
+ if flags & socket.NI_DGRAM:
+ pname = "udp"
+ else:
+ pname = "tcp"
+ assert isinstance(addr, str)
+ qname = dns.reversename.from_address(addr)
+ if flags & socket.NI_NUMERICHOST == 0:
+ try:
+ assert _resolver is not None
+ answer = _resolver.resolve(qname, "PTR")
+ assert answer.rrset is not None
+ rdata = cast(dns.rdtypes.ANY.PTR.PTR, answer.rrset[0])
+ hostname = rdata.target.to_text(True)
+ except (NXDOMAIN, NoAnswer):
+ if flags & socket.NI_NAMEREQD:
+ raise socket.gaierror(socket.EAI_NONAME, "Name or service not known")
+ hostname = addr
+ if scope is not None:
+ hostname += "%" + str(scope)
+ else:
+ hostname = addr
+ if scope is not None:
+ hostname += "%" + str(scope)
+ if flags & socket.NI_NUMERICSERV:
+ service = str(port)
+ else:
+ service = socket.getservbyport(port, pname)
+ return (hostname, service)
+
+
+def _getfqdn(name=None):
+ if name is None:
+ name = socket.gethostname()
+ try:
+ (name, _, _) = _gethostbyaddr(name)
+ # Python's version checks aliases too, but our gethostbyname
+ # ignores them, so we do so here as well.
+ except Exception: # pragma: no cover
+ pass
+ return name
+
+
+def _gethostbyname(name):
+ return _gethostbyname_ex(name)[2][0]
+
+
+def _gethostbyname_ex(name):
+ aliases = []
+ addresses = []
+ tuples = _getaddrinfo(
+ name, 0, socket.AF_INET, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME
+ )
+ canonical = tuples[0][3]
+ for item in tuples:
+ addresses.append(item[4][0])
+ # XXX we just ignore aliases
+ return (canonical, aliases, addresses)
+
+
+def _gethostbyaddr(ip):
+ try:
+ dns.ipv6.inet_aton(ip)
+ sockaddr = (ip, 80, 0, 0)
+ family = socket.AF_INET6
+ except Exception:
+ try:
+ dns.ipv4.inet_aton(ip)
+ except Exception:
+ raise socket.gaierror(socket.EAI_NONAME, "Name or service not known")
+ sockaddr = (ip, 80)
+ family = socket.AF_INET
+ (name, _) = _getnameinfo(sockaddr, socket.NI_NAMEREQD)
+ aliases = []
+ addresses = []
+ tuples = _getaddrinfo(
+ name, 0, family, socket.SOCK_STREAM, socket.SOL_TCP, socket.AI_CANONNAME
+ )
+ canonical = tuples[0][3]
+ # We only want to include an address from the tuples if it's the
+ # same as the one we asked about. We do this comparison in binary
+ # to avoid any differences in text representations.
+ bin_ip = dns.inet.inet_pton(family, ip)
+ for item in tuples:
+ addr = item[4][0]
+ assert isinstance(addr, str)
+ bin_addr = dns.inet.inet_pton(family, addr)
+ if bin_ip == bin_addr:
+ addresses.append(addr)
+ # XXX we just ignore aliases
+ return (canonical, aliases, addresses)
+
+
+def override_system_resolver(resolver: Resolver | None = None) -> None:
+ """Override the system resolver routines in the socket module with
+ versions which use dnspython's resolver.
+
+ This can be useful in testing situations where you want to control
+ the resolution behavior of python code without having to change
+ the system's resolver settings (e.g. /etc/resolv.conf).
+
+ The resolver to use may be specified; if it's not, the default
+ resolver will be used.
+
+    *resolver*, a ``dns.resolver.Resolver`` or ``None``, the resolver to use.
+ """
+
+ if resolver is None:
+ resolver = get_default_resolver()
+ global _resolver
+ _resolver = resolver
+ socket.getaddrinfo = _getaddrinfo
+ socket.getnameinfo = _getnameinfo
+ socket.getfqdn = _getfqdn
+ socket.gethostbyname = _gethostbyname
+ socket.gethostbyname_ex = _gethostbyname_ex
+ socket.gethostbyaddr = _gethostbyaddr
+
+
+def restore_system_resolver() -> None:
+ """Undo the effects of prior override_system_resolver()."""
+
+ global _resolver
+ _resolver = None
+ socket.getaddrinfo = _original_getaddrinfo
+ socket.getnameinfo = _original_getnameinfo
+ socket.getfqdn = _original_getfqdn
+ socket.gethostbyname = _original_gethostbyname
+ socket.gethostbyname_ex = _original_gethostbyname_ex
+ socket.gethostbyaddr = _original_gethostbyaddr
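A sketch of the override/restore pair in a test-style setting (the nameserver address is illustrative, and the ``try``/``finally`` guarantees the monkey-patch is undone):

```python
import socket

import dns.resolver

res = dns.resolver.Resolver(configure=False)
res.nameservers = ["192.0.2.53"]  # illustrative nameserver address
dns.resolver.override_system_resolver(res)
try:
    # socket.gethostbyname now routes through dnspython's resolver
    print(socket.gethostbyname("example.com"))
finally:
    dns.resolver.restore_system_resolver()  # always undo the override
```
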
diff --git a/tapdown/lib/python3.11/site-packages/dns/reversename.py b/tapdown/lib/python3.11/site-packages/dns/reversename.py
new file mode 100644
index 0000000..60a4e83
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/reversename.py
@@ -0,0 +1,106 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2006-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Reverse Map Names."""
+
+import binascii
+
+import dns.exception
+import dns.ipv4
+import dns.ipv6
+import dns.name
+
+ipv4_reverse_domain = dns.name.from_text("in-addr.arpa.")
+ipv6_reverse_domain = dns.name.from_text("ip6.arpa.")
+
+
+def from_address(
+ text: str,
+ v4_origin: dns.name.Name = ipv4_reverse_domain,
+ v6_origin: dns.name.Name = ipv6_reverse_domain,
+) -> dns.name.Name:
+ """Convert an IPv4 or IPv6 address in textual form into a Name object whose
+ value is the reverse-map domain name of the address.
+
+ *text*, a ``str``, is an IPv4 or IPv6 address in textual form
+ (e.g. '127.0.0.1', '::1')
+
+ *v4_origin*, a ``dns.name.Name`` to append to the labels corresponding to
+ the address if the address is an IPv4 address, instead of the default
+ (in-addr.arpa.)
+
+ *v6_origin*, a ``dns.name.Name`` to append to the labels corresponding to
+ the address if the address is an IPv6 address, instead of the default
+ (ip6.arpa.)
+
+ Raises ``dns.exception.SyntaxError`` if the address is badly formed.
+
+ Returns a ``dns.name.Name``.
+ """
+
+ try:
+ v6 = dns.ipv6.inet_aton(text)
+ if dns.ipv6.is_mapped(v6):
+ parts = [str(byte) for byte in v6[12:]]
+ origin = v4_origin
+ else:
+ parts = [x for x in str(binascii.hexlify(v6).decode())]
+ origin = v6_origin
+ except Exception:
+ parts = [str(byte) for byte in dns.ipv4.inet_aton(text)]
+ origin = v4_origin
+ return dns.name.from_text(".".join(reversed(parts)), origin=origin)
+
+
+def to_address(
+ name: dns.name.Name,
+ v4_origin: dns.name.Name = ipv4_reverse_domain,
+ v6_origin: dns.name.Name = ipv6_reverse_domain,
+) -> str:
+ """Convert a reverse map domain name into textual address form.
+
+ *name*, a ``dns.name.Name``, an IPv4 or IPv6 address in reverse-map name
+ form.
+
+ *v4_origin*, a ``dns.name.Name`` representing the top-level domain for
+ IPv4 addresses, instead of the default (in-addr.arpa.)
+
+    *v6_origin*, a ``dns.name.Name`` representing the top-level domain for
+    IPv6 addresses, instead of the default (ip6.arpa.)
+
+ Raises ``dns.exception.SyntaxError`` if the name does not have a
+ reverse-map form.
+
+ Returns a ``str``.
+ """
+
+ if name.is_subdomain(v4_origin):
+ name = name.relativize(v4_origin)
+ text = b".".join(reversed(name.labels))
+ # run through inet_ntoa() to check syntax and make pretty.
+ return dns.ipv4.inet_ntoa(dns.ipv4.inet_aton(text))
+ elif name.is_subdomain(v6_origin):
+ name = name.relativize(v6_origin)
+ labels = list(reversed(name.labels))
+ parts = []
+ for i in range(0, len(labels), 4):
+ parts.append(b"".join(labels[i : i + 4]))
+ text = b":".join(parts)
+ # run through inet_ntoa() to check syntax and make pretty.
+ return dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(text))
+ else:
+ raise dns.exception.SyntaxError("unknown reverse-map address family")
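The two functions are inverses; a quick local round trip (no network access involved):

```python
import dns.reversename

name = dns.reversename.from_address("127.0.0.1")
print(name)  # 1.0.0.127.in-addr.arpa.
addr = dns.reversename.to_address(name)
print(addr)  # 127.0.0.1
```
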
diff --git a/tapdown/lib/python3.11/site-packages/dns/rrset.py b/tapdown/lib/python3.11/site-packages/dns/rrset.py
new file mode 100644
index 0000000..271ddbe
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/rrset.py
@@ -0,0 +1,287 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS RRsets (an RRset is a named rdataset)"""
+
+from typing import Any, Collection, Dict, cast
+
+import dns.name
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.renderer
+
+
+class RRset(dns.rdataset.Rdataset):
+ """A DNS RRset (named rdataset).
+
+ RRset inherits from Rdataset, and RRsets can be treated as
+ Rdatasets in most cases. There are, however, a few notable
+ exceptions. RRsets have different to_wire() and to_text() method
+ arguments, reflecting the fact that RRsets always have an owner
+ name.
+ """
+
+ __slots__ = ["name", "deleting"]
+
+ def __init__(
+ self,
+ name: dns.name.Name,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ deleting: dns.rdataclass.RdataClass | None = None,
+ ):
+ """Create a new RRset."""
+
+ super().__init__(rdclass, rdtype, covers)
+ self.name = name
+ self.deleting = deleting
+
+ def _clone(self):
+ obj = cast(RRset, super()._clone())
+ obj.name = self.name
+ obj.deleting = self.deleting
+ return obj
+
+ def __repr__(self):
+ if self.covers == 0:
+ ctext = ""
+ else:
+ ctext = "(" + dns.rdatatype.to_text(self.covers) + ")"
+ if self.deleting is not None:
+ dtext = " delete=" + dns.rdataclass.to_text(self.deleting)
+ else:
+ dtext = ""
+        return (
+            f"<DNS {self.name} {dns.rdataclass.to_text(self.rdclass)} "
+            f"{dns.rdatatype.to_text(self.rdtype)}{ctext}{dtext} "
+            f"RRset: {self._rdata_repr()}>"
+        )
+
+ def __str__(self):
+ return self.to_text()
+
+ def __eq__(self, other):
+ if isinstance(other, RRset):
+ if self.name != other.name:
+ return False
+ elif not isinstance(other, dns.rdataset.Rdataset):
+ return False
+ return super().__eq__(other)
+
+ def match(self, *args: Any, **kwargs: Any) -> bool: # type: ignore[override]
+ """Does this rrset match the specified attributes?
+
+ Behaves as :py:func:`full_match()` if the first argument is a
+ ``dns.name.Name``, and as :py:func:`dns.rdataset.Rdataset.match()`
+ otherwise.
+
+ (This behavior fixes a design mistake where the signature of this
+ method became incompatible with that of its superclass. The fix
+ makes RRsets matchable as Rdatasets while preserving backwards
+ compatibility.)
+ """
+ if isinstance(args[0], dns.name.Name):
+ return self.full_match(*args, **kwargs) # type: ignore[arg-type]
+ else:
+ return super().match(*args, **kwargs) # type: ignore[arg-type]
+
+ def full_match(
+ self,
+ name: dns.name.Name,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType,
+ deleting: dns.rdataclass.RdataClass | None = None,
+ ) -> bool:
+ """Returns ``True`` if this rrset matches the specified name, class,
+ type, covers, and deletion state.
+ """
+ if not super().match(rdclass, rdtype, covers):
+ return False
+ if self.name != name or self.deleting != deleting:
+ return False
+ return True
+
+ # pylint: disable=arguments-differ
+
+ def to_text( # type: ignore[override]
+ self,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ **kw: Dict[str, Any],
+ ) -> str:
+ """Convert the RRset into DNS zone file format.
+
+ See ``dns.name.Name.choose_relativity`` for more information
+ on how *origin* and *relativize* determine the way names
+ are emitted.
+
+ Any additional keyword arguments are passed on to the rdata
+ ``to_text()`` method.
+
+ *origin*, a ``dns.name.Name`` or ``None``, the origin for relative
+ names.
+
+ *relativize*, a ``bool``. If ``True``, names will be relativized
+ to *origin*.
+ """
+
+ return super().to_text(
+ self.name, origin, relativize, self.deleting, **kw # type: ignore
+ )
+
+ def to_wire( # type: ignore[override]
+ self,
+ file: Any,
+ compress: dns.name.CompressType | None = None, # type: ignore
+ origin: dns.name.Name | None = None,
+ **kw: Dict[str, Any],
+ ) -> int:
+ """Convert the RRset to wire format.
+
+ All keyword arguments are passed to ``dns.rdataset.to_wire()``; see
+ that function for details.
+
+ Returns an ``int``, the number of records emitted.
+ """
+
+ return super().to_wire(
+ self.name, file, compress, origin, self.deleting, **kw # type:ignore
+ )
+
+ # pylint: enable=arguments-differ
+
+ def to_rdataset(self) -> dns.rdataset.Rdataset:
+ """Convert an RRset into an Rdataset.
+
+ Returns a ``dns.rdataset.Rdataset``.
+ """
+ return dns.rdataset.from_rdata_list(self.ttl, list(self))
+
+
+def from_text_list(
+ name: dns.name.Name | str,
+ ttl: int,
+ rdclass: dns.rdataclass.RdataClass | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ text_rdatas: Collection[str],
+ idna_codec: dns.name.IDNACodec | None = None,
+ origin: dns.name.Name | None = None,
+ relativize: bool = True,
+ relativize_to: dns.name.Name | None = None,
+) -> RRset:
+ """Create an RRset with the specified name, TTL, class, and type, and with
+ the specified list of rdatas in text format.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder to use; if ``None``, the default IDNA 2003
+ encoder/decoder is used.
+
+ *origin*, a ``dns.name.Name`` (or ``None``), the
+ origin to use for relative names.
+
+ *relativize*, a ``bool``. If true, name will be relativized.
+
+ *relativize_to*, a ``dns.name.Name`` (or ``None``), the origin to use
+ when relativizing names. If not set, the *origin* value will be used.
+
+ Returns a ``dns.rrset.RRset`` object.
+ """
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None, idna_codec=idna_codec)
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ r = RRset(name, rdclass, rdtype)
+ r.update_ttl(ttl)
+ for t in text_rdatas:
+ rd = dns.rdata.from_text(
+ r.rdclass, r.rdtype, t, origin, relativize, relativize_to, idna_codec
+ )
+ r.add(rd)
+ return r
+
+
+def from_text(
+ name: dns.name.Name | str,
+ ttl: int,
+ rdclass: dns.rdataclass.RdataClass | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ *text_rdatas: Any,
+) -> RRset:
+ """Create an RRset with the specified name, TTL, class, and type and with
+ the specified rdatas in text format.
+
+ Returns a ``dns.rrset.RRset`` object.
+ """
+
+ return from_text_list(
+ name, ttl, rdclass, rdtype, cast(Collection[str], text_rdatas)
+ )
+
+
+def from_rdata_list(
+ name: dns.name.Name | str,
+ ttl: int,
+ rdatas: Collection[dns.rdata.Rdata],
+ idna_codec: dns.name.IDNACodec | None = None,
+) -> RRset:
+ """Create an RRset with the specified name and TTL, and with
+ the specified list of rdata objects.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder to use; if ``None``, the default IDNA 2003
+ encoder/decoder is used.
+
+ Returns a ``dns.rrset.RRset`` object.
+
+ """
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None, idna_codec=idna_codec)
+
+ if len(rdatas) == 0:
+ raise ValueError("rdata list must not be empty")
+ r = None
+ for rd in rdatas:
+ if r is None:
+ r = RRset(name, rd.rdclass, rd.rdtype)
+ r.update_ttl(ttl)
+ r.add(rd)
+ assert r is not None
+ return r
+
+
+def from_rdata(name: dns.name.Name | str, ttl: int, *rdatas: Any) -> RRset:
+ """Create an RRset with the specified name and TTL, and with
+ the specified rdata objects.
+
+ Returns a ``dns.rrset.RRset`` object.
+ """
+
+ return from_rdata_list(name, ttl, cast(Collection[dns.rdata.Rdata], rdatas))
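A minimal sketch of building an RRset from text (the owner name and addresses are illustrative; everything here runs locally):

```python
import dns.rrset

rrs = dns.rrset.from_text(
    "www.example.com.", 300, "IN", "A", "192.0.2.1", "192.0.2.2"
)
print(rrs.name)  # www.example.com.
print(rrs.ttl)   # 300
print(len(rrs))  # 2 rdatas in the set
```
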
diff --git a/tapdown/lib/python3.11/site-packages/dns/serial.py b/tapdown/lib/python3.11/site-packages/dns/serial.py
new file mode 100644
index 0000000..3417299
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/serial.py
@@ -0,0 +1,118 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+"""Serial Number Arthimetic from RFC 1982"""
+
+
+class Serial:
+ def __init__(self, value: int, bits: int = 32):
+ self.value = value % 2**bits
+ self.bits = bits
+
+ def __repr__(self):
+ return f"dns.serial.Serial({self.value}, {self.bits})"
+
+ def __eq__(self, other):
+ if isinstance(other, int):
+ other = Serial(other, self.bits)
+ elif not isinstance(other, Serial) or other.bits != self.bits:
+ return NotImplemented
+ return self.value == other.value
+
+ def __ne__(self, other):
+ if isinstance(other, int):
+ other = Serial(other, self.bits)
+ elif not isinstance(other, Serial) or other.bits != self.bits:
+ return NotImplemented
+ return self.value != other.value
+
+ def __lt__(self, other):
+ if isinstance(other, int):
+ other = Serial(other, self.bits)
+ elif not isinstance(other, Serial) or other.bits != self.bits:
+ return NotImplemented
+ if self.value < other.value and other.value - self.value < 2 ** (self.bits - 1):
+ return True
+ elif self.value > other.value and self.value - other.value > 2 ** (
+ self.bits - 1
+ ):
+ return True
+ else:
+ return False
+
+ def __le__(self, other):
+ return self == other or self < other
+
+ def __gt__(self, other):
+ if isinstance(other, int):
+ other = Serial(other, self.bits)
+ elif not isinstance(other, Serial) or other.bits != self.bits:
+ return NotImplemented
+ if self.value < other.value and other.value - self.value > 2 ** (self.bits - 1):
+ return True
+ elif self.value > other.value and self.value - other.value < 2 ** (
+ self.bits - 1
+ ):
+ return True
+ else:
+ return False
+
+ def __ge__(self, other):
+ return self == other or self > other
+
+ def __add__(self, other):
+ v = self.value
+ if isinstance(other, Serial):
+ delta = other.value
+ elif isinstance(other, int):
+ delta = other
+ else:
+ raise ValueError
+ if abs(delta) > (2 ** (self.bits - 1) - 1):
+ raise ValueError
+ v += delta
+ v = v % 2**self.bits
+ return Serial(v, self.bits)
+
+ def __iadd__(self, other):
+ v = self.value
+ if isinstance(other, Serial):
+ delta = other.value
+ elif isinstance(other, int):
+ delta = other
+ else:
+ raise ValueError
+ if abs(delta) > (2 ** (self.bits - 1) - 1):
+ raise ValueError
+ v += delta
+ v = v % 2**self.bits
+ self.value = v
+ return self
+
+ def __sub__(self, other):
+ v = self.value
+ if isinstance(other, Serial):
+ delta = other.value
+ elif isinstance(other, int):
+ delta = other
+ else:
+ raise ValueError
+ if abs(delta) > (2 ** (self.bits - 1) - 1):
+ raise ValueError
+ v -= delta
+ v = v % 2**self.bits
+ return Serial(v, self.bits)
+
+ def __isub__(self, other):
+ v = self.value
+ if isinstance(other, Serial):
+ delta = other.value
+ elif isinstance(other, int):
+ delta = other
+ else:
+ raise ValueError
+ if abs(delta) > (2 ** (self.bits - 1) - 1):
+ raise ValueError
+ v -= delta
+ v = v % 2**self.bits
+ self.value = v
+ return self
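A small sketch of the wraparound behavior the class implements; this is the RFC 1982 property that makes SOA serial comparisons work across the 32-bit boundary:

```python
import dns.serial

a = dns.serial.Serial(2**32 - 1)  # the maximum 32-bit serial
b = a + 1                         # addition wraps modulo 2**32
print(b.value)  # 0
print(a < b)    # True: under RFC 1982 ordering, 0 follows 2**32 - 1
```
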
diff --git a/tapdown/lib/python3.11/site-packages/dns/set.py b/tapdown/lib/python3.11/site-packages/dns/set.py
new file mode 100644
index 0000000..ae8f0dd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/set.py
@@ -0,0 +1,308 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import itertools
+
+
+class Set:
+ """A simple set class.
+
+    This class originally existed because Python had no built-in set class, and
+    it was first implemented with lists. The ordered and indexable nature of
+    RRsets and Rdatasets is unfortunately widely relied upon in dnspython
+    applications, so for backwards compatibility sets continue to be a custom
+    class, now based on an ordered dictionary.
+ """
+
+ __slots__ = ["items"]
+
+ def __init__(self, items=None):
+ """Initialize the set.
+
+ *items*, an iterable or ``None``, the initial set of items.
+ """
+
+ self.items = dict()
+ if items is not None:
+ for item in items:
+ # This is safe for how we use set, but if other code
+ # subclasses it could be a legitimate issue.
+ self.add(item) # lgtm[py/init-calls-subclass]
+
+ def __repr__(self):
+ return f"dns.set.Set({repr(list(self.items.keys()))})" # pragma: no cover
+
+ def add(self, item):
+ """Add an item to the set."""
+
+ if item not in self.items:
+ self.items[item] = None
+
+ def remove(self, item):
+ """Remove an item from the set."""
+
+ try:
+ del self.items[item]
+ except KeyError:
+ raise ValueError
+
+ def discard(self, item):
+ """Remove an item from the set if present."""
+
+ self.items.pop(item, None)
+
+ def pop(self):
+ """Remove an arbitrary item from the set."""
+ (k, _) = self.items.popitem()
+ return k
+
+ def _clone(self) -> "Set":
+ """Make a (shallow) copy of the set.
+
+ There is a 'clone protocol' that subclasses of this class
+ should use. To make a copy, first call your super's _clone()
+ method, and use the object returned as the new instance. Then
+ make shallow copies of the attributes defined in the subclass.
+
+ This protocol allows us to write the set algorithms that
+ return new instances (e.g. union) once, and keep using them in
+ subclasses.
+ """
+
+ if hasattr(self, "_clone_class"):
+ cls = self._clone_class # type: ignore
+ else:
+ cls = self.__class__
+ obj = cls.__new__(cls)
+ obj.items = dict()
+ obj.items.update(self.items)
+ return obj
+
+ def __copy__(self):
+ """Make a (shallow) copy of the set."""
+
+ return self._clone()
+
+ def copy(self):
+ """Make a (shallow) copy of the set."""
+
+ return self._clone()
+
+ def union_update(self, other):
+ """Update the set, adding any elements from other which are not
+ already in the set.
+ """
+
+ if not isinstance(other, Set):
+ raise ValueError("other must be a Set instance")
+ if self is other: # lgtm[py/comparison-using-is]
+ return
+ for item in other.items:
+ self.add(item)
+
+ def intersection_update(self, other):
+ """Update the set, removing any elements from other which are not
+ in both sets.
+ """
+
+ if not isinstance(other, Set):
+ raise ValueError("other must be a Set instance")
+ if self is other: # lgtm[py/comparison-using-is]
+ return
+ # we make a copy of the list so that we can remove items from
+ # the list without breaking the iterator.
+ for item in list(self.items):
+ if item not in other.items:
+ del self.items[item]
+
+ def difference_update(self, other):
+ """Update the set, removing any elements from other which are in
+ the set.
+ """
+
+ if not isinstance(other, Set):
+ raise ValueError("other must be a Set instance")
+ if self is other: # lgtm[py/comparison-using-is]
+ self.items.clear()
+ else:
+ for item in other.items:
+ self.discard(item)
+
+ def symmetric_difference_update(self, other):
+ """Update the set, retaining only elements unique to both sets."""
+
+ if not isinstance(other, Set):
+ raise ValueError("other must be a Set instance")
+ if self is other: # lgtm[py/comparison-using-is]
+ self.items.clear()
+ else:
+ overlap = self.intersection(other)
+ self.union_update(other)
+ self.difference_update(overlap)
+
+ def union(self, other):
+ """Return a new set which is the union of ``self`` and ``other``.
+
+ Returns the same Set type as this set.
+ """
+
+ obj = self._clone()
+ obj.union_update(other)
+ return obj
+
+ def intersection(self, other):
+ """Return a new set which is the intersection of ``self`` and
+ ``other``.
+
+ Returns the same Set type as this set.
+ """
+
+ obj = self._clone()
+ obj.intersection_update(other)
+ return obj
+
+ def difference(self, other):
+ """Return a new set which ``self`` - ``other``, i.e. the items
+ in ``self`` which are not also in ``other``.
+
+ Returns the same Set type as this set.
+ """
+
+ obj = self._clone()
+ obj.difference_update(other)
+ return obj
+
+ def symmetric_difference(self, other):
+ """Return a new set which (``self`` - ``other``) | (``other``
+ - ``self), ie: the items in either ``self`` or ``other`` which
+ are not contained in their intersection.
+
+ Returns the same Set type as this set.
+ """
+
+ obj = self._clone()
+ obj.symmetric_difference_update(other)
+ return obj
+
+ def __or__(self, other):
+ return self.union(other)
+
+ def __and__(self, other):
+ return self.intersection(other)
+
+ def __add__(self, other):
+ return self.union(other)
+
+ def __sub__(self, other):
+ return self.difference(other)
+
+ def __xor__(self, other):
+ return self.symmetric_difference(other)
+
+ def __ior__(self, other):
+ self.union_update(other)
+ return self
+
+ def __iand__(self, other):
+ self.intersection_update(other)
+ return self
+
+ def __iadd__(self, other):
+ self.union_update(other)
+ return self
+
+ def __isub__(self, other):
+ self.difference_update(other)
+ return self
+
+ def __ixor__(self, other):
+ self.symmetric_difference_update(other)
+ return self
+
+ def update(self, other):
+ """Update the set, adding any elements from other which are not
+ already in the set.
+
+ *other*, the collection of items with which to update the set, which
+ may be any iterable type.
+ """
+
+ for item in other:
+ self.add(item)
+
+ def clear(self):
+ """Make the set empty."""
+ self.items.clear()
+
+ def __eq__(self, other):
+ return self.items == other.items
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __len__(self):
+ return len(self.items)
+
+ def __iter__(self):
+ return iter(self.items)
+
+ def __getitem__(self, i):
+ if isinstance(i, slice):
+ return list(itertools.islice(self.items, i.start, i.stop, i.step))
+ else:
+ return next(itertools.islice(self.items, i, i + 1))
+
+ def __delitem__(self, i):
+ if isinstance(i, slice):
+ for elt in list(self[i]):
+ del self.items[elt]
+ else:
+ del self.items[self[i]]
+
+ def issubset(self, other):
+ """Is this set a subset of *other*?
+
+ Returns a ``bool``.
+ """
+
+ if not isinstance(other, Set):
+ raise ValueError("other must be a Set instance")
+ for item in self.items:
+ if item not in other.items:
+ return False
+ return True
+
+ def issuperset(self, other):
+ """Is this set a superset of *other*?
+
+ Returns a ``bool``.
+ """
+
+ if not isinstance(other, Set):
+ raise ValueError("other must be a Set instance")
+ for item in other.items:
+ if item not in self.items:
+ return False
+ return True
+
+ def isdisjoint(self, other):
+ if not isinstance(other, Set):
+ raise ValueError("other must be a Set instance")
+ for item in other.items:
+ if item in self.items:
+ return False
+ return True
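A quick sketch of the ordered, indexable behavior the docstring describes, which the built-in ``set`` does not provide (values are illustrative):

```python
import dns.set

s = dns.set.Set([3, 1, 2])
print(list(s))  # [3, 1, 2] -- insertion order is preserved
print(s[0])     # 3 -- indexing works, unlike the built-in set
t = s | dns.set.Set([4, 1])
print(len(t))   # 4 -- union keeps one copy of the duplicate 1
```
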
diff --git a/tapdown/lib/python3.11/site-packages/dns/tokenizer.py b/tapdown/lib/python3.11/site-packages/dns/tokenizer.py
new file mode 100644
index 0000000..86ae3e2
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/tokenizer.py
@@ -0,0 +1,706 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""Tokenize DNS zone file format"""
+
+import io
+import sys
+from typing import Any, List, Tuple
+
+import dns.exception
+import dns.name
+import dns.ttl
+
+_DELIMITERS = {" ", "\t", "\n", ";", "(", ")", '"'}
+_QUOTING_DELIMITERS = {'"'}
+
+EOF = 0
+EOL = 1
+WHITESPACE = 2
+IDENTIFIER = 3
+QUOTED_STRING = 4
+COMMENT = 5
+DELIMITER = 6
+
+
+class UngetBufferFull(dns.exception.DNSException):
+ """An attempt was made to unget a token when the unget buffer was full."""
+
+
+class Token:
+ """A DNS zone file format token.
+
+ ttype: The token type
+ value: The token value
+ has_escape: Does the token value contain escapes?
+ """
+
+ def __init__(
+ self,
+ ttype: int,
+ value: Any = "",
+ has_escape: bool = False,
+ comment: str | None = None,
+ ):
+ """Initialize a token instance."""
+
+ self.ttype = ttype
+ self.value = value
+ self.has_escape = has_escape
+ self.comment = comment
+
+ def is_eof(self) -> bool:
+ return self.ttype == EOF
+
+ def is_eol(self) -> bool:
+ return self.ttype == EOL
+
+ def is_whitespace(self) -> bool:
+ return self.ttype == WHITESPACE
+
+ def is_identifier(self) -> bool:
+ return self.ttype == IDENTIFIER
+
+ def is_quoted_string(self) -> bool:
+ return self.ttype == QUOTED_STRING
+
+ def is_comment(self) -> bool:
+ return self.ttype == COMMENT
+
+ def is_delimiter(self) -> bool: # pragma: no cover (we don't return delimiters yet)
+ return self.ttype == DELIMITER
+
+ def is_eol_or_eof(self) -> bool:
+ return self.ttype == EOL or self.ttype == EOF
+
+ def __eq__(self, other):
+ if not isinstance(other, Token):
+ return False
+ return self.ttype == other.ttype and self.value == other.value
+
+ def __ne__(self, other):
+ if not isinstance(other, Token):
+ return True
+ return self.ttype != other.ttype or self.value != other.value
+
+ def __str__(self):
+ return f'{self.ttype} "{self.value}"'
+
+ def unescape(self) -> "Token":
+ if not self.has_escape:
+ return self
+ unescaped = ""
+ l = len(self.value)
+ i = 0
+ while i < l:
+ c = self.value[i]
+ i += 1
+ if c == "\\":
+ if i >= l: # pragma: no cover (can't happen via get())
+ raise dns.exception.UnexpectedEnd
+ c = self.value[i]
+ i += 1
+ if c.isdigit():
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c2 = self.value[i]
+ i += 1
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c3 = self.value[i]
+ i += 1
+ if not (c2.isdigit() and c3.isdigit()):
+ raise dns.exception.SyntaxError
+ codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
+ if codepoint > 255:
+ raise dns.exception.SyntaxError
+ c = chr(codepoint)
+ unescaped += c
+ return Token(self.ttype, unescaped)
+
+ def unescape_to_bytes(self) -> "Token":
+ # We used to use unescape() for TXT-like records, but this
+ # caused problems as we'd process DNS escapes into Unicode code
+ # points instead of byte values, and then a to_text() of the
+ # processed data would not equal the original input. For
+ # example, \226 in the TXT record would have a to_text() of
+ # \195\162 because we applied UTF-8 encoding to Unicode code
+ # point 226.
+ #
+ # We now apply escapes while converting directly to bytes,
+ # avoiding this double encoding.
+ #
+ # This code also handles cases where the unicode input has
+ # non-ASCII code-points in it by converting it to UTF-8. TXT
+ # records aren't defined for Unicode, but this is the best we
+ # can do to preserve meaning. For example,
+ #
+ # foo\u200bbar
+ #
+ # (where \u200b is Unicode code point 0x200b) will be treated
+ # as if the input had been the UTF-8 encoding of that string,
+ # namely:
+ #
+ # foo\226\128\139bar
+ #
+ unescaped = b""
+ l = len(self.value)
+ i = 0
+ while i < l:
+ c = self.value[i]
+ i += 1
+ if c == "\\":
+ if i >= l: # pragma: no cover (can't happen via get())
+ raise dns.exception.UnexpectedEnd
+ c = self.value[i]
+ i += 1
+ if c.isdigit():
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c2 = self.value[i]
+ i += 1
+ if i >= l:
+ raise dns.exception.UnexpectedEnd
+ c3 = self.value[i]
+ i += 1
+ if not (c2.isdigit() and c3.isdigit()):
+ raise dns.exception.SyntaxError
+ codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
+ if codepoint > 255:
+ raise dns.exception.SyntaxError
+ unescaped += b"%c" % (codepoint)
+ else:
+ # Note that as mentioned above, if c is a Unicode
+ # code point outside of the ASCII range, then this
+ # += is converting that code point to its UTF-8
+ # encoding and appending multiple bytes to
+ # unescaped.
+ unescaped += c.encode()
+ else:
+ unescaped += c.encode()
+ return Token(self.ttype, bytes(unescaped))
+
+
+class Tokenizer:
+ """A DNS zone file format tokenizer.
+
+ A token object is basically a (type, value) tuple. The valid
+ types are EOF, EOL, WHITESPACE, IDENTIFIER, QUOTED_STRING,
+ COMMENT, and DELIMITER.
+
+ file: The file to tokenize
+
+ ungotten_char: The most recently ungotten character, or None.
+
+ ungotten_token: The most recently ungotten token, or None.
+
+ multiline: The current multiline level. This value is increased
+ by one every time a '(' delimiter is read, and decreased by one every time
+ a ')' delimiter is read.
+
+ quoting: This variable is true if the tokenizer is currently
+ reading a quoted string.
+
+ eof: This variable is true if the tokenizer has encountered EOF.
+
+ delimiters: The current delimiter dictionary.
+
+ line_number: The current line number
+
+ filename: A filename that will be returned by the where() method.
+
+ idna_codec: A dns.name.IDNACodec, specifies the IDNA
+ encoder/decoder. If None, the default IDNA 2003
+ encoder/decoder is used.
+ """
+
+ def __init__(
+ self,
+ f: Any = sys.stdin,
+ filename: str | None = None,
+ idna_codec: dns.name.IDNACodec | None = None,
+ ):
+ """Initialize a tokenizer instance.
+
+ f: The file to tokenize. The default is sys.stdin.
+ This parameter may also be a string, in which case the tokenizer
+ will take its input from the contents of the string.
+
+ filename: the name of the filename that the where() method
+ will return.
+
+ idna_codec: A dns.name.IDNACodec, specifies the IDNA
+ encoder/decoder. If None, the default IDNA 2003
+ encoder/decoder is used.
+ """
+
+        if isinstance(f, str):
+            f = io.StringIO(f)
+            if filename is None:
+                filename = "<string>"
+        elif isinstance(f, bytes):
+            f = io.StringIO(f.decode())
+            if filename is None:
+                filename = "<string>"
+        else:
+            if filename is None:
+                if f is sys.stdin:
+                    filename = "<stdin>"
+                else:
+                    filename = "<file>"
+ self.file = f
+ self.ungotten_char: str | None = None
+ self.ungotten_token: Token | None = None
+ self.multiline = 0
+ self.quoting = False
+ self.eof = False
+ self.delimiters = _DELIMITERS
+ self.line_number = 1
+ assert filename is not None
+ self.filename = filename
+ if idna_codec is None:
+ self.idna_codec: dns.name.IDNACodec = dns.name.IDNA_2003
+ else:
+ self.idna_codec = idna_codec
+
+ def _get_char(self) -> str:
+ """Read a character from input."""
+
+ if self.ungotten_char is None:
+ if self.eof:
+ c = ""
+ else:
+ c = self.file.read(1)
+ if c == "":
+ self.eof = True
+ elif c == "\n":
+ self.line_number += 1
+ else:
+ c = self.ungotten_char
+ self.ungotten_char = None
+ return c
+
+ def where(self) -> Tuple[str, int]:
+ """Return the current location in the input.
+
+ Returns a (string, int) tuple. The first item is the filename of
+ the input, the second is the current line number.
+ """
+
+ return (self.filename, self.line_number)
+
+ def _unget_char(self, c: str) -> None:
+ """Unget a character.
+
+ The unget buffer for characters is only one character large; it is
+ an error to try to unget a character when the unget buffer is not
+ empty.
+
+ c: the character to unget
+ raises UngetBufferFull: there is already an ungotten char
+ """
+
+ if self.ungotten_char is not None:
+ # this should never happen!
+ raise UngetBufferFull # pragma: no cover
+ self.ungotten_char = c
+
+ def skip_whitespace(self) -> int:
+ """Consume input until a non-whitespace character is encountered.
+
+ The non-whitespace character is then ungotten, and the number of
+ whitespace characters consumed is returned.
+
+ If the tokenizer is in multiline mode, then newlines are whitespace.
+
+ Returns the number of characters skipped.
+ """
+
+ skipped = 0
+ while True:
+ c = self._get_char()
+ if c != " " and c != "\t":
+ if (c != "\n") or not self.multiline:
+ self._unget_char(c)
+ return skipped
+ skipped += 1
+
+ def get(self, want_leading: bool = False, want_comment: bool = False) -> Token:
+ """Get the next token.
+
+ want_leading: If True, return a WHITESPACE token if the
+ first character read is whitespace. The default is False.
+
+ want_comment: If True, return a COMMENT token if the
+ first token read is a comment. The default is False.
+
+ Raises dns.exception.UnexpectedEnd: input ended prematurely
+
+ Raises dns.exception.SyntaxError: input was badly formed
+
+ Returns a Token.
+ """
+
+ if self.ungotten_token is not None:
+ utoken = self.ungotten_token
+ self.ungotten_token = None
+ if utoken.is_whitespace():
+ if want_leading:
+ return utoken
+ elif utoken.is_comment():
+ if want_comment:
+ return utoken
+ else:
+ return utoken
+ skipped = self.skip_whitespace()
+ if want_leading and skipped > 0:
+ return Token(WHITESPACE, " ")
+ token = ""
+ ttype = IDENTIFIER
+ has_escape = False
+ while True:
+ c = self._get_char()
+ if c == "" or c in self.delimiters:
+ if c == "" and self.quoting:
+ raise dns.exception.UnexpectedEnd
+ if token == "" and ttype != QUOTED_STRING:
+ if c == "(":
+ self.multiline += 1
+ self.skip_whitespace()
+ continue
+ elif c == ")":
+ if self.multiline <= 0:
+ raise dns.exception.SyntaxError
+ self.multiline -= 1
+ self.skip_whitespace()
+ continue
+ elif c == '"':
+ if not self.quoting:
+ self.quoting = True
+ self.delimiters = _QUOTING_DELIMITERS
+ ttype = QUOTED_STRING
+ continue
+ else:
+ self.quoting = False
+ self.delimiters = _DELIMITERS
+ self.skip_whitespace()
+ continue
+ elif c == "\n":
+ return Token(EOL, "\n")
+ elif c == ";":
+                        while True:
+ c = self._get_char()
+ if c == "\n" or c == "":
+ break
+ token += c
+ if want_comment:
+ self._unget_char(c)
+ return Token(COMMENT, token)
+ elif c == "":
+ if self.multiline:
+ raise dns.exception.SyntaxError(
+ "unbalanced parentheses"
+ )
+ return Token(EOF, comment=token)
+ elif self.multiline:
+ self.skip_whitespace()
+ token = ""
+ continue
+ else:
+ return Token(EOL, "\n", comment=token)
+ else:
+ # This code exists in case we ever want a
+ # delimiter to be returned. It never produces
+ # a token currently.
+ token = c
+ ttype = DELIMITER
+ else:
+ self._unget_char(c)
+ break
+ elif self.quoting and c == "\n":
+ raise dns.exception.SyntaxError("newline in quoted string")
+ elif c == "\\":
+ #
+ # It's an escape. Put it and the next character into
+ # the token; it will be checked later for goodness.
+ #
+ token += c
+ has_escape = True
+ c = self._get_char()
+ if c == "" or (c == "\n" and not self.quoting):
+ raise dns.exception.UnexpectedEnd
+ token += c
+ if token == "" and ttype != QUOTED_STRING:
+ if self.multiline:
+ raise dns.exception.SyntaxError("unbalanced parentheses")
+ ttype = EOF
+ return Token(ttype, token, has_escape)
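+
+    # Example (illustrative sketch): reading a quoted string and then a
+    # comment from zone-file style input:
+    #
+    #     tok = Tokenizer('"a b" ; note\n')
+    #     tok.get().value                   # -> 'a b' (a QUOTED_STRING token)
+    #     tok.get(want_comment=True).value  # -> ' note' (a COMMENT token)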
+
+ def unget(self, token: Token) -> None:
+ """Unget a token.
+
+ The unget buffer for tokens is only one token large; it is
+ an error to try to unget a token when the unget buffer is not
+ empty.
+
+ token: the token to unget
+
+ Raises UngetBufferFull: there is already an ungotten token
+ """
+
+ if self.ungotten_token is not None:
+ raise UngetBufferFull
+ self.ungotten_token = token
+
+ def next(self):
+ """Return the next item in an iteration.
+
+ Returns a Token.
+ """
+
+ token = self.get()
+ if token.is_eof():
+ raise StopIteration
+ return token
+
+ __next__ = next
+
+ def __iter__(self):
+ return self
+
+ # Helpers
+
+ def get_int(self, base: int = 10) -> int:
+ """Read the next token and interpret it as an unsigned integer.
+
+ Raises dns.exception.SyntaxError if not an unsigned integer.
+
+ Returns an int.
+ """
+
+ token = self.get().unescape()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError("expecting an identifier")
+ if not token.value.isdigit():
+ raise dns.exception.SyntaxError("expecting an integer")
+ return int(token.value, base)
+
+ def get_uint8(self) -> int:
+ """Read the next token and interpret it as an 8-bit unsigned
+ integer.
+
+ Raises dns.exception.SyntaxError if not an 8-bit unsigned integer.
+
+ Returns an int.
+ """
+
+ value = self.get_int()
+ if value < 0 or value > 255:
+ raise dns.exception.SyntaxError(f"{value} is not an unsigned 8-bit integer")
+ return value
+
+ def get_uint16(self, base: int = 10) -> int:
+ """Read the next token and interpret it as a 16-bit unsigned
+ integer.
+
+ Raises dns.exception.SyntaxError if not a 16-bit unsigned integer.
+
+ Returns an int.
+ """
+
+ value = self.get_int(base=base)
+ if value < 0 or value > 65535:
+ if base == 8:
+ raise dns.exception.SyntaxError(
+ f"{value:o} is not an octal unsigned 16-bit integer"
+ )
+ else:
+ raise dns.exception.SyntaxError(
+ f"{value} is not an unsigned 16-bit integer"
+ )
+ return value
+
+ def get_uint32(self, base: int = 10) -> int:
+ """Read the next token and interpret it as a 32-bit unsigned
+ integer.
+
+ Raises dns.exception.SyntaxError if not a 32-bit unsigned integer.
+
+ Returns an int.
+ """
+
+ value = self.get_int(base=base)
+ if value < 0 or value > 4294967295:
+ raise dns.exception.SyntaxError(
+ f"{value} is not an unsigned 32-bit integer"
+ )
+ return value
+
+ def get_uint48(self, base: int = 10) -> int:
+ """Read the next token and interpret it as a 48-bit unsigned
+ integer.
+
+ Raises dns.exception.SyntaxError if not a 48-bit unsigned integer.
+
+ Returns an int.
+ """
+
+ value = self.get_int(base=base)
+ if value < 0 or value > 281474976710655:
+ raise dns.exception.SyntaxError(
+ f"{value} is not an unsigned 48-bit integer"
+ )
+ return value
+
+ def get_string(self, max_length: int | None = None) -> str:
+ """Read the next token and interpret it as a string.
+
+ Raises dns.exception.SyntaxError if not a string.
+ Raises dns.exception.SyntaxError if token value length
+ exceeds max_length (if specified).
+
+ Returns a string.
+ """
+
+ token = self.get().unescape()
+ if not (token.is_identifier() or token.is_quoted_string()):
+ raise dns.exception.SyntaxError("expecting a string")
+ if max_length and len(token.value) > max_length:
+ raise dns.exception.SyntaxError("string too long")
+ return token.value
+
+ def get_identifier(self) -> str:
+ """Read the next token, which should be an identifier.
+
+ Raises dns.exception.SyntaxError if not an identifier.
+
+ Returns a string.
+ """
+
+ token = self.get().unescape()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError("expecting an identifier")
+ return token.value
+
+ def get_remaining(self, max_tokens: int | None = None) -> List[Token]:
+ """Return the remaining tokens on the line, until an EOL or EOF is seen.
+
+ max_tokens: If not None, stop after this number of tokens.
+
+ Returns a list of tokens.
+ """
+
+ tokens = []
+ while True:
+ token = self.get()
+ if token.is_eol_or_eof():
+ self.unget(token)
+ break
+ tokens.append(token)
+ if len(tokens) == max_tokens:
+ break
+ return tokens
+
+ def concatenate_remaining_identifiers(self, allow_empty: bool = False) -> str:
+ """Read the remaining tokens on the line, which should be identifiers.
+
+ Raises dns.exception.SyntaxError if there are no remaining tokens,
+ unless `allow_empty=True` is given.
+
+ Raises dns.exception.SyntaxError if a token is seen that is not an
+ identifier.
+
+ Returns a string containing a concatenation of the remaining
+ identifiers.
+ """
+ s = ""
+ while True:
+ token = self.get().unescape()
+ if token.is_eol_or_eof():
+ self.unget(token)
+ break
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ s += token.value
+ if not (allow_empty or s):
+ raise dns.exception.SyntaxError("expecting another identifier")
+ return s
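+
+    # Example (illustrative sketch): joining base64 text that was split
+    # across several tokens on one line:
+    #
+    #     tok = Tokenizer("aGVs bG8=\n")
+    #     tok.concatenate_remaining_identifiers()  # -> 'aGVsbG8='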
+
+ def as_name(
+ self,
+ token: Token,
+ origin: dns.name.Name | None = None,
+ relativize: bool = False,
+ relativize_to: dns.name.Name | None = None,
+ ) -> dns.name.Name:
+ """Try to interpret the token as a DNS name.
+
+ Raises dns.exception.SyntaxError if not a name.
+
+ Returns a dns.name.Name.
+ """
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError("expecting an identifier")
+ name = dns.name.from_text(token.value, origin, self.idna_codec)
+ return name.choose_relativity(relativize_to or origin, relativize)
+
+ def get_name(
+ self,
+ origin: dns.name.Name | None = None,
+ relativize: bool = False,
+ relativize_to: dns.name.Name | None = None,
+ ) -> dns.name.Name:
+ """Read the next token and interpret it as a DNS name.
+
+ Raises dns.exception.SyntaxError if not a name.
+
+ Returns a dns.name.Name.
+ """
+
+ token = self.get()
+ return self.as_name(token, origin, relativize, relativize_to)
+
+ def get_eol_as_token(self) -> Token:
+ """Read the next token and raise an exception if it isn't EOL or
+ EOF.
+
+        Returns a Token.
+ """
+
+ token = self.get()
+ if not token.is_eol_or_eof():
+ raise dns.exception.SyntaxError(
+ f'expected EOL or EOF, got {token.ttype} "{token.value}"'
+ )
+ return token
+
+ def get_eol(self) -> str:
+ return self.get_eol_as_token().value
+
+ def get_ttl(self) -> int:
+ """Read the next token and interpret it as a DNS TTL.
+
+ Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an
+ identifier or badly formed.
+
+ Returns an int.
+ """
+
+ token = self.get().unescape()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError("expecting an identifier")
+ return dns.ttl.from_text(token.value)
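+
+# Example (illustrative sketch): the typed helpers compose when parsing a
+# zone-file style rdata line:
+#
+#     tok = Tokenizer("3600 IN A 10.0.0.1\n")
+#     tok.get_ttl()         # -> 3600
+#     tok.get_identifier()  # -> 'IN'
+#     tok.get_identifier()  # -> 'A'
+#     tok.get_string()      # -> '10.0.0.1'
+#     tok.get_eol()         # -> '\n'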
diff --git a/tapdown/lib/python3.11/site-packages/dns/transaction.py b/tapdown/lib/python3.11/site-packages/dns/transaction.py
new file mode 100644
index 0000000..9ecd737
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/transaction.py
@@ -0,0 +1,651 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import collections
+from typing import Any, Callable, Iterator, List, Tuple
+
+import dns.exception
+import dns.name
+import dns.node
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.rrset
+import dns.serial
+import dns.ttl
+
+
+class TransactionManager:
+ def reader(self) -> "Transaction":
+ """Begin a read-only transaction."""
+ raise NotImplementedError # pragma: no cover
+
+ def writer(self, replacement: bool = False) -> "Transaction":
+ """Begin a writable transaction.
+
+ *replacement*, a ``bool``. If `True`, the content of the
+ transaction completely replaces any prior content. If False,
+ the default, then the content of the transaction updates the
+ existing content.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def origin_information(
+ self,
+ ) -> Tuple[dns.name.Name | None, bool, dns.name.Name | None]:
+ """Returns a tuple
+
+ (absolute_origin, relativize, effective_origin)
+
+        giving the absolute name of the default origin for any
+        relative domain names, whether names should be relativized,
+        and the "effective origin".  The "effective origin" is the
+        absolute origin if relativize is False, and the empty name if
+        relativize is True.  (The effective origin is provided even
+ though it can be computed from the absolute_origin and
+ relativize setting because it avoids a lot of code
+ duplication.)
+
+ If the returned names are `None`, then no origin information is
+ available.
+
+ This information is used by code working with transactions to
+ allow it to coordinate relativization. The transaction code
+ itself takes what it gets (i.e. does not change name
+ relativity).
+
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def get_class(self) -> dns.rdataclass.RdataClass:
+ """The class of the transaction manager."""
+ raise NotImplementedError # pragma: no cover
+
+ def from_wire_origin(self) -> dns.name.Name | None:
+ """Origin to use in from_wire() calls."""
+ (absolute_origin, relativize, _) = self.origin_information()
+ if relativize:
+ return absolute_origin
+ else:
+ return None
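+
+# Example (illustrative sketch): dns.zone.Zone implements TransactionManager,
+# so zone content can be read and updated transactionally (assumes `zone` was
+# loaded via dns.zone.from_text() or dns.zone.from_file()):
+#
+#     with zone.writer() as txn:
+#         txn.add("www", 300, dns.rdata.from_text("IN", "A", "10.0.0.1"))
+#     with zone.reader() as txn:
+#         rdataset = txn.get("www", "A")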
+
+
+class DeleteNotExact(dns.exception.DNSException):
+ """Existing data did not match data specified by an exact delete."""
+
+
+class ReadOnly(dns.exception.DNSException):
+ """Tried to write to a read-only transaction."""
+
+
+class AlreadyEnded(dns.exception.DNSException):
+ """Tried to use an already-ended transaction."""
+
+
+def _ensure_immutable_rdataset(rdataset):
+ if rdataset is None or isinstance(rdataset, dns.rdataset.ImmutableRdataset):
+ return rdataset
+ return dns.rdataset.ImmutableRdataset(rdataset)
+
+
+def _ensure_immutable_node(node):
+ if node is None or node.is_immutable():
+ return node
+ return dns.node.ImmutableNode(node)
+
+
+CheckPutRdatasetType = Callable[
+ ["Transaction", dns.name.Name, dns.rdataset.Rdataset], None
+]
+CheckDeleteRdatasetType = Callable[
+ ["Transaction", dns.name.Name, dns.rdatatype.RdataType, dns.rdatatype.RdataType],
+ None,
+]
+CheckDeleteNameType = Callable[["Transaction", dns.name.Name], None]
+
+
+class Transaction:
+ def __init__(
+ self,
+ manager: TransactionManager,
+ replacement: bool = False,
+ read_only: bool = False,
+ ):
+ self.manager = manager
+ self.replacement = replacement
+ self.read_only = read_only
+ self._ended = False
+ self._check_put_rdataset: List[CheckPutRdatasetType] = []
+ self._check_delete_rdataset: List[CheckDeleteRdatasetType] = []
+ self._check_delete_name: List[CheckDeleteNameType] = []
+
+ #
+ # This is the high level API
+ #
+ # Note that we currently use non-immutable types in the return type signature to
+ # avoid covariance problems, e.g. if the caller has a List[Rdataset], mypy will be
+ # unhappy if we return an ImmutableRdataset.
+
+ def get(
+ self,
+ name: dns.name.Name | str | None,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ ) -> dns.rdataset.Rdataset:
+ """Return the rdataset associated with *name*, *rdtype*, and *covers*,
+ or `None` if not found.
+
+ Note that the returned rdataset is immutable.
+ """
+ self._check_ended()
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ covers = dns.rdatatype.RdataType.make(covers)
+ rdataset = self._get_rdataset(name, rdtype, covers)
+ return _ensure_immutable_rdataset(rdataset)
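+
+    # Example (illustrative sketch): the result is immutable; copy it if a
+    # mutable rdataset is needed outside the transaction (assumes `txn` is
+    # an open transaction):
+    #
+    #     rds = txn.get("www", "A")
+    #     if rds is not None:
+    #         mutable = dns.rdataset.Rdataset(rds.rdclass, rds.rdtype)
+    #         mutable.update(rds)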
+
+ def get_node(self, name: dns.name.Name) -> dns.node.Node | None:
+ """Return the node at *name*, if any.
+
+ Returns an immutable node or ``None``.
+ """
+ return _ensure_immutable_node(self._get_node(name))
+
+ def _check_read_only(self) -> None:
+ if self.read_only:
+ raise ReadOnly
+
+ def add(self, *args: Any) -> None:
+ """Add records.
+
+ The arguments may be:
+
+ - rrset
+
+ - name, rdataset...
+
+ - name, ttl, rdata...
+ """
+ self._check_ended()
+ self._check_read_only()
+ self._add(False, args)
+
+ def replace(self, *args: Any) -> None:
+ """Replace the existing rdataset at the name with the specified
+ rdataset, or add the specified rdataset if there was no existing
+ rdataset.
+
+ The arguments may be:
+
+ - rrset
+
+ - name, rdataset...
+
+ - name, ttl, rdata...
+
+ Note that if you want to replace the entire node, you should do
+ a delete of the name followed by one or more calls to add() or
+ replace().
+ """
+ self._check_ended()
+ self._check_read_only()
+ self._add(True, args)
+
+ def delete(self, *args: Any) -> None:
+ """Delete records.
+
+ It is not an error if some of the records are not in the existing
+ set.
+
+ The arguments may be:
+
+ - rrset
+
+ - name
+
+ - name, rdatatype, [covers]
+
+ - name, rdataset...
+
+ - name, rdata...
+ """
+ self._check_ended()
+ self._check_read_only()
+ self._delete(False, args)
+
+ def delete_exact(self, *args: Any) -> None:
+ """Delete records.
+
+ The arguments may be:
+
+ - rrset
+
+ - name
+
+ - name, rdatatype, [covers]
+
+ - name, rdataset...
+
+ - name, rdata...
+
+ Raises dns.transaction.DeleteNotExact if some of the records
+ are not in the existing set.
+
+ """
+ self._check_ended()
+ self._check_read_only()
+ self._delete(True, args)
+
+ def name_exists(self, name: dns.name.Name | str) -> bool:
+ """Does the specified name exist?"""
+ self._check_ended()
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ return self._name_exists(name)
+
+ def update_serial(
+ self,
+ value: int = 1,
+ relative: bool = True,
+ name: dns.name.Name = dns.name.empty,
+ ) -> None:
+ """Update the serial number.
+
+ *value*, an `int`, is an increment if *relative* is `True`, or the
+ actual value to set if *relative* is `False`.
+
+ Raises `KeyError` if there is no SOA rdataset at *name*.
+
+ Raises `ValueError` if *value* is negative or if the increment is
+ so large that it would cause the new serial to be less than the
+ prior value.
+ """
+ self._check_ended()
+ if value < 0:
+ raise ValueError("negative update_serial() value")
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ rdataset = self._get_rdataset(name, dns.rdatatype.SOA, dns.rdatatype.NONE)
+ if rdataset is None or len(rdataset) == 0:
+ raise KeyError
+ if relative:
+ serial = dns.serial.Serial(rdataset[0].serial) + value
+ else:
+ serial = dns.serial.Serial(value)
+ serial = serial.value # convert back to int
+ if serial == 0:
+ serial = 1
+ rdata = rdataset[0].replace(serial=serial)
+ new_rdataset = dns.rdataset.from_rdata(rdataset.ttl, rdata)
+ self.replace(name, new_rdataset)
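+
+    # Example (illustrative sketch): serial updates use serial-number
+    # arithmetic and skip zero (assumes `txn` is a writable transaction on a
+    # zone with an SOA at the origin):
+    #
+    #     txn.update_serial()                            # increment by 1
+    #     txn.update_serial(2025010100, relative=False)  # set absolute value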
+
+ def __iter__(self):
+ self._check_ended()
+ return self._iterate_rdatasets()
+
+ def changed(self) -> bool:
+ """Has this transaction changed anything?
+
+ For read-only transactions, the result is always `False`.
+
+ For writable transactions, the result is `True` if at some time
+ during the life of the transaction, the content was changed.
+ """
+ self._check_ended()
+ return self._changed()
+
+ def commit(self) -> None:
+ """Commit the transaction.
+
+ Normally transactions are used as context managers and commit
+ or rollback automatically, but it may be done explicitly if needed.
+        A ``dns.transaction.AlreadyEnded`` exception will be raised if you try
+        to use a transaction after it has been committed or rolled back.
+
+        Raises an exception if the commit fails (in which case the transaction
+        is also rolled back).
+ """
+ self._end(True)
+
+ def rollback(self) -> None:
+ """Rollback the transaction.
+
+ Normally transactions are used as context managers and commit
+ or rollback automatically, but it may be done explicitly if needed.
+ A ``dns.transaction.AlreadyEnded`` exception will be raised if you try
+ to use a transaction after it has been committed or rolled back.
+
+ Rollback cannot otherwise fail.
+ """
+ self._end(False)
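+
+    # Example (illustrative sketch): explicit use without a context manager;
+    # the `with` form commits on success and rolls back on error:
+    #
+    #     txn = zone.writer()
+    #     try:
+    #         txn.add("www", 300, rdata)
+    #     except Exception:
+    #         txn.rollback()
+    #         raise
+    #     txn.commit()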
+
+ def check_put_rdataset(self, check: CheckPutRdatasetType) -> None:
+ """Call *check* before putting (storing) an rdataset.
+
+ The function is called with the transaction, the name, and the rdataset.
+
+ The check function may safely make non-mutating transaction method
+ calls, but behavior is undefined if mutating transaction methods are
+ called. The check function should raise an exception if it objects to
+ the put, and otherwise should return ``None``.
+ """
+ self._check_put_rdataset.append(check)
+
+ def check_delete_rdataset(self, check: CheckDeleteRdatasetType) -> None:
+ """Call *check* before deleting an rdataset.
+
+ The function is called with the transaction, the name, the rdatatype,
+ and the covered rdatatype.
+
+ The check function may safely make non-mutating transaction method
+ calls, but behavior is undefined if mutating transaction methods are
+ called. The check function should raise an exception if it objects to
+        the deletion, and otherwise should return ``None``.
+ """
+ self._check_delete_rdataset.append(check)
+
+ def check_delete_name(self, check: CheckDeleteNameType) -> None:
+        """Call *check* before deleting a name.
+
+ The function is called with the transaction and the name.
+
+ The check function may safely make non-mutating transaction method
+ calls, but behavior is undefined if mutating transaction methods are
+ called. The check function should raise an exception if it objects to
+        the deletion, and otherwise should return ``None``.
+ """
+ self._check_delete_name.append(check)
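+
+    # Example (illustrative sketch): a put check that rejects TTLs longer
+    # than one day (assumes `txn` is a writable transaction):
+    #
+    #     def no_long_ttls(txn, name, rdataset):
+    #         if rdataset.ttl > 86400:
+    #             raise ValueError("TTL longer than one day")
+    #
+    #     txn.check_put_rdataset(no_long_ttls)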
+
+ def iterate_rdatasets(
+ self,
+ ) -> Iterator[Tuple[dns.name.Name, dns.rdataset.Rdataset]]:
+ """Iterate all the rdatasets in the transaction, returning
+ (`dns.name.Name`, `dns.rdataset.Rdataset`) tuples.
+
+ Note that as is usual with python iterators, adding or removing items
+ while iterating will invalidate the iterator and may raise `RuntimeError`
+ or fail to iterate over all entries."""
+ self._check_ended()
+ return self._iterate_rdatasets()
+
+ def iterate_names(self) -> Iterator[dns.name.Name]:
+ """Iterate all the names in the transaction.
+
+ Note that as is usual with python iterators, adding or removing names
+ while iterating will invalidate the iterator and may raise `RuntimeError`
+ or fail to iterate over all entries."""
+ self._check_ended()
+ return self._iterate_names()
+
+ #
+ # Helper methods
+ #
+
+ def _raise_if_not_empty(self, method, args):
+ if len(args) != 0:
+ raise TypeError(f"extra parameters to {method}")
+
+ def _rdataset_from_args(self, method, deleting, args):
+ try:
+ arg = args.popleft()
+ if isinstance(arg, dns.rrset.RRset):
+ rdataset = arg.to_rdataset()
+ elif isinstance(arg, dns.rdataset.Rdataset):
+ rdataset = arg
+ else:
+ if deleting:
+ ttl = 0
+ else:
+ if isinstance(arg, int):
+ ttl = arg
+ if ttl > dns.ttl.MAX_TTL:
+ raise ValueError(f"{method}: TTL value too big")
+ else:
+ raise TypeError(f"{method}: expected a TTL")
+ arg = args.popleft()
+ if isinstance(arg, dns.rdata.Rdata):
+ rdataset = dns.rdataset.from_rdata(ttl, arg)
+ else:
+ raise TypeError(f"{method}: expected an Rdata")
+ return rdataset
+ except IndexError:
+ if deleting:
+ return None
+            else:
+                # ran out of arguments before a complete rdataset was given
+                raise TypeError(f"{method}: expected more arguments")
+
+ def _add(self, replace, args):
+ if replace:
+ method = "replace()"
+ else:
+ method = "add()"
+ try:
+ args = collections.deque(args)
+ arg = args.popleft()
+ if isinstance(arg, str):
+ arg = dns.name.from_text(arg, None)
+ if isinstance(arg, dns.name.Name):
+ name = arg
+ rdataset = self._rdataset_from_args(method, False, args)
+ elif isinstance(arg, dns.rrset.RRset):
+ rrset = arg
+ name = rrset.name
+ # rrsets are also rdatasets, but they don't print the
+ # same and can't be stored in nodes, so convert.
+ rdataset = rrset.to_rdataset()
+ else:
+ raise TypeError(
+ f"{method} requires a name or RRset as the first argument"
+ )
+ assert rdataset is not None # for type checkers
+ if rdataset.rdclass != self.manager.get_class():
+ raise ValueError(f"{method} has objects of wrong RdataClass")
+ if rdataset.rdtype == dns.rdatatype.SOA:
+ (_, _, origin) = self._origin_information()
+ if name != origin:
+ raise ValueError(f"{method} has non-origin SOA")
+ self._raise_if_not_empty(method, args)
+ if not replace:
+ existing = self._get_rdataset(name, rdataset.rdtype, rdataset.covers)
+ if existing is not None:
+ if isinstance(existing, dns.rdataset.ImmutableRdataset):
+ trds = dns.rdataset.Rdataset(
+ existing.rdclass, existing.rdtype, existing.covers
+ )
+ trds.update(existing)
+ existing = trds
+ rdataset = existing.union(rdataset)
+ self._checked_put_rdataset(name, rdataset)
+ except IndexError:
+ raise TypeError(f"not enough parameters to {method}")
+
+ def _delete(self, exact, args):
+ if exact:
+ method = "delete_exact()"
+ else:
+ method = "delete()"
+ try:
+ args = collections.deque(args)
+ arg = args.popleft()
+ if isinstance(arg, str):
+ arg = dns.name.from_text(arg, None)
+ if isinstance(arg, dns.name.Name):
+ name = arg
+                if len(args) > 0 and isinstance(args[0], (int, str)):
+ # deleting by type and (optionally) covers
+ rdtype = dns.rdatatype.RdataType.make(args.popleft())
+ if len(args) > 0:
+ covers = dns.rdatatype.RdataType.make(args.popleft())
+ else:
+ covers = dns.rdatatype.NONE
+ self._raise_if_not_empty(method, args)
+ existing = self._get_rdataset(name, rdtype, covers)
+ if existing is None:
+ if exact:
+ raise DeleteNotExact(f"{method}: missing rdataset")
+ else:
+ self._checked_delete_rdataset(name, rdtype, covers)
+ return
+ else:
+ rdataset = self._rdataset_from_args(method, True, args)
+ elif isinstance(arg, dns.rrset.RRset):
+ rdataset = arg # rrsets are also rdatasets
+ name = rdataset.name
+ else:
+ raise TypeError(
+ f"{method} requires a name or RRset as the first argument"
+ )
+ self._raise_if_not_empty(method, args)
+ if rdataset:
+ if rdataset.rdclass != self.manager.get_class():
+ raise ValueError(f"{method} has objects of wrong RdataClass")
+ existing = self._get_rdataset(name, rdataset.rdtype, rdataset.covers)
+ if existing is not None:
+ if exact:
+ intersection = existing.intersection(rdataset)
+ if intersection != rdataset:
+ raise DeleteNotExact(f"{method}: missing rdatas")
+ rdataset = existing.difference(rdataset)
+ if len(rdataset) == 0:
+ self._checked_delete_rdataset(
+ name, rdataset.rdtype, rdataset.covers
+ )
+ else:
+ self._checked_put_rdataset(name, rdataset)
+ elif exact:
+ raise DeleteNotExact(f"{method}: missing rdataset")
+ else:
+ if exact and not self._name_exists(name):
+ raise DeleteNotExact(f"{method}: name not known")
+ self._checked_delete_name(name)
+ except IndexError:
+ raise TypeError(f"not enough parameters to {method}")
+
+ def _check_ended(self):
+ if self._ended:
+ raise AlreadyEnded
+
+ def _end(self, commit):
+ self._check_ended()
+ try:
+ self._end_transaction(commit)
+ finally:
+ self._ended = True
+
+ def _checked_put_rdataset(self, name, rdataset):
+ for check in self._check_put_rdataset:
+ check(self, name, rdataset)
+ self._put_rdataset(name, rdataset)
+
+ def _checked_delete_rdataset(self, name, rdtype, covers):
+ for check in self._check_delete_rdataset:
+ check(self, name, rdtype, covers)
+ self._delete_rdataset(name, rdtype, covers)
+
+ def _checked_delete_name(self, name):
+ for check in self._check_delete_name:
+ check(self, name)
+ self._delete_name(name)
+
+ #
+ # Transactions are context managers.
+ #
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if not self._ended:
+ if exc_type is None:
+ self.commit()
+ else:
+ self.rollback()
+ return False
+
+ #
+ # This is the low level API, which must be implemented by subclasses
+ # of Transaction.
+ #
+
+ def _get_rdataset(self, name, rdtype, covers):
+ """Return the rdataset associated with *name*, *rdtype*, and *covers*,
+ or `None` if not found.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def _put_rdataset(self, name, rdataset):
+ """Store the rdataset."""
+ raise NotImplementedError # pragma: no cover
+
+ def _delete_name(self, name):
+ """Delete all data associated with *name*.
+
+ It is not an error if the name does not exist.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def _delete_rdataset(self, name, rdtype, covers):
+ """Delete all data associated with *name*, *rdtype*, and *covers*.
+
+ It is not an error if the rdataset does not exist.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def _name_exists(self, name):
+ """Does name exist?
+
+ Returns a bool.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def _changed(self):
+ """Has this transaction changed anything?"""
+ raise NotImplementedError # pragma: no cover
+
+ def _end_transaction(self, commit):
+ """End the transaction.
+
+ *commit*, a bool. If ``True``, commit the transaction, otherwise
+ roll it back.
+
+ If committing and the commit fails, then roll back and raise an
+ exception.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def _set_origin(self, origin):
+ """Set the origin.
+
+ This method is called when reading a possibly relativized
+ source, and an origin setting operation occurs (e.g. $ORIGIN
+ in a zone file).
+ """
+ raise NotImplementedError # pragma: no cover
+
+ def _iterate_rdatasets(self):
+ """Return an iterator that yields (name, rdataset) tuples."""
+ raise NotImplementedError # pragma: no cover
+
+ def _iterate_names(self):
+        """Return an iterator that yields names."""
+ raise NotImplementedError # pragma: no cover
+
+ def _get_node(self, name):
+ """Return the node at *name*, if any.
+
+ Returns a node or ``None``.
+ """
+ raise NotImplementedError # pragma: no cover
+
+ #
+ # Low-level API with a default implementation, in case a subclass needs
+ # to override.
+ #
+
+ def _origin_information(self):
+ # This is only used by _add()
+ return self.manager.origin_information()
diff --git a/tapdown/lib/python3.11/site-packages/dns/tsig.py b/tapdown/lib/python3.11/site-packages/dns/tsig.py
new file mode 100644
index 0000000..333f9aa
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/tsig.py
@@ -0,0 +1,359 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2001-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TSIG support."""
+
+import base64
+import hashlib
+import hmac
+import struct
+
+import dns.exception
+import dns.name
+import dns.rcode
+import dns.rdataclass
+import dns.rdatatype
+
+
+class BadTime(dns.exception.DNSException):
+ """The current time is not within the TSIG's validity time."""
+
+
+class BadSignature(dns.exception.DNSException):
+ """The TSIG signature fails to verify."""
+
+
+class BadKey(dns.exception.DNSException):
+ """The TSIG record owner name does not match the key."""
+
+
+class BadAlgorithm(dns.exception.DNSException):
+ """The TSIG algorithm does not match the key."""
+
+
+class PeerError(dns.exception.DNSException):
+    """Base class for all TSIG errors generated by the remote peer."""
+
+
+class PeerBadKey(PeerError):
+    """The peer didn't know the key we used."""
+
+
+class PeerBadSignature(PeerError):
+    """The peer didn't like the signature we sent."""
+
+
+class PeerBadTime(PeerError):
+    """The peer didn't like the time we sent."""
+
+
+class PeerBadTruncation(PeerError):
+    """The peer didn't like the amount of truncation in the TSIG we sent."""
+
+
+# TSIG Algorithms
+
+HMAC_MD5 = dns.name.from_text("HMAC-MD5.SIG-ALG.REG.INT")
+HMAC_SHA1 = dns.name.from_text("hmac-sha1")
+HMAC_SHA224 = dns.name.from_text("hmac-sha224")
+HMAC_SHA256 = dns.name.from_text("hmac-sha256")
+HMAC_SHA256_128 = dns.name.from_text("hmac-sha256-128")
+HMAC_SHA384 = dns.name.from_text("hmac-sha384")
+HMAC_SHA384_192 = dns.name.from_text("hmac-sha384-192")
+HMAC_SHA512 = dns.name.from_text("hmac-sha512")
+HMAC_SHA512_256 = dns.name.from_text("hmac-sha512-256")
+GSS_TSIG = dns.name.from_text("gss-tsig")
+
+default_algorithm = HMAC_SHA256
+
+mac_sizes = {
+ HMAC_SHA1: 20,
+ HMAC_SHA224: 28,
+ HMAC_SHA256: 32,
+ HMAC_SHA256_128: 16,
+ HMAC_SHA384: 48,
+ HMAC_SHA384_192: 24,
+ HMAC_SHA512: 64,
+ HMAC_SHA512_256: 32,
+ HMAC_MD5: 16,
+ GSS_TSIG: 128, # This is what we assume to be the worst case!
+}
+
+
+class GSSTSig:
+ """
+ GSS-TSIG TSIG implementation. This uses the GSS-API context established
+ in the TKEY message handshake to sign messages using GSS-API message
+ integrity codes, per the RFC.
+    integrity codes, per RFC 3645.
+ In order to avoid a direct GSSAPI dependency, the keyring holds a ref
+ to the GSSAPI object required, rather than the key itself.
+ """
+
+ def __init__(self, gssapi_context):
+ self.gssapi_context = gssapi_context
+ self.data = b""
+ self.name = "gss-tsig"
+
+ def update(self, data):
+ self.data += data
+
+ def sign(self):
+ # defer to the GSSAPI function to sign
+ return self.gssapi_context.get_signature(self.data)
+
+ def verify(self, expected):
+ try:
+ # defer to the GSSAPI function to verify
+ return self.gssapi_context.verify_signature(self.data, expected)
+        except Exception:
+            # any GSSAPI verification failure is mapped to BadSignature
+            raise BadSignature
+
+
+class GSSTSigAdapter:
+ def __init__(self, keyring):
+ self.keyring = keyring
+
+ def __call__(self, message, keyname):
+ if keyname in self.keyring:
+ key = self.keyring[keyname]
+ if isinstance(key, Key) and key.algorithm == GSS_TSIG:
+ if message:
+ GSSTSigAdapter.parse_tkey_and_step(key, message, keyname)
+ return key
+ else:
+ return None
+
+ @classmethod
+ def parse_tkey_and_step(cls, key, message, keyname):
+ # if the message is a TKEY type, absorb the key material
+ # into the context using step(); this is used to allow the
+ # client to complete the GSSAPI negotiation before attempting
+ # to verify the signed response to a TKEY message exchange
+ try:
+ rrset = message.find_rrset(
+ message.answer, keyname, dns.rdataclass.ANY, dns.rdatatype.TKEY
+ )
+ if rrset:
+ token = rrset[0].key
+ gssapi_context = key.secret
+ return gssapi_context.step(token)
+ except KeyError:
+ pass
+
+
+class HMACTSig:
+ """
+ HMAC TSIG implementation. This uses the HMAC python module to handle the
+ sign/verify operations.
+ """
+
+ _hashes = {
+ HMAC_SHA1: hashlib.sha1,
+ HMAC_SHA224: hashlib.sha224,
+ HMAC_SHA256: hashlib.sha256,
+ HMAC_SHA256_128: (hashlib.sha256, 128),
+ HMAC_SHA384: hashlib.sha384,
+ HMAC_SHA384_192: (hashlib.sha384, 192),
+ HMAC_SHA512: hashlib.sha512,
+ HMAC_SHA512_256: (hashlib.sha512, 256),
+ HMAC_MD5: hashlib.md5,
+ }
+
+ def __init__(self, key, algorithm):
+ try:
+ hashinfo = self._hashes[algorithm]
+ except KeyError:
+ raise NotImplementedError(f"TSIG algorithm {algorithm} is not supported")
+
+ # create the HMAC context
+ if isinstance(hashinfo, tuple):
+ self.hmac_context = hmac.new(key, digestmod=hashinfo[0])
+ self.size = hashinfo[1]
+ else:
+ self.hmac_context = hmac.new(key, digestmod=hashinfo)
+ self.size = None
+ self.name = self.hmac_context.name
+ if self.size:
+ self.name += f"-{self.size}"
+
+ def update(self, data):
+ return self.hmac_context.update(data)
+
+ def sign(self):
+ # defer to the HMAC digest() function for that digestmod
+ digest = self.hmac_context.digest()
+ if self.size:
+ digest = digest[: (self.size // 8)]
+ return digest
+
+ def verify(self, expected):
+ # re-digest and compare the results
+ mac = self.sign()
+ if not hmac.compare_digest(mac, expected):
+ raise BadSignature
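+
+# Example (illustrative sketch): the truncated algorithms keep the leftmost
+# bits of the digest, e.g. hmac-sha384-192 keeps 192 bits (24 bytes) of the
+# 48-byte SHA-384 MAC:
+#
+#     h = HMACTSig(b"secret", HMAC_SHA384_192)
+#     h.update(b"message")
+#     len(h.sign())  # -> 24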
+
+
+def _digest(wire, key, rdata, time=None, request_mac=None, ctx=None, multi=None):
+ """Return a context containing the TSIG rdata for the input parameters
+ @rtype: dns.tsig.HMACTSig or dns.tsig.GSSTSig object
+ @raises ValueError: I{other_data} is too long
+ @raises NotImplementedError: I{algorithm} is not supported
+ """
+
+ first = not (ctx and multi)
+ if first:
+ ctx = get_context(key)
+ if request_mac:
+ ctx.update(struct.pack("!H", len(request_mac)))
+ ctx.update(request_mac)
+ assert ctx is not None # for type checkers
+ ctx.update(struct.pack("!H", rdata.original_id))
+ ctx.update(wire[2:])
+ if first:
+ ctx.update(key.name.to_digestable())
+ ctx.update(struct.pack("!H", dns.rdataclass.ANY))
+ ctx.update(struct.pack("!I", 0))
+ if time is None:
+ time = rdata.time_signed
+ upper_time = (time >> 32) & 0xFFFF
+ lower_time = time & 0xFFFFFFFF
+ time_encoded = struct.pack("!HIH", upper_time, lower_time, rdata.fudge)
+ other_len = len(rdata.other)
+ if other_len > 65535:
+ raise ValueError("TSIG Other Data is > 65535 bytes")
+ if first:
+ ctx.update(key.algorithm.to_digestable() + time_encoded)
+ ctx.update(struct.pack("!HH", rdata.error, other_len) + rdata.other)
+ else:
+ ctx.update(time_encoded)
+ return ctx
+
+
+def _maybe_start_digest(key, mac, multi):
+ """If this is the first message in a multi-message sequence,
+ start a new context.
+ @rtype: dns.tsig.HMACTSig or dns.tsig.GSSTSig object
+ """
+ if multi:
+ ctx = get_context(key)
+ ctx.update(struct.pack("!H", len(mac)))
+ ctx.update(mac)
+ return ctx
+ else:
+ return None
+
+
+def sign(wire, key, rdata, time=None, request_mac=None, ctx=None, multi=False):
+    """Return a (tsig_rdata, ctx) tuple containing the TSIG rdata, which
+    carries the MAC calculated by applying the TSIG signature algorithm,
+    and the digest context for the next message in a multi-message sequence
+    (``None`` if *multi* is false).
+    @rtype: (dns.rdata.Rdata, dns.tsig.HMACTSig or dns.tsig.GSSTSig object or None)
+ @raises ValueError: I{other_data} is too long
+ @raises NotImplementedError: I{algorithm} is not supported
+ """
+
+ ctx = _digest(wire, key, rdata, time, request_mac, ctx, multi)
+ mac = ctx.sign()
+ tsig = rdata.replace(time_signed=time, mac=mac)
+
+ return (tsig, _maybe_start_digest(key, mac, multi))
+
+
+def validate(
+ wire, key, owner, rdata, now, request_mac, tsig_start, ctx=None, multi=False
+):
+ """Validate the specified TSIG rdata against the other input parameters.
+
+ @raises FormError: The TSIG is badly formed.
+ @raises BadTime: There is too much time skew between the client and the
+ server.
+ @raises BadSignature: The TSIG signature did not validate
+ @rtype: dns.tsig.HMACTSig or dns.tsig.GSSTSig object"""
+
+ (adcount,) = struct.unpack("!H", wire[10:12])
+ if adcount == 0:
+ raise dns.exception.FormError
+ adcount -= 1
+ new_wire = wire[0:10] + struct.pack("!H", adcount) + wire[12:tsig_start]
+ if rdata.error != 0:
+ if rdata.error == dns.rcode.BADSIG:
+ raise PeerBadSignature
+ elif rdata.error == dns.rcode.BADKEY:
+ raise PeerBadKey
+ elif rdata.error == dns.rcode.BADTIME:
+ raise PeerBadTime
+ elif rdata.error == dns.rcode.BADTRUNC:
+ raise PeerBadTruncation
+ else:
+ raise PeerError(f"unknown TSIG error code {rdata.error}")
+ if abs(rdata.time_signed - now) > rdata.fudge:
+ raise BadTime
+ if key.name != owner:
+ raise BadKey
+ if key.algorithm != rdata.algorithm:
+ raise BadAlgorithm
+ ctx = _digest(new_wire, key, rdata, None, request_mac, ctx, multi)
+ ctx.verify(rdata.mac)
+ return _maybe_start_digest(key, rdata.mac, multi)
+
+
+def get_context(key):
+ """Returns an HMAC context for the specified key.
+
+ @rtype: HMAC context
+ @raises NotImplementedError: I{algorithm} is not supported
+ """
+
+ if key.algorithm == GSS_TSIG:
+ return GSSTSig(key.secret)
+ else:
+ return HMACTSig(key.secret, key.algorithm)
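+
+# Example (illustrative sketch): get_context() dispatches on the key's
+# algorithm:
+#
+#     ctx = get_context(Key("k.", b"secret", HMAC_SHA256))
+#     isinstance(ctx, HMACTSig)  # -> True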
+
+
+class Key:
+ def __init__(
+ self,
+ name: dns.name.Name | str,
+ secret: bytes | str,
+ algorithm: dns.name.Name | str = default_algorithm,
+ ):
+ if isinstance(name, str):
+ name = dns.name.from_text(name)
+ self.name = name
+ if isinstance(secret, str):
+ secret = base64.decodebytes(secret.encode())
+ self.secret = secret
+ if isinstance(algorithm, str):
+ algorithm = dns.name.from_text(algorithm)
+ self.algorithm = algorithm
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, Key)
+ and self.name == other.name
+ and self.secret == other.secret
+ and self.algorithm == other.algorithm
+ )
+
+ def __repr__(self):
+        r = f"<DNS key name='{self.name}', algorithm='{self.algorithm}'>"
+        return r
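+
+# Example (illustrative sketch): secrets given as ``str`` are decoded as
+# base64 text, so these two keys compare equal:
+#
+#     k1 = Key("keyname.", "dGhpcyBpcyBhIHNlY3JldA==")  # b"this is a secret"
+#     k2 = Key("keyname.", b"this is a secret")
+#     k1 == k2  # -> True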
diff --git a/tapdown/lib/python3.11/site-packages/dns/tsigkeyring.py b/tapdown/lib/python3.11/site-packages/dns/tsigkeyring.py
new file mode 100644
index 0000000..5996295
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/tsigkeyring.py
@@ -0,0 +1,68 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""A place to store TSIG keys."""
+
+import base64
+from typing import Any, Dict
+
+import dns.name
+import dns.tsig
+
+
+def from_text(textring: Dict[str, Any]) -> Dict[dns.name.Name, Any]:
+ """Convert a dictionary containing (textual DNS name, base64 secret)
+ pairs into a binary keyring which has (dns.name.Name, bytes) pairs, or
+ a dictionary containing (textual DNS name, (algorithm, base64 secret))
+ pairs into a binary keyring which has (dns.name.Name, dns.tsig.Key) pairs.
+ @rtype: dict"""
+
+ keyring: Dict[dns.name.Name, Any] = {}
+ for name, value in textring.items():
+ kname = dns.name.from_text(name)
+ if isinstance(value, str):
+ keyring[kname] = dns.tsig.Key(kname, value).secret
+ else:
+ (algorithm, secret) = value
+ keyring[kname] = dns.tsig.Key(kname, secret, algorithm)
+ return keyring
+
+
+def to_text(keyring: Dict[dns.name.Name, Any]) -> Dict[str, Any]:
+ """Convert a dictionary containing (dns.name.Name, dns.tsig.Key) pairs
+ into a text keyring which has (textual DNS name, (textual algorithm,
+ base64 secret)) pairs, or a dictionary containing (dns.name.Name, bytes)
+ pairs into a text keyring which has (textual DNS name, base64 secret) pairs.
+ @rtype: dict"""
+
+ textring = {}
+
+ def b64encode(secret):
+ return base64.encodebytes(secret).decode().rstrip()
+
+ for name, key in keyring.items():
+ tname = name.to_text()
+ if isinstance(key, bytes):
+ textring[tname] = b64encode(key)
+ else:
+ if isinstance(key.secret, bytes):
+ text_secret = b64encode(key.secret)
+ else:
+ text_secret = str(key.secret)
+
+ textring[tname] = (key.algorithm.to_text(), text_secret)
+ return textring
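+
+# Example (illustrative sketch): from_text() and to_text() round-trip for
+# plain base64 secrets:
+#
+#     ring = from_text({"keyname.": "dGhpcyBpcyBhIHNlY3JldA=="})
+#     to_text(ring)  # -> {"keyname.": "dGhpcyBpcyBhIHNlY3JldA=="}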
diff --git a/tapdown/lib/python3.11/site-packages/dns/ttl.py b/tapdown/lib/python3.11/site-packages/dns/ttl.py
new file mode 100644
index 0000000..16289cd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/ttl.py
@@ -0,0 +1,90 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS TTL conversion."""
+
+import dns.exception
+
+# Technically TTLs are supposed to be between 0 and 2**31 - 1, with values
+# greater than that interpreted as 0, but we do not impose this policy here
+# as values > 2**31 - 1 occur in real world data.
+#
+# We leave it to applications to impose tighter bounds if desired.
+MAX_TTL = 2**32 - 1
+
+
+class BadTTL(dns.exception.SyntaxError):
+ """DNS TTL value is not well-formed."""
+
+
+def from_text(text: str) -> int:
+ """Convert the text form of a TTL to an integer.
+
+ The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.
+
+ *text*, a ``str``, the textual TTL.
+
+ Raises ``dns.ttl.BadTTL`` if the TTL is not well-formed.
+
+ Returns an ``int``.
+ """
+
+ if text.isdigit():
+ total = int(text)
+ elif len(text) == 0:
+ raise BadTTL
+ else:
+ total = 0
+ current = 0
+ need_digit = True
+ for c in text:
+ if c.isdigit():
+ current *= 10
+ current += int(c)
+ need_digit = False
+ else:
+ if need_digit:
+ raise BadTTL
+ c = c.lower()
+ if c == "w":
+ total += current * 604800
+ elif c == "d":
+ total += current * 86400
+ elif c == "h":
+ total += current * 3600
+ elif c == "m":
+ total += current * 60
+ elif c == "s":
+ total += current
+ else:
+ raise BadTTL(f"unknown unit '{c}'")
+ current = 0
+ need_digit = True
+        if current != 0:
+ raise BadTTL("trailing integer")
+ if total < 0 or total > MAX_TTL:
+ raise BadTTL("TTL should be between 0 and 2**32 - 1 (inclusive)")
+ return total
+
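+# Example (illustrative sketch): units accumulate left to right:
+#
+#     from_text("1w6d4h3m10s")  # -> 604800 + 6*86400 + 4*3600 + 3*60 + 10
+#     from_text("3600")         # -> 3600
+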
+
+def make(value: int | str) -> int:
+ if isinstance(value, int):
+ return value
+ elif isinstance(value, str):
+ return from_text(value)
+ else:
+ raise ValueError("cannot convert value to TTL")
diff --git a/tapdown/lib/python3.11/site-packages/dns/update.py b/tapdown/lib/python3.11/site-packages/dns/update.py
new file mode 100644
index 0000000..0e4aee4
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/update.py
@@ -0,0 +1,389 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Dynamic Update Support"""
+
+from typing import Any, List
+
+import dns.enum
+import dns.exception
+import dns.message
+import dns.name
+import dns.opcode
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.rrset
+import dns.tsig
+
+
+class UpdateSection(dns.enum.IntEnum):
+ """Update sections"""
+
+ ZONE = 0
+ PREREQ = 1
+ UPDATE = 2
+ ADDITIONAL = 3
+
+ @classmethod
+ def _maximum(cls):
+ return 3
+
+
+class UpdateMessage(dns.message.Message): # lgtm[py/missing-equals]
+ # ignore the mypy error here as we mean to use a different enum
+ _section_enum = UpdateSection # type: ignore
+
+ def __init__(
+ self,
+ zone: dns.name.Name | str | None = None,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ keyring: Any | None = None,
+ keyname: dns.name.Name | None = None,
+ keyalgorithm: dns.name.Name | str = dns.tsig.default_algorithm,
+ id: int | None = None,
+ ):
+ """Initialize a new DNS Update object.
+
+ See the documentation of the Message class for a complete
+ description of the keyring dictionary.
+
+ *zone*, a ``dns.name.Name``, ``str``, or ``None``, the zone
+ which is being updated. ``None`` should only be used by dnspython's
+ message constructors, as a zone is required for the convenience
+ methods like ``add()``, ``replace()``, etc.
+
+ *rdclass*, an ``int`` or ``str``, the class of the zone.
+
+ The *keyring*, *keyname*, and *keyalgorithm* parameters are passed to
+ ``use_tsig()``; see its documentation for details.
+ """
+ super().__init__(id=id)
+ self.flags |= dns.opcode.to_flags(dns.opcode.UPDATE)
+ if isinstance(zone, str):
+ zone = dns.name.from_text(zone)
+ self.origin = zone
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ self.zone_rdclass = rdclass
+ if self.origin:
+ self.find_rrset(
+ self.zone,
+ self.origin,
+ rdclass,
+ dns.rdatatype.SOA,
+ create=True,
+ force_unique=True,
+ )
+ if keyring is not None:
+ self.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+
+ @property
+ def zone(self) -> List[dns.rrset.RRset]:
+ """The zone section."""
+ return self.sections[0]
+
+ @zone.setter
+ def zone(self, v):
+ self.sections[0] = v
+
+ @property
+ def prerequisite(self) -> List[dns.rrset.RRset]:
+ """The prerequisite section."""
+ return self.sections[1]
+
+ @prerequisite.setter
+ def prerequisite(self, v):
+ self.sections[1] = v
+
+ @property
+ def update(self) -> List[dns.rrset.RRset]:
+ """The update section."""
+ return self.sections[2]
+
+ @update.setter
+ def update(self, v):
+ self.sections[2] = v
+
+ def _add_rr(self, name, ttl, rd, deleting=None, section=None):
+ """Add a single RR to the update section."""
+
+ if section is None:
+ section = self.update
+ covers = rd.covers()
+ rrset = self.find_rrset(
+ section, name, self.zone_rdclass, rd.rdtype, covers, deleting, True, True
+ )
+ rrset.add(rd, ttl)
+
+ def _add(self, replace, section, name, *args):
+ """Add records.
+
+ *replace* is the replacement mode. If ``False``,
+ RRs are added to an existing RRset; if ``True``, the RRset
+ is replaced with the specified contents. The second
+ argument is the section to add to. The third argument
+ is always a name. The other arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string...
+ """
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ if isinstance(args[0], dns.rdataset.Rdataset):
+ for rds in args:
+ if replace:
+ self.delete(name, rds.rdtype)
+ for rd in rds:
+ self._add_rr(name, rds.ttl, rd, section=section)
+ else:
+ args = list(args)
+ ttl = int(args.pop(0))
+ if isinstance(args[0], dns.rdata.Rdata):
+ if replace:
+ self.delete(name, args[0].rdtype)
+ for rd in args:
+ self._add_rr(name, ttl, rd, section=section)
+ else:
+ rdtype = dns.rdatatype.RdataType.make(args.pop(0))
+ if replace:
+ self.delete(name, rdtype)
+ for s in args:
+ rd = dns.rdata.from_text(self.zone_rdclass, rdtype, s, self.origin)
+ self._add_rr(name, ttl, rd, section=section)
+
+ def add(self, name: dns.name.Name | str, *args: Any) -> None:
+ """Add records.
+
+ The first argument is always a name. The other
+ arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string...
+ """
+
+ self._add(False, self.update, name, *args)
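+
+    # Example (illustrative sketch): the argument forms accepted by add():
+    #
+    #     upd = UpdateMessage("example.")
+    #     upd.add("www", 300, "A", "10.0.0.1")  # ttl, rdtype, string
+    #     upd.add("www", 300, dns.rdata.from_text("IN", "A", "10.0.0.2"))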
+
+ def delete(self, name: dns.name.Name | str, *args: Any) -> None:
+ """Delete records.
+
+ The first argument is always a name. The other
+ arguments can be:
+
+ - *empty*
+
+ - rdataset...
+
+ - rdata...
+
+ - rdtype, [string...]
+ """
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ if len(args) == 0:
+ self.find_rrset(
+ self.update,
+ name,
+ dns.rdataclass.ANY,
+ dns.rdatatype.ANY,
+ dns.rdatatype.NONE,
+ dns.rdataclass.ANY,
+ True,
+ True,
+ )
+ elif isinstance(args[0], dns.rdataset.Rdataset):
+ for rds in args:
+ for rd in rds:
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+ else:
+ largs = list(args)
+ if isinstance(largs[0], dns.rdata.Rdata):
+ for rd in largs:
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+ else:
+ rdtype = dns.rdatatype.RdataType.make(largs.pop(0))
+ if len(largs) == 0:
+ self.find_rrset(
+ self.update,
+ name,
+ self.zone_rdclass,
+ rdtype,
+ dns.rdatatype.NONE,
+ dns.rdataclass.ANY,
+ True,
+ True,
+ )
+ else:
+ for s in largs:
+ rd = dns.rdata.from_text(
+ self.zone_rdclass,
+ rdtype,
+ s, # type: ignore[arg-type]
+ self.origin,
+ )
+ self._add_rr(name, 0, rd, dns.rdataclass.NONE)
+
+ def replace(self, name: dns.name.Name | str, *args: Any) -> None:
+ """Replace records.
+
+ The first argument is always a name. The other
+ arguments can be:
+
+ - rdataset...
+
+ - ttl, rdata...
+
+ - ttl, rdtype, string...
+
+ Note that if you want to replace the entire node, you should do
+ a delete of the name followed by one or more calls to add.
+ """
+
+ self._add(True, self.update, name, *args)
+
+ def present(self, name: dns.name.Name | str, *args: Any) -> None:
+ """Require that an owner name (and optionally an rdata type,
+ or specific rdataset) exists as a prerequisite to the
+ execution of the update.
+
+ The first argument is always a name.
+ The other arguments can be:
+
+ - rdataset...
+
+ - rdata...
+
+ - rdtype, string...
+ """
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ if len(args) == 0:
+ self.find_rrset(
+ self.prerequisite,
+ name,
+ dns.rdataclass.ANY,
+ dns.rdatatype.ANY,
+ dns.rdatatype.NONE,
+ None,
+ True,
+ True,
+ )
+ elif (
+ isinstance(args[0], dns.rdataset.Rdataset)
+ or isinstance(args[0], dns.rdata.Rdata)
+ or len(args) > 1
+ ):
+ if not isinstance(args[0], dns.rdataset.Rdataset):
+ # Add a 0 TTL
+ largs = list(args)
+ largs.insert(0, 0) # type: ignore[arg-type]
+ self._add(False, self.prerequisite, name, *largs)
+ else:
+ self._add(False, self.prerequisite, name, *args)
+ else:
+ rdtype = dns.rdatatype.RdataType.make(args[0])
+ self.find_rrset(
+ self.prerequisite,
+ name,
+ dns.rdataclass.ANY,
+ rdtype,
+ dns.rdatatype.NONE,
+ None,
+ True,
+ True,
+ )
+
+ def absent(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str | None = None,
+ ) -> None:
+ """Require that an owner name (and optionally an rdata type) does
+ not exist as a prerequisite to the execution of the update."""
+
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ if rdtype is None:
+ self.find_rrset(
+ self.prerequisite,
+ name,
+ dns.rdataclass.NONE,
+ dns.rdatatype.ANY,
+ dns.rdatatype.NONE,
+ None,
+ True,
+ True,
+ )
+ else:
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ self.find_rrset(
+ self.prerequisite,
+ name,
+ dns.rdataclass.NONE,
+ rdtype,
+ dns.rdatatype.NONE,
+ None,
+ True,
+ True,
+ )
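+
+    # Example (illustrative sketch): prerequisites gate the whole update;
+    # here the A record is only added if the name does not exist yet:
+    #
+    #     upd = UpdateMessage("example.")
+    #     upd.absent("www")
+    #     upd.add("www", 300, "A", "10.0.0.1")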
+
+ def _get_one_rr_per_rrset(self, value):
+ # Updates are always one_rr_per_rrset
+ return True
+
+ def _parse_rr_header(self, section, name, rdclass, rdtype): # pyright: ignore
+ deleting = None
+ empty = False
+ if section == UpdateSection.ZONE:
+ if (
+ dns.rdataclass.is_metaclass(rdclass)
+ or rdtype != dns.rdatatype.SOA
+ or self.zone
+ ):
+ raise dns.exception.FormError
+ else:
+ if not self.zone:
+ raise dns.exception.FormError
+ if rdclass in (dns.rdataclass.ANY, dns.rdataclass.NONE):
+ deleting = rdclass
+ rdclass = self.zone[0].rdclass
+ empty = (
+ deleting == dns.rdataclass.ANY or section == UpdateSection.PREREQ
+ )
+ return (rdclass, rdtype, deleting, empty)
+
+
+# backwards compatibility
+Update = UpdateMessage
+
+### BEGIN generated UpdateSection constants
+
+ZONE = UpdateSection.ZONE
+PREREQ = UpdateSection.PREREQ
+UPDATE = UpdateSection.UPDATE
+ADDITIONAL = UpdateSection.ADDITIONAL
+
+### END generated UpdateSection constants
diff --git a/tapdown/lib/python3.11/site-packages/dns/version.py b/tapdown/lib/python3.11/site-packages/dns/version.py
new file mode 100644
index 0000000..e11dd29
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/version.py
@@ -0,0 +1,42 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""dnspython release version information."""
+
+#: MAJOR
+MAJOR = 2
+#: MINOR
+MINOR = 8
+#: MICRO
+MICRO = 0
+#: RELEASELEVEL
+RELEASELEVEL = 0x0F
+#: SERIAL
+SERIAL = 0
+
+if RELEASELEVEL == 0x0F: # pragma: no cover lgtm[py/unreachable-statement]
+ #: version
+ version = f"{MAJOR}.{MINOR}.{MICRO}" # lgtm[py/unreachable-statement]
+elif RELEASELEVEL == 0x00: # pragma: no cover lgtm[py/unreachable-statement]
+ version = f"{MAJOR}.{MINOR}.{MICRO}dev{SERIAL}" # lgtm[py/unreachable-statement]
+elif RELEASELEVEL == 0x0C: # pragma: no cover lgtm[py/unreachable-statement]
+ version = f"{MAJOR}.{MINOR}.{MICRO}rc{SERIAL}" # lgtm[py/unreachable-statement]
+else: # pragma: no cover lgtm[py/unreachable-statement]
+ version = f"{MAJOR}.{MINOR}.{MICRO}{RELEASELEVEL:x}{SERIAL}" # lgtm[py/unreachable-statement]
+
+#: hexversion
+hexversion = MAJOR << 24 | MINOR << 16 | MICRO << 8 | RELEASELEVEL << 4 | SERIAL
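+
+# Example (illustrative): for the 2.8.0 final release above,
+# hexversion == 2 << 24 | 8 << 16 | 0 << 8 | 0x0F << 4 | 0 == 0x020800F0.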
diff --git a/tapdown/lib/python3.11/site-packages/dns/versioned.py b/tapdown/lib/python3.11/site-packages/dns/versioned.py
new file mode 100644
index 0000000..3644711
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/versioned.py
@@ -0,0 +1,320 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+"""DNS Versioned Zones."""
+
+import collections
+import threading
+from typing import Callable, Deque, Set, cast
+
+import dns.exception
+import dns.name
+import dns.node
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.rdtypes.ANY.SOA
+import dns.zone
+
+
+class UseTransaction(dns.exception.DNSException):
+ """To alter a versioned zone, use a transaction."""
+
+
+# Backwards compatibility
+Node = dns.zone.VersionedNode
+ImmutableNode = dns.zone.ImmutableVersionedNode
+Version = dns.zone.Version
+WritableVersion = dns.zone.WritableVersion
+ImmutableVersion = dns.zone.ImmutableVersion
+Transaction = dns.zone.Transaction
+
+
+class Zone(dns.zone.Zone): # lgtm[py/missing-equals]
+ __slots__ = [
+ "_versions",
+        "_version_lock",
+ "_write_txn",
+ "_write_waiters",
+ "_write_event",
+ "_pruning_policy",
+ "_readers",
+ ]
+
+ node_factory: Callable[[], dns.node.Node] = Node
+
+ def __init__(
+ self,
+ origin: dns.name.Name | str | None,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ relativize: bool = True,
+ pruning_policy: Callable[["Zone", Version], bool | None] | None = None,
+ ):
+ """Initialize a versioned zone object.
+
+ *origin* is the origin of the zone. It may be a ``dns.name.Name``,
+ a ``str``, or ``None``. If ``None``, then the zone's origin will
+ be set by the first ``$ORIGIN`` line in a zone file.
+
+ *rdclass*, an ``int``, the zone's rdata class; the default is class IN.
+
+        *relativize*, a ``bool``, determines whether domain names are
+ relativized to the zone's origin. The default is ``True``.
+
+        *pruning_policy*, a function taking a ``Zone`` and a ``Version`` and
+        returning a ``bool`` (should the version be pruned?), or ``None``.
+        If ``None``, the default policy, which retains only the most recent
+        version, is used.
+ """
+ super().__init__(origin, rdclass, relativize)
+ self._versions: Deque[Version] = collections.deque()
+ self._version_lock = threading.Lock()
+ if pruning_policy is None:
+ self._pruning_policy = self._default_pruning_policy
+ else:
+ self._pruning_policy = pruning_policy
+ self._write_txn: Transaction | None = None
+ self._write_event: threading.Event | None = None
+ self._write_waiters: Deque[threading.Event] = collections.deque()
+ self._readers: Set[Transaction] = set()
+ self._commit_version_unlocked(
+ None, WritableVersion(self, replacement=True), origin
+ )
+
+ def reader(
+ self, id: int | None = None, serial: int | None = None
+ ) -> Transaction: # pylint: disable=arguments-differ
+ if id is not None and serial is not None:
+ raise ValueError("cannot specify both id and serial")
+ with self._version_lock:
+ if id is not None:
+ version = None
+ for v in reversed(self._versions):
+ if v.id == id:
+ version = v
+ break
+ if version is None:
+ raise KeyError("version not found")
+ elif serial is not None:
+ if self.relativize:
+ oname = dns.name.empty
+ else:
+ assert self.origin is not None
+ oname = self.origin
+ version = None
+ for v in reversed(self._versions):
+ n = v.nodes.get(oname)
+ if n:
+ rds = n.get_rdataset(self.rdclass, dns.rdatatype.SOA)
+                        if not rds:
+                            continue
+                        soa = cast(dns.rdtypes.ANY.SOA.SOA, rds[0])
+                        if soa.serial == serial:
+ version = v
+ break
+ if version is None:
+ raise KeyError("serial not found")
+ else:
+ version = self._versions[-1]
+ txn = Transaction(self, False, version)
+ self._readers.add(txn)
+ return txn
+
+ def writer(self, replacement: bool = False) -> Transaction:
+ event = None
+ while True:
+ with self._version_lock:
+ # Checking event == self._write_event ensures that either
+ # no one was waiting before we got lucky and found no write
+ # txn, or we were the one who was waiting and got woken up.
+ # This prevents "taking cuts" when creating a write txn.
+ if self._write_txn is None and event == self._write_event:
+ # Creating the transaction defers version setup
+ # (i.e. copying the nodes dictionary) until we
+ # give up the lock, so that we hold the lock as
+ # short a time as possible. This is why we call
+ # _setup_version() below.
+ self._write_txn = Transaction(
+ self, replacement, make_immutable=True
+ )
+ # give up our exclusive right to make a Transaction
+ self._write_event = None
+ break
+ # Someone else is writing already, so we will have to
+ # wait, but we want to do the actual wait outside the
+ # lock.
+ event = threading.Event()
+ self._write_waiters.append(event)
+ # wait (note we gave up the lock!)
+ #
+ # We only wake one sleeper at a time, so it's important
+ # that no event waiter can exit this method (e.g. via
+ # cancellation) without returning a transaction or waking
+ # someone else up.
+ #
+ # This is not a problem with Threading module threads as
+ # they cannot be canceled, but could be an issue with trio
+ # tasks when we do the async version of writer().
+ # I.e. we'd need to do something like:
+ #
+ # try:
+ # event.wait()
+ # except trio.Cancelled:
+ # with self._version_lock:
+ # self._maybe_wakeup_one_waiter_unlocked()
+ # raise
+ #
+ event.wait()
+ # Do the deferred version setup.
+ self._write_txn._setup_version()
+ return self._write_txn
+
+ def _maybe_wakeup_one_waiter_unlocked(self):
+ if len(self._write_waiters) > 0:
+ self._write_event = self._write_waiters.popleft()
+ self._write_event.set()
+
+ # pylint: disable=unused-argument
+ def _default_pruning_policy(self, zone, version):
+ return True
+
+ # pylint: enable=unused-argument
+
+ def _prune_versions_unlocked(self):
+ assert len(self._versions) > 0
+ # Don't ever prune a version greater than or equal to one that
+ # a reader has open. This pins versions in memory while the
+ # reader is open, and importantly lets the reader open a txn on
+ # a successor version (e.g. if generating an IXFR).
+ #
+ # Note our definition of least_kept also ensures we do not try to
+ # delete the greatest version.
+ if len(self._readers) > 0:
+ least_kept = min(txn.version.id for txn in self._readers) # pyright: ignore
+ else:
+ least_kept = self._versions[-1].id
+ while self._versions[0].id < least_kept and self._pruning_policy(
+ self, self._versions[0]
+ ):
+ self._versions.popleft()
+
+ def set_max_versions(self, max_versions: int | None) -> None:
+ """Set a pruning policy that retains up to the specified number
+        of versions.
+ """
+ if max_versions is not None and max_versions < 1:
+ raise ValueError("max versions must be at least 1")
+ if max_versions is None:
+ # pylint: disable=unused-argument
+ def policy(zone, _): # pyright: ignore
+ return False
+
+ else:
+
+ def policy(zone, _):
+ return len(zone._versions) > max_versions
+
+ self.set_pruning_policy(policy)
+
+ def set_pruning_policy(
+ self, policy: Callable[["Zone", Version], bool | None] | None
+ ) -> None:
+ """Set the pruning policy for the zone.
+
+        The *policy* function takes a `Zone` and a `Version` and returns
+        `True` if the version should be pruned, and `False` otherwise.
+        `None` may also be specified for policy, in which case the default
+        policy is used.
+
+ Pruning checking proceeds from the least version and the first
+ time the function returns `False`, the checking stops. I.e. the
+ retained versions are always a consecutive sequence.
+ """
+ if policy is None:
+ policy = self._default_pruning_policy
+ with self._version_lock:
+ self._pruning_policy = policy
+ self._prune_versions_unlocked()
+
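+    # Illustrative use (a sketch): ``zone.set_max_versions(5)`` installs a
+    # policy equivalent to
+    #
+    #     zone.set_pruning_policy(lambda zone, _: len(zone._versions) > 5)
+    #
+    # Pruning proceeds from the oldest version and stops at the first one
+    # the policy declines, so the retained versions form a consecutive run
+    # ending at the newest version.
+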
+ def _end_read(self, txn):
+ with self._version_lock:
+ self._readers.remove(txn)
+ self._prune_versions_unlocked()
+
+ def _end_write_unlocked(self, txn):
+ assert self._write_txn == txn
+ self._write_txn = None
+ self._maybe_wakeup_one_waiter_unlocked()
+
+ def _end_write(self, txn):
+ with self._version_lock:
+ self._end_write_unlocked(txn)
+
+ def _commit_version_unlocked(self, txn, version, origin):
+ self._versions.append(version)
+ self._prune_versions_unlocked()
+ self.nodes = version.nodes
+ if self.origin is None:
+ self.origin = origin
+ # txn can be None in __init__ when we make the empty version.
+ if txn is not None:
+ self._end_write_unlocked(txn)
+
+ def _commit_version(self, txn, version, origin):
+ with self._version_lock:
+ self._commit_version_unlocked(txn, version, origin)
+
+ def _get_next_version_id(self):
+ if len(self._versions) > 0:
+ id = self._versions[-1].id + 1
+ else:
+ id = 1
+ return id
+
+ def find_node(
+ self, name: dns.name.Name | str, create: bool = False
+ ) -> dns.node.Node:
+ if create:
+ raise UseTransaction
+ return super().find_node(name)
+
+ def delete_node(self, name: dns.name.Name | str) -> None:
+ raise UseTransaction
+
+ def find_rdataset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset:
+ if create:
+ raise UseTransaction
+ rdataset = super().find_rdataset(name, rdtype, covers)
+ return dns.rdataset.ImmutableRdataset(rdataset)
+
+ def get_rdataset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset | None:
+ if create:
+ raise UseTransaction
+ rdataset = super().get_rdataset(name, rdtype, covers)
+ if rdataset is not None:
+ return dns.rdataset.ImmutableRdataset(rdataset)
+ else:
+ return None
+
+ def delete_rdataset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ ) -> None:
+ raise UseTransaction
+
+ def replace_rdataset(
+ self, name: dns.name.Name | str, replacement: dns.rdataset.Rdataset
+ ) -> None:
+ raise UseTransaction
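+
+
+# Usage sketch (illustrative; assumes a zone built elsewhere, e.g. with
+# dns.zone.from_text(..., zone_factory=dns.versioned.Zone)):
+#
+#     with zone.writer() as txn:
+#         txn.add("www", 300, dns.rdata.from_text("IN", "A", "10.0.0.1"))
+#     with zone.reader() as txn:
+#         rdataset = txn.get("www", "A")
+#
+# Each committed writer produces a new immutable Version; open readers pin
+# the versions at or above the ones they hold, so pruning never discards a
+# version that is still visible to a reader.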
diff --git a/tapdown/lib/python3.11/site-packages/dns/win32util.py b/tapdown/lib/python3.11/site-packages/dns/win32util.py
new file mode 100644
index 0000000..2d77b4c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/win32util.py
@@ -0,0 +1,438 @@
+import sys
+
+import dns._features
+
+# pylint: disable=W0612,W0613,C0301
+
+if sys.platform == "win32":
+ import ctypes
+ import ctypes.wintypes as wintypes
+ import winreg # pylint: disable=import-error
+ from enum import IntEnum
+
+ import dns.name
+
+ # Keep pylint quiet on non-windows.
+ try:
+ _ = WindowsError # pylint: disable=used-before-assignment
+ except NameError:
+ WindowsError = Exception
+
+ class ConfigMethod(IntEnum):
+ Registry = 1
+ WMI = 2
+ Win32 = 3
+
+ class DnsInfo:
+ def __init__(self):
+ self.domain = None
+ self.nameservers = []
+ self.search = []
+
+ _config_method = ConfigMethod.Registry
+
+ if dns._features.have("wmi"):
+ import threading
+
+ import pythoncom # pylint: disable=import-error
+ import wmi # pylint: disable=import-error
+
+ # Prefer WMI by default if wmi is installed.
+ _config_method = ConfigMethod.WMI
+
+ class _WMIGetter(threading.Thread):
+ # pylint: disable=possibly-used-before-assignment
+ def __init__(self):
+ super().__init__()
+ self.info = DnsInfo()
+
+ def run(self):
+ pythoncom.CoInitialize()
+ try:
+ system = wmi.WMI()
+ for interface in system.Win32_NetworkAdapterConfiguration():
+ if interface.IPEnabled and interface.DNSServerSearchOrder:
+ self.info.nameservers = list(interface.DNSServerSearchOrder)
+ if interface.DNSDomain:
+ self.info.domain = _config_domain(interface.DNSDomain)
+ if interface.DNSDomainSuffixSearchOrder:
+ self.info.search = [
+ _config_domain(x)
+ for x in interface.DNSDomainSuffixSearchOrder
+ ]
+ break
+ finally:
+ pythoncom.CoUninitialize()
+
+ def get(self):
+ # We always run in a separate thread to avoid any issues with
+ # the COM threading model.
+ self.start()
+ self.join()
+ return self.info
+
+ else:
+
+ class _WMIGetter: # type: ignore
+ pass
+
+ def _config_domain(domain):
+ # Sometimes DHCP servers add a '.' prefix to the default domain, and
+ # Windows just stores such values in the registry (see #687).
+ # Check for this and fix it.
+ if domain.startswith("."):
+ domain = domain[1:]
+ return dns.name.from_text(domain)
+
+ class _RegistryGetter:
+ def __init__(self):
+ self.info = DnsInfo()
+
+ def _split(self, text):
+ # The windows registry has used both " " and "," as a delimiter, and while
+ # it is currently using "," in Windows 10 and later, updates can seemingly
+ # leave a space in too, e.g. "a, b". So we just convert all commas to
+ # spaces, and use split() in its default configuration, which splits on
+ # all whitespace and ignores empty strings.
+ return text.replace(",", " ").split()
+
+ def _config_nameservers(self, nameservers):
+ for ns in self._split(nameservers):
+ if ns not in self.info.nameservers:
+ self.info.nameservers.append(ns)
+
+ def _config_search(self, search):
+ for s in self._split(search):
+ s = _config_domain(s)
+ if s not in self.info.search:
+ self.info.search.append(s)
+
+ def _config_fromkey(self, key, always_try_domain):
+ try:
+ servers, _ = winreg.QueryValueEx(key, "NameServer")
+ except WindowsError:
+ servers = None
+ if servers:
+ self._config_nameservers(servers)
+ if servers or always_try_domain:
+ try:
+ dom, _ = winreg.QueryValueEx(key, "Domain")
+ if dom:
+ self.info.domain = _config_domain(dom)
+ except WindowsError:
+ pass
+ else:
+ try:
+ servers, _ = winreg.QueryValueEx(key, "DhcpNameServer")
+ except WindowsError:
+ servers = None
+ if servers:
+ self._config_nameservers(servers)
+ try:
+ dom, _ = winreg.QueryValueEx(key, "DhcpDomain")
+ if dom:
+ self.info.domain = _config_domain(dom)
+ except WindowsError:
+ pass
+ try:
+ search, _ = winreg.QueryValueEx(key, "SearchList")
+ except WindowsError:
+ search = None
+ if search is None:
+ try:
+ search, _ = winreg.QueryValueEx(key, "DhcpSearchList")
+ except WindowsError:
+ search = None
+ if search:
+ self._config_search(search)
+
+ def _is_nic_enabled(self, lm, guid):
+ # Look in the Windows Registry to determine whether the network
+ # interface corresponding to the given guid is enabled.
+ #
+ # (Code contributed by Paul Marks, thanks!)
+ #
+ try:
+ # This hard-coded location seems to be consistent, at least
+ # from Windows 2000 through Vista.
+ connection_key = winreg.OpenKey(
+ lm,
+ r"SYSTEM\CurrentControlSet\Control\Network"
+ r"\{4D36E972-E325-11CE-BFC1-08002BE10318}"
+ rf"\{guid}\Connection",
+ )
+
+ try:
+ # The PnpInstanceID points to a key inside Enum
+ (pnp_id, ttype) = winreg.QueryValueEx(
+ connection_key, "PnpInstanceID"
+ )
+
+ if ttype != winreg.REG_SZ:
+ raise ValueError # pragma: no cover
+
+ device_key = winreg.OpenKey(
+ lm, rf"SYSTEM\CurrentControlSet\Enum\{pnp_id}"
+ )
+
+ try:
+ # Get ConfigFlags for this device
+ (flags, ttype) = winreg.QueryValueEx(device_key, "ConfigFlags")
+
+ if ttype != winreg.REG_DWORD:
+ raise ValueError # pragma: no cover
+
+ # Based on experimentation, bit 0x1 indicates that the
+ # device is disabled.
+ #
+ # XXXRTH I suspect we really want to & with 0x03 so
+ # that CONFIGFLAGS_REMOVED devices are also ignored,
+ # but we're shifting to WMI as ConfigFlags is not
+ # supposed to be used.
+ return not flags & 0x1
+
+ finally:
+ device_key.Close()
+ finally:
+ connection_key.Close()
+ except Exception: # pragma: no cover
+ return False
+
+ def get(self):
+ """Extract resolver configuration from the Windows registry."""
+
+ lm = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
+ try:
+ tcp_params = winreg.OpenKey(
+ lm, r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters"
+ )
+ try:
+ self._config_fromkey(tcp_params, True)
+ finally:
+ tcp_params.Close()
+ interfaces = winreg.OpenKey(
+ lm,
+ r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces",
+ )
+ try:
+ i = 0
+ while True:
+ try:
+ guid = winreg.EnumKey(interfaces, i)
+ i += 1
+ key = winreg.OpenKey(interfaces, guid)
+ try:
+ if not self._is_nic_enabled(lm, guid):
+ continue
+ self._config_fromkey(key, False)
+ finally:
+ key.Close()
+ except OSError:
+ break
+ finally:
+ interfaces.Close()
+ finally:
+ lm.Close()
+ return self.info
+
+ class _Win32Getter(_RegistryGetter):
+
+ def get(self):
+ """Get the attributes using the Windows API."""
+ # Load the IP Helper library
+            # https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses
+ IPHLPAPI = ctypes.WinDLL("Iphlpapi.dll")
+
+ # Constants
+ AF_UNSPEC = 0
+ ERROR_SUCCESS = 0
+ GAA_FLAG_INCLUDE_PREFIX = 0x00000010
+ AF_INET = 2
+ AF_INET6 = 23
+ IF_TYPE_SOFTWARE_LOOPBACK = 24
+
+ # Define necessary structures
+ class SOCKADDRV4(ctypes.Structure):
+ _fields_ = [
+ ("sa_family", wintypes.USHORT),
+ ("sa_data", ctypes.c_ubyte * 14),
+ ]
+
+ class SOCKADDRV6(ctypes.Structure):
+ _fields_ = [
+ ("sa_family", wintypes.USHORT),
+ ("sa_data", ctypes.c_ubyte * 26),
+ ]
+
+ class SOCKET_ADDRESS(ctypes.Structure):
+ _fields_ = [
+ ("lpSockaddr", ctypes.POINTER(SOCKADDRV4)),
+ ("iSockaddrLength", wintypes.INT),
+ ]
+
+ class IP_ADAPTER_DNS_SERVER_ADDRESS(ctypes.Structure):
+ pass # Forward declaration
+
+ IP_ADAPTER_DNS_SERVER_ADDRESS._fields_ = [
+ ("Length", wintypes.ULONG),
+ ("Reserved", wintypes.DWORD),
+ ("Next", ctypes.POINTER(IP_ADAPTER_DNS_SERVER_ADDRESS)),
+ ("Address", SOCKET_ADDRESS),
+ ]
+
+ class IF_LUID(ctypes.Structure):
+ _fields_ = [("Value", ctypes.c_ulonglong)]
+
+ class NET_IF_NETWORK_GUID(ctypes.Structure):
+ _fields_ = [("Value", ctypes.c_ubyte * 16)]
+
+ class IP_ADAPTER_PREFIX_XP(ctypes.Structure):
+ pass # Left undefined here for simplicity
+
+ class IP_ADAPTER_GATEWAY_ADDRESS_LH(ctypes.Structure):
+ pass # Left undefined here for simplicity
+
+ class IP_ADAPTER_DNS_SUFFIX(ctypes.Structure):
+ _fields_ = [
+ ("String", ctypes.c_wchar * 256),
+ ("Next", ctypes.POINTER(ctypes.c_void_p)),
+ ]
+
+ class IP_ADAPTER_UNICAST_ADDRESS_LH(ctypes.Structure):
+ pass # Left undefined here for simplicity
+
+ class IP_ADAPTER_MULTICAST_ADDRESS_XP(ctypes.Structure):
+ pass # Left undefined here for simplicity
+
+ class IP_ADAPTER_ANYCAST_ADDRESS_XP(ctypes.Structure):
+ pass # Left undefined here for simplicity
+
+ class IP_ADAPTER_DNS_SERVER_ADDRESS_XP(ctypes.Structure):
+ pass # Left undefined here for simplicity
+
+ class IP_ADAPTER_ADDRESSES(ctypes.Structure):
+ pass # Forward declaration
+
+ IP_ADAPTER_ADDRESSES._fields_ = [
+ ("Length", wintypes.ULONG),
+ ("IfIndex", wintypes.DWORD),
+ ("Next", ctypes.POINTER(IP_ADAPTER_ADDRESSES)),
+ ("AdapterName", ctypes.c_char_p),
+ ("FirstUnicastAddress", ctypes.POINTER(SOCKET_ADDRESS)),
+ ("FirstAnycastAddress", ctypes.POINTER(SOCKET_ADDRESS)),
+ ("FirstMulticastAddress", ctypes.POINTER(SOCKET_ADDRESS)),
+ (
+ "FirstDnsServerAddress",
+ ctypes.POINTER(IP_ADAPTER_DNS_SERVER_ADDRESS),
+ ),
+ ("DnsSuffix", wintypes.LPWSTR),
+ ("Description", wintypes.LPWSTR),
+ ("FriendlyName", wintypes.LPWSTR),
+ ("PhysicalAddress", ctypes.c_ubyte * 8),
+ ("PhysicalAddressLength", wintypes.ULONG),
+ ("Flags", wintypes.ULONG),
+ ("Mtu", wintypes.ULONG),
+ ("IfType", wintypes.ULONG),
+ ("OperStatus", ctypes.c_uint),
+ # Remaining fields removed for brevity
+ ]
+
+ def format_ipv4(sockaddr_in):
+ return ".".join(map(str, sockaddr_in.sa_data[2:6]))
+
+ def format_ipv6(sockaddr_in6):
+ # The sa_data is:
+ #
+ # USHORT sin6_port;
+ # ULONG sin6_flowinfo;
+ # IN6_ADDR sin6_addr;
+ # ULONG sin6_scope_id;
+ #
+ # which is 2 + 4 + 16 + 4 = 26 bytes, and we need the plus 6 below
+ # to be in the sin6_addr range.
+ parts = [
+ sockaddr_in6.sa_data[i + 6] << 8 | sockaddr_in6.sa_data[i + 6 + 1]
+ for i in range(0, 16, 2)
+ ]
+ return ":".join(f"{part:04x}" for part in parts)
+
+ buffer_size = ctypes.c_ulong(15000)
+ while True:
+ buffer = ctypes.create_string_buffer(buffer_size.value)
+
+ ret_val = IPHLPAPI.GetAdaptersAddresses(
+ AF_UNSPEC,
+ GAA_FLAG_INCLUDE_PREFIX,
+ None,
+ buffer,
+ ctypes.byref(buffer_size),
+ )
+
+ if ret_val == ERROR_SUCCESS:
+ break
+ elif ret_val != 0x6F: # ERROR_BUFFER_OVERFLOW
+ print(f"Error retrieving adapter information: {ret_val}")
+ return
+
+ adapter_addresses = ctypes.cast(
+ buffer, ctypes.POINTER(IP_ADAPTER_ADDRESSES)
+ )
+
+ current_adapter = adapter_addresses
+ while current_adapter:
+
+ # Skip non-operational adapters.
+ oper_status = current_adapter.contents.OperStatus
+ if oper_status != 1:
+ current_adapter = current_adapter.contents.Next
+ continue
+
+ # Exclude loopback adapters.
+ if current_adapter.contents.IfType == IF_TYPE_SOFTWARE_LOOPBACK:
+ current_adapter = current_adapter.contents.Next
+ continue
+
+ # Get the domain from the DnsSuffix attribute.
+ dns_suffix = current_adapter.contents.DnsSuffix
+ if dns_suffix:
+ self.info.domain = dns.name.from_text(dns_suffix)
+
+ current_dns_server = current_adapter.contents.FirstDnsServerAddress
+ while current_dns_server:
+ sockaddr = current_dns_server.contents.Address.lpSockaddr
+ sockaddr_family = sockaddr.contents.sa_family
+
+ ip = None
+ if sockaddr_family == AF_INET: # IPv4
+ ip = format_ipv4(sockaddr.contents)
+ elif sockaddr_family == AF_INET6: # IPv6
+ sockaddr = ctypes.cast(sockaddr, ctypes.POINTER(SOCKADDRV6))
+ ip = format_ipv6(sockaddr.contents)
+
+ if ip:
+ if ip not in self.info.nameservers:
+ self.info.nameservers.append(ip)
+
+ current_dns_server = current_dns_server.contents.Next
+
+ current_adapter = current_adapter.contents.Next
+
+ # Use the registry getter to get the search info, since it is set at the system level.
+ registry_getter = _RegistryGetter()
+ info = registry_getter.get()
+ self.info.search = info.search
+ return self.info
+
+ def set_config_method(method: ConfigMethod) -> None:
+ global _config_method
+ _config_method = method
+
+ def get_dns_info() -> DnsInfo:
+ """Extract resolver configuration."""
+ if _config_method == ConfigMethod.Win32:
+ getter = _Win32Getter()
+ elif _config_method == ConfigMethod.WMI:
+ getter = _WMIGetter()
+ else:
+ getter = _RegistryGetter()
+ return getter.get()
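+
+    # Usage sketch (illustrative): callers such as the stub resolver can do
+    #
+    #     info = get_dns_info()
+    #     nameservers, search = info.nameservers, info.search
+    #
+    # and can call set_config_method(ConfigMethod.Registry) to force the
+    # registry reader even when the optional wmi package is installed.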
diff --git a/tapdown/lib/python3.11/site-packages/dns/wire.py b/tapdown/lib/python3.11/site-packages/dns/wire.py
new file mode 100644
index 0000000..cd027fa
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/wire.py
@@ -0,0 +1,98 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+import contextlib
+import struct
+from typing import Iterator, Optional, Tuple
+
+import dns.exception
+import dns.name
+
+
+class Parser:
+ """Helper class for parsing DNS wire format."""
+
+ def __init__(self, wire: bytes, current: int = 0):
+ """Initialize a Parser
+
+ *wire*, a ``bytes`` contains the data to be parsed, and possibly other data.
+ Typically it is the whole message or a slice of it.
+
+ *current*, an `int`, the offset within *wire* where parsing should begin.
+ """
+ self.wire = wire
+ self.current = 0
+ self.end = len(self.wire)
+ if current:
+ self.seek(current)
+ self.furthest = current
+
+ def remaining(self) -> int:
+ return self.end - self.current
+
+ def get_bytes(self, size: int) -> bytes:
+ assert size >= 0
+ if size > self.remaining():
+ raise dns.exception.FormError
+ output = self.wire[self.current : self.current + size]
+ self.current += size
+ self.furthest = max(self.furthest, self.current)
+ return output
+
+ def get_counted_bytes(self, length_size: int = 1) -> bytes:
+ length = int.from_bytes(self.get_bytes(length_size), "big")
+ return self.get_bytes(length)
+
+ def get_remaining(self) -> bytes:
+ return self.get_bytes(self.remaining())
+
+ def get_uint8(self) -> int:
+ return struct.unpack("!B", self.get_bytes(1))[0]
+
+ def get_uint16(self) -> int:
+ return struct.unpack("!H", self.get_bytes(2))[0]
+
+ def get_uint32(self) -> int:
+ return struct.unpack("!I", self.get_bytes(4))[0]
+
+ def get_uint48(self) -> int:
+ return int.from_bytes(self.get_bytes(6), "big")
+
+ def get_struct(self, format: str) -> Tuple:
+ return struct.unpack(format, self.get_bytes(struct.calcsize(format)))
+
+ def get_name(self, origin: Optional["dns.name.Name"] = None) -> "dns.name.Name":
+ name = dns.name.from_wire_parser(self)
+ if origin:
+ name = name.relativize(origin)
+ return name
+
+ def seek(self, where: int) -> None:
+ # Note that seeking to the end is OK! (If you try to read
+ # after such a seek, you'll get an exception as expected.)
+ if where < 0 or where > self.end:
+ raise dns.exception.FormError
+ self.current = where
+
+ @contextlib.contextmanager
+ def restrict_to(self, size: int) -> Iterator:
+ assert size >= 0
+ if size > self.remaining():
+ raise dns.exception.FormError
+ saved_end = self.end
+ try:
+ self.end = self.current + size
+ yield
+ # We make this check here and not in the finally as we
+ # don't want to raise if we're already raising for some
+ # other reason.
+ if self.current != self.end:
+ raise dns.exception.FormError
+ finally:
+ self.end = saved_end
+
+ @contextlib.contextmanager
+ def restore_furthest(self) -> Iterator:
+ try:
+ yield None
+ finally:
+ self.current = self.furthest
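+
+
+# Usage sketch (illustrative): parse a type code and a length-prefixed
+# value from wire data.
+#
+#     parser = Parser(b"\x00\x01\x03foo")
+#     rdtype = parser.get_uint16()       # 1
+#     data = parser.get_counted_bytes()  # b"foo"
+#     assert parser.remaining() == 0
+#
+# restrict_to() is used the same way while parsing rdata: it temporarily
+# narrows *end* so reads cannot run past the current record, and raises
+# FormError if the record is not fully consumed.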
diff --git a/tapdown/lib/python3.11/site-packages/dns/xfr.py b/tapdown/lib/python3.11/site-packages/dns/xfr.py
new file mode 100644
index 0000000..219fdc8
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/xfr.py
@@ -0,0 +1,356 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2017 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from typing import Any, List, Tuple, cast
+
+import dns.edns
+import dns.exception
+import dns.message
+import dns.name
+import dns.rcode
+import dns.rdata
+import dns.rdataset
+import dns.rdatatype
+import dns.rdtypes
+import dns.rdtypes.ANY
+import dns.rdtypes.ANY.SMIMEA
+import dns.rdtypes.ANY.SOA
+import dns.rdtypes.svcbbase
+import dns.serial
+import dns.transaction
+import dns.tsig
+import dns.zone
+
+
+class TransferError(dns.exception.DNSException):
+ """A zone transfer response got a non-zero rcode."""
+
+ def __init__(self, rcode):
+ message = f"Zone transfer error: {dns.rcode.to_text(rcode)}"
+ super().__init__(message)
+ self.rcode = rcode
+
+
+class SerialWentBackwards(dns.exception.FormError):
+ """The current serial number is less than the serial we know."""
+
+
+class UseTCP(dns.exception.DNSException):
+ """This IXFR cannot be completed with UDP."""
+
+
+class Inbound:
+ """
+ State machine for zone transfers.
+ """
+
+ def __init__(
+ self,
+ txn_manager: dns.transaction.TransactionManager,
+ rdtype: dns.rdatatype.RdataType = dns.rdatatype.AXFR,
+ serial: int | None = None,
+ is_udp: bool = False,
+ ):
+ """Initialize an inbound zone transfer.
+
+ *txn_manager* is a :py:class:`dns.transaction.TransactionManager`.
+
+ *rdtype* can be `dns.rdatatype.AXFR` or `dns.rdatatype.IXFR`
+
+ *serial* is the base serial number for IXFRs, and is required in
+ that case.
+
+        *is_udp*, a ``bool``, indicates whether UDP is being used for this
+        XFR.
+ """
+ self.txn_manager = txn_manager
+ self.txn: dns.transaction.Transaction | None = None
+ self.rdtype = rdtype
+ if rdtype == dns.rdatatype.IXFR:
+ if serial is None:
+ raise ValueError("a starting serial must be supplied for IXFRs")
+ self.incremental = True
+ elif rdtype == dns.rdatatype.AXFR:
+ if is_udp:
+ raise ValueError("is_udp specified for AXFR")
+ self.incremental = False
+ else:
+ raise ValueError("rdtype is not IXFR or AXFR")
+ self.serial = serial
+ self.is_udp = is_udp
+ (_, _, self.origin) = txn_manager.origin_information()
+ self.soa_rdataset: dns.rdataset.Rdataset | None = None
+ self.done = False
+ self.expecting_SOA = False
+ self.delete_mode = False
+
+ def process_message(self, message: dns.message.Message) -> bool:
+ """Process one message in the transfer.
+
+ The message should have the same relativization as was specified when
+ the `dns.xfr.Inbound` was created. The message should also have been
+ created with `one_rr_per_rrset=True` because order matters.
+
+ Returns `True` if the transfer is complete, and `False` otherwise.
+ """
+ if self.txn is None:
+ self.txn = self.txn_manager.writer(not self.incremental)
+ rcode = message.rcode()
+ if rcode != dns.rcode.NOERROR:
+ raise TransferError(rcode)
+ #
+        # We don't require a question section, but if it is present it
+        # should be correct.
+ #
+ if len(message.question) > 0:
+ if message.question[0].name != self.origin:
+ raise dns.exception.FormError("wrong question name")
+ if message.question[0].rdtype != self.rdtype:
+ raise dns.exception.FormError("wrong question rdatatype")
+ answer_index = 0
+ if self.soa_rdataset is None:
+ #
+ # This is the first message. We're expecting an SOA at
+ # the origin.
+ #
+ if not message.answer or message.answer[0].name != self.origin:
+ raise dns.exception.FormError("No answer or RRset not for zone origin")
+ rrset = message.answer[0]
+ rdataset = rrset
+ if rdataset.rdtype != dns.rdatatype.SOA:
+ raise dns.exception.FormError("first RRset is not an SOA")
+ answer_index = 1
+ self.soa_rdataset = rdataset.copy() # pyright: ignore
+ if self.incremental:
+ assert self.soa_rdataset is not None
+ soa = cast(dns.rdtypes.ANY.SOA.SOA, self.soa_rdataset[0])
+ if soa.serial == self.serial:
+ #
+ # We're already up-to-date.
+ #
+ self.done = True
+ elif dns.serial.Serial(soa.serial) < self.serial:
+ # It went backwards!
+ raise SerialWentBackwards
+ else:
+ if self.is_udp and len(message.answer[answer_index:]) == 0:
+ #
+ # There are no more records, so this is the
+ # "truncated" response. Say to use TCP
+ #
+ raise UseTCP
+ #
+ # Note we're expecting another SOA so we can detect
+ # if this IXFR response is an AXFR-style response.
+ #
+ self.expecting_SOA = True
+ #
+ # Process the answer section (other than the initial SOA in
+ # the first message).
+ #
+ for rrset in message.answer[answer_index:]:
+ name = rrset.name
+ rdataset = rrset
+ if self.done:
+ raise dns.exception.FormError("answers after final SOA")
+ assert self.txn is not None # for mypy
+ if rdataset.rdtype == dns.rdatatype.SOA and name == self.origin:
+ #
+ # Every time we see an origin SOA delete_mode inverts
+ #
+ if self.incremental:
+ self.delete_mode = not self.delete_mode
+ #
+ # If this SOA Rdataset is equal to the first we saw
+ # then we're finished. If this is an IXFR we also
+ # check that we're seeing the record in the expected
+ # part of the response.
+ #
+ if rdataset == self.soa_rdataset and (
+ (not self.incremental) or self.delete_mode
+ ):
+ #
+ # This is the final SOA
+ #
+ soa = cast(dns.rdtypes.ANY.SOA.SOA, rdataset[0])
+ if self.expecting_SOA:
+ # We got an empty IXFR sequence!
+ raise dns.exception.FormError("empty IXFR sequence")
+ if self.incremental and self.serial != soa.serial:
+ raise dns.exception.FormError("unexpected end of IXFR sequence")
+ self.txn.replace(name, rdataset)
+ self.txn.commit()
+ self.txn = None
+ self.done = True
+ else:
+ #
+ # This is not the final SOA
+ #
+ self.expecting_SOA = False
+ soa = cast(dns.rdtypes.ANY.SOA.SOA, rdataset[0])
+ if self.incremental:
+ if self.delete_mode:
+ # This is the start of an IXFR deletion set
+ if soa.serial != self.serial:
+ raise dns.exception.FormError(
+ "IXFR base serial mismatch"
+ )
+ else:
+ # This is the start of an IXFR addition set
+ self.serial = soa.serial
+ self.txn.replace(name, rdataset)
+ else:
+ # We saw a non-final SOA for the origin in an AXFR.
+ raise dns.exception.FormError("unexpected origin SOA in AXFR")
+ continue
+ if self.expecting_SOA:
+ #
+ # We made an IXFR request and are expecting another
+ # SOA RR, but saw something else, so this must be an
+ # AXFR response.
+ #
+ self.incremental = False
+ self.expecting_SOA = False
+ self.delete_mode = False
+ self.txn.rollback()
+ self.txn = self.txn_manager.writer(True)
+ #
+ # Note we are falling through into the code below
+ # so whatever rdataset this was gets written.
+ #
+ # Add or remove the data
+ if self.delete_mode:
+ self.txn.delete_exact(name, rdataset)
+ else:
+ self.txn.add(name, rdataset)
+ if self.is_udp and not self.done:
+ #
+ # This is a UDP IXFR and we didn't get to done, and we didn't
+ # get the proper "truncated" response
+ #
+ raise dns.exception.FormError("unexpected end of UDP IXFR")
+ return self.done
+
+ #
+ # Inbounds are context managers.
+ #
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self.txn:
+ self.txn.rollback()
+ return False
+
+
+def make_query(
+ txn_manager: dns.transaction.TransactionManager,
+ serial: int | None = 0,
+ use_edns: int | bool | None = None,
+ ednsflags: int | None = None,
+ payload: int | None = None,
+ request_payload: int | None = None,
+ options: List[dns.edns.Option] | None = None,
+ keyring: Any = None,
+ keyname: dns.name.Name | None = None,
+ keyalgorithm: dns.name.Name | str = dns.tsig.default_algorithm,
+) -> Tuple[dns.message.QueryMessage, int | None]:
+ """Make an AXFR or IXFR query.
+
+ *txn_manager* is a ``dns.transaction.TransactionManager``, typically a
+ ``dns.zone.Zone``.
+
+ *serial* is an ``int`` or ``None``. If 0, then IXFR will be
+ attempted using the most recent serial number from the
+ *txn_manager*; it is the caller's responsibility to ensure there
+ are no write transactions active that could invalidate the
+ retrieved serial. If a serial cannot be determined, AXFR will be
+ forced. Other integer values are the starting serial to use.
+ ``None`` forces an AXFR.
+
+ Please see the documentation for :py:func:`dns.message.make_query` and
+ :py:func:`dns.message.Message.use_tsig` for details on the other parameters
+ to this function.
+
+ Returns a `(query, serial)` tuple.
+ """
+ (zone_origin, _, origin) = txn_manager.origin_information()
+ if zone_origin is None:
+ raise ValueError("no zone origin")
+ if serial is None:
+ rdtype = dns.rdatatype.AXFR
+ elif not isinstance(serial, int):
+ raise ValueError("serial is not an integer")
+ elif serial == 0:
+ with txn_manager.reader() as txn:
+ rdataset = txn.get(origin, "SOA")
+ if rdataset:
+ soa = cast(dns.rdtypes.ANY.SOA.SOA, rdataset[0])
+ serial = soa.serial
+ rdtype = dns.rdatatype.IXFR
+ else:
+ serial = None
+ rdtype = dns.rdatatype.AXFR
+ elif serial > 0 and serial < 4294967296:
+ rdtype = dns.rdatatype.IXFR
+ else:
+ raise ValueError("serial out-of-range")
+ rdclass = txn_manager.get_class()
+ q = dns.message.make_query(
+ zone_origin,
+ rdtype,
+ rdclass,
+ use_edns,
+ False,
+ ednsflags,
+ payload,
+ request_payload,
+ options,
+ )
+ if serial is not None:
+ rdata = dns.rdata.from_text(rdclass, "SOA", f". . {serial} 0 0 0 0")
+ rrset = q.find_rrset(
+ q.authority, zone_origin, rdclass, dns.rdatatype.SOA, create=True
+ )
+ rrset.add(rdata, 0)
+ if keyring is not None:
+ q.use_tsig(keyring, keyname, algorithm=keyalgorithm)
+ return (q, serial)
+
+
+def extract_serial_from_query(query: dns.message.Message) -> int | None:
+ """Extract the SOA serial number from query if it is an IXFR and return
+ it, otherwise return None.
+
+ *query* is a dns.message.QueryMessage that is an IXFR or AXFR request.
+
+    Raises ``ValueError`` if the query is not an IXFR or AXFR, and
+    ``KeyError`` if an IXFR doesn't have an appropriate SOA RRset in the
+    authority section.
+ """
+ if not isinstance(query, dns.message.QueryMessage):
+ raise ValueError("query not a QueryMessage")
+ question = query.question[0]
+ if question.rdtype == dns.rdatatype.AXFR:
+ return None
+ elif question.rdtype != dns.rdatatype.IXFR:
+ raise ValueError("query is not an AXFR or IXFR")
+ soa_rrset = query.find_rrset(
+ query.authority, question.name, question.rdclass, dns.rdatatype.SOA
+ )
+ soa = cast(dns.rdtypes.ANY.SOA.SOA, soa_rrset[0])
+ return soa.serial
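+
+
+# Usage sketch (illustrative; dns.query.inbound_xfr() wraps this state
+# machine for the common case):
+#
+#     zone = dns.versioned.Zone("example.")
+#     (query, serial) = make_query(zone)  # no SOA yet, so this is an AXFR
+#     with Inbound(zone, dns.rdatatype.AXFR) as inbound:
+#         # send *query* to the primary, then feed each response message
+#         # into inbound.process_message() until it returns True.
+#         ...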
diff --git a/tapdown/lib/python3.11/site-packages/dns/zone.py b/tapdown/lib/python3.11/site-packages/dns/zone.py
new file mode 100644
index 0000000..f916ffe
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/zone.py
@@ -0,0 +1,1462 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Zones."""
+
+import contextlib
+import io
+import os
+import struct
+from typing import (
+ Any,
+ Callable,
+ Iterable,
+ Iterator,
+ List,
+ MutableMapping,
+ Set,
+ Tuple,
+ cast,
+)
+
+import dns.exception
+import dns.grange
+import dns.immutable
+import dns.name
+import dns.node
+import dns.rdata
+import dns.rdataclass
+import dns.rdataset
+import dns.rdatatype
+import dns.rdtypes.ANY.SOA
+import dns.rdtypes.ANY.ZONEMD
+import dns.rrset
+import dns.tokenizer
+import dns.transaction
+import dns.ttl
+import dns.zonefile
+from dns.zonetypes import DigestHashAlgorithm, DigestScheme, _digest_hashers
+
+
+class BadZone(dns.exception.DNSException):
+ """The DNS zone is malformed."""
+
+
+class NoSOA(BadZone):
+ """The DNS zone has no SOA RR at its origin."""
+
+
+class NoNS(BadZone):
+ """The DNS zone has no NS RRset at its origin."""
+
+
+class UnknownOrigin(BadZone):
+ """The DNS zone's origin is unknown."""
+
+
+class UnsupportedDigestScheme(dns.exception.DNSException):
+ """The zone digest's scheme is unsupported."""
+
+
+class UnsupportedDigestHashAlgorithm(dns.exception.DNSException):
+ """The zone digest's origin is unsupported."""
+
+
+class NoDigest(dns.exception.DNSException):
+ """The DNS zone has no ZONEMD RRset at its origin."""
+
+
+class DigestVerificationFailure(dns.exception.DNSException):
+ """The ZONEMD digest failed to verify."""
+
+
+def _validate_name(
+ name: dns.name.Name,
+ origin: dns.name.Name | None,
+ relativize: bool,
+) -> dns.name.Name:
+ # This name validation code is shared by Zone and Version
+ if origin is None:
+ # This should probably never happen as other code (e.g.
+ # _rr_line) will notice the lack of an origin before us, but
+ # we check just in case!
+ raise KeyError("no zone origin is defined")
+ if name.is_absolute():
+ if not name.is_subdomain(origin):
+ raise KeyError("name parameter must be a subdomain of the zone origin")
+ if relativize:
+ name = name.relativize(origin)
+ else:
+ # We have a relative name. Make sure that the derelativized name is
+ # not too long.
+ try:
+ abs_name = name.derelativize(origin)
+ except dns.name.NameTooLong:
+ # We map dns.name.NameTooLong to KeyError to be consistent with
+ # the other exceptions above.
+ raise KeyError("relative name too long for zone")
+ if not relativize:
+ # We have a relative name in a non-relative zone, so use the
+ # derelativized name.
+ name = abs_name
+ return name
+
+
+class Zone(dns.transaction.TransactionManager):
+ """A DNS zone.
+
+ A ``Zone`` is a mapping from names to nodes. The zone object may be
+ treated like a Python dictionary, e.g. ``zone[name]`` will retrieve
+ the node associated with that name. The *name* may be a
+ ``dns.name.Name object``, or it may be a string. In either case,
+ if the name is relative it is treated as relative to the origin of
+ the zone.
+ """
+
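+    # For example (a sketch): in a relativized zone with origin
+    # ``example.``, ``zone["www"]`` and
+    # ``zone[dns.name.from_text("www", None)]`` return the same node.
+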
+ node_factory: Callable[[], dns.node.Node] = dns.node.Node
+ map_factory: Callable[[], MutableMapping[dns.name.Name, dns.node.Node]] = dict
+    # We only type the version factories as returning "Version" to allow
+    # for flexibility, as only the version protocol matters.
+ writable_version_factory: Callable[["Zone", bool], "Version"] | None = None
+ immutable_version_factory: Callable[["Version"], "Version"] | None = None
+
+ __slots__ = ["rdclass", "origin", "nodes", "relativize"]
+
+ def __init__(
+ self,
+ origin: dns.name.Name | str | None,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ relativize: bool = True,
+ ):
+ """Initialize a zone object.
+
+ *origin* is the origin of the zone. It may be a ``dns.name.Name``,
+ a ``str``, or ``None``. If ``None``, then the zone's origin will
+ be set by the first ``$ORIGIN`` line in a zone file.
+
+ *rdclass*, an ``int``, the zone's rdata class; the default is class IN.
+
+        *relativize*, a ``bool``, determines whether domain names are
+ relativized to the zone's origin. The default is ``True``.
+ """
+
+ if origin is not None:
+ if isinstance(origin, str):
+ origin = dns.name.from_text(origin)
+ elif not isinstance(origin, dns.name.Name):
+ raise ValueError("origin parameter must be convertible to a DNS name")
+ if not origin.is_absolute():
+ raise ValueError("origin parameter must be an absolute name")
+ self.origin = origin
+ self.rdclass = rdclass
+ self.nodes: MutableMapping[dns.name.Name, dns.node.Node] = self.map_factory()
+ self.relativize = relativize
+
+ def __eq__(self, other):
+ """Two zones are equal if they have the same origin, class, and
+ nodes.
+
+ Returns a ``bool``.
+ """
+
+ if not isinstance(other, Zone):
+ return False
+ if (
+ self.rdclass != other.rdclass
+ or self.origin != other.origin
+ or self.nodes != other.nodes
+ ):
+ return False
+ return True
+
+ def __ne__(self, other):
+ """Are two zones not equal?
+
+ Returns a ``bool``.
+ """
+
+ return not self.__eq__(other)
+
+ def _validate_name(self, name: dns.name.Name | str) -> dns.name.Name:
+ # Note that any changes in this method should have corresponding changes
+ # made in the Version _validate_name() method.
+ if isinstance(name, str):
+ name = dns.name.from_text(name, None)
+ elif not isinstance(name, dns.name.Name):
+ raise KeyError("name parameter must be convertible to a DNS name")
+ return _validate_name(name, self.origin, self.relativize)
+
+ def __getitem__(self, key):
+ key = self._validate_name(key)
+ return self.nodes[key]
+
+ def __setitem__(self, key, value):
+ key = self._validate_name(key)
+ self.nodes[key] = value
+
+ def __delitem__(self, key):
+ key = self._validate_name(key)
+ del self.nodes[key]
+
+ def __iter__(self):
+ return self.nodes.__iter__()
+
+ def keys(self):
+ return self.nodes.keys()
+
+ def values(self):
+ return self.nodes.values()
+
+ def items(self):
+ return self.nodes.items()
+
+ def get(self, key):
+ key = self._validate_name(key)
+ return self.nodes.get(key)
+
+ def __contains__(self, key):
+ key = self._validate_name(key)
+ return key in self.nodes
+
+ def find_node(
+ self, name: dns.name.Name | str, create: bool = False
+ ) -> dns.node.Node:
+ """Find a node in the zone, possibly creating it.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+ *create*, a ``bool``. If true, the node will be created if it does
+ not exist.
+
+ Raises ``KeyError`` if the name is not known and create was
+ not specified, or if the name was not a subdomain of the origin.
+
+ Returns a ``dns.node.Node``.
+ """
+
+ name = self._validate_name(name)
+ node = self.nodes.get(name)
+ if node is None:
+ if not create:
+ raise KeyError
+ node = self.node_factory()
+ self.nodes[name] = node
+ return node
+
+ def get_node(
+ self, name: dns.name.Name | str, create: bool = False
+ ) -> dns.node.Node | None:
+ """Get a node in the zone, possibly creating it.
+
+ This method is like ``find_node()``, except it returns None instead
+ of raising an exception if the node does not exist and creation
+ has not been requested.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+ *create*, a ``bool``. If true, the node will be created if it does
+ not exist.
+
+ Returns a ``dns.node.Node`` or ``None``.
+ """
+
+ try:
+ node = self.find_node(name, create)
+ except KeyError:
+ node = None
+ return node
+
+ def delete_node(self, name: dns.name.Name | str) -> None:
+ """Delete the specified node if it exists.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+ It is not an error if the node does not exist.
+ """
+
+ name = self._validate_name(name)
+ if name in self.nodes:
+ del self.nodes[name]
+
+ def find_rdataset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset:
+ """Look for an rdataset with the specified name and type in the zone,
+ and return an rdataset encapsulating it.
+
+ The rdataset returned is not a copy; changes to it will change
+ the zone.
+
+ KeyError is raised if the name or type are not found.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+ *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired.
+
+ *covers*, a ``dns.rdatatype.RdataType`` or ``str`` the covered type.
+ Usually this value is ``dns.rdatatype.NONE``, but if the
+ rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
+ then the covers value will be the rdata type the SIG/RRSIG
+ covers. The library treats the SIG and RRSIG types as if they
+ were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
+ This makes RRSIGs much easier to work with than if RRSIGs
+ covering different rdata types were aggregated into a single
+ RRSIG rdataset.
+
+ *create*, a ``bool``. If true, the node will be created if it does
+ not exist.
+
+ Raises ``KeyError`` if the name is not known and create was
+ not specified, or if the name was not a subdomain of the origin.
+
+ Returns a ``dns.rdataset.Rdataset``.
+ """
+
+ name = self._validate_name(name)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ covers = dns.rdatatype.RdataType.make(covers)
+ node = self.find_node(name, create)
+ return node.find_rdataset(self.rdclass, rdtype, covers, create)
+
+ def get_rdataset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset | None:
+ """Look for an rdataset with the specified name and type in the zone.
+
+ This method is like ``find_rdataset()``, except it returns None instead
+ of raising an exception if the rdataset does not exist and creation
+ has not been requested.
+
+ The rdataset returned is not a copy; changes to it will change
+ the zone.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+ *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired.
+
+ *covers*, a ``dns.rdatatype.RdataType`` or ``str``, the covered type.
+ Usually this value is ``dns.rdatatype.NONE``, but if the
+ rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
+ then the covers value will be the rdata type the SIG/RRSIG
+ covers. The library treats the SIG and RRSIG types as if they
+ were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
+ This makes RRSIGs much easier to work with than if RRSIGs
+ covering different rdata types were aggregated into a single
+ RRSIG rdataset.
+
+ *create*, a ``bool``. If true, the node will be created if it does
+ not exist.
+
+ Raises ``KeyError`` if the name is not known and create was
+ not specified, or if the name was not a subdomain of the origin.
+
+ Returns a ``dns.rdataset.Rdataset`` or ``None``.
+ """
+
+ try:
+ rdataset = self.find_rdataset(name, rdtype, covers, create)
+ except KeyError:
+ rdataset = None
+ return rdataset
+
+ def delete_rdataset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ ) -> None:
+ """Delete the rdataset matching *rdtype* and *covers*, if it
+ exists at the node specified by *name*.
+
+ It is not an error if the node does not exist, or if there is no matching
+ rdataset at the node.
+
+ If the node has no rdatasets after the deletion, it will itself be deleted.
+
+ *name*: the name of the node to find. The value may be a ``dns.name.Name`` or a
+ ``str``. If absolute, the name must be a subdomain of the zone's origin. If
+ ``zone.relativize`` is ``True``, then the name will be relativized.
+
+ *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired.
+
+ *covers*, a ``dns.rdatatype.RdataType`` or ``str`` or ``None``, the covered
+ type. Usually this value is ``dns.rdatatype.NONE``, but if the rdtype is
+ ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``, then the covers value will be
+ the rdata type the SIG/RRSIG covers. The library treats the SIG and RRSIG types
+ as if they were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA). This
+ makes RRSIGs much easier to work with than if RRSIGs covering different rdata
+ types were aggregated into a single RRSIG rdataset.
+ """
+
+ name = self._validate_name(name)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ covers = dns.rdatatype.RdataType.make(covers)
+ node = self.get_node(name)
+ if node is not None:
+ node.delete_rdataset(self.rdclass, rdtype, covers)
+ if len(node) == 0:
+ self.delete_node(name)
+
+ def replace_rdataset(
+ self, name: dns.name.Name | str, replacement: dns.rdataset.Rdataset
+ ) -> None:
+ """Replace an rdataset at name.
+
+        It is not an error if there is no rdataset matching *replacement*.
+
+ Ownership of the *replacement* object is transferred to the zone;
+ in other words, this method does not store a copy of *replacement*
+ at the node, it stores *replacement* itself.
+
+ If the node does not exist, it is created.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+ *replacement*, a ``dns.rdataset.Rdataset``, the replacement rdataset.
+ """
+
+ if replacement.rdclass != self.rdclass:
+ raise ValueError("replacement.rdclass != zone.rdclass")
+ node = self.find_node(name, True)
+ node.replace_rdataset(replacement)
+
+ def find_rrset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ ) -> dns.rrset.RRset:
+ """Look for an rdataset with the specified name and type in the zone,
+ and return an RRset encapsulating it.
+
+ This method is less efficient than the similar
+ ``find_rdataset()`` because it creates an RRset instead of
+ returning the matching rdataset. It may be more convenient
+ for some uses since it returns an object which binds the owner
+ name to the rdataset.
+
+ This method may not be used to create new nodes or rdatasets;
+ use ``find_rdataset`` instead.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+ *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired.
+
+ *covers*, a ``dns.rdatatype.RdataType`` or ``str``, the covered type.
+ Usually this value is ``dns.rdatatype.NONE``, but if the
+ rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
+ then the covers value will be the rdata type the SIG/RRSIG
+ covers. The library treats the SIG and RRSIG types as if they
+ were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
+ This makes RRSIGs much easier to work with than if RRSIGs
+ covering different rdata types were aggregated into a single
+ RRSIG rdataset.
+
+        Raises ``KeyError`` if the name is not known, or if the name was
+        not a subdomain of the origin.
+
+        Returns a ``dns.rrset.RRset``.
+ """
+
+ vname = self._validate_name(name)
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ covers = dns.rdatatype.RdataType.make(covers)
+ rdataset = self.nodes[vname].find_rdataset(self.rdclass, rdtype, covers)
+ rrset = dns.rrset.RRset(vname, self.rdclass, rdtype, covers)
+ rrset.update(rdataset)
+ return rrset
+
+ def get_rrset(
+ self,
+ name: dns.name.Name | str,
+ rdtype: dns.rdatatype.RdataType | str,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ ) -> dns.rrset.RRset | None:
+ """Look for an rdataset with the specified name and type in the zone,
+ and return an RRset encapsulating it.
+
+ This method is less efficient than the similar ``get_rdataset()``
+ because it creates an RRset instead of returning the matching
+ rdataset. It may be more convenient for some uses since it
+ returns an object which binds the owner name to the rdataset.
+
+ This method may not be used to create new nodes or rdatasets;
+ use ``get_rdataset()`` instead.
+
+ *name*: the name of the node to find.
+ The value may be a ``dns.name.Name`` or a ``str``. If absolute, the
+ name must be a subdomain of the zone's origin. If ``zone.relativize``
+ is ``True``, then the name will be relativized.
+
+        *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired.
+
+        *covers*, a ``dns.rdatatype.RdataType`` or ``str``, the covered type.
+ Usually this value is ``dns.rdatatype.NONE``, but if the
+ rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
+ then the covers value will be the rdata type the SIG/RRSIG
+ covers. The library treats the SIG and RRSIG types as if they
+ were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
+ This makes RRSIGs much easier to work with than if RRSIGs
+ covering different rdata types were aggregated into a single
+ RRSIG rdataset.
+
+ Returns a ``dns.rrset.RRset`` or ``None``.
+ """
+
+ try:
+ rrset = self.find_rrset(name, rdtype, covers)
+ except KeyError:
+ rrset = None
+ return rrset
+
+ def iterate_rdatasets(
+ self,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.ANY,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ ) -> Iterator[Tuple[dns.name.Name, dns.rdataset.Rdataset]]:
+ """Return a generator which yields (name, rdataset) tuples for
+ all rdatasets in the zone which have the specified *rdtype*
+ and *covers*. If *rdtype* is ``dns.rdatatype.ANY``, the default,
+ then all rdatasets will be matched.
+
+        *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired.
+
+        *covers*, a ``dns.rdatatype.RdataType`` or ``str``, the covered type.
+ Usually this value is ``dns.rdatatype.NONE``, but if the
+ rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
+ then the covers value will be the rdata type the SIG/RRSIG
+ covers. The library treats the SIG and RRSIG types as if they
+ were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
+ This makes RRSIGs much easier to work with than if RRSIGs
+ covering different rdata types were aggregated into a single
+ RRSIG rdataset.
+ """
+
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ covers = dns.rdatatype.RdataType.make(covers)
+ for name, node in self.items():
+ for rds in node:
+ if rdtype == dns.rdatatype.ANY or (
+ rds.rdtype == rdtype and rds.covers == covers
+ ):
+ yield (name, rds)
+
+ def iterate_rdatas(
+ self,
+ rdtype: dns.rdatatype.RdataType | str = dns.rdatatype.ANY,
+ covers: dns.rdatatype.RdataType | str = dns.rdatatype.NONE,
+ ) -> Iterator[Tuple[dns.name.Name, int, dns.rdata.Rdata]]:
+ """Return a generator which yields (name, ttl, rdata) tuples for
+ all rdatas in the zone which have the specified *rdtype*
+ and *covers*. If *rdtype* is ``dns.rdatatype.ANY``, the default,
+ then all rdatas will be matched.
+
+        *rdtype*, a ``dns.rdatatype.RdataType`` or ``str``, the rdata type desired.
+
+        *covers*, a ``dns.rdatatype.RdataType`` or ``str``, the covered type.
+ Usually this value is ``dns.rdatatype.NONE``, but if the
+ rdtype is ``dns.rdatatype.SIG`` or ``dns.rdatatype.RRSIG``,
+ then the covers value will be the rdata type the SIG/RRSIG
+ covers. The library treats the SIG and RRSIG types as if they
+ were a family of types, e.g. RRSIG(A), RRSIG(NS), RRSIG(SOA).
+ This makes RRSIGs much easier to work with than if RRSIGs
+ covering different rdata types were aggregated into a single
+ RRSIG rdataset.
+ """
+
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ covers = dns.rdatatype.RdataType.make(covers)
+ for name, node in self.items():
+ for rds in node:
+ if rdtype == dns.rdatatype.ANY or (
+ rds.rdtype == rdtype and rds.covers == covers
+ ):
+ for rdata in rds:
+ yield (name, rds.ttl, rdata)
+
+ def to_file(
+ self,
+ f: Any,
+ sorted: bool = True,
+ relativize: bool = True,
+ nl: str | None = None,
+ want_comments: bool = False,
+ want_origin: bool = False,
+ ) -> None:
+ """Write a zone to a file.
+
+ *f*, a file or `str`. If *f* is a string, it is treated
+ as the name of a file to open.
+
+ *sorted*, a ``bool``. If True, the default, then the file
+ will be written with the names sorted in DNSSEC order from
+ least to greatest. Otherwise the names will be written in
+ whatever order they happen to have in the zone's dictionary.
+
+ *relativize*, a ``bool``. If True, the default, then domain
+ names in the output will be relativized to the zone's origin
+ if possible.
+
+        *nl*, a ``str`` or None.  The end of line string.  If ``None``,
+        the output will use the platform's native end-of-line marker
+        (i.e. LF on POSIX, CRLF on Windows).
+
+ *want_comments*, a ``bool``. If ``True``, emit end-of-line comments
+ as part of writing the file. If ``False``, the default, do not
+ emit them.
+
+ *want_origin*, a ``bool``. If ``True``, emit a $ORIGIN line at
+ the start of the file. If ``False``, the default, do not emit
+ one.
+ """
+
+ if isinstance(f, str):
+ cm: contextlib.AbstractContextManager = open(f, "wb")
+ else:
+ cm = contextlib.nullcontext(f)
+ with cm as f:
+            # We do it this way because f.encoding may be None, and the
+            # attribute may not exist at all.
+ file_enc = getattr(f, "encoding", None)
+ if file_enc is None:
+ file_enc = "utf-8"
+
+ if nl is None:
+ # binary mode, '\n' is not enough
+ nl_b = os.linesep.encode(file_enc)
+ nl = "\n"
+ elif isinstance(nl, str):
+ nl_b = nl.encode(file_enc)
+ else:
+ nl_b = nl
+ nl = nl.decode()
+
+ if want_origin:
+ assert self.origin is not None
+ l = "$ORIGIN " + self.origin.to_text()
+ l_b = l.encode(file_enc)
+ try:
+ f.write(l_b)
+ f.write(nl_b)
+ except TypeError: # textual mode
+ f.write(l)
+ f.write(nl)
+
+ if sorted:
+ names = list(self.keys())
+ names.sort()
+ else:
+ names = self.keys()
+ for n in names:
+ l = self[n].to_text(
+ n,
+ origin=self.origin, # pyright: ignore
+ relativize=relativize, # pyright: ignore
+ want_comments=want_comments, # pyright: ignore
+ )
+ l_b = l.encode(file_enc)
+
+ try:
+ f.write(l_b)
+ f.write(nl_b)
+ except TypeError: # textual mode
+ f.write(l)
+ f.write(nl)
+
+ def to_text(
+ self,
+ sorted: bool = True,
+ relativize: bool = True,
+ nl: str | None = None,
+ want_comments: bool = False,
+ want_origin: bool = False,
+ ) -> str:
+ """Return a zone's text as though it were written to a file.
+
+ *sorted*, a ``bool``. If True, the default, then the file
+ will be written with the names sorted in DNSSEC order from
+ least to greatest. Otherwise the names will be written in
+ whatever order they happen to have in the zone's dictionary.
+
+ *relativize*, a ``bool``. If True, the default, then domain
+ names in the output will be relativized to the zone's origin
+ if possible.
+
+ *nl*, a ``str`` or ``None``. The end of line string. If ``None``,
+ the default, the output will use the platform's native
+ end-of-line marker (i.e. LF on POSIX, CRLF on Windows).
+
+ *want_comments*, a ``bool``. If ``True``, emit end-of-line comments
+ as part of writing the file. If ``False``, the default, do not
+ emit them.
+
+ *want_origin*, a ``bool``. If ``True``, emit a $ORIGIN line at
+ the start of the output. If ``False``, the default, do not emit
+ one.
+
+ Returns a ``str``.
+ """
+ temp_buffer = io.StringIO()
+ self.to_file(temp_buffer, sorted, relativize, nl, want_comments, want_origin)
+ return_value = temp_buffer.getvalue()
+ temp_buffer.close()
+ return return_value
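+
+ # Illustrative sketch (not part of the library), assuming a Zone instance
+ # `zone`; the filename is hypothetical:
+ #
+ #     zone.to_file("example.zone", want_origin=True)
+ #     text = zone.to_text(sorted=True, relativize=False)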
+
+ def check_origin(self) -> None:
+ """Do some simple checking of the zone's origin.
+
+ Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
+
+ Raises ``dns.zone.NoNS`` if there is no NS RRset.
+
+ Raises ``KeyError`` if there is no origin node.
+ """
+ if self.relativize:
+ name = dns.name.empty
+ else:
+ assert self.origin is not None
+ name = self.origin
+ if self.get_rdataset(name, dns.rdatatype.SOA) is None:
+ raise NoSOA
+ if self.get_rdataset(name, dns.rdatatype.NS) is None:
+ raise NoNS
+
+ def get_soa(
+ self, txn: dns.transaction.Transaction | None = None
+ ) -> dns.rdtypes.ANY.SOA.SOA:
+ """Get the zone SOA rdata.
+
+ Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
+
+ Returns a ``dns.rdtypes.ANY.SOA.SOA`` Rdata.
+ """
+ if self.relativize:
+ origin_name = dns.name.empty
+ else:
+ if self.origin is None:
+ # get_soa() has been called very early, and there cannot be
+ # an SOA if there is no origin.
+ raise NoSOA
+ origin_name = self.origin
+ soa_rds: dns.rdataset.Rdataset | None
+ if txn:
+ soa_rds = txn.get(origin_name, dns.rdatatype.SOA)
+ else:
+ soa_rds = self.get_rdataset(origin_name, dns.rdatatype.SOA)
+ if soa_rds is None:
+ raise NoSOA
+ else:
+ soa = cast(dns.rdtypes.ANY.SOA.SOA, soa_rds[0])
+ return soa
+
+ def _compute_digest(
+ self,
+ hash_algorithm: DigestHashAlgorithm,
+ scheme: DigestScheme = DigestScheme.SIMPLE,
+ ) -> bytes:
+ hashinfo = _digest_hashers.get(hash_algorithm)
+ if not hashinfo:
+ raise UnsupportedDigestHashAlgorithm
+ if scheme != DigestScheme.SIMPLE:
+ raise UnsupportedDigestScheme
+
+ if self.relativize:
+ origin_name = dns.name.empty
+ else:
+ assert self.origin is not None
+ origin_name = self.origin
+ hasher = hashinfo()
+ for name, node in sorted(self.items()):
+ rrnamebuf = name.to_digestable(self.origin)
+ for rdataset in sorted(node, key=lambda rds: (rds.rdtype, rds.covers)):
+ if name == origin_name and dns.rdatatype.ZONEMD in (
+ rdataset.rdtype,
+ rdataset.covers,
+ ):
+ continue
+ rrfixed = struct.pack(
+ "!HHI", rdataset.rdtype, rdataset.rdclass, rdataset.ttl
+ )
+ rdatas = [rdata.to_digestable(self.origin) for rdata in rdataset]
+ for rdata in sorted(rdatas):
+ rrlen = struct.pack("!H", len(rdata))
+ hasher.update(rrnamebuf + rrfixed + rrlen + rdata)
+ return hasher.digest()
+
+ def compute_digest(
+ self,
+ hash_algorithm: DigestHashAlgorithm,
+ scheme: DigestScheme = DigestScheme.SIMPLE,
+ ) -> dns.rdtypes.ANY.ZONEMD.ZONEMD:
+ serial = self.get_soa().serial
+ digest = self._compute_digest(hash_algorithm, scheme)
+ return dns.rdtypes.ANY.ZONEMD.ZONEMD(
+ self.rdclass, dns.rdatatype.ZONEMD, serial, scheme, hash_algorithm, digest
+ )
+
+ def verify_digest(
+ self, zonemd: dns.rdtypes.ANY.ZONEMD.ZONEMD | None = None
+ ) -> None:
+ digests: dns.rdataset.Rdataset | List[dns.rdtypes.ANY.ZONEMD.ZONEMD]
+ if zonemd:
+ digests = [zonemd]
+ else:
+ assert self.origin is not None
+ rds = self.get_rdataset(self.origin, dns.rdatatype.ZONEMD)
+ if rds is None:
+ raise NoDigest
+ digests = rds
+ for digest in digests:
+ try:
+ computed = self._compute_digest(digest.hash_algorithm, digest.scheme)
+ if computed == digest.digest:
+ return
+ except Exception:
+ pass
+ raise DigestVerificationFailure
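+
+ # Illustrative ZONEMD sketch (not part of the library), assuming a Zone
+ # instance `zone` whose SOA and data are already present:
+ #
+ #     zonemd = zone.compute_digest(DigestHashAlgorithm.SHA384)
+ #     zone.replace_rdataset("@", dns.rdataset.from_rdata(3600, zonemd))
+ #     zone.verify_digest()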
+
+ # TransactionManager methods
+
+ def reader(self) -> "Transaction":
+ return Transaction(self, False, Version(self, 1, self.nodes, self.origin))
+
+ def writer(self, replacement: bool = False) -> "Transaction":
+ txn = Transaction(self, replacement)
+ txn._setup_version()
+ return txn
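+
+ # Illustrative transaction sketch (not part of the library), assuming a
+ # Zone instance `zone`; the name and address are hypothetical:
+ #
+ #     with zone.writer() as txn:
+ #         txn.add("www", 300, dns.rdata.from_text("IN", "A", "10.0.0.1"))
+ #     with zone.reader() as txn:
+ #         rdataset = txn.get("www", "A")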
+
+ def origin_information(
+ self,
+ ) -> Tuple[dns.name.Name | None, bool, dns.name.Name | None]:
+ effective: dns.name.Name | None
+ if self.relativize:
+ effective = dns.name.empty
+ else:
+ effective = self.origin
+ return (self.origin, self.relativize, effective)
+
+ def get_class(self):
+ return self.rdclass
+
+ # Transaction methods
+
+ def _end_read(self, txn):
+ pass
+
+ def _end_write(self, txn):
+ pass
+
+ def _commit_version(self, txn, version, origin):
+ self.nodes = version.nodes
+ if self.origin is None:
+ self.origin = origin
+
+ def _get_next_version_id(self) -> int:
+ # Versions are ephemeral and all have id 1
+ return 1
+
+
+# These classes used to be in dns.versioned, but have moved here so we can use
+# the copy-on-write transaction mechanism for both kinds of zones. In a
+# regular zone, the version only exists during the transaction, and the nodes
+# are regular dns.node.Nodes.
+
+# A node with a version id.
+
+
+class VersionedNode(dns.node.Node): # lgtm[py/missing-equals]
+ __slots__ = ["id"]
+
+ def __init__(self):
+ super().__init__()
+ # A proper id will get set by the Version
+ self.id = 0
+
+
+@dns.immutable.immutable
+class ImmutableVersionedNode(VersionedNode):
+ def __init__(self, node):
+ super().__init__()
+ self.id = node.id
+ self.rdatasets = tuple(
+ [dns.rdataset.ImmutableRdataset(rds) for rds in node.rdatasets]
+ )
+
+ def find_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset:
+ if create:
+ raise TypeError("immutable")
+ return super().find_rdataset(rdclass, rdtype, covers, False)
+
+ def get_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ create: bool = False,
+ ) -> dns.rdataset.Rdataset | None:
+ if create:
+ raise TypeError("immutable")
+ return super().get_rdataset(rdclass, rdtype, covers, False)
+
+ def delete_rdataset(
+ self,
+ rdclass: dns.rdataclass.RdataClass,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType = dns.rdatatype.NONE,
+ ) -> None:
+ raise TypeError("immutable")
+
+ def replace_rdataset(self, replacement: dns.rdataset.Rdataset) -> None:
+ raise TypeError("immutable")
+
+ def is_immutable(self) -> bool:
+ return True
+
+
+class Version:
+ def __init__(
+ self,
+ zone: Zone,
+ id: int,
+ nodes: MutableMapping[dns.name.Name, dns.node.Node] | None = None,
+ origin: dns.name.Name | None = None,
+ ):
+ self.zone = zone
+ self.id = id
+ if nodes is not None:
+ self.nodes = nodes
+ else:
+ self.nodes = zone.map_factory()
+ self.origin = origin
+
+ def _validate_name(self, name: dns.name.Name) -> dns.name.Name:
+ return _validate_name(name, self.origin, self.zone.relativize)
+
+ def get_node(self, name: dns.name.Name) -> dns.node.Node | None:
+ name = self._validate_name(name)
+ return self.nodes.get(name)
+
+ def get_rdataset(
+ self,
+ name: dns.name.Name,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType,
+ ) -> dns.rdataset.Rdataset | None:
+ node = self.get_node(name)
+ if node is None:
+ return None
+ return node.get_rdataset(self.zone.rdclass, rdtype, covers)
+
+ def keys(self):
+ return self.nodes.keys()
+
+ def items(self):
+ return self.nodes.items()
+
+
+class WritableVersion(Version):
+ def __init__(self, zone: Zone, replacement: bool = False):
+ # The zone._versions_lock must be held by our caller in a versioned
+ # zone.
+ id = zone._get_next_version_id()
+ super().__init__(zone, id)
+ if not replacement:
+ # We copy the map, because that gives us a simple and thread-safe
+ # way of doing versions, and we have a garbage collector to help
+ # us. We only make new node objects if we actually change the
+ # node.
+ self.nodes.update(zone.nodes)
+ # We have to copy the zone origin as it may be None in the first
+ # version, and we don't want to mutate the zone until we commit.
+ self.origin = zone.origin
+ self.changed: Set[dns.name.Name] = set()
+
+ def _maybe_cow_with_name(
+ self, name: dns.name.Name
+ ) -> Tuple[dns.node.Node, dns.name.Name]:
+ name = self._validate_name(name)
+ node = self.nodes.get(name)
+ if node is None or name not in self.changed:
+ new_node = self.zone.node_factory()
+ if hasattr(new_node, "id"):
+ # We keep doing this for backwards compatibility, as earlier
+ # code used new_node.id != self.id for the "do we need to CoW?"
+ # test. Now we use the changed set as this works with both
+ # regular zones and versioned zones.
+ #
+ # We ignore the mypy error as this is safe but it doesn't see it.
+ new_node.id = self.id # type: ignore
+ if node is not None:
+ # moo! copy on write!
+ new_node.rdatasets.extend(node.rdatasets)
+ self.nodes[name] = new_node
+ self.changed.add(name)
+ return (new_node, name)
+ else:
+ return (node, name)
+
+ def _maybe_cow(self, name: dns.name.Name) -> dns.node.Node:
+ return self._maybe_cow_with_name(name)[0]
+
+ def delete_node(self, name: dns.name.Name) -> None:
+ name = self._validate_name(name)
+ if name in self.nodes:
+ del self.nodes[name]
+ self.changed.add(name)
+
+ def put_rdataset(
+ self, name: dns.name.Name, rdataset: dns.rdataset.Rdataset
+ ) -> None:
+ node = self._maybe_cow(name)
+ node.replace_rdataset(rdataset)
+
+ def delete_rdataset(
+ self,
+ name: dns.name.Name,
+ rdtype: dns.rdatatype.RdataType,
+ covers: dns.rdatatype.RdataType,
+ ) -> None:
+ node = self._maybe_cow(name)
+ node.delete_rdataset(self.zone.rdclass, rdtype, covers)
+ if len(node) == 0:
+ del self.nodes[name]
+
+
+@dns.immutable.immutable
+class ImmutableVersion(Version):
+ def __init__(self, version: Version):
+ if not isinstance(version, WritableVersion):
+ raise ValueError(
+ "a dns.zone.ImmutableVersion requires a dns.zone.WritableVersion"
+ )
+ # We tell super() that it's a replacement as we don't want it
+ # to copy the nodes, as we're about to do that with an
+ # immutable Dict.
+ super().__init__(version.zone, True)
+ # set the right id!
+ self.id = version.id
+ # keep the origin
+ self.origin = version.origin
+ # Make changed nodes immutable
+ for name in version.changed:
+ node = version.nodes.get(name)
+ # it might not exist if we deleted it in the version
+ if node:
+ version.nodes[name] = ImmutableVersionedNode(node)
+ # We're changing the type of the nodes dictionary here on purpose, so
+ # we ignore the mypy error.
+ self.nodes = dns.immutable.Dict(
+ version.nodes, True, self.zone.map_factory
+ ) # type: ignore
+
+
+class Transaction(dns.transaction.Transaction):
+ def __init__(self, zone, replacement, version=None, make_immutable=False):
+ read_only = version is not None
+ super().__init__(zone, replacement, read_only)
+ self.version = version
+ self.make_immutable = make_immutable
+
+ @property
+ def zone(self):
+ return self.manager
+
+ def _setup_version(self):
+ assert self.version is None
+ factory = self.manager.writable_version_factory # pyright: ignore
+ if factory is None:
+ factory = WritableVersion
+ self.version = factory(self.zone, self.replacement) # pyright: ignore
+
+ def _get_rdataset(self, name, rdtype, covers):
+ assert self.version is not None
+ return self.version.get_rdataset(name, rdtype, covers)
+
+ def _put_rdataset(self, name, rdataset):
+ assert not self.read_only
+ assert self.version is not None
+ self.version.put_rdataset(name, rdataset)
+
+ def _delete_name(self, name):
+ assert not self.read_only
+ assert self.version is not None
+ self.version.delete_node(name)
+
+ def _delete_rdataset(self, name, rdtype, covers):
+ assert not self.read_only
+ assert self.version is not None
+ self.version.delete_rdataset(name, rdtype, covers)
+
+ def _name_exists(self, name):
+ assert self.version is not None
+ return self.version.get_node(name) is not None
+
+ def _changed(self):
+ if self.read_only:
+ return False
+ else:
+ assert self.version is not None
+ return len(self.version.changed) > 0
+
+ def _end_transaction(self, commit):
+ assert self.zone is not None
+ assert self.version is not None
+ if self.read_only:
+ self.zone._end_read(self) # pyright: ignore
+ elif commit and len(self.version.changed) > 0:
+ if self.make_immutable:
+ factory = self.manager.immutable_version_factory # pyright: ignore
+ if factory is None:
+ factory = ImmutableVersion
+ version = factory(self.version)
+ else:
+ version = self.version
+ self.zone._commit_version( # pyright: ignore
+ self, version, self.version.origin
+ )
+ else:
+ # rollback
+ self.zone._end_write(self) # pyright: ignore
+
+ def _set_origin(self, origin):
+ assert self.version is not None
+ if self.version.origin is None:
+ self.version.origin = origin
+
+ def _iterate_rdatasets(self):
+ assert self.version is not None
+ for name, node in self.version.items():
+ for rdataset in node:
+ yield (name, rdataset)
+
+ def _iterate_names(self):
+ assert self.version is not None
+ return self.version.keys()
+
+ def _get_node(self, name):
+ assert self.version is not None
+ return self.version.get_node(name)
+
+ def _origin_information(self):
+ assert self.version is not None
+ (absolute, relativize, effective) = self.manager.origin_information()
+ if absolute is None and self.version.origin is not None:
+ # No origin has been committed yet, but we've learned one as part of
+ # this txn. Use it.
+ absolute = self.version.origin
+ if relativize:
+ effective = dns.name.empty
+ else:
+ effective = absolute
+ return (absolute, relativize, effective)
+
+
+def _from_text(
+ text: Any,
+ origin: dns.name.Name | str | None = None,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ relativize: bool = True,
+ zone_factory: Any = Zone,
+ filename: str | None = None,
+ allow_include: bool = False,
+ check_origin: bool = True,
+ idna_codec: dns.name.IDNACodec | None = None,
+ allow_directives: bool | Iterable[str] = True,
+) -> Zone:
+ # See the comments for the public APIs from_text() and from_file() for
+ # details.
+
+ # 'text' can also be a file, but we don't publish that fact
+ # since it's an implementation detail. The official file
+ # interface is from_file().
+
+ if filename is None:
+ filename = ""
+ zone = zone_factory(origin, rdclass, relativize=relativize)
+ with zone.writer(True) as txn:
+ tok = dns.tokenizer.Tokenizer(text, filename, idna_codec=idna_codec)
+ reader = dns.zonefile.Reader(
+ tok,
+ rdclass,
+ txn,
+ allow_include=allow_include,
+ allow_directives=allow_directives,
+ )
+ try:
+ reader.read()
+ except dns.zonefile.UnknownOrigin:
+ # for backwards compatibility
+ raise UnknownOrigin
+ # Now that we're done reading, do some basic checking of the zone.
+ if check_origin:
+ zone.check_origin()
+ return zone
+
+
+def from_text(
+ text: str,
+ origin: dns.name.Name | str | None = None,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ relativize: bool = True,
+ zone_factory: Any = Zone,
+ filename: str | None = None,
+ allow_include: bool = False,
+ check_origin: bool = True,
+ idna_codec: dns.name.IDNACodec | None = None,
+ allow_directives: bool | Iterable[str] = True,
+) -> Zone:
+ """Build a zone object from a zone file format string.
+
+ *text*, a ``str``, the zone file format input.
+
+ *origin*, a ``dns.name.Name``, a ``str``, or ``None``. The origin
+ of the zone; if not specified, the first ``$ORIGIN`` statement in the
+ zone file will determine the origin of the zone.
+
+ *rdclass*, a ``dns.rdataclass.RdataClass``, the zone's rdata class; the default is
+ class IN.
+
+ *relativize*, a ``bool``, determines whether domain names are
+ relativized to the zone's origin. The default is ``True``.
+
+ *zone_factory*, the zone factory to use or ``None``. If ``None``, then
+ ``dns.zone.Zone`` will be used. The value may be any class or callable
+ that returns a subclass of ``dns.zone.Zone``.
+
+ *filename*, a ``str`` or ``None``, the filename to emit when
+ describing where an error occurred; the default is ``''``.
+
+ *allow_include*, a ``bool``. If ``True``, then ``$INCLUDE`` directives
+ are permitted. If ``False``, the default, then encountering a
+ ``$INCLUDE`` will raise a ``SyntaxError`` exception.
+
+ *check_origin*, a ``bool``. If ``True``, the default, then sanity
+ checks of the origin node will be made by calling the zone's
+ ``check_origin()`` method.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ *allow_directives*, a ``bool`` or an iterable of ``str``. If ``True``, the default,
+ then directives are permitted, and the *allow_include* parameter controls whether
+ ``$INCLUDE`` is permitted. If ``False`` or an empty iterable, then no directive
+ processing is done and any directive-like text will be treated as a regular owner
+ name. If a non-empty iterable, then only the listed directives (including the
+ ``$``) are allowed.
+
+ Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
+
+ Raises ``dns.zone.NoNS`` if there is no NS RRset.
+
+ Raises ``KeyError`` if there is no origin node.
+
+ Returns a subclass of ``dns.zone.Zone``.
+ """
+ return _from_text(
+ text,
+ origin,
+ rdclass,
+ relativize,
+ zone_factory,
+ filename,
+ allow_include,
+ check_origin,
+ idna_codec,
+ allow_directives,
+ )
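+
+# Illustrative sketch (not part of the library): build a zone from a small
+# hypothetical zone file string; the SOA and NS records satisfy check_origin().
+#
+#     zone = dns.zone.from_text(
+#         "@ 3600 IN SOA ns root 1 7200 900 1209600 86400\n"
+#         "@ 3600 IN NS ns\n"
+#         "ns 3600 IN A 10.0.0.1\n",
+#         origin="example.",
+#     )
+#     # Restricting directives (here `zone_text` is hypothetical); $INCLUDE
+#     # and $GENERATE would then be rejected:
+#     zone2 = dns.zone.from_text(zone_text, "example.", allow_directives=["$ORIGIN"])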
+
+
+def from_file(
+ f: Any,
+ origin: dns.name.Name | str | None = None,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ relativize: bool = True,
+ zone_factory: Any = Zone,
+ filename: str | None = None,
+ allow_include: bool = True,
+ check_origin: bool = True,
+ idna_codec: dns.name.IDNACodec | None = None,
+ allow_directives: bool | Iterable[str] = True,
+) -> Zone:
+ """Read a zone file and build a zone object.
+
+ *f*, a file or ``str``. If *f* is a string, it is treated
+ as the name of a file to open.
+
+ *origin*, a ``dns.name.Name``, a ``str``, or ``None``. The origin
+ of the zone; if not specified, the first ``$ORIGIN`` statement in the
+ zone file will determine the origin of the zone.
+
+ *rdclass*, an ``int``, the zone's rdata class; the default is class IN.
+
+ *relativize*, a ``bool``, determines whether domain names are
+ relativized to the zone's origin. The default is ``True``.
+
+ *zone_factory*, the zone factory to use or ``None``. If ``None``, then
+ ``dns.zone.Zone`` will be used. The value may be any class or callable
+ that returns a subclass of ``dns.zone.Zone``.
+
+ *filename*, a ``str`` or ``None``, the filename to emit when
+ describing where an error occurred; the default is ``''``.
+
+ *allow_include*, a ``bool``. If ``True``, the default, then ``$INCLUDE``
+ directives are permitted. If ``False``, then encountering a ``$INCLUDE``
+ will raise a ``SyntaxError`` exception.
+
+ *check_origin*, a ``bool``. If ``True``, the default, then sanity
+ checks of the origin node will be made by calling the zone's
+ ``check_origin()`` method.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used.
+
+ *allow_directives*, a ``bool`` or an iterable of ``str``. If ``True``, the default,
+ then directives are permitted, and the *allow_include* parameter controls whether
+ ``$INCLUDE`` is permitted. If ``False`` or an empty iterable, then no directive
+ processing is done and any directive-like text will be treated as a regular owner
+ name. If a non-empty iterable, then only the listed directives (including the
+ ``$``) are allowed.
+
+ Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
+
+ Raises ``dns.zone.NoNS`` if there is no NS RRset.
+
+ Raises ``KeyError`` if there is no origin node.
+
+ Returns a subclass of ``dns.zone.Zone``.
+ """
+
+ if isinstance(f, str):
+ if filename is None:
+ filename = f
+ cm: contextlib.AbstractContextManager = open(f, encoding="utf-8")
+ else:
+ cm = contextlib.nullcontext(f)
+ with cm as f:
+ return _from_text(
+ f,
+ origin,
+ rdclass,
+ relativize,
+ zone_factory,
+ filename,
+ allow_include,
+ check_origin,
+ idna_codec,
+ allow_directives,
+ )
+ assert False # make mypy happy lgtm[py/unreachable-statement]
+
+
+def from_xfr(
+ xfr: Any,
+ zone_factory: Any = Zone,
+ relativize: bool = True,
+ check_origin: bool = True,
+) -> Zone:
+ """Convert the output of a zone transfer generator into a zone object.
+
+ *xfr*, a generator of ``dns.message.Message`` objects, typically
+ ``dns.query.xfr()``.
+
+ *relativize*, a ``bool``, determines whether domain names are
+ relativized to the zone's origin. The default is ``True``.
+ It is essential that the relativize setting matches the one specified
+ to the generator.
+
+ *check_origin*, a ``bool``. If ``True``, the default, then sanity
+ checks of the origin node will be made by calling the zone's
+ ``check_origin()`` method.
+
+ Raises ``dns.zone.NoSOA`` if there is no SOA RRset.
+
+ Raises ``dns.zone.NoNS`` if there is no NS RRset.
+
+ Raises ``KeyError`` if there is no origin node.
+
+ Raises ``ValueError`` if no messages are yielded by the generator.
+
+ Returns a subclass of ``dns.zone.Zone``.
+ """
+
+ z = None
+ for r in xfr:
+ if z is None:
+ if relativize:
+ origin = r.origin
+ else:
+ origin = r.answer[0].name
+ rdclass = r.answer[0].rdclass
+ z = zone_factory(origin, rdclass, relativize=relativize)
+ for rrset in r.answer:
+ znode = z.nodes.get(rrset.name)
+ if not znode:
+ znode = z.node_factory()
+ z.nodes[rrset.name] = znode
+ zrds = znode.find_rdataset(rrset.rdclass, rrset.rdtype, rrset.covers, True)
+ zrds.update_ttl(rrset.ttl)
+ for rd in rrset:
+ zrds.add(rd)
+ if z is None:
+ raise ValueError("empty transfer")
+ if check_origin:
+ z.check_origin()
+ return z
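+
+# Illustrative sketch (not part of the library): from_xfr() consumes the
+# message generator produced by dns.query.xfr(); the server address and zone
+# name below are hypothetical.
+#
+#     xfr = dns.query.xfr("10.0.0.1", "example.")
+#     zone = dns.zone.from_xfr(xfr)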
diff --git a/tapdown/lib/python3.11/site-packages/dns/zonefile.py b/tapdown/lib/python3.11/site-packages/dns/zonefile.py
new file mode 100644
index 0000000..7a81454
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/zonefile.py
@@ -0,0 +1,756 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
+#
+# Permission to use, copy, modify, and distribute this software and its
+# documentation for any purpose with or without fee is hereby granted,
+# provided that the above copyright notice and this permission notice
+# appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""DNS Zones."""
+
+import re
+import sys
+from typing import Any, Iterable, List, Set, Tuple, cast
+
+import dns.exception
+import dns.grange
+import dns.name
+import dns.node
+import dns.rdata
+import dns.rdataclass
+import dns.rdatatype
+import dns.rdtypes.ANY.SOA
+import dns.rrset
+import dns.tokenizer
+import dns.transaction
+import dns.ttl
+
+
+class UnknownOrigin(dns.exception.DNSException):
+ """Unknown origin"""
+
+
+class CNAMEAndOtherData(dns.exception.DNSException):
+ """A node has a CNAME and other data"""
+
+
+def _check_cname_and_other_data(txn, name, rdataset):
+ rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
+ node = txn.get_node(name)
+ if node is None:
+ # empty nodes are neutral.
+ return
+ node_kind = node.classify()
+ if (
+ node_kind == dns.node.NodeKind.CNAME
+ and rdataset_kind == dns.node.NodeKind.REGULAR
+ ):
+ raise CNAMEAndOtherData("rdataset type is not compatible with a CNAME node")
+ elif (
+ node_kind == dns.node.NodeKind.REGULAR
+ and rdataset_kind == dns.node.NodeKind.CNAME
+ ):
+ raise CNAMEAndOtherData(
+ "CNAME rdataset is not compatible with a regular data node"
+ )
+ # Otherwise at least one of the node and the rdataset is neutral, so
+ # adding the rdataset is ok
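+
+# Illustrative sketch (not part of the library): with this check installed,
+# reading a zone in which one owner name holds both a CNAME and regular data
+# raises CNAMEAndOtherData, e.g. a zone file containing both of:
+#
+#     alias 300 IN CNAME target.example.
+#     alias 300 IN A 10.0.0.1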
+
+
+SavedStateType = Tuple[
+ dns.tokenizer.Tokenizer,
+ dns.name.Name | None, # current_origin
+ dns.name.Name | None, # last_name
+ Any | None, # current_file
+ int, # last_ttl
+ bool, # last_ttl_known
+ int, # default_ttl
+ bool, # default_ttl_known
+]
+
+
+def _upper_dollarize(s):
+ s = s.upper()
+ if not s.startswith("$"):
+ s = "$" + s
+ return s
+
+
+class Reader:
+ """Read a DNS zone file into a transaction."""
+
+ def __init__(
+ self,
+ tok: dns.tokenizer.Tokenizer,
+ rdclass: dns.rdataclass.RdataClass,
+ txn: dns.transaction.Transaction,
+ allow_include: bool = False,
+ allow_directives: bool | Iterable[str] = True,
+ force_name: dns.name.Name | None = None,
+ force_ttl: int | None = None,
+ force_rdclass: dns.rdataclass.RdataClass | None = None,
+ force_rdtype: dns.rdatatype.RdataType | None = None,
+ default_ttl: int | None = None,
+ ):
+ self.tok = tok
+ (self.zone_origin, self.relativize, _) = txn.manager.origin_information()
+ self.current_origin = self.zone_origin
+ self.last_ttl = 0
+ self.last_ttl_known = False
+ if force_ttl is not None:
+ default_ttl = force_ttl
+ if default_ttl is None:
+ self.default_ttl = 0
+ self.default_ttl_known = False
+ else:
+ self.default_ttl = default_ttl
+ self.default_ttl_known = True
+ self.last_name = self.current_origin
+ self.zone_rdclass = rdclass
+ self.txn = txn
+ self.saved_state: List[SavedStateType] = []
+ self.current_file: Any | None = None
+ self.allowed_directives: Set[str]
+ if allow_directives is True:
+ self.allowed_directives = {"$GENERATE", "$ORIGIN", "$TTL"}
+ if allow_include:
+ self.allowed_directives.add("$INCLUDE")
+ elif allow_directives is False:
+ # allow_include was ignored in earlier releases if allow_directives was
+ # False, so we continue that.
+ self.allowed_directives = set()
+ else:
+ # Note that if directives are explicitly specified, then allow_include
+ # is ignored.
+ self.allowed_directives = set(_upper_dollarize(d) for d in allow_directives)
+ self.force_name = force_name
+ self.force_ttl = force_ttl
+ self.force_rdclass = force_rdclass
+ self.force_rdtype = force_rdtype
+ self.txn.check_put_rdataset(_check_cname_and_other_data)
+
+ def _eat_line(self):
+ while True:
+ token = self.tok.get()
+ if token.is_eol_or_eof():
+ break
+
+ def _get_identifier(self):
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ return token
+
+ def _rr_line(self):
+ """Process one line from a DNS zone file."""
+ token = None
+ # Name
+ if self.force_name is not None:
+ name = self.force_name
+ else:
+ if self.current_origin is None:
+ raise UnknownOrigin
+ token = self.tok.get(want_leading=True)
+ if not token.is_whitespace():
+ self.last_name = self.tok.as_name(token, self.current_origin)
+ else:
+ token = self.tok.get()
+ if token.is_eol_or_eof():
+ # treat leading WS followed by EOL/EOF as if they were EOL/EOF.
+ return
+ self.tok.unget(token)
+ name = self.last_name
+ if name is None:
+ raise dns.exception.SyntaxError("the last used name is undefined")
+ assert self.zone_origin is not None
+ if not name.is_subdomain(self.zone_origin):
+ self._eat_line()
+ return
+ if self.relativize:
+ name = name.relativize(self.zone_origin)
+
+ # TTL
+ if self.force_ttl is not None:
+ ttl = self.force_ttl
+ self.last_ttl = ttl
+ self.last_ttl_known = True
+ else:
+ token = self._get_identifier()
+ ttl = None
+ try:
+ ttl = dns.ttl.from_text(token.value)
+ self.last_ttl = ttl
+ self.last_ttl_known = True
+ token = None
+ except dns.ttl.BadTTL:
+ self.tok.unget(token)
+
+ # Class
+ if self.force_rdclass is not None:
+ rdclass = self.force_rdclass
+ else:
+ token = self._get_identifier()
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ except dns.exception.SyntaxError:
+ raise
+ except Exception:
+ rdclass = self.zone_rdclass
+ self.tok.unget(token)
+ if rdclass != self.zone_rdclass:
+ raise dns.exception.SyntaxError("RR class is not zone's class")
+
+ if ttl is None:
+ # support for <class> <ttl> syntax
+ token = self._get_identifier()
+ ttl = None
+ try:
+ ttl = dns.ttl.from_text(token.value)
+ self.last_ttl = ttl
+ self.last_ttl_known = True
+ token = None
+ except dns.ttl.BadTTL:
+ if self.default_ttl_known:
+ ttl = self.default_ttl
+ elif self.last_ttl_known:
+ ttl = self.last_ttl
+ self.tok.unget(token)
+
+ # Type
+ if self.force_rdtype is not None:
+ rdtype = self.force_rdtype
+ else:
+ token = self._get_identifier()
+ try:
+ rdtype = dns.rdatatype.from_text(token.value)
+ except Exception:
+ raise dns.exception.SyntaxError(f"unknown rdatatype '{token.value}'")
+
+ try:
+ rd = dns.rdata.from_text(
+ rdclass,
+ rdtype,
+ self.tok,
+ self.current_origin,
+ self.relativize,
+ self.zone_origin,
+ )
+ except dns.exception.SyntaxError:
+ # Catch and reraise.
+ raise
+ except Exception:
+ # All exceptions that occur in the processing of rdata
+ # are treated as syntax errors. This is not strictly
+ # correct, but it is correct almost all of the time.
+ # We convert them to syntax errors so that we can emit
+ # helpful filename:line info.
+ (ty, va) = sys.exc_info()[:2]
+ raise dns.exception.SyntaxError(f"caught exception {str(ty)}: {str(va)}")
+
+ if not self.default_ttl_known and rdtype == dns.rdatatype.SOA:
+ # The pre-RFC2308 and pre-BIND9 behavior inherits the zone default
+ # TTL from the SOA minttl if no $TTL statement is present before the
+ # SOA is parsed.
+ soa_rd = cast(dns.rdtypes.ANY.SOA.SOA, rd)
+ self.default_ttl = soa_rd.minimum
+ self.default_ttl_known = True
+ if ttl is None:
+ # if we didn't have a TTL on the SOA, set it!
+ ttl = soa_rd.minimum
+
+ # TTL check. We had to wait until now to do this as the SOA RR's
+ # own TTL can be inferred from its minimum.
+ if ttl is None:
+ raise dns.exception.SyntaxError("Missing default TTL value")
+
+ self.txn.add(name, ttl, rd)
+
+ def _parse_modify(self, side: str) -> Tuple[str, str, int, int, str]:
+ # Here we catch everything in '{' '}' in a group so we can replace it
+ # with ''.
+ is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
+ is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
+ is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
+ # Sometimes there are modifiers in the hostname. These come after
+ # the dollar sign. They are in the form: ${offset[,width[,base]]}.
+ # Make names
+ mod = ""
+ sign = "+"
+ offset = "0"
+ width = "0"
+ base = "d"
+ g1 = is_generate1.match(side)
+ if g1:
+ mod, sign, offset, width, base = g1.groups()
+ if sign == "":
+ sign = "+"
+ else:
+ g2 = is_generate2.match(side)
+ if g2:
+ mod, sign, offset = g2.groups()
+ if sign == "":
+ sign = "+"
+ width = "0"
+ base = "d"
+ else:
+ g3 = is_generate3.match(side)
+ if g3:
+ mod, sign, offset, width = g3.groups()
+ if sign == "":
+ sign = "+"
+ base = "d"
+
+ ioffset = int(offset)
+ iwidth = int(width)
+
+ if sign not in ["+", "-"]:
+ raise dns.exception.SyntaxError(f"invalid offset sign {sign}")
+ if base not in ["d", "o", "x", "X", "n", "N"]:
+ raise dns.exception.SyntaxError(f"invalid type {base}")
+
+ return mod, sign, ioffset, iwidth, base
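+
+ # Illustrative sketch (not part of the library): for a hypothetical line
+ #
+ #     $GENERATE 1-4 host-${0,2,d} A 10.0.0.$
+ #
+ # _parse_modify("host-${0,2,d}") returns ("{0,2,d}", "+", 0, 2, "d"), so
+ # iteration 3 expands the lhs to "host-03" and the rhs to "10.0.0.3".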
+
+ def _generate_line(self):
+ # range lhs [ttl] [class] type rhs [ comment ]
+ """Process one line containing the GENERATE statement from a DNS
+ zone file."""
+ if self.current_origin is None:
+ raise UnknownOrigin
+
+ token = self.tok.get()
+ # Range (required)
+ try:
+ start, stop, step = dns.grange.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except Exception:
+ raise dns.exception.SyntaxError
+
+ # lhs (required)
+ try:
+ lhs = token.value
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except Exception:
+ raise dns.exception.SyntaxError
+
+ # TTL
+ try:
+ ttl = dns.ttl.from_text(token.value)
+ self.last_ttl = ttl
+ self.last_ttl_known = True
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.ttl.BadTTL:
+ if not (self.last_ttl_known or self.default_ttl_known):
+ raise dns.exception.SyntaxError("Missing default TTL value")
+ if self.default_ttl_known:
+ ttl = self.default_ttl
+ elif self.last_ttl_known:
+ ttl = self.last_ttl
+ else:
+ # We don't go to the extra "look at the SOA" level of effort for
+ # $GENERATE, because the user really ought to have defined a TTL
+ # somehow!
+ raise dns.exception.SyntaxError("Missing default TTL value")
+
+ # Class
+ try:
+ rdclass = dns.rdataclass.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except dns.exception.SyntaxError:
+ raise dns.exception.SyntaxError
+ except Exception:
+ rdclass = self.zone_rdclass
+ if rdclass != self.zone_rdclass:
+ raise dns.exception.SyntaxError("RR class is not zone's class")
+ # Type
+ try:
+ rdtype = dns.rdatatype.from_text(token.value)
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError
+ except Exception:
+ raise dns.exception.SyntaxError(f"unknown rdatatype '{token.value}'")
+
+ # rhs (required)
+ rhs = token.value
+
+ def _calculate_index(counter: int, offset_sign: str, offset: int) -> int:
+ """Calculate the index from the counter and offset."""
+ if offset_sign == "-":
+ offset *= -1
+ return counter + offset
+
+ def _format_index(index: int, base: str, width: int) -> str:
+ """Format the index with the given base, and zero-fill it
+ to the given width."""
+ if base in ["d", "o", "x", "X"]:
+ return format(index, base).zfill(width)
+
+ # base can only be n or N here
+ hexa = _format_index(index, "x", width)
+ # keep `width` digits plus the separating dots
+ nibbles = ".".join(hexa[::-1])[: 2 * width - 1]
+ if base == "N":
+ nibbles = nibbles.upper()
+ return nibbles
+
+ lmod, lsign, loffset, lwidth, lbase = self._parse_modify(lhs)
+ rmod, rsign, roffset, rwidth, rbase = self._parse_modify(rhs)
+ for i in range(start, stop + 1, step):
+ # +1 because bind is inclusive and python is exclusive
+
+ lindex = _calculate_index(i, lsign, loffset)
+ rindex = _calculate_index(i, rsign, roffset)
+
+ lzfindex = _format_index(lindex, lbase, lwidth)
+ rzfindex = _format_index(rindex, rbase, rwidth)
+
+ name = lhs.replace(f"${lmod}", lzfindex)
+ rdata = rhs.replace(f"${rmod}", rzfindex)
+
+ self.last_name = dns.name.from_text(
+ name, self.current_origin, self.tok.idna_codec
+ )
+ name = self.last_name
+ assert self.zone_origin is not None
+ if not name.is_subdomain(self.zone_origin):
+ self._eat_line()
+ return
+ if self.relativize:
+ name = name.relativize(self.zone_origin)
+
+ try:
+ rd = dns.rdata.from_text(
+ rdclass,
+ rdtype,
+ rdata,
+ self.current_origin,
+ self.relativize,
+ self.zone_origin,
+ )
+ except dns.exception.SyntaxError:
+ # Catch and reraise.
+ raise
+ except Exception:
+ # All exceptions that occur in the processing of rdata
+ # are treated as syntax errors. This is not strictly
+ # correct, but it is correct almost all of the time.
+ # We convert them to syntax errors so that we can emit
+ # helpful filename:line info.
+ (ty, va) = sys.exc_info()[:2]
+ raise dns.exception.SyntaxError(
+ f"caught exception {str(ty)}: {str(va)}"
+ )
+
+ self.txn.add(name, ttl, rd)
+
+ def read(self) -> None:
+ """Read a DNS zone file and build a zone object.
+
+ Raises ``dns.zone.NoSOA`` if no SOA RR was found at the zone origin.
+
+ Raises ``dns.zone.NoNS`` if no NS RRset was found at the zone origin.
+ """
+
+ try:
+ while True:
+ token = self.tok.get(True, True)
+ if token.is_eof():
+ if self.current_file is not None:
+ self.current_file.close()
+ if len(self.saved_state) > 0:
+ (
+ self.tok,
+ self.current_origin,
+ self.last_name,
+ self.current_file,
+ self.last_ttl,
+ self.last_ttl_known,
+ self.default_ttl,
+ self.default_ttl_known,
+ ) = self.saved_state.pop(-1)
+ continue
+ break
+ elif token.is_eol():
+ continue
+ elif token.is_comment():
+ self.tok.get_eol()
+ continue
+ elif token.value[0] == "$" and len(self.allowed_directives) > 0:
+ # Note that we only run directive processing code if at least
+ # one directive is allowed in order to be backwards compatible
+ c = token.value.upper()
+ if c not in self.allowed_directives:
+ raise dns.exception.SyntaxError(
+ f"zone file directive '{c}' is not allowed"
+ )
+ if c == "$TTL":
+ token = self.tok.get()
+ if not token.is_identifier():
+ raise dns.exception.SyntaxError("bad $TTL")
+ self.default_ttl = dns.ttl.from_text(token.value)
+ self.default_ttl_known = True
+ self.tok.get_eol()
+ elif c == "$ORIGIN":
+ self.current_origin = self.tok.get_name()
+ self.tok.get_eol()
+ if self.zone_origin is None:
+ self.zone_origin = self.current_origin
+ self.txn._set_origin(self.current_origin)
+ elif c == "$INCLUDE":
+ token = self.tok.get()
+ filename = token.value
+ token = self.tok.get()
+ new_origin: dns.name.Name | None
+ if token.is_identifier():
+ new_origin = dns.name.from_text(
+ token.value, self.current_origin, self.tok.idna_codec
+ )
+ self.tok.get_eol()
+ elif not token.is_eol_or_eof():
+ raise dns.exception.SyntaxError("bad origin in $INCLUDE")
+ else:
+ new_origin = self.current_origin
+ self.saved_state.append(
+ (
+ self.tok,
+ self.current_origin,
+ self.last_name,
+ self.current_file,
+ self.last_ttl,
+ self.last_ttl_known,
+ self.default_ttl,
+ self.default_ttl_known,
+ )
+ )
+ self.current_file = open(filename, encoding="utf-8")
+ self.tok = dns.tokenizer.Tokenizer(self.current_file, filename)
+ self.current_origin = new_origin
+ elif c == "$GENERATE":
+ self._generate_line()
+ else:
+ raise dns.exception.SyntaxError(
+ f"Unknown zone file directive '{c}'"
+ )
+ continue
+ self.tok.unget(token)
+ self._rr_line()
+ except dns.exception.SyntaxError as detail:
+ (filename, line_number) = self.tok.where()
+ if detail is None:
+ detail = "syntax error"
+ ex = dns.exception.SyntaxError(f"{filename}:{line_number}: {detail}")
+ tb = sys.exc_info()[2]
+ raise ex.with_traceback(tb) from None
+
+
+class RRsetsReaderTransaction(dns.transaction.Transaction):
+ def __init__(self, manager, replacement, read_only):
+ assert not read_only
+ super().__init__(manager, replacement, read_only)
+ self.rdatasets = {}
+
+ def _get_rdataset(self, name, rdtype, covers):
+ return self.rdatasets.get((name, rdtype, covers))
+
+ def _get_node(self, name):
+ rdatasets = []
+ for (rdataset_name, _, _), rdataset in self.rdatasets.items():
+ if name == rdataset_name:
+ rdatasets.append(rdataset)
+ if len(rdatasets) == 0:
+ return None
+ node = dns.node.Node()
+ node.rdatasets = rdatasets
+ return node
+
+ def _put_rdataset(self, name, rdataset):
+ self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
+
+ def _delete_name(self, name):
+ # First remove any changes involving the name
+ remove = []
+ for key in self.rdatasets:
+ if key[0] == name:
+ remove.append(key)
+ if len(remove) > 0:
+ for key in remove:
+ del self.rdatasets[key]
+
+ def _delete_rdataset(self, name, rdtype, covers):
+ try:
+ del self.rdatasets[(name, rdtype, covers)]
+ except KeyError:
+ pass
+
+ def _name_exists(self, name):
+ for n, _, _ in self.rdatasets:
+ if n == name:
+ return True
+ return False
+
+ def _changed(self):
+ return len(self.rdatasets) > 0
+
+ def _end_transaction(self, commit):
+ if commit and self._changed():
+ rrsets = []
+ for (name, _, _), rdataset in self.rdatasets.items():
+ rrset = dns.rrset.RRset(
+ name, rdataset.rdclass, rdataset.rdtype, rdataset.covers
+ )
+ rrset.update(rdataset)
+ rrsets.append(rrset)
+ self.manager.set_rrsets(rrsets) # pyright: ignore
+
+ def _set_origin(self, origin):
+ pass
+
+ def _iterate_rdatasets(self):
+ raise NotImplementedError # pragma: no cover
+
+ def _iterate_names(self):
+ raise NotImplementedError # pragma: no cover
+
+
+class RRSetsReaderManager(dns.transaction.TransactionManager):
+ def __init__(
+ self,
+ origin: dns.name.Name | None = dns.name.root,
+ relativize: bool = False,
+ rdclass: dns.rdataclass.RdataClass = dns.rdataclass.IN,
+ ):
+ self.origin = origin
+ self.relativize = relativize
+ self.rdclass = rdclass
+ self.rrsets: List[dns.rrset.RRset] = []
+
+ def reader(self): # pragma: no cover
+ raise NotImplementedError
+
+ def writer(self, replacement=False):
+ assert replacement is True
+ return RRsetsReaderTransaction(self, True, False)
+
+ def get_class(self):
+ return self.rdclass
+
+ def origin_information(self):
+ if self.relativize:
+ effective = dns.name.empty
+ else:
+ effective = self.origin
+ return (self.origin, self.relativize, effective)
+
+ def set_rrsets(self, rrsets: List[dns.rrset.RRset]) -> None:
+ self.rrsets = rrsets
+
+
+def read_rrsets(
+ text: Any,
+ name: dns.name.Name | str | None = None,
+ ttl: int | None = None,
+ rdclass: dns.rdataclass.RdataClass | str | None = dns.rdataclass.IN,
+ default_rdclass: dns.rdataclass.RdataClass | str = dns.rdataclass.IN,
+ rdtype: dns.rdatatype.RdataType | str | None = None,
+ default_ttl: int | str | None = None,
+ idna_codec: dns.name.IDNACodec | None = None,
+ origin: dns.name.Name | str | None = dns.name.root,
+ relativize: bool = False,
+) -> List[dns.rrset.RRset]:
+ """Read one or more rrsets from the specified text, possibly subject
+ to restrictions.
+
+ *text*, a file object or a string, is the input to process.
+
+ *name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
+ the rrset. If not ``None``, then the owner name is "forced", and the
+ input must not specify an owner name. If ``None``, then any owner names
+ are allowed and must be present in the input.
+
+ *ttl*, an ``int``, string, or ``None``. If not ``None``, the TTL is
+ forced to be the specified value and the input must not specify a TTL.
+ If ``None``, then a TTL may be specified in the input. If it is not
+ specified, then the *default_ttl* will be used.
+
+ *rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If
+ not ``None``, then the class is forced to the specified value, and the
+ input must not specify a class. If ``None``, then the input may specify
+ a class that matches *default_rdclass*. Note that it is not possible to
+ return rrsets with differing classes; specifying ``None`` for the class
+ simply allows the user to optionally type a class as that may be convenient
+ when cutting and pasting.
+
+ *default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class
+ of the returned rrsets.
+
+ *rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not
+ ``None``, then the type is forced to the specified value, and the
+ input must not specify a type. If ``None``, then a type must be present
+ for each RR.
+
+ *default_ttl*, an ``int``, string, or ``None``. If not ``None``, then if
+ the TTL is not forced and is not specified, this value will be used.
+ If ``None``, then an error will occur if the TTL is neither forced nor
+ specified in the input.
+
+ *idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
+ encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
+ is used. Note that codecs only apply to the owner name; dnspython does
+ not do IDNA for names in rdata, as there is no IDNA zonefile format.
+
+ *origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
+ relative names in the input, and also the origin to relativize to if
+ *relativize* is ``True``.
+
+ *relativize*, a bool. If ``True``, names are relativized to the *origin*;
+ if ``False`` then any relative names in the input are made absolute by
+ appending the *origin*.
+ """
+ if isinstance(origin, str):
+ origin = dns.name.from_text(origin, dns.name.root, idna_codec)
+ if isinstance(name, str):
+ name = dns.name.from_text(name, origin, idna_codec)
+ if isinstance(ttl, str):
+ ttl = dns.ttl.from_text(ttl)
+ if isinstance(default_ttl, str):
+ default_ttl = dns.ttl.from_text(default_ttl)
+ if rdclass is not None:
+ rdclass = dns.rdataclass.RdataClass.make(rdclass)
+ else:
+ rdclass = None
+ default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
+ if rdtype is not None:
+ rdtype = dns.rdatatype.RdataType.make(rdtype)
+ else:
+ rdtype = None
+ manager = RRSetsReaderManager(origin, relativize, default_rdclass)
+ with manager.writer(True) as txn:
+ tok = dns.tokenizer.Tokenizer(text, "", idna_codec=idna_codec)
+ reader = Reader(
+ tok,
+ default_rdclass,
+ txn,
+ allow_directives=False,
+ force_name=name,
+ force_ttl=ttl,
+ force_rdclass=rdclass,
+ force_rdtype=rdtype,
+ default_ttl=default_ttl,
+ )
+ reader.read()
+ return manager.rrsets
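+
+# Illustrative sketch (not part of the library): parse two RRs, forcing the
+# owner name and supplying a default TTL; names and addresses are hypothetical.
+#
+#     rrsets = dns.zonefile.read_rrsets(
+#         "A 10.0.0.1\nMX 10 mail.example.\n",
+#         name="www.example.",
+#         default_ttl=300,
+#     )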
diff --git a/tapdown/lib/python3.11/site-packages/dns/zonetypes.py b/tapdown/lib/python3.11/site-packages/dns/zonetypes.py
new file mode 100644
index 0000000..195ee2e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dns/zonetypes.py
@@ -0,0 +1,37 @@
+# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
+
+"""Common zone-related types."""
+
+# This is a separate file to avoid import circularity between dns.zone and
+# the implementation of the ZONEMD type.
+
+import hashlib
+
+import dns.enum
+
+
+class DigestScheme(dns.enum.IntEnum):
+ """ZONEMD Scheme"""
+
+ SIMPLE = 1
+
+ @classmethod
+ def _maximum(cls):
+ return 255
+
+
+class DigestHashAlgorithm(dns.enum.IntEnum):
+ """ZONEMD Hash Algorithm"""
+
+ SHA384 = 1
+ SHA512 = 2
+
+ @classmethod
+ def _maximum(cls):
+ return 255
+
+
+_digest_hashers = {
+ DigestHashAlgorithm.SHA384: hashlib.sha384,
+ DigestHashAlgorithm.SHA512: hashlib.sha512,
+}
diff --git a/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/INSTALLER b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/METADATA b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/METADATA
new file mode 100644
index 0000000..eaaf09b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/METADATA
@@ -0,0 +1,149 @@
+Metadata-Version: 2.4
+Name: dnspython
+Version: 2.8.0
+Summary: DNS toolkit
+Project-URL: homepage, https://www.dnspython.org
+Project-URL: repository, https://github.com/rthalley/dnspython.git
+Project-URL: documentation, https://dnspython.readthedocs.io/en/stable/
+Project-URL: issues, https://github.com/rthalley/dnspython/issues
+Author-email: Bob Halley
+License: ISC
+License-File: LICENSE
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: ISC License (ISCL)
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Internet :: Name Service (DNS)
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.10
+Provides-Extra: dev
+Requires-Dist: black>=25.1.0; extra == 'dev'
+Requires-Dist: coverage>=7.0; extra == 'dev'
+Requires-Dist: flake8>=7; extra == 'dev'
+Requires-Dist: hypercorn>=0.17.0; extra == 'dev'
+Requires-Dist: mypy>=1.17; extra == 'dev'
+Requires-Dist: pylint>=3; extra == 'dev'
+Requires-Dist: pytest-cov>=6.2.0; extra == 'dev'
+Requires-Dist: pytest>=8.4; extra == 'dev'
+Requires-Dist: quart-trio>=0.12.0; extra == 'dev'
+Requires-Dist: sphinx-rtd-theme>=3.0.0; extra == 'dev'
+Requires-Dist: sphinx>=8.2.0; extra == 'dev'
+Requires-Dist: twine>=6.1.0; extra == 'dev'
+Requires-Dist: wheel>=0.45.0; extra == 'dev'
+Provides-Extra: dnssec
+Requires-Dist: cryptography>=45; extra == 'dnssec'
+Provides-Extra: doh
+Requires-Dist: h2>=4.2.0; extra == 'doh'
+Requires-Dist: httpcore>=1.0.0; extra == 'doh'
+Requires-Dist: httpx>=0.28.0; extra == 'doh'
+Provides-Extra: doq
+Requires-Dist: aioquic>=1.2.0; extra == 'doq'
+Provides-Extra: idna
+Requires-Dist: idna>=3.10; extra == 'idna'
+Provides-Extra: trio
+Requires-Dist: trio>=0.30; extra == 'trio'
+Provides-Extra: wmi
+Requires-Dist: wmi>=1.5.1; (platform_system == 'Windows') and extra == 'wmi'
+Description-Content-Type: text/markdown
+
+# dnspython
+
+[](https://github.com/rthalley/dnspython/actions/)
+[](https://dnspython.readthedocs.io/en/latest/?badge=latest)
+[](https://badge.fury.io/py/dnspython)
+[](https://opensource.org/licenses/ISC)
+[](https://github.com/psf/black)
+
+## INTRODUCTION
+
+`dnspython` is a DNS toolkit for Python. It supports almost all record types. It
+can be used for queries, zone transfers, and dynamic updates. It supports
+TSIG-authenticated messages and EDNS0.
+
+`dnspython` provides both high- and low-level access to DNS. The high-level
+classes perform queries for data of a given name, type, and class, and return an
+answer set. The low-level classes allow direct manipulation of DNS zones,
+messages, names, and records.
+
+To see a few of the ways `dnspython` can be used, look in the `examples/`
+directory.
+
+`dnspython` is a utility for working with DNS; `/etc/hosts` is thus not used. For
+simple forward DNS lookups, it's better to use `socket.getaddrinfo()` or
+`socket.gethostbyname()`.
+
+`dnspython` originated at Nominum where it was developed to facilitate the
+testing of DNS software.
+
+## ABOUT THIS RELEASE
+
+This is `dnspython` 2.8.0.
+Please read
+[What's New](https://dnspython.readthedocs.io/en/stable/whatsnew.html) for
+information about the changes in this release.
+
+## INSTALLATION
+
+* Many distributions have dnspython packaged for you, so you should check there
+ first.
+* To use a wheel downloaded from PyPI, run:
+
+```
+ pip install dnspython
+```
+
+* To install from the source code, go into the top-level of the source code
+ and run:
+
+```
+ pip install --upgrade pip build
+ python -m build
+ pip install dist/*.whl
+```
+
+* To install the latest from the main branch, run
+`pip install git+https://github.com/rthalley/dnspython.git`
+
+`dnspython`'s default installation does not depend on any modules other than
+those in the Python standard library. To use some features, additional modules
+must be installed. For convenience, `pip` options are defined for the
+requirements.
+
+If you want to use DNS-over-HTTPS, run
+`pip install dnspython[doh]`.
+
+If you want to use DNSSEC functionality, run
+`pip install dnspython[dnssec]`.
+
+If you want to use internationalized domain names (IDNA)
+functionality, run
+`pip install dnspython[idna]`.
+
+If you want to use the Trio asynchronous I/O package, run
+`pip install dnspython[trio]`.
+
+If you want to use WMI on Windows to determine the active DNS settings
+instead of the default registry scanning method, run
+`pip install dnspython[wmi]`.
+
+If you want to try the experimental DNS-over-QUIC code, run
+`pip install dnspython[doq]`.
+
+Note that you can install any combination of the above, e.g.:
+`pip install dnspython[doh,dnssec,idna]`
+
+### Notices
+
+Python 2.x support ended with the release of 1.16.0. `dnspython` supports Python 3.10
+and later. Future support is aligned with the lifetime of the Python 3 versions.
+
+Documentation has moved to
+[dnspython.readthedocs.io](https://dnspython.readthedocs.io).
diff --git a/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/RECORD b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/RECORD
new file mode 100644
index 0000000..397075c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/RECORD
@@ -0,0 +1,304 @@
+dns/__init__.py,sha256=2TTaN3FRnBIkYhrrkDUs7XYnu4h9zTlfOWdQ4nLuxnA,1693
+dns/__pycache__/__init__.cpython-311.pyc,,
+dns/__pycache__/_asyncbackend.cpython-311.pyc,,
+dns/__pycache__/_asyncio_backend.cpython-311.pyc,,
+dns/__pycache__/_ddr.cpython-311.pyc,,
+dns/__pycache__/_features.cpython-311.pyc,,
+dns/__pycache__/_immutable_ctx.cpython-311.pyc,,
+dns/__pycache__/_no_ssl.cpython-311.pyc,,
+dns/__pycache__/_tls_util.cpython-311.pyc,,
+dns/__pycache__/_trio_backend.cpython-311.pyc,,
+dns/__pycache__/asyncbackend.cpython-311.pyc,,
+dns/__pycache__/asyncquery.cpython-311.pyc,,
+dns/__pycache__/asyncresolver.cpython-311.pyc,,
+dns/__pycache__/btree.cpython-311.pyc,,
+dns/__pycache__/btreezone.cpython-311.pyc,,
+dns/__pycache__/dnssec.cpython-311.pyc,,
+dns/__pycache__/dnssectypes.cpython-311.pyc,,
+dns/__pycache__/e164.cpython-311.pyc,,
+dns/__pycache__/edns.cpython-311.pyc,,
+dns/__pycache__/entropy.cpython-311.pyc,,
+dns/__pycache__/enum.cpython-311.pyc,,
+dns/__pycache__/exception.cpython-311.pyc,,
+dns/__pycache__/flags.cpython-311.pyc,,
+dns/__pycache__/grange.cpython-311.pyc,,
+dns/__pycache__/immutable.cpython-311.pyc,,
+dns/__pycache__/inet.cpython-311.pyc,,
+dns/__pycache__/ipv4.cpython-311.pyc,,
+dns/__pycache__/ipv6.cpython-311.pyc,,
+dns/__pycache__/message.cpython-311.pyc,,
+dns/__pycache__/name.cpython-311.pyc,,
+dns/__pycache__/namedict.cpython-311.pyc,,
+dns/__pycache__/nameserver.cpython-311.pyc,,
+dns/__pycache__/node.cpython-311.pyc,,
+dns/__pycache__/opcode.cpython-311.pyc,,
+dns/__pycache__/query.cpython-311.pyc,,
+dns/__pycache__/rcode.cpython-311.pyc,,
+dns/__pycache__/rdata.cpython-311.pyc,,
+dns/__pycache__/rdataclass.cpython-311.pyc,,
+dns/__pycache__/rdataset.cpython-311.pyc,,
+dns/__pycache__/rdatatype.cpython-311.pyc,,
+dns/__pycache__/renderer.cpython-311.pyc,,
+dns/__pycache__/resolver.cpython-311.pyc,,
+dns/__pycache__/reversename.cpython-311.pyc,,
+dns/__pycache__/rrset.cpython-311.pyc,,
+dns/__pycache__/serial.cpython-311.pyc,,
+dns/__pycache__/set.cpython-311.pyc,,
+dns/__pycache__/tokenizer.cpython-311.pyc,,
+dns/__pycache__/transaction.cpython-311.pyc,,
+dns/__pycache__/tsig.cpython-311.pyc,,
+dns/__pycache__/tsigkeyring.cpython-311.pyc,,
+dns/__pycache__/ttl.cpython-311.pyc,,
+dns/__pycache__/update.cpython-311.pyc,,
+dns/__pycache__/version.cpython-311.pyc,,
+dns/__pycache__/versioned.cpython-311.pyc,,
+dns/__pycache__/win32util.cpython-311.pyc,,
+dns/__pycache__/wire.cpython-311.pyc,,
+dns/__pycache__/xfr.cpython-311.pyc,,
+dns/__pycache__/zone.cpython-311.pyc,,
+dns/__pycache__/zonefile.cpython-311.pyc,,
+dns/__pycache__/zonetypes.cpython-311.pyc,,
+dns/_asyncbackend.py,sha256=bv-2iaDTEDH4Esx2tc2GeVCnaqHtsQqb3WWqoYZngzA,2403
+dns/_asyncio_backend.py,sha256=08Ezq3L8G190Sdr8qMgjwnWNhbyMa1MFB3pWYkGQ0a0,9147
+dns/_ddr.py,sha256=rHXKC8kncCTT9N4KBh1flicl79nyDjQ-DDvq30MJ3B8,5247
+dns/_features.py,sha256=VYTUetGL5x8IEtxMUQk9_ftat2cvyYJw8HfIfpMM8D8,2493
+dns/_immutable_ctx.py,sha256=Schj9tuGUAQ_QMh612H7Uq6XcvPo5AkVwoBxZJJ8liA,2478
+dns/_no_ssl.py,sha256=M8mj_xYkpsuhny_vHaTWCjI1pNvekYG6V52kdqFkUYY,1502
+dns/_tls_util.py,sha256=kcvrPdGnSGP1fP9sNKekBZ3j-599HwZkmAk6ybyCebM,528
+dns/_trio_backend.py,sha256=Tqzm46FuRSYkUJDYL8qp6Qk8hbc6ZxiLBc8z-NsTULg,8597
+dns/asyncbackend.py,sha256=82fXTFls_m7F_ekQbgUGOkoBbs4BI-GBLDZAWNGUvJ0,2796
+dns/asyncquery.py,sha256=34B1EIekX3oSg0jF8ZSqEiUbNZTsJa3r2oqC01OIY7U,32329
+dns/asyncresolver.py,sha256=TncJ7UukzA0vF79AwNa2gel0y9UO02tCdQf3zUHbygg,17728
+dns/btree.py,sha256=QPz4IzW_yTtSmz_DC6LKvZdJvTs50CQRKbAa0UAFMTs,30757
+dns/btreezone.py,sha256=H9orKjQaMhnPjtAhHpRZlV5wd91N17iuqOmTUVzv6sU,13082
+dns/dnssec.py,sha256=zXqhmUM4k6M-9YVR49crEI6Jc0zhZSk7NX9BWDafhTQ,41356
+dns/dnssecalgs/__init__.py,sha256=B4hebjElugf8zhCauhH6kvACqI50iYLSKxEqUfL6970,4350
+dns/dnssecalgs/__pycache__/__init__.cpython-311.pyc,,
+dns/dnssecalgs/__pycache__/base.cpython-311.pyc,,
+dns/dnssecalgs/__pycache__/cryptography.cpython-311.pyc,,
+dns/dnssecalgs/__pycache__/dsa.cpython-311.pyc,,
+dns/dnssecalgs/__pycache__/ecdsa.cpython-311.pyc,,
+dns/dnssecalgs/__pycache__/eddsa.cpython-311.pyc,,
+dns/dnssecalgs/__pycache__/rsa.cpython-311.pyc,,
+dns/dnssecalgs/base.py,sha256=4Oq9EhKBEYupojZ3hENBiuq2Js3Spimy_NeDb9Rl1a8,2497
+dns/dnssecalgs/cryptography.py,sha256=utsBa_s8OOOKUeudvFullBNMRMjHmeoa66RNA6UiJMw,2428
+dns/dnssecalgs/dsa.py,sha256=ONilkD8Hhartj3Mwe7LKBT0vXS4E0KgfvTtV2ysZLhM,3605
+dns/dnssecalgs/ecdsa.py,sha256=TK8PclMAt7xVQTv6FIse9jZwXVCv_B-_AAgfhK0rTWQ,3283
+dns/dnssecalgs/eddsa.py,sha256=Yc0L9O2A_ySOSSalJiq5h7TU1LWtJgW1JIJWsGx96FI,2000
+dns/dnssecalgs/rsa.py,sha256=YOPPtpfOKdgBfBJvOcDofYTiC4mGmwCfqdYUvEbdHf8,3663
+dns/dnssectypes.py,sha256=CyeuGTS_rM3zXr8wD9qMT9jkzvVfTY2JWckUcogG83E,1799
+dns/e164.py,sha256=Sc-Ctv8lXpaDot_Su02wLFxLpxLReVW7_23YiGrnMC4,3937
+dns/edns.py,sha256=E5HRHMJNGGOyNvkR4iKY2jkaoQasa4K61Feuko9uY5s,17436
+dns/entropy.py,sha256=dSbsNoNVoypURvOu-clqMiD-dFQ-fsKOPYSHwoTjaec,4247
+dns/enum.py,sha256=PBphGzrIWOi8l3MgvkEMpsJapKIejkaQUqFuMWUcZXc,3685
+dns/exception.py,sha256=zEdlBUUsjb3dqk0etKxbFXUng0lLB7TPj7JFsNN7HzQ,5936
+dns/flags.py,sha256=cQ3kTFyvcKiWHAxI5AwchNqxVOrsIrgJ6brgrH42Wq8,2750
+dns/grange.py,sha256=ZqjNVDtb7i6E9D3ai6mcWR_nFNHyCXPp7j3dLFidtvY,2154
+dns/immutable.py,sha256=InrtpKvPxl-74oYbzsyneZwAuX78hUqeG22f2aniZbk,2017
+dns/inet.py,sha256=DbkUeb4PNLmxgUVPXX1GeWQH6e7a5WZ2AP_-befdg-o,5753
+dns/ipv4.py,sha256=dRiZRfyZAOlwlj3YlfbvZChRQAKstYh9k0ibNZwHu5U,2487
+dns/ipv6.py,sha256=GccOccOFZGFlwNFgV79GffZJv6u1GW28jM_amdiLqeM,6517
+dns/message.py,sha256=YVNQjYYFDSY6ttuwz_zvJnsCGuY1t11DdchsNlcBHG0,69152
+dns/name.py,sha256=rHvrUjhkCoR0_ANOH3fHJcY1swefx62SfBTDRvoGTsI,42910
+dns/namedict.py,sha256=hJRYpKeQv6Bd2LaUOPV0L_a0eXEIuqgggPXaH4c3Tow,4000
+dns/nameserver.py,sha256=LLOUGTjdAcj4cs-zAXeaH7Pf90IW0P64MQOrAb9PAPE,10007
+dns/node.py,sha256=Z2lzeqvPjqoR-Pbevp0OJqI_bGxwYzJIIevUccTElaM,12627
+dns/opcode.py,sha256=2EgPHQaGBRXN5q4C0KslagWbmWAbyT9Cw_cBj_sMXeA,2774
+dns/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dns/query.py,sha256=85EWlMD1hDJO5xozZ7tFazMbZldpQ04L0sQFoQfBZiI,61686
+dns/quic/__init__.py,sha256=eqHPKj8SUk5rdeQxJSS-x3XSVqwcUPZlzTUio8mOpSg,2575
+dns/quic/__pycache__/__init__.cpython-311.pyc,,
+dns/quic/__pycache__/_asyncio.cpython-311.pyc,,
+dns/quic/__pycache__/_common.cpython-311.pyc,,
+dns/quic/__pycache__/_sync.cpython-311.pyc,,
+dns/quic/__pycache__/_trio.cpython-311.pyc,,
+dns/quic/_asyncio.py,sha256=YgoU65THKtpHfV8UPAnNr-HkpbkR7XY01E7R3oh5apg,10314
+dns/quic/_common.py,sha256=M7lfxwUfr07fHkefo9BbRogQmwB_lEbittc7ZAQ_ulI,11087
+dns/quic/_sync.py,sha256=Ixj0BR6ngRWaKqTUiTrYbLw0rWVsUE6uJuNJB5oUlI0,10982
+dns/quic/_trio.py,sha256=NdClJJ80TY4kg8wM34JCfzX75fhhDb0vLy-WZkSyW6E,9452
+dns/rcode.py,sha256=A7UyvwbaFDz1PZaoYcAmXcerpZV-bRC2Zv3uJepiXa4,4181
+dns/rdata.py,sha256=7OAmPoSVEysCF84bjvaGXrfB1K69bpswaKtM1X89tXQ,31977
+dns/rdataclass.py,sha256=TK4W4ywB1L_X7EZqk2Gmwnu7vdQpolQF5DtQWyNk5xo,2984
+dns/rdataset.py,sha256=aoOatp7pbWhs2JieS0vcHnNc4dfwA0SBuvXAoqe3vxE,16627
+dns/rdatatype.py,sha256=W7r_B43ja4ZTHIJgqbb2eR99lXOYntf3ngGj396AvKg,7487
+dns/rdtypes/ANY/AFSDB.py,sha256=k75wMwreF1DAfDymu4lHh16BUx7ulVP3PLeQBZnkurY,1661
+dns/rdtypes/ANY/AMTRELAY.py,sha256=zE5xls02_NvbQwXUy-MnpV-uVVSJJuaKtZ86H8_X4ic,3355
+dns/rdtypes/ANY/AVC.py,sha256=SpsXYzlBirRWN0mGnQe0MdN6H8fvlgXPJX5PjOHnEak,1024
+dns/rdtypes/ANY/CAA.py,sha256=Hq1tHBrFW-BdxkjrGCq9u6ezaUHj6nFspBD5ClpkRYc,2456
+dns/rdtypes/ANY/CDNSKEY.py,sha256=bJAdrBMsFHIJz8TF1AxZoNbdxVWBCRTG-bR_uR_r_G4,1225
+dns/rdtypes/ANY/CDS.py,sha256=Y9nIRUCAabztVLbxm2SXAdYapFemCOUuGh5JqroCDUs,1163
+dns/rdtypes/ANY/CERT.py,sha256=OAYbtDdcwRhW8w_lbxHbgyWUHxYkTHV2zbiQff00X74,3547
+dns/rdtypes/ANY/CNAME.py,sha256=IHGGq2BDpeKUahTr1pvyBQgm0NGBI_vQ3Vs5mKTXO4w,1206
+dns/rdtypes/ANY/CSYNC.py,sha256=TnO2TjHfc9Cccfsz8dSsuH9Y53o-HllMVeU2DSAglrc,2431
+dns/rdtypes/ANY/DLV.py,sha256=J-pOrw5xXsDoaB9G0r6znlYXJtqtcqhsl1OXs6CPRU4,986
+dns/rdtypes/ANY/DNAME.py,sha256=yqXRtx4dAWwB4YCCv-qW6uaxeGhg2LPQ2uyKwWaMdXs,1150
+dns/rdtypes/ANY/DNSKEY.py,sha256=MD8HUVH5XXeAGOnFWg5aVz_w-2tXYwCeVXmzExhiIeQ,1223
+dns/rdtypes/ANY/DS.py,sha256=_gf8vk1O_uY8QXFjsfUw-bny-fm6e-QpCk3PT0JCyoM,995
+dns/rdtypes/ANY/DSYNC.py,sha256=q-26ceC4f2A2A6OmVaiOwDwAe_LAHvRsra1PZ4GyotA,2154
+dns/rdtypes/ANY/EUI48.py,sha256=x0BkK0sY_tgzuCwfDYpw6tyuChHjjtbRpAgYhO0Y44o,1151
+dns/rdtypes/ANY/EUI64.py,sha256=1jCff2-SXHJLDnNDnMW8Cd_o-ok0P3x6zKy_bcCU5h4,1161
+dns/rdtypes/ANY/GPOS.py,sha256=u4qwiDBVoC7bsKfxDKGbPjnOKddpdjy2p1AhziDWcPw,4439
+dns/rdtypes/ANY/HINFO.py,sha256=D2WvjTsvD_XqT8BepBIyjPL2iYGMgYqb1VQa9ApO0qE,2217
+dns/rdtypes/ANY/HIP.py,sha256=WSw31w96y1JM6ufasx7gRHUPTQuI5ejtyLxpD7vcINE,3216
+dns/rdtypes/ANY/ISDN.py,sha256=L4C2Rxrr4JJN17lmJRbZN8RhM_ujjwIskY_4V4Gd3r4,2723
+dns/rdtypes/ANY/L32.py,sha256=I0HcPHmvRUz2_yeDd0c5uueNKwcxmbz6V-7upNOc1GA,1302
+dns/rdtypes/ANY/L64.py,sha256=rbdYukNdezhQGH6vowKu1VbUWwi5cYSg_VbWEDWyYGA,1609
+dns/rdtypes/ANY/LOC.py,sha256=jxbB0bmbnMW8AVrElmoSW0SOmLPoEf5AwQLwUeAyMsY,11962
+dns/rdtypes/ANY/LP.py,sha256=X0xGo9vr1b3AQ8J8LPMyn_ooKRuEmjwdi7TGE2mqK_k,1332
+dns/rdtypes/ANY/MX.py,sha256=qQk83idY0-SbRMDmB15JOpJi7cSyiheF-ALUD0Ev19E,995
+dns/rdtypes/ANY/NID.py,sha256=8D8RDttb0BPObs0dXbFKajAhA05iZlqAq-51b6wusEI,1561
+dns/rdtypes/ANY/NINFO.py,sha256=bdL_-6Bejb2EH-xwR1rfSr_9E3SDXLTAnov7x2924FI,1041
+dns/rdtypes/ANY/NS.py,sha256=ThfaPalUlhbyZyNyvBM3k-7onl3eJKq5wCORrOGtkMM,995
+dns/rdtypes/ANY/NSEC.py,sha256=kicEYxcKaLBpV6C_M8cHdDaqBoiYl6EYtPvjyR6kExI,2465
+dns/rdtypes/ANY/NSEC3.py,sha256=NUG3AT626zu3My8QeNMiPVfpn3PRK9AGBkKW3cIZDzM,4250
+dns/rdtypes/ANY/NSEC3PARAM.py,sha256=-r5rBTMezSh7J9Wb7bWng_TXPKIETs2AXY4WFdhz7tM,2625
+dns/rdtypes/ANY/OPENPGPKEY.py,sha256=3LHryx1g0g-WrOI19PhGzGZG0anIJw2CCn93P4aT-Lk,1870
+dns/rdtypes/ANY/OPT.py,sha256=W36RslT_Psp95OPUC70knumOYjKpaRHvGT27I-NV2qc,2561
+dns/rdtypes/ANY/PTR.py,sha256=5HcR1D77Otyk91vVY4tmqrfZfSxSXWyWvwIW-rIH5gc,997
+dns/rdtypes/ANY/RESINFO.py,sha256=Kf2NcKbkeI5gFE1bJfQNqQCaitYyXfV_9nQYl1luUZ0,1008
+dns/rdtypes/ANY/RP.py,sha256=8doJlhjYDYiAT6KNF1mAaemJ20YJFUPvit8LOx4-I-U,2174
+dns/rdtypes/ANY/RRSIG.py,sha256=_ohbap8Dp_3VMU4w7ozVWGyFCtpm8A-l1F1wQiFZogA,4941
+dns/rdtypes/ANY/RT.py,sha256=2t9q3FZQ28iEyceeU25KU2Ur0T5JxELAu8BTwfOUgVw,1013
+dns/rdtypes/ANY/SMIMEA.py,sha256=6yjHuVDfIEodBU9wxbCGCDZ5cWYwyY6FCk-aq2VNU0s,222
+dns/rdtypes/ANY/SOA.py,sha256=tbbpP7RK2kpTTYCgdAWGCxlIMcX9U5MTOhz7vLP4p0I,3034
+dns/rdtypes/ANY/SPF.py,sha256=rA3Srs9ECQx-37lqm7Zf7aYmMpp_asv4tGS8_fSQ-CU,1022
+dns/rdtypes/ANY/SSHFP.py,sha256=F5vrZB-MAmeGJFAgEwRjXxgxerhoAd6kT9AcNNmkcF4,2550
+dns/rdtypes/ANY/TKEY.py,sha256=qvMJd0HGQF1wHGk1eWdITBVnAkj1oTHHbP5zSzV4cTc,4848
+dns/rdtypes/ANY/TLSA.py,sha256=cytzebS3W7FFr9qeJ9gFSHq_bOwUk9aRVlXWHfnVrRs,218
+dns/rdtypes/ANY/TSIG.py,sha256=4fNQJSNWZXUKZejCciwQuUJtTw2g-YbPmqHrEj_pitg,4750
+dns/rdtypes/ANY/TXT.py,sha256=F1U9gIAhwXIV4UVT7CwOCEn_su6G1nJIdgWJsLktk20,1000
+dns/rdtypes/ANY/URI.py,sha256=JyPYKh2RXzI34oABDiJ2oDh3TE_l-zmut4jBNA-ONt4,2913
+dns/rdtypes/ANY/WALLET.py,sha256=IaP2g7Nq26jWGKa8MVxvJjWXLQ0wrNR1IWJVyyMG8oU,219
+dns/rdtypes/ANY/X25.py,sha256=BzEM7uOY7CMAm7QN-dSLj-_LvgnnohwJDUjMstzwqYo,1942
+dns/rdtypes/ANY/ZONEMD.py,sha256=DjBYvHY13nF70uxTM77zf3R9n0Uy8Frbj1LuBXbC7jU,2389
+dns/rdtypes/ANY/__init__.py,sha256=2UKaYp81SLH6ofE021on9pR7jzmB47D1iXjQ3M7FXrw,1539
+dns/rdtypes/ANY/__pycache__/AFSDB.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/AMTRELAY.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/AVC.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/CAA.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/CDNSKEY.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/CDS.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/CERT.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/CNAME.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/CSYNC.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/DLV.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/DNAME.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/DNSKEY.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/DS.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/DSYNC.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/EUI48.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/EUI64.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/GPOS.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/HINFO.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/HIP.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/ISDN.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/L32.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/L64.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/LOC.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/LP.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/MX.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/NID.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/NINFO.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/NS.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/NSEC.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/NSEC3.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/NSEC3PARAM.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/OPENPGPKEY.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/OPT.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/PTR.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/RESINFO.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/RP.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/RRSIG.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/RT.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/SMIMEA.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/SOA.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/SPF.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/SSHFP.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/TKEY.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/TLSA.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/TSIG.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/TXT.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/URI.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/WALLET.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/X25.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/ZONEMD.cpython-311.pyc,,
+dns/rdtypes/ANY/__pycache__/__init__.cpython-311.pyc,,
+dns/rdtypes/CH/A.py,sha256=Iq82L3RLM-OwB5hyvtX1Das9oToiZMzNgs979cAkDz8,2229
+dns/rdtypes/CH/__init__.py,sha256=GD9YeDKb9VBDo-J5rrChX1MWEGyQXuR9Htnbhg_iYLc,923
+dns/rdtypes/CH/__pycache__/A.cpython-311.pyc,,
+dns/rdtypes/CH/__pycache__/__init__.cpython-311.pyc,,
+dns/rdtypes/IN/A.py,sha256=FfFn3SqbpneL9Ky63COP50V2ZFxqS1ldCKJh39Enwug,1814
+dns/rdtypes/IN/AAAA.py,sha256=AxrOlYy-1TTTWeQypDKeXrDCrdHGor0EKCE4fxzSQGo,1820
+dns/rdtypes/IN/APL.py,sha256=4Kz56antsRGu-cfV2MCHN8rmVo90wnZXnLWA6uQpnk4,5081
+dns/rdtypes/IN/DHCID.py,sha256=x9vedfzJ3vvxPC1ihWTTcxXBMYL0Q24Wmj6O67aY5og,1875
+dns/rdtypes/IN/HTTPS.py,sha256=P-IjwcvDQMmtoBgsDHglXF7KgLX73G6jEDqCKsnaGpQ,220
+dns/rdtypes/IN/IPSECKEY.py,sha256=jMO-aGl1eglWDqMxAkM2BvKDjfe9O1X0avBoWCtWi30,3261
+dns/rdtypes/IN/KX.py,sha256=K1JwItL0n5G-YGFCjWeh0C9DyDD8G8VzicsBeQiNAv0,1013
+dns/rdtypes/IN/NAPTR.py,sha256=JhGpvtCn_qlNWWlW9ilrWh9PNElBgNq1SWJPqD3LRzA,3741
+dns/rdtypes/IN/NSAP.py,sha256=6YfWCVSIPTTBmRAzG8nVBj3LnohncXUhSFJHgp-TRdc,2163
+dns/rdtypes/IN/NSAP_PTR.py,sha256=iTxlV6fr_Y9lqivLLncSHxEhmFqz5UEElDW3HMBtuCU,1015
+dns/rdtypes/IN/PX.py,sha256=zRg_5eGQdpzCRUsXIccxJOs7xoTAn7i4PIrj0Zwv-1A,2748
+dns/rdtypes/IN/SRV.py,sha256=TVai6Rtfx0_73wH999uPGuz-p2m6BTVIleXy1Tlm5Dc,2759
+dns/rdtypes/IN/SVCB.py,sha256=HeFmi2v01F00Hott8FlvQ4R7aPxFmT7RF-gt45R5K_M,218
+dns/rdtypes/IN/WKS.py,sha256=4_dLY3Bh6ePkfgku11QzLJv74iSyoSpt8EflIp_AMNc,3644
+dns/rdtypes/IN/__init__.py,sha256=HbI8aw9HWroI6SgEvl8Sx6FdkDswCCXMbSRuJy5o8LQ,1083
+dns/rdtypes/IN/__pycache__/A.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/AAAA.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/APL.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/DHCID.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/HTTPS.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/IPSECKEY.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/KX.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/NAPTR.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/NSAP.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/NSAP_PTR.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/PX.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/SRV.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/SVCB.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/WKS.cpython-311.pyc,,
+dns/rdtypes/IN/__pycache__/__init__.cpython-311.pyc,,
+dns/rdtypes/__init__.py,sha256=NYizfGglJfhqt_GMtSSXf7YQXIEHHCiJ_Y_qaLVeiOI,1073
+dns/rdtypes/__pycache__/__init__.cpython-311.pyc,,
+dns/rdtypes/__pycache__/dnskeybase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/dsbase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/euibase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/mxbase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/nsbase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/svcbbase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/tlsabase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/txtbase.cpython-311.pyc,,
+dns/rdtypes/__pycache__/util.cpython-311.pyc,,
+dns/rdtypes/dnskeybase.py,sha256=GXSOvGtiRjY3fhqlI_T-4ukF4JQvvh3sk7UF0vipmPc,2824
+dns/rdtypes/dsbase.py,sha256=elOLkRb45vYzyh36_1FSJWWO9AI2wnK3GpddmQNdj3Y,3423
+dns/rdtypes/euibase.py,sha256=2DluC_kTi2io2ICgzFEdSxKGPFx3ib3ZXnA6YaAhAp0,2675
+dns/rdtypes/mxbase.py,sha256=N_3EX_2BgY0wMdGADL6_5nxBRUdx4ZcdNIYfGg5rMP8,3190
+dns/rdtypes/nsbase.py,sha256=tueXVV6E8lelebOmrmoOPq47eeRvOpsxHVXH4cOFxcs,2323
+dns/rdtypes/svcbbase.py,sha256=0VnPpt7fSCNt_MtGnWOiYtkY-6jQRWIli8JTRROakys,17717
+dns/rdtypes/tlsabase.py,sha256=hHuRO_MQ5g_tWBIDyTNArAWwbUc-MdZlXcjQxy5defA,2588
+dns/rdtypes/txtbase.py,sha256=lEzlKS6dx6UnhgoBPGIzqC3G0e8iWBetrkDtkwM16Ic,3723
+dns/rdtypes/util.py,sha256=WjiRlxsu_sq40XpSdR6wN54WWavKe7PLh-V9UaNhk7A,9680
+dns/renderer.py,sha256=sj_m9NRJoY8gdQ9zOhSVu0pTAUyBtM5AGpfea83jGpQ,11500
+dns/resolver.py,sha256=FRa-pJApeV_DFgLEwiwZP-2g7RHAg0kVCbg9EdNYLnc,73967
+dns/reversename.py,sha256=pPDGRfg7iq09cjEhKLKEcahdoyViS0y0ORip--r5vk8,3845
+dns/rrset.py,sha256=f8avzbtBb-y93jdyhhTJ8EJx1zOTcNTK3DtiK84eGNY,9129
+dns/serial.py,sha256=-t5rPW-TcJwzBMfIJo7Tl-uDtaYtpqOfCVYx9dMaDCY,3606
+dns/set.py,sha256=hublMKCIhd9zp5Hz_fvQTwF-Ze28jn7mjqei6vTGWfs,9213
+dns/tokenizer.py,sha256=dqQvBF3oUjP7URC7ZzBuQVLMVXhvf1gJusIpkV-IQ6U,23490
+dns/transaction.py,sha256=HnHa4nKL_ddtuWH4FaiKPEt81ImELL1fumZb3ll4KbI,22579
+dns/tsig.py,sha256=mWjZGZL75atl-jf3va1FhP9LfLGWT5g9Y9DgsSan4Mo,11576
+dns/tsigkeyring.py,sha256=1xSBgaV1KLR_9FQGsGWbkBD3XJjK8IFQx-H_olH1qyQ,2650
+dns/ttl.py,sha256=Rl8UOKV0_QyZzOdQ-JoB7nSHvBFehZXe_M0cxIBVc3Y,2937
+dns/update.py,sha256=iqZEO-_U0ooAqLlIRo1OhAKI8d-jpwPhBy-vC8v1dtY,12236
+dns/version.py,sha256=d7ViavUC8gYfrWbeyH8WMAldyGk_WVF5_zkCmCJv0ZQ,1763
+dns/versioned.py,sha256=yJ76QfKdIEKBtKX_DLA_IZGUZoFB1id1mMKzIj2eRm8,11841
+dns/win32util.py,sha256=iz5Gw0CTHAIqumdE25xdYUbhhSFiaZTRM-HXskglB2o,16799
+dns/wire.py,sha256=hylnQ30yjA3UcJSElhSAqYKMt5HICYqQ_N5b71K2smA,3155
+dns/xfr.py,sha256=UE4xAyfRDNH14x4os8yC-4Tl8brc_kCpBLxT0h6x-AM,13637
+dns/zone.py,sha256=ZferSA6wMN46uuBNkrgbRcSM8FSCCxMrNiLT3WoISbw,53098
+dns/zonefile.py,sha256=Xz24A8wH97NoA_iTbastSzUZ-S-DmLFG0SgIfVzQinY,28517
+dns/zonetypes.py,sha256=HrQNZxZ_gWLWI9dskix71msi9wkYK5pgrBBbPb1T74Y,690
+dnspython-2.8.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+dnspython-2.8.0.dist-info/METADATA,sha256=dPdZU5uJ4pkVGy1pfGEjBzRbdm27fpQ1z4Y6Bpgf04U,5680
+dnspython-2.8.0.dist-info/RECORD,,
+dnspython-2.8.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+dnspython-2.8.0.dist-info/licenses/LICENSE,sha256=w-o_9WVLMpwZ07xfdIGvYjw93tSmFFWFSZ-EOtPXQc0,1526
diff --git a/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/WHEEL b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/WHEEL
new file mode 100644
index 0000000..12228d4
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.27.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/licenses/LICENSE b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/licenses/LICENSE
new file mode 100644
index 0000000..390a726
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/dnspython-2.8.0.dist-info/licenses/LICENSE
@@ -0,0 +1,35 @@
+ISC License
+
+Copyright (C) Dnspython Contributors
+
+Permission to use, copy, modify, and/or distribute this software for
+any purpose with or without fee is hereby granted, provided that the
+above copyright notice and this permission notice appear in all
+copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+
+
+Copyright (C) 2001-2017 Nominum, Inc.
+Copyright (C) Google Inc.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose with or without fee is hereby granted,
+provided that the above copyright notice and this permission notice
+appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/tapdown/lib/python3.11/site-packages/engineio/__init__.py b/tapdown/lib/python3.11/site-packages/engineio/__init__.py
new file mode 100644
index 0000000..4919efd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/__init__.py
@@ -0,0 +1,13 @@
+from .client import Client
+from .middleware import WSGIApp, Middleware
+from .server import Server
+from .async_server import AsyncServer
+from .async_client import AsyncClient
+from .async_drivers.asgi import ASGIApp
+try:
+ from .async_drivers.tornado import get_tornado_handler
+except ImportError: # pragma: no cover
+ get_tornado_handler = None
+
+__all__ = ['Server', 'WSGIApp', 'Middleware', 'Client',
+ 'AsyncServer', 'ASGIApp', 'get_tornado_handler', 'AsyncClient']
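+
+# ``Server`` and ``Client`` are the threading implementations; ``AsyncServer``
+# and ``AsyncClient`` are their asyncio counterparts. ``WSGIApp`` and
+# ``ASGIApp`` wrap a server instance for deployment under WSGI or ASGI.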
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_client.py b/tapdown/lib/python3.11/site-packages/engineio/async_client.py
new file mode 100644
index 0000000..43f3a56
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_client.py
@@ -0,0 +1,689 @@
+import asyncio
+import signal
+import ssl
+import threading
+
+try:
+ import aiohttp
+except ImportError: # pragma: no cover
+ aiohttp = None
+
+from . import base_client
+from . import exceptions
+from . import packet
+from . import payload
+
+async_signal_handler_set = False
+
+# this set is used to keep references to background tasks to prevent them from
+# being garbage collected mid-execution. Solution taken from
+# https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task
+task_reference_holder = set()
+
+
+def async_signal_handler():
+ """SIGINT handler.
+
+ Disconnect all active async clients.
+ """
+ async def _handler(): # pragma: no cover
+ for c in base_client.connected_clients[:]:
+ if c.is_asyncio_based():
+ await c.disconnect()
+
+ # cancel all running tasks
+ tasks = [task for task in asyncio.all_tasks() if task is not
+ asyncio.current_task()]
+ for task in tasks:
+ task.cancel()
+ await asyncio.gather(*tasks, return_exceptions=True)
+ asyncio.get_running_loop().stop()
+
+ asyncio.ensure_future(_handler())
+
+
+class AsyncClient(base_client.BaseClient):
+ """An Engine.IO client for asyncio.
+
+ This class implements a fully compliant Engine.IO web client with support
+ for websocket and long-polling transports, compatible with the asyncio
+ framework on Python 3.5 or newer.
+
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors are logged even when
+ ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param request_timeout: A timeout in seconds for requests. The default is
+ 5 seconds.
+ :param http_session: an initialized ``aiohttp.ClientSession`` object to be
+ used when sending requests to the server. Use it if
+ you need to add special client options such as proxy
+ servers, SSL certificates, custom CA bundle, etc.
+ :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
+ skip SSL certificate verification, allowing
+ connections to servers with self signed certificates.
+ The default is ``True``.
+ :param handle_sigint: Set to ``True`` to automatically handle disconnection
+ when the process is interrupted, or to ``False`` to
+ leave interrupt handling to the calling application.
+ Interrupt handling can only be enabled when the
+ client instance is created in the main thread.
+ :param websocket_extra_options: Dictionary containing additional keyword
+ arguments passed to
+ ``aiohttp.ws_connect()``.
+    :param timestamp_requests: If ``True`` a timestamp is added to the query
+                               string of Engine.IO requests as a cache-busting
+ measure. Set to ``False`` to disable.
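+
+    Example usage (a minimal sketch; assumes an Engine.IO server is
+    listening at ``http://localhost:5000``)::
+
+        import asyncio
+        import engineio
+
+        eio = engineio.AsyncClient()
+
+        @eio.on('message')
+        async def on_message(data):
+            print('received:', data)
+
+        async def main():
+            await eio.connect('http://localhost:5000')
+            await eio.wait()
+
+        asyncio.run(main())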
+ """
+ def is_asyncio_based(self):
+ return True
+
+ async def connect(self, url, headers=None, transports=None,
+ engineio_path='engine.io'):
+ """Connect to an Engine.IO server.
+
+ :param url: The URL of the Engine.IO server. It can include custom
+ query string parameters if required by the server.
+ :param headers: A dictionary with custom headers to send with the
+ connection request.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. If not
+ given, the polling transport is connected first,
+ then an upgrade to websocket is attempted.
+ :param engineio_path: The endpoint where the Engine.IO server is
+ installed. The default value is appropriate for
+ most cases.
+
+ Note: this method is a coroutine.
+
+ Example usage::
+
+            eio = engineio.AsyncClient()
+ await eio.connect('http://localhost:5000')
+ """
+ global async_signal_handler_set
+ if self.handle_sigint and not async_signal_handler_set and \
+ threading.current_thread() == threading.main_thread():
+ try:
+ asyncio.get_running_loop().add_signal_handler(
+ signal.SIGINT, async_signal_handler)
+ except NotImplementedError: # pragma: no cover
+ self.logger.warning('Signal handler is unsupported')
+ async_signal_handler_set = True
+
+ if self.state != 'disconnected':
+ raise ValueError('Client is not in a disconnected state')
+ valid_transports = ['polling', 'websocket']
+ if transports is not None:
+ if isinstance(transports, str):
+ transports = [transports]
+ transports = [transport for transport in transports
+ if transport in valid_transports]
+ if not transports:
+ raise ValueError('No valid transports provided')
+ self.transports = transports or valid_transports
+ return await getattr(self, '_connect_' + self.transports[0])(
+ url, headers or {}, engineio_path)
+
+ async def wait(self):
+ """Wait until the connection with the server ends.
+
+ Client applications can use this function to block the main thread
+ during the life of the connection.
+
+ Note: this method is a coroutine.
+ """
+ if self.read_loop_task:
+ await self.read_loop_task
+
+ async def send(self, data):
+ """Send a message to the server.
+
+ :param data: The data to send to the server. Data can be of type
+ ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
+ or ``dict``, the data will be serialized as JSON.
+
+ Note: this method is a coroutine.
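+
+        Example usage (a sketch; assumes a connected client ``eio``)::
+
+            await eio.send('hello')            # plain string
+            await eio.send(b'\x00\x01')        # binary
+            await eio.send({'status': 'ok'})   # serialized as JSON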
+ """
+ await self._send_packet(packet.Packet(packet.MESSAGE, data=data))
+
+ async def disconnect(self, abort=False, reason=None):
+ """Disconnect from the server.
+
+        :param abort: If set to ``True``, do not wait for background tasks
+                      associated with the connection to end.
+        :param reason: the reason for the disconnection, passed on to the
+                       ``disconnect`` event handler; defaults to
+                       ``CLIENT_DISCONNECT``.
+
+ Note: this method is a coroutine.
+ """
+ if self.state == 'connected':
+ await self._send_packet(packet.Packet(packet.CLOSE))
+ await self.queue.put(None)
+ self.state = 'disconnecting'
+ await self._trigger_event('disconnect',
+ reason or self.reason.CLIENT_DISCONNECT,
+ run_async=False)
+ if self.current_transport == 'websocket':
+ await self.ws.close()
+ if not abort:
+ await self.read_loop_task
+ self.state = 'disconnected'
+ try:
+ base_client.connected_clients.remove(self)
+ except ValueError: # pragma: no cover
+ pass
+ await self._reset()
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task.
+
+ This is a utility function that applications can use to start a
+ background task.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+        The return value is an ``asyncio.Task`` object.
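+
+        Example usage (a sketch; assumes a client instance ``eio``)::
+
+            async def heartbeat(interval):
+                while True:
+                    await eio.send('ping')
+                    await eio.sleep(interval)
+
+            task = eio.start_background_task(heartbeat, 25)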
+ """
+ return asyncio.ensure_future(target(*args, **kwargs))
+
+ async def sleep(self, seconds=0):
+ """Sleep for the requested amount of time.
+
+ Note: this method is a coroutine.
+ """
+ return await asyncio.sleep(seconds)
+
+ def create_queue(self, *args, **kwargs):
+ """Create a queue object."""
+ return asyncio.Queue(*args, **kwargs)
+
+ def get_queue_empty_exception(self):
+ """Return the queue empty exception raised by queues created by the
+ ``create_queue()`` method.
+ """
+ return asyncio.QueueEmpty
+
+ def create_event(self):
+ """Create an event object."""
+ return asyncio.Event()
+
+ async def _reset(self):
+ super()._reset()
+ while True: # pragma: no cover
+ try:
+ self.queue.get_nowait()
+ self.queue.task_done()
+ except self.queue_empty:
+ break
+ if not self.external_http: # pragma: no cover
+ if self.http and not self.http.closed:
+ await self.http.close()
+
+ def __del__(self): # pragma: no cover
+ # try to close the aiohttp session if it is still open
+ if self.http and not self.http.closed:
+ try:
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+                    # event loops do not have an ensure_future() method;
+                    # schedule the close on the running loop instead
+                    loop.create_task(self.http.close())
+ else:
+ loop.run_until_complete(self.http.close())
+ except:
+ pass
+
+ async def _connect_polling(self, url, headers, engineio_path):
+ """Establish a long-polling connection to the Engine.IO server."""
+ if aiohttp is None: # pragma: no cover
+ self.logger.error('aiohttp not installed -- cannot make HTTP '
+ 'requests!')
+ return
+ self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
+ self.logger.info('Attempting polling connection to ' + self.base_url)
+ r = await self._send_request(
+ 'GET', self.base_url + self._get_url_timestamp(), headers=headers,
+ timeout=self.request_timeout)
+ if r is None or isinstance(r, str):
+ await self._reset()
+ raise exceptions.ConnectionError(
+ r or 'Connection refused by the server')
+ if r.status < 200 or r.status >= 300:
+ await self._reset()
+ try:
+ arg = await r.json()
+ except aiohttp.ClientError:
+ arg = None
+ raise exceptions.ConnectionError(
+ 'Unexpected status code {} in server response'.format(
+ r.status), arg)
+ try:
+ p = payload.Payload(encoded_payload=(await r.read()).decode(
+ 'utf-8'))
+ except ValueError:
+ raise exceptions.ConnectionError(
+ 'Unexpected response from server') from None
+ open_packet = p.packets[0]
+ if open_packet.packet_type != packet.OPEN:
+ raise exceptions.ConnectionError(
+ 'OPEN packet not returned by server')
+ self.logger.info(
+ 'Polling connection accepted with ' + str(open_packet.data))
+ self.sid = open_packet.data['sid']
+ self.upgrades = open_packet.data['upgrades']
+ self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
+ self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
+ self.current_transport = 'polling'
+ self.base_url += '&sid=' + self.sid
+
+ self.state = 'connected'
+ base_client.connected_clients.append(self)
+ await self._trigger_event('connect', run_async=False)
+
+ for pkt in p.packets[1:]:
+ await self._receive_packet(pkt)
+
+ if 'websocket' in self.upgrades and 'websocket' in self.transports:
+ # attempt to upgrade to websocket
+ if await self._connect_websocket(url, headers, engineio_path):
+ # upgrade to websocket succeeded, we're done here
+ return
+
+ self.write_loop_task = self.start_background_task(self._write_loop)
+ self.read_loop_task = self.start_background_task(
+ self._read_loop_polling)
+
+ async def _connect_websocket(self, url, headers, engineio_path):
+ """Establish or upgrade to a WebSocket connection with the server."""
+ if aiohttp is None: # pragma: no cover
+ self.logger.error('aiohttp package not installed')
+ return False
+ websocket_url = self._get_engineio_url(url, engineio_path,
+ 'websocket')
+ if self.sid:
+ self.logger.info(
+ 'Attempting WebSocket upgrade to ' + websocket_url)
+ upgrade = True
+ websocket_url += '&sid=' + self.sid
+ else:
+ upgrade = False
+ self.base_url = websocket_url
+ self.logger.info(
+ 'Attempting WebSocket connection to ' + websocket_url)
+
+ if self.http is None or self.http.closed: # pragma: no cover
+ self.http = aiohttp.ClientSession()
+
+ # extract any new cookies passed in a header so that they can also be
+        # sent to the WebSocket route
+ cookies = {}
+ for header, value in headers.items():
+ if header.lower() == 'cookie':
+ cookies = dict(
+ [cookie.split('=', 1) for cookie in value.split('; ')])
+ del headers[header]
+ break
+ self.http.cookie_jar.update_cookies(cookies)
+
+ extra_options = {'timeout': self.request_timeout}
+ if not self.ssl_verify:
+ ssl_context = ssl.create_default_context()
+ ssl_context.check_hostname = False
+ ssl_context.verify_mode = ssl.CERT_NONE
+ extra_options['ssl'] = ssl_context
+
+ # combine internally generated options with the ones supplied by the
+ # caller. The caller's options take precedence.
+ headers.update(self.websocket_extra_options.pop('headers', {}))
+ extra_options['headers'] = headers
+ extra_options.update(self.websocket_extra_options)
+
+ try:
+ ws = await self.http.ws_connect(
+ websocket_url + self._get_url_timestamp(), **extra_options)
+ except (aiohttp.client_exceptions.WSServerHandshakeError,
+ aiohttp.client_exceptions.ServerConnectionError,
+ aiohttp.client_exceptions.ClientConnectionError):
+ if upgrade:
+ self.logger.warning(
+ 'WebSocket upgrade failed: connection error')
+ return False
+ else:
+ raise exceptions.ConnectionError('Connection error')
+ if upgrade:
+ p = packet.Packet(packet.PING, data='probe').encode()
+ try:
+ await ws.send_str(p)
+ except Exception as e: # pragma: no cover
+ self.logger.warning(
+ 'WebSocket upgrade failed: unexpected send exception: %s',
+ str(e))
+ return False
+ try:
+ p = (await ws.receive()).data
+ except Exception as e: # pragma: no cover
+ self.logger.warning(
+ 'WebSocket upgrade failed: unexpected recv exception: %s',
+ str(e))
+ return False
+ pkt = packet.Packet(encoded_packet=p)
+ if pkt.packet_type != packet.PONG or pkt.data != 'probe':
+ self.logger.warning(
+ 'WebSocket upgrade failed: no PONG packet')
+ return False
+ p = packet.Packet(packet.UPGRADE).encode()
+ try:
+ await ws.send_str(p)
+ except Exception as e: # pragma: no cover
+ self.logger.warning(
+ 'WebSocket upgrade failed: unexpected send exception: %s',
+ str(e))
+ return False
+ self.current_transport = 'websocket'
+ self.logger.info('WebSocket upgrade was successful')
+ else:
+ try:
+ p = (await ws.receive()).data
+ except Exception as e: # pragma: no cover
+ raise exceptions.ConnectionError(
+ 'Unexpected recv exception: ' + str(e))
+ open_packet = packet.Packet(encoded_packet=p)
+ if open_packet.packet_type != packet.OPEN:
+ raise exceptions.ConnectionError('no OPEN packet')
+ self.logger.info(
+ 'WebSocket connection accepted with ' + str(open_packet.data))
+ self.sid = open_packet.data['sid']
+ self.upgrades = open_packet.data['upgrades']
+ self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
+ self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
+ self.current_transport = 'websocket'
+
+ self.state = 'connected'
+ base_client.connected_clients.append(self)
+ await self._trigger_event('connect', run_async=False)
+
+ self.ws = ws
+ self.write_loop_task = self.start_background_task(self._write_loop)
+ self.read_loop_task = self.start_background_task(
+ self._read_loop_websocket)
+ return True
+
+ async def _receive_packet(self, pkt):
+ """Handle incoming packets from the server."""
+ packet_name = packet.packet_names[pkt.packet_type] \
+ if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
+ self.logger.info(
+ 'Received packet %s data %s', packet_name,
+ pkt.data if not isinstance(pkt.data, bytes) else '')
+ if pkt.packet_type == packet.MESSAGE:
+ await self._trigger_event('message', pkt.data, run_async=True)
+ elif pkt.packet_type == packet.PING:
+ await self._send_packet(packet.Packet(packet.PONG, pkt.data))
+ elif pkt.packet_type == packet.CLOSE:
+ await self.disconnect(abort=True,
+ reason=self.reason.SERVER_DISCONNECT)
+ elif pkt.packet_type == packet.NOOP:
+ pass
+ else:
+ self.logger.error('Received unexpected packet of type %s',
+ pkt.packet_type)
+
+ async def _send_packet(self, pkt):
+ """Queue a packet to be sent to the server."""
+ if self.state != 'connected':
+ return
+ await self.queue.put(pkt)
+ self.logger.info(
+ 'Sending packet %s data %s',
+ packet.packet_names[pkt.packet_type],
+ pkt.data if not isinstance(pkt.data, bytes) else '')
+
+ async def _send_request(
+ self, method, url, headers=None, body=None,
+ timeout=None): # pragma: no cover
+ if self.http is None or self.http.closed:
+ self.http = aiohttp.ClientSession()
+ http_method = getattr(self.http, method.lower())
+
+ try:
+ if not self.ssl_verify:
+ return await http_method(
+ url, headers=headers, data=body,
+ timeout=aiohttp.ClientTimeout(total=timeout), ssl=False)
+ else:
+ return await http_method(
+ url, headers=headers, data=body,
+ timeout=aiohttp.ClientTimeout(total=timeout))
+
+ except (aiohttp.ClientError, asyncio.TimeoutError) as exc:
+ self.logger.info('HTTP %s request to %s failed with error %s.',
+ method, url, exc)
+ return str(exc)
+
+ async def _trigger_event(self, event, *args, **kwargs):
+ """Invoke an event handler."""
+ run_async = kwargs.pop('run_async', False)
+ ret = None
+ if event in self.handlers:
+            if asyncio.iscoroutinefunction(self.handlers[event]):
+ if run_async:
+ task = self.start_background_task(self.handlers[event],
+ *args)
+ task_reference_holder.add(task)
+ task.add_done_callback(task_reference_holder.discard)
+ return task
+ else:
+ try:
+ try:
+ ret = await self.handlers[event](*args)
+ except TypeError:
+ if event == 'disconnect' and \
+ len(args) == 1: # pragma: no branch
+ # legacy disconnect events do not have a reason
+ # argument
+ return await self.handlers[event]()
+ else: # pragma: no cover
+ raise
+ except asyncio.CancelledError: # pragma: no cover
+ pass
+ except:
+ self.logger.exception(event + ' async handler error')
+ if event == 'connect':
+ # if connect handler raised error we reject the
+ # connection
+ return False
+ else:
+ if run_async:
+ async def async_handler():
+ return self.handlers[event](*args)
+
+ task = self.start_background_task(async_handler)
+ task_reference_holder.add(task)
+ task.add_done_callback(task_reference_holder.discard)
+ return task
+ else:
+ try:
+ try:
+ ret = self.handlers[event](*args)
+ except TypeError:
+ if event == 'disconnect' and \
+ len(args) == 1: # pragma: no branch
+ # legacy disconnect events do not have a reason
+ # argument
+ ret = self.handlers[event]()
+ else: # pragma: no cover
+ raise
+ except:
+ self.logger.exception(event + ' handler error')
+ if event == 'connect':
+ # if connect handler raised error we reject the
+ # connection
+ return False
+ return ret
+
+ async def _read_loop_polling(self):
+ """Read packets by polling the Engine.IO server."""
+ while self.state == 'connected' and self.write_loop_task:
+ self.logger.info(
+ 'Sending polling GET request to ' + self.base_url)
+ r = await self._send_request(
+ 'GET', self.base_url + self._get_url_timestamp(),
+ timeout=max(self.ping_interval, self.ping_timeout) + 5)
+ if r is None or isinstance(r, str):
+ self.logger.warning(
+ r or 'Connection refused by the server, aborting')
+ await self.queue.put(None)
+ break
+ if r.status < 200 or r.status >= 300:
+ self.logger.warning('Unexpected status code %s in server '
+ 'response, aborting', r.status)
+ await self.queue.put(None)
+ break
+ try:
+ p = payload.Payload(encoded_payload=(await r.read()).decode(
+ 'utf-8'))
+ except ValueError:
+ self.logger.warning(
+ 'Unexpected packet from server, aborting')
+ await self.queue.put(None)
+ break
+ for pkt in p.packets:
+ await self._receive_packet(pkt)
+
+ if self.write_loop_task: # pragma: no branch
+ self.logger.info('Waiting for write loop task to end')
+ await self.write_loop_task
+ if self.state == 'connected':
+ await self._trigger_event(
+ 'disconnect', self.reason.TRANSPORT_ERROR, run_async=False)
+ try:
+ base_client.connected_clients.remove(self)
+ except ValueError: # pragma: no cover
+ pass
+ await self._reset()
+ self.logger.info('Exiting read loop task')
+
+ async def _read_loop_websocket(self):
+ """Read packets from the Engine.IO WebSocket connection."""
+ while self.state == 'connected':
+ p = None
+ try:
+ p = await asyncio.wait_for(
+ self.ws.receive(),
+ timeout=self.ping_interval + self.ping_timeout)
+ if not isinstance(p.data, (str, bytes)): # pragma: no cover
+ self.logger.warning(
+ 'Server sent %s packet data %s, aborting',
+ 'close' if p.type in [aiohttp.WSMsgType.CLOSE,
+ aiohttp.WSMsgType.CLOSING]
+ else str(p.type), str(p.data))
+ await self.queue.put(None)
+ break # the connection is broken
+ p = p.data
+ except asyncio.TimeoutError:
+ self.logger.warning(
+ 'Server has stopped communicating, aborting')
+ await self.queue.put(None)
+ break
+ except aiohttp.client_exceptions.ServerDisconnectedError:
+ self.logger.info(
+ 'Read loop: WebSocket connection was closed, aborting')
+ await self.queue.put(None)
+ break
+ except Exception as e:
+ self.logger.info(
+ 'Unexpected error receiving packet: "%s", aborting',
+ str(e))
+ await self.queue.put(None)
+ break
+ try:
+ pkt = packet.Packet(encoded_packet=p)
+ except Exception as e: # pragma: no cover
+ self.logger.info(
+ 'Unexpected error decoding packet: "%s", aborting', str(e))
+ await self.queue.put(None)
+ break
+ await self._receive_packet(pkt)
+
+ if self.write_loop_task: # pragma: no branch
+ self.logger.info('Waiting for write loop task to end')
+ await self.write_loop_task
+ if self.state == 'connected':
+ await self._trigger_event(
+ 'disconnect', self.reason.TRANSPORT_ERROR, run_async=False)
+ try:
+ base_client.connected_clients.remove(self)
+ except ValueError: # pragma: no cover
+ pass
+ await self._reset()
+ self.logger.info('Exiting read loop task')
+
+ async def _write_loop(self):
+ """This background task sends packages to the server as they are
+ pushed to the send queue.
+ """
+ while self.state == 'connected':
+ # to simplify the timeout handling, use the maximum of the
+ # ping interval and ping timeout as timeout, with an extra 5
+ # seconds grace period
+ timeout = max(self.ping_interval, self.ping_timeout) + 5
+ packets = None
+ try:
+ packets = [await asyncio.wait_for(self.queue.get(), timeout)]
+ except (self.queue_empty, asyncio.TimeoutError):
+ self.logger.error('packet queue is empty, aborting')
+ break
+ except asyncio.CancelledError: # pragma: no cover
+ break
+ if packets == [None]:
+ self.queue.task_done()
+ packets = []
+ else:
+ while True:
+ try:
+ packets.append(self.queue.get_nowait())
+ except self.queue_empty:
+ break
+ if packets[-1] is None:
+ packets = packets[:-1]
+ self.queue.task_done()
+ break
+ if not packets:
+ # empty packet list returned -> connection closed
+ break
+ if self.current_transport == 'polling':
+ p = payload.Payload(packets=packets)
+ r = await self._send_request(
+ 'POST', self.base_url, body=p.encode(),
+ headers={'Content-Type': 'text/plain'},
+ timeout=self.request_timeout)
+ for pkt in packets:
+ self.queue.task_done()
+ if r is None or isinstance(r, str):
+ self.logger.warning(
+ r or 'Connection refused by the server, aborting')
+ break
+ if r.status < 200 or r.status >= 300:
+ self.logger.warning('Unexpected status code %s in server '
+ 'response, aborting', r.status)
+ self.write_loop_task = None
+ break
+ else:
+ # websocket
+ try:
+ for pkt in packets:
+ if pkt.binary:
+ await self.ws.send_bytes(pkt.encode())
+ else:
+ await self.ws.send_str(pkt.encode())
+ self.queue.task_done()
+ except (aiohttp.client_exceptions.ServerDisconnectedError,
+ BrokenPipeError, OSError):
+ self.logger.info(
+ 'Write loop: WebSocket connection was closed, '
+ 'aborting')
+ break
+ self.logger.info('Exiting write loop task')
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/__init__.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/_websocket_wsgi.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/_websocket_wsgi.py
new file mode 100644
index 0000000..aca30dc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/_websocket_wsgi.py
@@ -0,0 +1,34 @@
+import simple_websocket
+
+
+class SimpleWebSocketWSGI: # pragma: no cover
+ """
+ This wrapper class provides a threading WebSocket interface that is
+ compatible with eventlet's implementation.
+ """
+ def __init__(self, handler, server, **kwargs):
+ self.app = handler
+ self.server_args = kwargs
+
+ def __call__(self, environ, start_response):
+ self.ws = simple_websocket.Server(environ, **self.server_args)
+ ret = self.app(self)
+ if self.ws.mode == 'gunicorn':
+ raise StopIteration()
+ return ret
+
+ def close(self):
+ if self.ws.connected:
+ self.ws.close()
+
+ def send(self, message):
+ try:
+ return self.ws.send(message)
+ except simple_websocket.ConnectionClosed:
+ raise OSError()
+
+ def wait(self):
+ try:
+ return self.ws.receive()
+ except simple_websocket.ConnectionClosed:
+ return None
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/aiohttp.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/aiohttp.py
new file mode 100644
index 0000000..7c3440f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/aiohttp.py
@@ -0,0 +1,127 @@
+import asyncio
+import sys
+from urllib.parse import urlsplit
+
+from aiohttp.web import Response, WebSocketResponse
+
+
+def create_route(app, engineio_server, engineio_endpoint):
+ """This function sets up the engine.io endpoint as a route for the
+ application.
+
+ Note that both GET and POST requests must be hooked up on the engine.io
+ endpoint.
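+
+    Example usage (a sketch; applications normally call
+    ``engineio_server.attach(app)``, which sets up the route through this
+    function)::
+
+        from aiohttp import web
+        import engineio
+
+        eio = engineio.AsyncServer(async_mode='aiohttp')
+        app = web.Application()
+        create_route(app, eio, '/engine.io/')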
+ """
+ app.router.add_get(engineio_endpoint, engineio_server.handle_request)
+ app.router.add_post(engineio_endpoint, engineio_server.handle_request)
+ app.router.add_route('OPTIONS', engineio_endpoint,
+ engineio_server.handle_request)
+
+
+def translate_request(request):
+ """This function takes the arguments passed to the request handler and
+ uses them to generate a WSGI compatible environ dictionary.
+ """
+ message = request._message
+ payload = request._payload
+
+ uri_parts = urlsplit(message.path)
+ environ = {
+ 'wsgi.input': payload,
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.version': (1, 0),
+ 'wsgi.async': True,
+ 'wsgi.multithread': False,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+ 'SERVER_SOFTWARE': 'aiohttp',
+ 'REQUEST_METHOD': message.method,
+ 'QUERY_STRING': uri_parts.query or '',
+ 'RAW_URI': message.path,
+ 'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version,
+ 'REMOTE_ADDR': '127.0.0.1',
+ 'REMOTE_PORT': '0',
+ 'SERVER_NAME': 'aiohttp',
+ 'SERVER_PORT': '0',
+ 'aiohttp.request': request
+ }
+
+ for hdr_name, hdr_value in message.headers.items():
+ hdr_name = hdr_name.upper()
+ if hdr_name == 'CONTENT-TYPE':
+ environ['CONTENT_TYPE'] = hdr_value
+ continue
+ elif hdr_name == 'CONTENT-LENGTH':
+ environ['CONTENT_LENGTH'] = hdr_value
+ continue
+
+ key = 'HTTP_%s' % hdr_name.replace('-', '_')
+ if key in environ:
+ hdr_value = f'{environ[key]},{hdr_value}'
+
+ environ[key] = hdr_value
+
+ environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
+
+ path_info = uri_parts.path
+
+ environ['PATH_INFO'] = path_info
+ environ['SCRIPT_NAME'] = ''
+
+ return environ
+
+
+def make_response(status, headers, payload, environ):
+ """This function generates an appropriate response object for this async
+ mode.
+ """
+ return Response(body=payload, status=int(status.split()[0]),
+ headers=headers)
+
+
+class WebSocket: # pragma: no cover
+ """
+    This wrapper class provides an aiohttp WebSocket interface that is
+ somewhat compatible with eventlet's implementation.
+ """
+ def __init__(self, handler, server):
+ self.handler = handler
+ self._sock = None
+
+ async def __call__(self, environ):
+ request = environ['aiohttp.request']
+ self._sock = WebSocketResponse(max_msg_size=0)
+ await self._sock.prepare(request)
+
+ self.environ = environ
+ await self.handler(self)
+ return self._sock
+
+ async def close(self):
+ await self._sock.close()
+
+ async def send(self, message):
+ if isinstance(message, bytes):
+ f = self._sock.send_bytes
+ else:
+ f = self._sock.send_str
+ if asyncio.iscoroutinefunction(f):
+ await f(message)
+ else:
+ f(message)
+
+ async def wait(self):
+ msg = await self._sock.receive()
+ if not isinstance(msg.data, bytes) and \
+ not isinstance(msg.data, str):
+ raise OSError()
+ return msg.data
+
+
+_async = {
+ 'asyncio': True,
+ 'create_route': create_route,
+ 'translate_request': translate_request,
+ 'make_response': make_response,
+ 'websocket': WebSocket,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/asgi.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/asgi.py
new file mode 100644
index 0000000..ad7447e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/asgi.py
@@ -0,0 +1,296 @@
+import os
+import sys
+import asyncio
+
+from engineio.static_files import get_static_file
+
+
+class ASGIApp:
+ """ASGI application middleware for Engine.IO.
+
+ This middleware dispatches traffic to an Engine.IO application. It can
+ also serve a list of static files to the client, or forward unrelated
+ HTTP traffic to another ASGI application.
+
+ :param engineio_server: The Engine.IO server. Must be an instance of the
+ ``engineio.AsyncServer`` class.
+ :param static_files: A dictionary with static file mapping rules. See the
+ documentation for details on this argument.
+ :param other_asgi_app: A separate ASGI app that receives all other traffic.
+ :param engineio_path: The endpoint where the Engine.IO application should
+ be installed. The default value is appropriate for
+ most cases. With a value of ``None``, all incoming
+ traffic is directed to the Engine.IO server, with the
+ assumption that routing, if necessary, is handled by
+ a different layer. When this option is set to
+ ``None``, ``static_files`` and ``other_asgi_app`` are
+ ignored.
+    :param on_startup: function to be called on application startup; can be a
+                       coroutine
+    :param on_shutdown: function to be called on application shutdown; can be
+                        a coroutine
+
+ Example usage::
+
+ import engineio
+ import uvicorn
+
+ eio = engineio.AsyncServer()
+ app = engineio.ASGIApp(eio, static_files={
+ '/': {'content_type': 'text/html', 'filename': 'index.html'},
+ '/index.html': {'content_type': 'text/html',
+ 'filename': 'index.html'},
+ })
+        uvicorn.run(app, host='127.0.0.1', port=5000)
+ """
+ def __init__(self, engineio_server, other_asgi_app=None,
+ static_files=None, engineio_path='engine.io',
+ on_startup=None, on_shutdown=None):
+ self.engineio_server = engineio_server
+ self.other_asgi_app = other_asgi_app
+ self.engineio_path = engineio_path
+ if self.engineio_path is not None:
+ if not self.engineio_path.startswith('/'):
+ self.engineio_path = '/' + self.engineio_path
+ if not self.engineio_path.endswith('/'):
+ self.engineio_path += '/'
+ self.static_files = static_files or {}
+ self.on_startup = on_startup
+ self.on_shutdown = on_shutdown
+
+ async def __call__(self, scope, receive, send):
+ if scope['type'] == 'lifespan':
+ await self.lifespan(scope, receive, send)
+ elif scope['type'] in ['http', 'websocket'] and (
+ self.engineio_path is None
+ or self._ensure_trailing_slash(scope['path']).startswith(
+ self.engineio_path)):
+ await self.engineio_server.handle_request(scope, receive, send)
+ else:
+ static_file = get_static_file(scope['path'], self.static_files) \
+ if scope['type'] == 'http' and self.static_files else None
+ if static_file and os.path.exists(static_file['filename']):
+ await self.serve_static_file(static_file, receive, send)
+ elif self.other_asgi_app is not None:
+ await self.other_asgi_app(scope, receive, send)
+ else:
+ await self.not_found(receive, send)
+
+ async def serve_static_file(self, static_file, receive,
+ send): # pragma: no cover
+ event = await receive()
+ if event['type'] == 'http.request':
+ with open(static_file['filename'], 'rb') as f:
+ payload = f.read()
+ await send({'type': 'http.response.start',
+ 'status': 200,
+ 'headers': [(b'Content-Type', static_file[
+ 'content_type'].encode('utf-8'))]})
+ await send({'type': 'http.response.body',
+ 'body': payload})
+
+ async def lifespan(self, scope, receive, send):
+ if self.other_asgi_app is not None and self.on_startup is None and \
+ self.on_shutdown is None:
+ # let the other ASGI app handle lifespan events
+ await self.other_asgi_app(scope, receive, send)
+ return
+
+ while True:
+ event = await receive()
+ if event['type'] == 'lifespan.startup':
+ if self.on_startup:
+ try:
+ await self.on_startup() \
+ if asyncio.iscoroutinefunction(self.on_startup) \
+ else self.on_startup()
+ except:
+ await send({'type': 'lifespan.startup.failed'})
+ return
+ await send({'type': 'lifespan.startup.complete'})
+ elif event['type'] == 'lifespan.shutdown':
+ if self.on_shutdown:
+ try:
+ await self.on_shutdown() \
+ if asyncio.iscoroutinefunction(self.on_shutdown) \
+ else self.on_shutdown()
+ except:
+ await send({'type': 'lifespan.shutdown.failed'})
+ return
+ await send({'type': 'lifespan.shutdown.complete'})
+ return
+
+ async def not_found(self, receive, send):
+ """Return a 404 Not Found error to the client."""
+ await send({'type': 'http.response.start',
+ 'status': 404,
+ 'headers': [(b'Content-Type', b'text/plain')]})
+ await send({'type': 'http.response.body',
+ 'body': b'Not Found'})
+
+ def _ensure_trailing_slash(self, path):
+ if not path.endswith('/'):
+ path += '/'
+ return path
+
+
+async def translate_request(scope, receive, send):
+ class AwaitablePayload: # pragma: no cover
+ def __init__(self, payload):
+ self.payload = payload or b''
+
+ async def read(self, length=None):
+ if length is None:
+ r = self.payload
+ self.payload = b''
+ else:
+ r = self.payload[:length]
+ self.payload = self.payload[length:]
+ return r
+
+ event = await receive()
+ payload = b''
+ if event['type'] == 'http.request':
+ payload += event.get('body') or b''
+ while event.get('more_body'):
+ event = await receive()
+ if event['type'] == 'http.request':
+ payload += event.get('body') or b''
+ elif event['type'] == 'websocket.connect':
+ pass
+ else:
+ return {}
+
+ raw_uri = scope['path']
+ query_string = ''
+ if 'query_string' in scope and scope['query_string']:
+ try:
+ query_string = scope['query_string'].decode('utf-8')
+ except UnicodeDecodeError:
+ pass
+ else:
+ raw_uri += '?' + query_string
+ environ = {
+ 'wsgi.input': AwaitablePayload(payload),
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.version': (1, 0),
+ 'wsgi.async': True,
+ 'wsgi.multithread': False,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+ 'SERVER_SOFTWARE': 'asgi',
+ 'REQUEST_METHOD': scope.get('method', 'GET'),
+ 'PATH_INFO': scope['path'],
+ 'QUERY_STRING': query_string,
+ 'RAW_URI': raw_uri,
+ 'SCRIPT_NAME': '',
+ 'SERVER_PROTOCOL': 'HTTP/1.1',
+ 'REMOTE_ADDR': '127.0.0.1',
+ 'REMOTE_PORT': '0',
+ 'SERVER_NAME': 'asgi',
+ 'SERVER_PORT': '0',
+ 'asgi.receive': receive,
+ 'asgi.send': send,
+ 'asgi.scope': scope,
+ }
+
+ for hdr_name, hdr_value in scope['headers']:
+ try:
+ hdr_name = hdr_name.upper().decode('utf-8')
+ hdr_value = hdr_value.decode('utf-8')
+ except UnicodeDecodeError:
+ # skip header if it cannot be decoded
+ continue
+ if hdr_name == 'CONTENT-TYPE':
+ environ['CONTENT_TYPE'] = hdr_value
+ continue
+ elif hdr_name == 'CONTENT-LENGTH':
+ environ['CONTENT_LENGTH'] = hdr_value
+ continue
+
+ key = 'HTTP_%s' % hdr_name.replace('-', '_')
+ if key in environ:
+ hdr_value = f'{environ[key]},{hdr_value}'
+
+ environ[key] = hdr_value
+
+ environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
+ return environ
+
+
+async def make_response(status, headers, payload, environ):
+ headers = [(h[0].encode('utf-8'), h[1].encode('utf-8')) for h in headers]
+ if environ['asgi.scope']['type'] == 'websocket':
+ if status.startswith('200 '):
+ await environ['asgi.send']({'type': 'websocket.accept',
+ 'headers': headers})
+ else:
+ if payload:
+ reason = payload.decode('utf-8') \
+ if isinstance(payload, bytes) else str(payload)
+ await environ['asgi.send']({'type': 'websocket.close',
+ 'reason': reason})
+ else:
+ await environ['asgi.send']({'type': 'websocket.close'})
+ return
+
+ await environ['asgi.send']({'type': 'http.response.start',
+ 'status': int(status.split(' ')[0]),
+ 'headers': headers})
+ await environ['asgi.send']({'type': 'http.response.body',
+ 'body': payload})
+
+
+class WebSocket: # pragma: no cover
+ """
+    This wrapper class provides an ASGI WebSocket interface that is
+ somewhat compatible with eventlet's implementation.
+ """
+ def __init__(self, handler, server):
+ self.handler = handler
+ self.asgi_receive = None
+ self.asgi_send = None
+
+ async def __call__(self, environ):
+ self.asgi_receive = environ['asgi.receive']
+ self.asgi_send = environ['asgi.send']
+ await self.asgi_send({'type': 'websocket.accept'})
+ await self.handler(self)
+ return '' # send nothing as response
+
+ async def close(self):
+ try:
+ await self.asgi_send({'type': 'websocket.close'})
+ except Exception:
+            # if the socket is already closed we don't care
+ pass
+
+ async def send(self, message):
+ msg_bytes = None
+ msg_text = None
+ if isinstance(message, bytes):
+ msg_bytes = message
+ else:
+ msg_text = message
+ await self.asgi_send({'type': 'websocket.send',
+ 'bytes': msg_bytes,
+ 'text': msg_text})
+
+ async def wait(self):
+ event = await self.asgi_receive()
+ if event['type'] != 'websocket.receive':
+ raise OSError()
+ if event.get('bytes', None) is not None:
+ return event['bytes']
+ elif event.get('text', None) is not None:
+ return event['text']
+ else: # pragma: no cover
+ raise OSError()
+
+
+_async = {
+ 'asyncio': True,
+ 'translate_request': translate_request,
+ 'make_response': make_response,
+ 'websocket': WebSocket,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/eventlet.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/eventlet.py
new file mode 100644
index 0000000..6361c4d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/eventlet.py
@@ -0,0 +1,52 @@
+from eventlet.green.threading import Event
+from eventlet import queue, sleep, spawn
+from eventlet.websocket import WebSocketWSGI as _WebSocketWSGI
+
+
+class EventletThread: # pragma: no cover
+ """Thread class that uses eventlet green threads.
+
+ Eventlet's own Thread class has a strange bug that causes _DummyThread
+ objects to be created and leaked, since they are never garbage collected.
+ """
+ def __init__(self, target, args=None, kwargs=None):
+ self.target = target
+ self.args = args or ()
+ self.kwargs = kwargs or {}
+ self.g = None
+
+ def start(self):
+ self.g = spawn(self.target, *self.args, **self.kwargs)
+
+ def join(self):
+ if self.g:
+ return self.g.wait()
+
+
+class WebSocketWSGI(_WebSocketWSGI): # pragma: no cover
+ def __init__(self, handler, server):
+ try:
+ super().__init__(
+ handler, max_frame_length=int(server.max_http_buffer_size))
+ except TypeError: # pragma: no cover
+ # older versions of eventlet do not support a max frame size
+ super().__init__(handler)
+ self._sock = None
+
+ def __call__(self, environ, start_response):
+ if 'eventlet.input' not in environ:
+ raise RuntimeError('You need to use the eventlet server. '
+ 'See the Deployment section of the '
+ 'documentation for more information.')
+ self._sock = environ['eventlet.input'].get_socket()
+ return super().__call__(environ, start_response)
+
+
+_async = {
+ 'thread': EventletThread,
+ 'queue': queue.Queue,
+ 'queue_empty': queue.Empty,
+ 'event': Event,
+ 'websocket': WebSocketWSGI,
+ 'sleep': sleep,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/gevent.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/gevent.py
new file mode 100644
index 0000000..db284a5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/gevent.py
@@ -0,0 +1,83 @@
+import gevent
+from gevent import queue
+from gevent.event import Event
+try:
+ # use gevent-websocket if installed
+ import geventwebsocket # noqa
+ SimpleWebSocketWSGI = None
+except ImportError: # pragma: no cover
+ # fallback to simple_websocket when gevent-websocket is not installed
+ from engineio.async_drivers._websocket_wsgi import SimpleWebSocketWSGI
+
+
+class Thread(gevent.Greenlet): # pragma: no cover
+ """
+    This wrapper class provides a gevent Greenlet interface that is
+    compatible with the standard library's Thread class.
+ """
+    def __init__(self, target, args=None, kwargs=None):
+        # avoid mutable default arguments; behavior is unchanged
+        super().__init__(target, *(args or ()), **(kwargs or {}))
+
+ def _run(self):
+ return self.run()
+
+
+if SimpleWebSocketWSGI is not None:
+ class WebSocketWSGI(SimpleWebSocketWSGI): # pragma: no cover
+ """
+ This wrapper class provides a gevent WebSocket interface that is
+ compatible with eventlet's implementation, using the simple-websocket
+ package.
+ """
+ def __init__(self, handler, server):
+ # to avoid the requirement that the standard library is
+ # monkey-patched, here we pass the gevent versions of the
+ # concurrency and networking classes required by simple-websocket
+ import gevent.event
+ import gevent.selectors
+ super().__init__(handler, server,
+ thread_class=Thread,
+ event_class=gevent.event.Event,
+ selector_class=gevent.selectors.DefaultSelector)
+else:
+ class WebSocketWSGI: # pragma: no cover
+ """
+ This wrapper class provides a gevent WebSocket interface that is
+ compatible with eventlet's implementation, using the gevent-websocket
+ package.
+ """
+ def __init__(self, handler, server):
+ self.app = handler
+
+ def __call__(self, environ, start_response):
+ if 'wsgi.websocket' not in environ:
+ raise RuntimeError('The gevent-websocket server is not '
+ 'configured appropriately. '
+ 'See the Deployment section of the '
+ 'documentation for more information.')
+ self._sock = environ['wsgi.websocket']
+ self.environ = environ
+ self.version = self._sock.version
+ self.path = self._sock.path
+ self.origin = self._sock.origin
+ self.protocol = self._sock.protocol
+ return self.app(self)
+
+ def close(self):
+ return self._sock.close()
+
+ def send(self, message):
+ return self._sock.send(message)
+
+ def wait(self):
+ return self._sock.receive()
+
+
+_async = {
+ 'thread': Thread,
+ 'queue': queue.JoinableQueue,
+ 'queue_empty': queue.Empty,
+ 'event': Event,
+ 'websocket': WebSocketWSGI,
+ 'sleep': gevent.sleep,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/gevent_uwsgi.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/gevent_uwsgi.py
new file mode 100644
index 0000000..b5ccefc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/gevent_uwsgi.py
@@ -0,0 +1,168 @@
+import gevent
+from gevent import queue
+from gevent.event import Event
+from gevent import selectors
+import uwsgi
+_websocket_available = hasattr(uwsgi, 'websocket_handshake')
+
+
+class Thread(gevent.Greenlet): # pragma: no cover
+ """
+    This wrapper class provides a gevent Greenlet interface that is
+    compatible with the standard library's Thread class.
+ """
+    def __init__(self, target, args=None, kwargs=None):
+        # avoid mutable default arguments; behavior is unchanged
+        super().__init__(target, *(args or ()), **(kwargs or {}))
+
+ def _run(self):
+ return self.run()
+
+
+class uWSGIWebSocket: # pragma: no cover
+ """
+ This wrapper class provides a uWSGI WebSocket interface that is
+ compatible with eventlet's implementation.
+ """
+ def __init__(self, handler, server):
+ self.app = handler
+ self._sock = None
+ self.received_messages = []
+
+ def __call__(self, environ, start_response):
+ self._sock = uwsgi.connection_fd()
+ self.environ = environ
+
+ uwsgi.websocket_handshake()
+
+ self._req_ctx = None
+ if hasattr(uwsgi, 'request_context'):
+            # uWSGI >= 2.1.x with support for API access across greenlets
+ self._req_ctx = uwsgi.request_context()
+ else:
+ # use event and queue for sending messages
+ self._event = Event()
+ self._send_queue = queue.Queue()
+
+ # spawn a select greenlet
+ def select_greenlet_runner(fd, event):
+ """Sets event when data becomes available to read on fd."""
+ sel = selectors.DefaultSelector()
+ sel.register(fd, selectors.EVENT_READ)
+ try:
+ while True:
+ sel.select()
+ event.set()
+ except gevent.GreenletExit:
+ sel.unregister(fd)
+ self._select_greenlet = gevent.spawn(
+ select_greenlet_runner,
+ self._sock,
+ self._event)
+
+ self.app(self)
+ uwsgi.disconnect()
+ return '' # send nothing as response
+
+ def close(self):
+ """Disconnects uWSGI from the client."""
+ if self._req_ctx is None:
+ # better kill it here in case wait() is not called again
+ self._select_greenlet.kill()
+ self._event.set()
+
+ def _send(self, msg):
+ """Transmits message either in binary or UTF-8 text mode,
+ depending on its type."""
+ if isinstance(msg, bytes):
+ method = uwsgi.websocket_send_binary
+ else:
+ method = uwsgi.websocket_send
+ if self._req_ctx is not None:
+ method(msg, request_context=self._req_ctx)
+ else:
+ method(msg)
+
+ def _decode_received(self, msg):
+ """Returns either bytes or str, depending on message type."""
+ if not isinstance(msg, bytes):
+ # already decoded - do nothing
+ return msg
+        # only decode from utf-8 if the message is not binary data; text
+        # Engine.IO packets begin with an ASCII digit (ord('0') == 48),
+        # e.g. b'4hello' decodes to the str '4hello'
+        msg_type = ord(msg[0:1])
+        if msg_type >= 48:  # not binary
+            return msg.decode('utf-8')
+ # binary message, don't try to decode
+ return msg
+
+ def send(self, msg):
+ """Queues a message for sending. Real transmission is done in
+ wait method.
+ Sends directly if uWSGI version is new enough."""
+ if self._req_ctx is not None:
+ self._send(msg)
+ else:
+ self._send_queue.put(msg)
+ self._event.set()
+
+ def wait(self):
+ """Waits and returns received messages.
+ If running in compatibility mode for older uWSGI versions,
+ it also sends messages that have been queued by send().
+ A return value of None means that connection was closed.
+ This must be called repeatedly. For uWSGI < 2.1.x it must
+ be called from the main greenlet."""
+ while True:
+ if self._req_ctx is not None:
+ try:
+ msg = uwsgi.websocket_recv(request_context=self._req_ctx)
+ except OSError: # connection closed
+ self.close()
+ return None
+ return self._decode_received(msg)
+ else:
+ if self.received_messages:
+ return self.received_messages.pop(0)
+
+ # we wake up at least every 3 seconds to let uWSGI
+ # do its ping/ponging
+ event_set = self._event.wait(timeout=3)
+ if event_set:
+ self._event.clear()
+ # maybe there is something to send
+ msgs = []
+ while True:
+ try:
+ msgs.append(self._send_queue.get(block=False))
+ except gevent.queue.Empty:
+ break
+ for msg in msgs:
+ try:
+ self._send(msg)
+ except OSError:
+ self.close()
+ return None
+ # maybe there is something to receive, if not, at least
+ # ensure uWSGI does its ping/ponging
+ while True:
+ try:
+ msg = uwsgi.websocket_recv_nb()
+ except OSError: # connection closed
+ self.close()
+ return None
+ if msg: # message available
+ self.received_messages.append(
+ self._decode_received(msg))
+ else:
+ break
+ if self.received_messages:
+ return self.received_messages.pop(0)
+
+
+_async = {
+ 'thread': Thread,
+ 'queue': queue.JoinableQueue,
+ 'queue_empty': queue.Empty,
+ 'event': Event,
+ 'websocket': uWSGIWebSocket if _websocket_available else None,
+ 'sleep': gevent.sleep,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/sanic.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/sanic.py
new file mode 100644
index 0000000..4d6a5b8
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/sanic.py
@@ -0,0 +1,148 @@
+import sys
+from urllib.parse import urlsplit
+
+try: # pragma: no cover
+ from sanic.response import HTTPResponse
+ try:
+ from sanic.server.protocols.websocket_protocol import WebSocketProtocol
+ except ImportError:
+ from sanic.websocket import WebSocketProtocol
+except ImportError:
+ HTTPResponse = None
+ WebSocketProtocol = None
+
+
+def create_route(app, engineio_server, engineio_endpoint): # pragma: no cover
+ """This function sets up the engine.io endpoint as a route for the
+ application.
+
+ Note that both GET and POST requests must be hooked up on the engine.io
+ endpoint.
+ """
+ app.add_route(engineio_server.handle_request, engineio_endpoint,
+ methods=['GET', 'POST', 'OPTIONS'])
+ try:
+ app.enable_websocket()
+ except AttributeError:
+ # ignore, this version does not support websocket
+ pass
+
+
+def translate_request(request): # pragma: no cover
+ """This function takes the arguments passed to the request handler and
+ uses them to generate a WSGI compatible environ dictionary.
+ """
+ class AwaitablePayload:
+ def __init__(self, payload):
+ self.payload = payload or b''
+
+ async def read(self, length=None):
+ if length is None:
+ r = self.payload
+ self.payload = b''
+ else:
+ r = self.payload[:length]
+ self.payload = self.payload[length:]
+ return r
+
+ uri_parts = urlsplit(request.url)
+ environ = {
+ 'wsgi.input': AwaitablePayload(request.body),
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.version': (1, 0),
+ 'wsgi.async': True,
+ 'wsgi.multithread': False,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+ 'SERVER_SOFTWARE': 'sanic',
+ 'REQUEST_METHOD': request.method,
+ 'QUERY_STRING': uri_parts.query or '',
+ 'RAW_URI': request.url,
+ 'SERVER_PROTOCOL': 'HTTP/' + request.version,
+ 'REMOTE_ADDR': '127.0.0.1',
+ 'REMOTE_PORT': '0',
+ 'SERVER_NAME': 'sanic',
+ 'SERVER_PORT': '0',
+ 'sanic.request': request
+ }
+
+ for hdr_name, hdr_value in request.headers.items():
+ hdr_name = hdr_name.upper()
+ if hdr_name == 'CONTENT-TYPE':
+ environ['CONTENT_TYPE'] = hdr_value
+ continue
+ elif hdr_name == 'CONTENT-LENGTH':
+ environ['CONTENT_LENGTH'] = hdr_value
+ continue
+
+ key = 'HTTP_%s' % hdr_name.replace('-', '_')
+ if key in environ:
+ hdr_value = f'{environ[key]},{hdr_value}'
+
+ environ[key] = hdr_value
+
+ environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
+
+ path_info = uri_parts.path
+
+ environ['PATH_INFO'] = path_info
+ environ['SCRIPT_NAME'] = ''
+
+ return environ
+
+
+def make_response(status, headers, payload, environ): # pragma: no cover
+ """This function generates an appropriate response object for this async
+ mode.
+ """
+ headers_dict = {}
+ content_type = None
+ for h in headers:
+ if h[0].lower() == 'content-type':
+ content_type = h[1]
+ else:
+ headers_dict[h[0]] = h[1]
+ return HTTPResponse(body=payload, content_type=content_type,
+ status=int(status.split()[0]), headers=headers_dict)
+
+
+class WebSocket: # pragma: no cover
+ """
+    This wrapper class provides a Sanic WebSocket interface that is
+ somewhat compatible with eventlet's implementation.
+ """
+ def __init__(self, handler, server):
+ self.handler = handler
+ self.server = server
+ self._sock = None
+
+ async def __call__(self, environ):
+ request = environ['sanic.request']
+ protocol = request.transport.get_protocol()
+ self._sock = await protocol.websocket_handshake(request)
+
+ self.environ = environ
+ await self.handler(self)
+ return self.server._ok()
+
+ async def close(self):
+ await self._sock.close()
+
+ async def send(self, message):
+ await self._sock.send(message)
+
+ async def wait(self):
+ data = await self._sock.recv()
+ if not isinstance(data, bytes) and \
+ not isinstance(data, str):
+ raise OSError()
+ return data
+
+
+_async = {
+ 'asyncio': True,
+ 'create_route': create_route,
+ 'translate_request': translate_request,
+ 'make_response': make_response,
+ 'websocket': WebSocket if WebSocketProtocol else None,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/threading.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/threading.py
new file mode 100644
index 0000000..1615579
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/threading.py
@@ -0,0 +1,19 @@
+import queue
+import threading
+import time
+from engineio.async_drivers._websocket_wsgi import SimpleWebSocketWSGI
+
+
+class DaemonThread(threading.Thread): # pragma: no cover
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs, daemon=True)
+
+
+_async = {
+ 'thread': DaemonThread,
+ 'queue': queue.Queue,
+ 'queue_empty': queue.Empty,
+ 'event': threading.Event,
+ 'websocket': SimpleWebSocketWSGI,
+ 'sleep': time.sleep,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_drivers/tornado.py b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/tornado.py
new file mode 100644
index 0000000..abb1e2b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_drivers/tornado.py
@@ -0,0 +1,182 @@
+import asyncio
+import sys
+from urllib.parse import urlsplit
+from .. import exceptions
+
+import tornado.web
+import tornado.websocket
+
+
+def get_tornado_handler(engineio_server):
+ class Handler(tornado.websocket.WebSocketHandler): # pragma: no cover
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ if isinstance(engineio_server.cors_allowed_origins, str):
+ if engineio_server.cors_allowed_origins == '*':
+ self.allowed_origins = None
+ else:
+ self.allowed_origins = [
+ engineio_server.cors_allowed_origins]
+ else:
+ self.allowed_origins = engineio_server.cors_allowed_origins
+ self.receive_queue = asyncio.Queue()
+
+ async def get(self, *args, **kwargs):
+ if self.request.headers.get('Upgrade', '').lower() == 'websocket':
+ ret = super().get(*args, **kwargs)
+ if asyncio.iscoroutine(ret):
+ await ret
+ else:
+ await engineio_server.handle_request(self)
+
+ async def open(self, *args, **kwargs):
+ # this is the handler for the websocket request
+ asyncio.ensure_future(engineio_server.handle_request(self))
+
+ async def post(self, *args, **kwargs):
+ await engineio_server.handle_request(self)
+
+ async def options(self, *args, **kwargs):
+ await engineio_server.handle_request(self)
+
+ async def on_message(self, message):
+ await self.receive_queue.put(message)
+
+ async def get_next_message(self):
+ return await self.receive_queue.get()
+
+ def on_close(self):
+ self.receive_queue.put_nowait(None)
+
+ def check_origin(self, origin):
+ if self.allowed_origins is None or origin in self.allowed_origins:
+ return True
+ return super().check_origin(origin)
+
+ def get_compression_options(self):
+ # enable compression
+ return {}
+
+ return Handler
+
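+# A hedged wiring sketch (``eio`` is assumed to be an
+# engineio.AsyncServer(async_mode='tornado') instance):
+#
+#     import tornado.web
+#     app = tornado.web.Application(
+#         [(r'/engine.io/', get_tornado_handler(eio))])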
+
+def translate_request(handler):
+ """This function takes the arguments passed to the request handler and
+ uses them to generate a WSGI compatible environ dictionary.
+ """
+ class AwaitablePayload:
+ def __init__(self, payload):
+ self.payload = payload or b''
+
+ async def read(self, length=None):
+ if length is None:
+ r = self.payload
+ self.payload = b''
+ else:
+ r = self.payload[:length]
+ self.payload = self.payload[length:]
+ return r
+
+ payload = handler.request.body
+
+ uri_parts = urlsplit(handler.request.path)
+ full_uri = handler.request.path
+ if handler.request.query: # pragma: no cover
+ full_uri += '?' + handler.request.query
+ environ = {
+ 'wsgi.input': AwaitablePayload(payload),
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.version': (1, 0),
+ 'wsgi.async': True,
+ 'wsgi.multithread': False,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+        'SERVER_SOFTWARE': 'tornado',
+ 'REQUEST_METHOD': handler.request.method,
+ 'QUERY_STRING': handler.request.query or '',
+ 'RAW_URI': full_uri,
+ 'SERVER_PROTOCOL': 'HTTP/%s' % handler.request.version,
+ 'REMOTE_ADDR': '127.0.0.1',
+ 'REMOTE_PORT': '0',
+        'SERVER_NAME': 'tornado',
+ 'SERVER_PORT': '0',
+ 'tornado.handler': handler
+ }
+
+ for hdr_name, hdr_value in handler.request.headers.items():
+ hdr_name = hdr_name.upper()
+ if hdr_name == 'CONTENT-TYPE':
+ environ['CONTENT_TYPE'] = hdr_value
+ continue
+ elif hdr_name == 'CONTENT-LENGTH':
+ environ['CONTENT_LENGTH'] = hdr_value
+ continue
+
+ key = 'HTTP_%s' % hdr_name.replace('-', '_')
+ environ[key] = hdr_value
+
+ environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
+
+ path_info = uri_parts.path
+
+ environ['PATH_INFO'] = path_info
+ environ['SCRIPT_NAME'] = ''
+
+ return environ
+
+
+def make_response(status, headers, payload, environ):
+ """This function generates an appropriate response object for this async
+ mode.
+ """
+ tornado_handler = environ['tornado.handler']
+ try:
+ tornado_handler.set_status(int(status.split()[0]))
+ except RuntimeError: # pragma: no cover
+ # for websocket connections Tornado does not accept a response, since
+ # it already emitted the 101 status code
+ return
+ for header, value in headers:
+ tornado_handler.set_header(header, value)
+ tornado_handler.write(payload)
+ tornado_handler.finish()
+
+
+class WebSocket: # pragma: no cover
+ """
+    This wrapper class provides a Tornado WebSocket interface that is
+ somewhat compatible with eventlet's implementation.
+ """
+ def __init__(self, handler, server):
+ self.handler = handler
+ self.tornado_handler = None
+
+ async def __call__(self, environ):
+ self.tornado_handler = environ['tornado.handler']
+ self.environ = environ
+ await self.handler(self)
+
+ async def close(self):
+ self.tornado_handler.close()
+
+ async def send(self, message):
+ try:
+ self.tornado_handler.write_message(
+ message, binary=isinstance(message, bytes))
+ except tornado.websocket.WebSocketClosedError:
+ raise exceptions.EngineIOError()
+
+ async def wait(self):
+ msg = await self.tornado_handler.get_next_message()
+ if not isinstance(msg, bytes) and \
+ not isinstance(msg, str):
+ raise OSError()
+ return msg
+
+
+_async = {
+ 'asyncio': True,
+ 'translate_request': translate_request,
+ 'make_response': make_response,
+ 'websocket': WebSocket,
+}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_server.py b/tapdown/lib/python3.11/site-packages/engineio/async_server.py
new file mode 100644
index 0000000..c417067
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_server.py
@@ -0,0 +1,611 @@
+import asyncio
+import urllib
+
+from . import base_server
+from . import exceptions
+from . import packet
+from . import async_socket
+
+# this set is used to keep references to background tasks to prevent them from
+# being garbage collected mid-execution. Solution taken from
+# https://docs.python.org/3/library/asyncio-task.html#asyncio.create_task
+task_reference_holder = set()
+
+
+class AsyncServer(base_server.BaseServer):
+ """An Engine.IO server for asyncio.
+
+ This class implements a fully compliant Engine.IO web server with support
+ for websocket and long-polling transports, compatible with the asyncio
+ framework on Python 3.5 or newer.
+
+ :param async_mode: The asynchronous model to use. See the Deployment
+ section in the documentation for a description of the
+ available options. Valid async modes are "aiohttp",
+ "sanic", "tornado" and "asgi". If this argument is not
+ given, "aiohttp" is tried first, followed by "sanic",
+ "tornado", and finally "asgi". The first async mode that
+ has all its dependencies installed is the one that is
+ chosen.
+ :param ping_interval: The interval in seconds at which the server pings
+ the client. The default is 25 seconds. For advanced
+ control, a two element tuple can be given, where
+ the first number is the ping interval and the second
+ is a grace period added by the server.
+ :param ping_timeout: The time in seconds that the client waits for the
+ server to respond before disconnecting. The default
+ is 20 seconds.
+ :param max_http_buffer_size: The maximum size that is accepted for incoming
+ messages. The default is 1,000,000 bytes. In
+ spite of its name, the value set in this
+ argument is enforced for HTTP long-polling and
+ WebSocket connections.
+ :param allow_upgrades: Whether to allow transport upgrades or not.
+ :param http_compression: Whether to compress packages when using the
+ polling transport.
+ :param compression_threshold: Only compress messages when their byte size
+ is greater than this value.
+ :param cookie: If set to a string, it is the name of the HTTP cookie the
+                server sends back to the client containing the client
+ session id. If set to a dictionary, the ``'name'`` key
+ contains the cookie name and other keys define cookie
+ attributes, where the value of each attribute can be a
+ string, a callable with no arguments, or a boolean. If set
+ to ``None`` (the default), a cookie is not sent to the
+ client.
+ :param cors_allowed_origins: Origin or list of origins that are allowed to
+ connect to this server. Only the same origin
+ is allowed by default. Set this argument to
+ ``'*'`` or ``['*']`` to allow all origins, or
+ to ``[]`` to disable CORS handling.
+ :param cors_credentials: Whether credentials (cookies, authentication) are
+ allowed in requests to this server.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. Note that fatal
+ errors are logged even when ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param async_handlers: If set to ``True``, run message event handlers in
+ non-blocking threads. To run handlers synchronously,
+ set to ``False``. The default is ``True``.
+ :param monitor_clients: If set to ``True``, a background task will ensure
+ inactive clients are closed. Set to ``False`` to
+ disable the monitoring task (not recommended). The
+ default is ``True``.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. Defaults to
+ ``['polling', 'websocket']``.
+ :param kwargs: Reserved for future extensions, any additional parameters
+ given as keyword arguments will be silently ignored.
+ """
+ def is_asyncio_based(self):
+ return True
+
+ def async_modes(self):
+ return ['aiohttp', 'sanic', 'tornado', 'asgi']
+
+ def attach(self, app, engineio_path='engine.io'):
+ """Attach the Engine.IO server to an application."""
+ engineio_path = engineio_path.strip('/')
+ self._async['create_route'](app, self, f'/{engineio_path}/')
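+        # Hedged usage sketch for an async mode that provides
+        # ``create_route`` (e.g. 'sanic'); names are illustrative:
+        #
+        #     eio = AsyncServer(async_mode='sanic')
+        #     eio.attach(app)                       # serves /engine.io/
+        #     eio.attach(app, engineio_path='ws')   # serves /ws/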
+
+ async def send(self, sid, data):
+ """Send a message to a client.
+
+ :param sid: The session id of the recipient client.
+ :param data: The data to send to the client. Data can be of type
+ ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
+ or ``dict``, the data will be serialized as JSON.
+
+ Note: this method is a coroutine.
+ """
+ await self.send_packet(sid, packet.Packet(packet.MESSAGE, data=data))
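+        # e.g. ``await eio.send(sid, 'hello')`` for a plain string, or
+        # ``await eio.send(sid, {'msg': 'hello'})`` to send JSON.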
+
+ async def send_packet(self, sid, pkt):
+ """Send a raw packet to a client.
+
+ :param sid: The session id of the recipient client.
+ :param pkt: The packet to send to the client.
+
+ Note: this method is a coroutine.
+ """
+ try:
+ socket = self._get_socket(sid)
+ except KeyError:
+ # the socket is not available
+ self.logger.warning('Cannot send to sid %s', sid)
+ return
+ await socket.send(pkt)
+
+ async def get_session(self, sid):
+ """Return the user session for a client.
+
+ :param sid: The session id of the client.
+
+ The return value is a dictionary. Modifications made to this
+ dictionary are not guaranteed to be preserved. If you want to modify
+ the user session, use the ``session`` context manager instead.
+ """
+ socket = self._get_socket(sid)
+ return socket.session
+
+ async def save_session(self, sid, session):
+ """Store the user session for a client.
+
+ :param sid: The session id of the client.
+ :param session: The session dictionary.
+ """
+ socket = self._get_socket(sid)
+ socket.session = session
+
+ def session(self, sid):
+ """Return the user session for a client with context manager syntax.
+
+ :param sid: The session id of the client.
+
+ This is a context manager that returns the user session dictionary for
+ the client. Any changes that are made to this dictionary inside the
+ context manager block are saved back to the session. Example usage::
+
+ @eio.on('connect')
+            async def on_connect(sid, environ):
+                username = authenticate_user(environ)
+                if not username:
+                    return False
+                async with eio.session(sid) as session:
+                    session['username'] = username
+
+            @eio.on('message')
+            async def on_message(sid, msg):
+ async with eio.session(sid) as session:
+ print('received message from ', session['username'])
+ """
+ class _session_context_manager:
+ def __init__(self, server, sid):
+ self.server = server
+ self.sid = sid
+ self.session = None
+
+            async def __aenter__(self):
+                self.session = await self.server.get_session(self.sid)
+                return self.session
+
+            async def __aexit__(self, *args):
+                await self.server.save_session(self.sid, self.session)
+
+ return _session_context_manager(self, sid)
+
+ async def disconnect(self, sid=None):
+ """Disconnect a client.
+
+ :param sid: The session id of the client to close. If this parameter
+ is not given, then all clients are closed.
+
+ Note: this method is a coroutine.
+ """
+ if sid is not None:
+ try:
+ socket = self._get_socket(sid)
+ except KeyError: # pragma: no cover
+ # the socket was already closed or gone
+ pass
+ else:
+ await socket.close(reason=self.reason.SERVER_DISCONNECT)
+ if sid in self.sockets: # pragma: no cover
+ del self.sockets[sid]
+ else:
+ await asyncio.wait([
+ asyncio.create_task(client.close(
+ reason=self.reason.SERVER_DISCONNECT))
+ for client in self.sockets.values()
+ ])
+ self.sockets = {}
+
+ async def handle_request(self, *args, **kwargs):
+ """Handle an HTTP request from the client.
+
+ This is the entry point of the Engine.IO application. This function
+ returns the HTTP response to deliver to the client.
+
+ Note: this method is a coroutine.
+ """
+ translate_request = self._async['translate_request']
+ if asyncio.iscoroutinefunction(translate_request):
+ environ = await translate_request(*args, **kwargs)
+ else:
+ environ = translate_request(*args, **kwargs)
+
+ if self.cors_allowed_origins != []:
+ # Validate the origin header if present
+ # This is important for WebSocket more than for HTTP, since
+ # browsers only apply CORS controls to HTTP.
+ origin = environ.get('HTTP_ORIGIN')
+ if origin:
+ allowed_origins = self._cors_allowed_origins(environ)
+ if allowed_origins is not None and origin not in \
+ allowed_origins:
+ self._log_error_once(
+ origin + ' is not an accepted origin.', 'bad-origin')
+ return await self._make_response(
+ self._bad_request(
+ origin + ' is not an accepted origin.'),
+ environ)
+
+ method = environ['REQUEST_METHOD']
+ query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
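+        # e.g. a QUERY_STRING of 'EIO=4&transport=polling&t=N8Fm1Sk&sid=abc'
+        # (illustrative values) parses to {'EIO': ['4'], 'transport':
+        # ['polling'], 't': ['N8Fm1Sk'], 'sid': ['abc']}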
+
+ sid = query['sid'][0] if 'sid' in query else None
+ jsonp = False
+ jsonp_index = None
+
+ # make sure the client uses an allowed transport
+ transport = query.get('transport', ['polling'])[0]
+ if transport not in self.transports:
+ self._log_error_once('Invalid transport', 'bad-transport')
+ return await self._make_response(
+ self._bad_request('Invalid transport'), environ)
+
+ # make sure the client speaks a compatible Engine.IO version
+ if sid is None and query.get('EIO') != ['4']:
+ self._log_error_once(
+ 'The client is using an unsupported version of the Socket.IO '
+ 'or Engine.IO protocols', 'bad-version'
+ )
+ return await self._make_response(self._bad_request(
+ 'The client is using an unsupported version of the Socket.IO '
+ 'or Engine.IO protocols'
+ ), environ)
+
+ if 'j' in query:
+ jsonp = True
+ try:
+ jsonp_index = int(query['j'][0])
+ except (ValueError, KeyError, IndexError):
+ # Invalid JSONP index number
+ pass
+
+ if jsonp and jsonp_index is None:
+ self._log_error_once('Invalid JSONP index number',
+ 'bad-jsonp-index')
+ r = self._bad_request('Invalid JSONP index number')
+ elif method == 'GET':
+ upgrade_header = environ.get('HTTP_UPGRADE').lower() \
+ if 'HTTP_UPGRADE' in environ else None
+ if sid is None:
+ # transport must be one of 'polling' or 'websocket'.
+ # if 'websocket', the HTTP_UPGRADE header must match.
+ if transport == 'polling' \
+ or transport == upgrade_header == 'websocket':
+ r = await self._handle_connect(environ, transport,
+ jsonp_index)
+ else:
+ self._log_error_once('Invalid websocket upgrade',
+ 'bad-upgrade')
+ r = self._bad_request('Invalid websocket upgrade')
+ else:
+ if sid not in self.sockets:
+ self._log_error_once(f'Invalid session {sid}', 'bad-sid')
+ r = self._bad_request(f'Invalid session {sid}')
+ else:
+ try:
+ socket = self._get_socket(sid)
+ except KeyError as e: # pragma: no cover
+ self._log_error_once(f'{e} {sid}', 'bad-sid')
+ r = self._bad_request(f'{e} {sid}')
+ else:
+ if self.transport(sid) != transport and \
+ transport != upgrade_header:
+ self._log_error_once(
+ f'Invalid transport for session {sid}',
+ 'bad-transport')
+ r = self._bad_request('Invalid transport')
+ else:
+ try:
+ packets = await socket.handle_get_request(
+ environ)
+ if isinstance(packets, list):
+ r = self._ok(packets,
+ jsonp_index=jsonp_index)
+ else:
+ r = packets
+ except exceptions.EngineIOError:
+ if sid in self.sockets: # pragma: no cover
+ await self.disconnect(sid)
+ r = self._bad_request()
+ if sid in self.sockets and \
+ self.sockets[sid].closed:
+ del self.sockets[sid]
+ elif method == 'POST':
+ if sid is None or sid not in self.sockets:
+ self._log_error_once(f'Invalid session {sid}', 'bad-sid')
+ r = self._bad_request(f'Invalid session {sid}')
+ else:
+ socket = self._get_socket(sid)
+ try:
+ await socket.handle_post_request(environ)
+ r = self._ok(jsonp_index=jsonp_index)
+ except exceptions.EngineIOError:
+ if sid in self.sockets: # pragma: no cover
+ await self.disconnect(sid)
+ r = self._bad_request()
+ except: # pragma: no cover
+ # for any other unexpected errors, we log the error
+ # and keep going
+ self.logger.exception('post request handler error')
+ r = self._ok(jsonp_index=jsonp_index)
+ elif method == 'OPTIONS':
+ r = self._ok()
+ else:
+ self.logger.warning('Method %s not supported', method)
+ r = self._method_not_found()
+ if not isinstance(r, dict):
+ return r
+ if self.http_compression and \
+ len(r['response']) >= self.compression_threshold:
+ encodings = [e.split(';')[0].strip() for e in
+ environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
+ for encoding in encodings:
+ if encoding in self.compression_methods:
+ r['response'] = \
+ getattr(self, '_' + encoding)(r['response'])
+ r['headers'] += [('Content-Encoding', encoding)]
+ break
+ return await self._make_response(r, environ)
+
+ async def shutdown(self):
+ """Stop Socket.IO background tasks.
+
+ This method stops background activity initiated by the Socket.IO
+ server. It must be called before shutting down the web server.
+ """
+ self.logger.info('Socket.IO is shutting down')
+ if self.service_task_event: # pragma: no cover
+ self.service_task_event.set()
+ await self.service_task_handle
+ self.service_task_handle = None
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+        The return value is an ``asyncio.Task`` object.
+ """
+ return asyncio.ensure_future(target(*args, **kwargs))
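+        # Hedged sketch: run a periodic task without blocking the server.
+        #
+        #     async def ticker():
+        #         while True:
+        #             await eio.sleep(5)
+        #
+        #     task = eio.start_background_task(ticker)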
+
+ async def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+
+ Note: this method is a coroutine.
+ """
+ return await asyncio.sleep(seconds)
+
+ def create_queue(self, *args, **kwargs):
+ """Create a queue object using the appropriate async model.
+
+ This is a utility function that applications can use to create a queue
+ without having to worry about using the correct call for the selected
+ async mode. For asyncio based async modes, this returns an instance of
+ ``asyncio.Queue``.
+ """
+ return asyncio.Queue(*args, **kwargs)
+
+ def get_queue_empty_exception(self):
+ """Return the queue empty exception for the appropriate async model.
+
+ This is a utility function that applications can use to work with a
+ queue without having to worry about using the correct call for the
+ selected async mode. For asyncio based async modes, this returns an
+ instance of ``asyncio.QueueEmpty``.
+ """
+ return asyncio.QueueEmpty
+
+ def create_event(self, *args, **kwargs):
+ """Create an event object using the appropriate async model.
+
+ This is a utility function that applications can use to create an
+ event without having to worry about using the correct call for the
+ selected async mode. For asyncio based async modes, this returns
+ an instance of ``asyncio.Event``.
+ """
+ return asyncio.Event(*args, **kwargs)
+
+ async def _make_response(self, response_dict, environ):
+ cors_headers = self._cors_headers(environ)
+ make_response = self._async['make_response']
+ if asyncio.iscoroutinefunction(make_response):
+ response = await make_response(
+ response_dict['status'],
+ response_dict['headers'] + cors_headers,
+ response_dict['response'], environ)
+ else:
+ response = make_response(
+ response_dict['status'],
+ response_dict['headers'] + cors_headers,
+ response_dict['response'], environ)
+ return response
+
+ async def _handle_connect(self, environ, transport, jsonp_index=None):
+ """Handle a client connection request."""
+ if self.start_service_task:
+ # start the service task to monitor connected clients
+ self.start_service_task = False
+ self.service_task_handle = self.start_background_task(
+ self._service_task)
+
+ sid = self.generate_id()
+ s = async_socket.AsyncSocket(self, sid)
+ self.sockets[sid] = s
+
+ pkt = packet.Packet(packet.OPEN, {
+ 'sid': sid,
+ 'upgrades': self._upgrades(sid, transport),
+ 'pingTimeout': int(self.ping_timeout * 1000),
+ 'pingInterval': int(
+ self.ping_interval + self.ping_interval_grace_period) * 1000,
+ 'maxPayload': self.max_http_buffer_size,
+ })
+ await s.send(pkt)
+ s.schedule_ping()
+
+ ret = await self._trigger_event('connect', sid, environ,
+ run_async=False)
+ if ret is not None and ret is not True:
+ del self.sockets[sid]
+ self.logger.warning('Application rejected connection')
+ return self._unauthorized(ret or None)
+
+ if transport == 'websocket':
+ ret = await s.handle_get_request(environ)
+ if s.closed and sid in self.sockets:
+ # websocket connection ended, so we are done
+ del self.sockets[sid]
+ return ret
+ else:
+ s.connected = True
+ headers = None
+ if self.cookie:
+ if isinstance(self.cookie, dict):
+ headers = [(
+ 'Set-Cookie',
+ self._generate_sid_cookie(sid, self.cookie)
+ )]
+ else:
+ headers = [(
+ 'Set-Cookie',
+ self._generate_sid_cookie(sid, {
+ 'name': self.cookie, 'path': '/', 'SameSite': 'Lax'
+ })
+ )]
+ try:
+ return self._ok(await s.poll(), headers=headers,
+ jsonp_index=jsonp_index)
+ except exceptions.QueueEmpty:
+ return self._bad_request()
+
+ async def _trigger_event(self, event, *args, **kwargs):
+ """Invoke an event handler."""
+ run_async = kwargs.pop('run_async', False)
+ ret = None
+ if event in self.handlers:
+ if asyncio.iscoroutinefunction(self.handlers[event]):
+ async def run_async_handler():
+ try:
+ try:
+ return await self.handlers[event](*args)
+ except TypeError:
+ if event == 'disconnect' and \
+ len(args) == 2: # pragma: no branch
+ # legacy disconnect events do not have a reason
+ # argument
+ return await self.handlers[event](args[0])
+ else: # pragma: no cover
+ raise
+ except asyncio.CancelledError: # pragma: no cover
+ pass
+ except:
+ self.logger.exception(event + ' async handler error')
+ if event == 'connect':
+ # if connect handler raised error we reject the
+ # connection
+ return False
+
+ if run_async:
+ ret = self.start_background_task(run_async_handler)
+ task_reference_holder.add(ret)
+ ret.add_done_callback(task_reference_holder.discard)
+ else:
+ ret = await run_async_handler()
+ else:
+ async def run_sync_handler():
+ try:
+ try:
+ return self.handlers[event](*args)
+ except TypeError:
+ if event == 'disconnect' and \
+ len(args) == 2: # pragma: no branch
+ # legacy disconnect events do not have a reason
+ # argument
+ return self.handlers[event](args[0])
+ else: # pragma: no cover
+ raise
+ except:
+ self.logger.exception(event + ' handler error')
+ if event == 'connect':
+ # if connect handler raised error we reject the
+ # connection
+ return False
+
+ if run_async:
+ ret = self.start_background_task(run_sync_handler)
+ task_reference_holder.add(ret)
+ ret.add_done_callback(task_reference_holder.discard)
+ else:
+ ret = await run_sync_handler()
+ return ret
+
+ async def _service_task(self): # pragma: no cover
+ """Monitor connected clients and clean up those that time out."""
+ loop = asyncio.get_running_loop()
+ self.service_task_event = self.create_event()
+ while not self.service_task_event.is_set():
+ if len(self.sockets) == 0:
+ # nothing to do
+ try:
+ await asyncio.wait_for(self.service_task_event.wait(),
+ timeout=self.ping_timeout)
+ break
+ except asyncio.TimeoutError:
+ continue
+
+ # go through the entire client list in a ping interval cycle
+ sleep_interval = self.ping_timeout / len(self.sockets)
+
+ try:
+ # iterate over the current clients
+ for s in self.sockets.copy().values():
+ if s.closed:
+ try:
+ del self.sockets[s.sid]
+ except KeyError:
+ # the socket could have also been removed by
+                            # the _get_socket() method from another task
+ pass
+ elif not s.closing:
+ await s.check_ping_timeout()
+ try:
+ await asyncio.wait_for(self.service_task_event.wait(),
+ timeout=sleep_interval)
+ raise KeyboardInterrupt()
+ except asyncio.TimeoutError:
+ continue
+ except (
+ SystemExit,
+ KeyboardInterrupt,
+ asyncio.CancelledError,
+ GeneratorExit,
+ ):
+ self.logger.info('service task canceled')
+ break
+ except:
+ if loop.is_closed():
+ self.logger.info('event loop is closed, exiting service '
+ 'task')
+ break
+
+ # an unexpected exception has occurred, log it and continue
+ self.logger.exception('service task exception')
diff --git a/tapdown/lib/python3.11/site-packages/engineio/async_socket.py b/tapdown/lib/python3.11/site-packages/engineio/async_socket.py
new file mode 100644
index 0000000..cfdbe1a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/async_socket.py
@@ -0,0 +1,261 @@
+import asyncio
+import sys
+import time
+
+from . import base_socket
+from . import exceptions
+from . import packet
+from . import payload
+
+
+class AsyncSocket(base_socket.BaseSocket):
+ async def poll(self):
+ """Wait for packets to send to the client."""
+ try:
+ packets = [await asyncio.wait_for(
+ self.queue.get(),
+ self.server.ping_interval + self.server.ping_timeout)]
+ self.queue.task_done()
+ except (asyncio.TimeoutError, asyncio.CancelledError):
+ raise exceptions.QueueEmpty()
+ if packets == [None]:
+ return []
+ while True:
+ try:
+ pkt = self.queue.get_nowait()
+ self.queue.task_done()
+ if pkt is None:
+ self.queue.put_nowait(None)
+ break
+ packets.append(pkt)
+ except asyncio.QueueEmpty:
+ break
+ return packets
+
+ async def receive(self, pkt):
+ """Receive packet from the client."""
+ self.server.logger.info('%s: Received packet %s data %s',
+ self.sid, packet.packet_names[pkt.packet_type],
+ pkt.data if not isinstance(pkt.data, bytes)
+                                else '<binary>')
+ if pkt.packet_type == packet.PONG:
+ self.schedule_ping()
+ elif pkt.packet_type == packet.MESSAGE:
+ await self.server._trigger_event(
+ 'message', self.sid, pkt.data,
+ run_async=self.server.async_handlers)
+ elif pkt.packet_type == packet.UPGRADE:
+ await self.send(packet.Packet(packet.NOOP))
+ elif pkt.packet_type == packet.CLOSE:
+ await self.close(wait=False, abort=True,
+ reason=self.server.reason.CLIENT_DISCONNECT)
+ else:
+ raise exceptions.UnknownPacketError()
+
+ async def check_ping_timeout(self):
+ """Make sure the client is still sending pings."""
+ if self.closed:
+ raise exceptions.SocketIsClosedError()
+ if self.last_ping and \
+ time.time() - self.last_ping > self.server.ping_timeout:
+ self.server.logger.info('%s: Client is gone, closing socket',
+ self.sid)
+ # Passing abort=False here will cause close() to write a
+ # CLOSE packet. This has the effect of updating half-open sockets
+ # to their correct state of disconnected
+ await self.close(wait=False, abort=False,
+ reason=self.server.reason.PING_TIMEOUT)
+ return False
+ return True
+
+ async def send(self, pkt):
+ """Send a packet to the client."""
+ if not await self.check_ping_timeout():
+ return
+ else:
+ await self.queue.put(pkt)
+ self.server.logger.info('%s: Sending packet %s data %s',
+ self.sid, packet.packet_names[pkt.packet_type],
+ pkt.data if not isinstance(pkt.data, bytes)
+                                else '<binary>')
+
+ async def handle_get_request(self, environ):
+ """Handle a long-polling GET request from the client."""
+ connections = [
+ s.strip()
+ for s in environ.get('HTTP_CONNECTION', '').lower().split(',')]
+ transport = environ.get('HTTP_UPGRADE', '').lower()
+ if 'upgrade' in connections and transport in self.upgrade_protocols:
+ self.server.logger.info('%s: Received request to upgrade to %s',
+ self.sid, transport)
+ return await getattr(self, '_upgrade_' + transport)(environ)
+ if self.upgrading or self.upgraded:
+ # we are upgrading to WebSocket, do not return any more packets
+ # through the polling endpoint
+ return [packet.Packet(packet.NOOP)]
+ try:
+ packets = await self.poll()
+ except exceptions.QueueEmpty:
+ exc = sys.exc_info()
+ await self.close(wait=False,
+ reason=self.server.reason.TRANSPORT_ERROR)
+ raise exc[1].with_traceback(exc[2])
+ return packets
+
+ async def handle_post_request(self, environ):
+ """Handle a long-polling POST request from the client."""
+ length = int(environ.get('CONTENT_LENGTH', '0'))
+ if length > self.server.max_http_buffer_size:
+ raise exceptions.ContentTooLongError()
+ else:
+ body = (await environ['wsgi.input'].read(length)).decode('utf-8')
+ p = payload.Payload(encoded_payload=body)
+ for pkt in p.packets:
+ await self.receive(pkt)
+
+ async def close(self, wait=True, abort=False, reason=None):
+ """Close the socket connection."""
+ if not self.closed and not self.closing:
+ self.closing = True
+ await self.server._trigger_event(
+ 'disconnect', self.sid,
+ reason or self.server.reason.SERVER_DISCONNECT,
+ run_async=False)
+ if not abort:
+ await self.send(packet.Packet(packet.CLOSE))
+ self.closed = True
+ if wait:
+ await self.queue.join()
+
+ def schedule_ping(self):
+ self.server.start_background_task(self._send_ping)
+
+ async def _send_ping(self):
+ self.last_ping = None
+ await asyncio.sleep(self.server.ping_interval)
+ if not self.closing and not self.closed:
+ self.last_ping = time.time()
+ await self.send(packet.Packet(packet.PING))
+
+ async def _upgrade_websocket(self, environ):
+ """Upgrade the connection from polling to websocket."""
+ if self.upgraded:
+ raise OSError('Socket has been upgraded already')
+ if self.server._async['websocket'] is None:
+ # the selected async mode does not support websocket
+ return self.server._bad_request()
+ ws = self.server._async['websocket'](
+ self._websocket_handler, self.server)
+ return await ws(environ)
+
+ async def _websocket_handler(self, ws):
+ """Engine.IO handler for websocket transport."""
+ async def websocket_wait():
+ data = await ws.wait()
+ if data and len(data) > self.server.max_http_buffer_size:
+ raise ValueError('packet is too large')
+ return data
+
+ if self.connected:
+ # the socket was already connected, so this is an upgrade
+ self.upgrading = True # hold packet sends during the upgrade
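+            # Engine.IO upgrade handshake, as implemented below: the client
+            # probes with PING 'probe', the server answers PONG 'probe' and
+            # unblocks the pending polling GET with a NOOP, then the client
+            # confirms with an UPGRADE packet.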
+
+ try:
+ pkt = await websocket_wait()
+ except OSError: # pragma: no cover
+ return
+ decoded_pkt = packet.Packet(encoded_packet=pkt)
+ if decoded_pkt.packet_type != packet.PING or \
+ decoded_pkt.data != 'probe':
+ self.server.logger.info(
+ '%s: Failed websocket upgrade, no PING packet', self.sid)
+ self.upgrading = False
+ return
+ await ws.send(packet.Packet(packet.PONG, data='probe').encode())
+ await self.queue.put(packet.Packet(packet.NOOP)) # end poll
+
+ try:
+ pkt = await websocket_wait()
+ except OSError: # pragma: no cover
+ self.upgrading = False
+ return
+ decoded_pkt = packet.Packet(encoded_packet=pkt)
+ if decoded_pkt.packet_type != packet.UPGRADE:
+ self.upgraded = False
+ self.server.logger.info(
+ ('%s: Failed websocket upgrade, expected UPGRADE packet, '
+ 'received %s instead.'),
+ self.sid, pkt)
+ self.upgrading = False
+ return
+ self.upgraded = True
+ self.upgrading = False
+ else:
+ self.connected = True
+ self.upgraded = True
+
+ # start separate writer thread
+ async def writer():
+ while True:
+ packets = None
+ try:
+ packets = await self.poll()
+ except exceptions.QueueEmpty:
+ break
+ if not packets:
+ # empty packet list returned -> connection closed
+ break
+ try:
+ for pkt in packets:
+ await ws.send(pkt.encode())
+ except:
+ break
+ await ws.close()
+
+ writer_task = asyncio.ensure_future(writer())
+
+ self.server.logger.info(
+ '%s: Upgrade to websocket successful', self.sid)
+
+ while True:
+ p = None
+ wait_task = asyncio.ensure_future(websocket_wait())
+ try:
+ p = await asyncio.wait_for(
+ wait_task,
+ self.server.ping_interval + self.server.ping_timeout)
+ except asyncio.CancelledError: # pragma: no cover
+ # there is a bug (https://bugs.python.org/issue30508) in
+ # asyncio that causes a "Task exception never retrieved" error
+ # to appear when wait_task raises an exception before it gets
+ # cancelled. Calling wait_task.exception() prevents the error
+ # from being issued in Python 3.6, but causes other errors in
+ # other versions, so we run it with all errors suppressed and
+ # hope for the best.
+ try:
+ wait_task.exception()
+ except:
+ pass
+ break
+ except:
+ break
+ if p is None:
+ # connection closed by client
+ break
+ pkt = packet.Packet(encoded_packet=p)
+ try:
+ await self.receive(pkt)
+ except exceptions.UnknownPacketError: # pragma: no cover
+ pass
+ except exceptions.SocketIsClosedError: # pragma: no cover
+ self.server.logger.info('Receive error -- socket is closed')
+ break
+ except: # pragma: no cover
+ # if we get an unexpected exception we log the error and exit
+ # the connection properly
+ self.server.logger.exception('Unknown receive error')
+
+ await self.queue.put(None) # unlock the writer task so it can exit
+ await asyncio.wait_for(writer_task, timeout=None)
+ await self.close(wait=False, abort=True,
+ reason=self.server.reason.TRANSPORT_CLOSE)
diff --git a/tapdown/lib/python3.11/site-packages/engineio/base_client.py b/tapdown/lib/python3.11/site-packages/engineio/base_client.py
new file mode 100644
index 0000000..01a42c5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/base_client.py
@@ -0,0 +1,169 @@
+import logging
+import signal
+import threading
+import time
+import urllib
+from . import packet
+
+default_logger = logging.getLogger('engineio.client')
+connected_clients = []
+
+
+def signal_handler(sig, frame):
+ """SIGINT handler.
+
+ Disconnect all active clients and then invoke the original signal handler.
+ """
+ for client in connected_clients[:]:
+ if not client.is_asyncio_based():
+ client.disconnect()
+ if callable(original_signal_handler):
+ return original_signal_handler(sig, frame)
+ else: # pragma: no cover
+ # Handle case where no original SIGINT handler was present.
+ return signal.default_int_handler(sig, frame)
+
+
+original_signal_handler = None
+
+
+class BaseClient:
+ event_names = ['connect', 'disconnect', 'message']
+
+ class reason:
+ """Disconnection reasons."""
+ #: Client-initiated disconnection.
+ CLIENT_DISCONNECT = 'client disconnect'
+ #: Server-initiated disconnection.
+ SERVER_DISCONNECT = 'server disconnect'
+ #: Transport error.
+ TRANSPORT_ERROR = 'transport error'
+
+ def __init__(self, logger=False, json=None, request_timeout=5,
+ http_session=None, ssl_verify=True, handle_sigint=True,
+ websocket_extra_options=None, timestamp_requests=True):
+ global original_signal_handler
+ if handle_sigint and original_signal_handler is None and \
+ threading.current_thread() == threading.main_thread():
+ original_signal_handler = signal.signal(signal.SIGINT,
+ signal_handler)
+ self.handlers = {}
+ self.base_url = None
+ self.transports = None
+ self.current_transport = None
+ self.sid = None
+ self.upgrades = None
+ self.ping_interval = None
+ self.ping_timeout = None
+ self.http = http_session
+ self.external_http = http_session is not None
+ self.handle_sigint = handle_sigint
+ self.ws = None
+ self.read_loop_task = None
+ self.write_loop_task = None
+ self.queue = self.create_queue()
+ self.queue_empty = self.get_queue_empty_exception()
+ self.state = 'disconnected'
+ self.ssl_verify = ssl_verify
+ self.websocket_extra_options = websocket_extra_options or {}
+ self.timestamp_requests = timestamp_requests
+
+ if json is not None:
+ packet.Packet.json = json
+ if not isinstance(logger, bool):
+ self.logger = logger
+ else:
+ self.logger = default_logger
+ if self.logger.level == logging.NOTSET:
+ if logger:
+ self.logger.setLevel(logging.INFO)
+ else:
+ self.logger.setLevel(logging.ERROR)
+ self.logger.addHandler(logging.StreamHandler())
+
+ self.request_timeout = request_timeout
+
+ def is_asyncio_based(self):
+ return False
+
+ def on(self, event, handler=None):
+ """Register an event handler.
+
+ :param event: The event name. Can be ``'connect'``, ``'message'`` or
+ ``'disconnect'``.
+ :param handler: The function that should be invoked to handle the
+ event. When this parameter is not given, the method
+ acts as a decorator for the handler function.
+
+ Example usage::
+
+ # as a decorator:
+ @eio.on('connect')
+ def connect_handler():
+ print('Connection request')
+
+ # as a method:
+ def message_handler(msg):
+ print('Received message: ', msg)
+ eio.send('response')
+ eio.on('message', message_handler)
+ """
+ if event not in self.event_names:
+ raise ValueError('Invalid event')
+
+ def set_handler(handler):
+ self.handlers[event] = handler
+ return handler
+
+ if handler is None:
+ return set_handler
+ set_handler(handler)
+
+ def transport(self):
+ """Return the name of the transport currently in use.
+
+ The possible values returned by this function are ``'polling'`` and
+ ``'websocket'``.
+ """
+ return self.current_transport
+
+ def _reset(self):
+ self.state = 'disconnected'
+ self.sid = None
+
+ def _get_engineio_url(self, url, engineio_path, transport):
+ """Generate the Engine.IO connection URL."""
+ engineio_path = engineio_path.strip('/')
+ parsed_url = urllib.parse.urlparse(url)
+
+ if transport == 'polling':
+ scheme = 'http'
+ elif transport == 'websocket':
+ scheme = 'ws'
+ else: # pragma: no cover
+ raise ValueError('invalid transport')
+ if parsed_url.scheme in ['https', 'wss']:
+ scheme += 's'
+
+ return ('{scheme}://{netloc}/{path}/?{query}'
+ '{sep}transport={transport}&EIO=4').format(
+ scheme=scheme, netloc=parsed_url.netloc,
+ path=engineio_path, query=parsed_url.query,
+ sep='&' if parsed_url.query else '',
+ transport=transport)
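+        # Worked example (hypothetical URL): for
+        # ('https://example.com/?a=1', 'engine.io', 'polling') this returns
+        # 'https://example.com/engine.io/?a=1&transport=polling&EIO=4'.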
+
+ def _get_url_timestamp(self):
+ """Generate the Engine.IO query string timestamp."""
+ if not self.timestamp_requests:
+ return ''
+ return '&t=' + str(time.time())
+
+ def create_queue(self, *args, **kwargs): # pragma: no cover
+ """Create a queue object."""
+ raise NotImplementedError('must be implemented in a subclass')
+
+ def get_queue_empty_exception(self): # pragma: no cover
+ """Return the queue empty exception raised by queues created by the
+ ``create_queue()`` method.
+ """
+ raise NotImplementedError('must be implemented in a subclass')
diff --git a/tapdown/lib/python3.11/site-packages/engineio/base_server.py b/tapdown/lib/python3.11/site-packages/engineio/base_server.py
new file mode 100644
index 0000000..7d717fb
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/base_server.py
@@ -0,0 +1,358 @@
+import base64
+import gzip
+import importlib
+import io
+import logging
+import secrets
+import zlib
+
+from . import packet
+from . import payload
+
+default_logger = logging.getLogger('engineio.server')
+
+
+class BaseServer:
+ compression_methods = ['gzip', 'deflate']
+ event_names = ['connect', 'disconnect', 'message']
+ valid_transports = ['polling', 'websocket']
+ _default_monitor_clients = True
+ sequence_number = 0
+
+ class reason:
+ """Disconnection reasons."""
+ #: Server-initiated disconnection.
+ SERVER_DISCONNECT = 'server disconnect'
+ #: Client-initiated disconnection.
+ CLIENT_DISCONNECT = 'client disconnect'
+ #: Ping timeout.
+ PING_TIMEOUT = 'ping timeout'
+ #: Transport close.
+ TRANSPORT_CLOSE = 'transport close'
+ #: Transport error.
+ TRANSPORT_ERROR = 'transport error'
+
+ def __init__(self, async_mode=None, ping_interval=25, ping_timeout=20,
+ max_http_buffer_size=1000000, allow_upgrades=True,
+ http_compression=True, compression_threshold=1024,
+ cookie=None, cors_allowed_origins=None,
+ cors_credentials=True, logger=False, json=None,
+ async_handlers=True, monitor_clients=None, transports=None,
+ **kwargs):
+ self.ping_timeout = ping_timeout
+ if isinstance(ping_interval, tuple):
+ self.ping_interval = ping_interval[0]
+ self.ping_interval_grace_period = ping_interval[1]
+ else:
+ self.ping_interval = ping_interval
+ self.ping_interval_grace_period = 0
+ self.max_http_buffer_size = max_http_buffer_size
+ self.allow_upgrades = allow_upgrades
+ self.http_compression = http_compression
+ self.compression_threshold = compression_threshold
+ self.cookie = cookie
+ self.cors_allowed_origins = cors_allowed_origins
+ self.cors_credentials = cors_credentials
+ self.async_handlers = async_handlers
+ self.sockets = {}
+ self.handlers = {}
+ self.log_message_keys = set()
+ self.start_service_task = monitor_clients \
+ if monitor_clients is not None else self._default_monitor_clients
+ self.service_task_handle = None
+ self.service_task_event = None
+ if json is not None:
+ packet.Packet.json = json
+ if not isinstance(logger, bool):
+ self.logger = logger
+ else:
+ self.logger = default_logger
+ if self.logger.level == logging.NOTSET:
+ if logger:
+ self.logger.setLevel(logging.INFO)
+ else:
+ self.logger.setLevel(logging.ERROR)
+ self.logger.addHandler(logging.StreamHandler())
+ modes = self.async_modes()
+ if async_mode is not None:
+ modes = [async_mode] if async_mode in modes else []
+ self._async = None
+ self.async_mode = None
+ for mode in modes:
+ try:
+ self._async = importlib.import_module(
+ 'engineio.async_drivers.' + mode)._async
+ asyncio_based = self._async['asyncio'] \
+ if 'asyncio' in self._async else False
+ if asyncio_based != self.is_asyncio_based():
+ continue # pragma: no cover
+ self.async_mode = mode
+ break
+ except ImportError:
+ pass
+ if self.async_mode is None:
+ raise ValueError('Invalid async_mode specified')
+ if self.is_asyncio_based() and \
+ ('asyncio' not in self._async or not
+ self._async['asyncio']): # pragma: no cover
+ raise ValueError('The selected async_mode is not asyncio '
+ 'compatible')
+ if not self.is_asyncio_based() and 'asyncio' in self._async and \
+ self._async['asyncio']: # pragma: no cover
+ raise ValueError('The selected async_mode requires asyncio and '
+ 'must use the AsyncServer class')
+ if transports is not None:
+ if isinstance(transports, str):
+ transports = [transports]
+ transports = [transport for transport in transports
+ if transport in self.valid_transports]
+ if not transports:
+ raise ValueError('No valid transports provided')
+ self.transports = transports or self.valid_transports
+ self.logger.info('Server initialized for %s.', self.async_mode)
+
+ def is_asyncio_based(self):
+ return False
+
+ def async_modes(self):
+ return ['eventlet', 'gevent_uwsgi', 'gevent', 'threading']
+
+ def on(self, event, handler=None):
+ """Register an event handler.
+
+ :param event: The event name. Can be ``'connect'``, ``'message'`` or
+ ``'disconnect'``.
+ :param handler: The function that should be invoked to handle the
+ event. When this parameter is not given, the method
+ acts as a decorator for the handler function.
+
+ Example usage::
+
+ # as a decorator:
+ @eio.on('connect')
+ def connect_handler(sid, environ):
+ print('Connection request')
+ if environ['REMOTE_ADDR'] in blacklisted:
+ return False # reject
+
+ # as a method:
+ def message_handler(sid, msg):
+ print('Received message: ', msg)
+ eio.send(sid, 'response')
+ eio.on('message', message_handler)
+
+ The handler function receives the ``sid`` (session ID) for the
+ client as first argument. The ``'connect'`` event handler receives the
+ WSGI environment as a second argument, and can return ``False`` to
+ reject the connection. The ``'message'`` handler receives the message
+        payload as a second argument. The ``'disconnect'`` handler receives
+        the disconnect reason as a second argument.
+ """
+ if event not in self.event_names:
+ raise ValueError('Invalid event')
+
+ def set_handler(handler):
+ self.handlers[event] = handler
+ return handler
+
+ if handler is None:
+ return set_handler
+ set_handler(handler)
+
+ def transport(self, sid):
+ """Return the name of the transport used by the client.
+
+ The two possible values returned by this function are ``'polling'``
+ and ``'websocket'``.
+
+        :param sid: The session id of the client.
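+
+        Example usage (illustrative)::
+
+            if eio.transport(sid) == 'websocket':
+                print('client upgraded to websocket')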
+ """
+ return 'websocket' if self._get_socket(sid).upgraded else 'polling'
+
+ def create_queue(self, *args, **kwargs):
+ """Create a queue object using the appropriate async model.
+
+ This is a utility function that applications can use to create a queue
+ without having to worry about using the correct call for the selected
+ async mode.
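+
+        Example usage (an illustrative sketch)::
+
+            q = eio.create_queue()
+            q.put('hello')
+            try:
+                item = q.get(timeout=1)
+            except eio.get_queue_empty_exception():
+                item = None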
+ """
+ return self._async['queue'](*args, **kwargs)
+
+ def get_queue_empty_exception(self):
+ """Return the queue empty exception for the appropriate async model.
+
+ This is a utility function that applications can use to work with a
+ queue without having to worry about using the correct call for the
+ selected async mode.
+ """
+ return self._async['queue_empty']
+
+ def create_event(self, *args, **kwargs):
+ """Create an event object using the appropriate async model.
+
+ This is a utility function that applications can use to create an
+ event without having to worry about using the correct call for the
+ selected async mode.
+ """
+ return self._async['event'](*args, **kwargs)
+
+ def generate_id(self):
+ """Generate a unique session id."""
+ id = base64.b64encode(
+ secrets.token_bytes(12) + self.sequence_number.to_bytes(3, 'big'))
+ self.sequence_number = (self.sequence_number + 1) & 0xffffff
+ return id.decode('utf-8').replace('/', '_').replace('+', '-')
+
+ def _generate_sid_cookie(self, sid, attributes):
+ """Generate the sid cookie."""
+ cookie = attributes.get('name', 'io') + '=' + sid
+ for attribute, value in attributes.items():
+ if attribute == 'name':
+ continue
+ if callable(value):
+ value = value()
+ if value is True:
+ cookie += '; ' + attribute
+ else:
+ cookie += '; ' + attribute + '=' + value
+ return cookie
+
+ def _upgrades(self, sid, transport):
+ """Return the list of possible upgrades for a client connection."""
+ if not self.allow_upgrades or self._get_socket(sid).upgraded or \
+ transport == 'websocket':
+ return []
+ if self._async['websocket'] is None: # pragma: no cover
+ self._log_error_once(
+ 'The WebSocket transport is not available, you must install a '
+ 'WebSocket server that is compatible with your async mode to '
+ 'enable it. See the documentation for details.',
+ 'no-websocket')
+ return []
+ return ['websocket']
+
+ def _get_socket(self, sid):
+ """Return the socket object for a given session."""
+ try:
+ s = self.sockets[sid]
+ except KeyError:
+ raise KeyError('Session not found')
+ if s.closed:
+ del self.sockets[sid]
+ raise KeyError('Session is disconnected')
+ return s
+
+ def _ok(self, packets=None, headers=None, jsonp_index=None):
+ """Generate a successful HTTP response."""
+ if packets is not None:
+ if headers is None:
+ headers = []
+ headers += [('Content-Type', 'text/plain; charset=UTF-8')]
+ return {'status': '200 OK',
+ 'headers': headers,
+ 'response': payload.Payload(packets=packets).encode(
+ jsonp_index=jsonp_index).encode('utf-8')}
+ else:
+ return {'status': '200 OK',
+ 'headers': [('Content-Type', 'text/plain')],
+ 'response': b'OK'}
+
+ def _bad_request(self, message=None):
+ """Generate a bad request HTTP error response."""
+ if message is None:
+ message = 'Bad Request'
+ message = packet.Packet.json.dumps(message)
+ return {'status': '400 BAD REQUEST',
+ 'headers': [('Content-Type', 'text/plain')],
+ 'response': message.encode('utf-8')}
+
+ def _method_not_found(self):
+ """Generate a method not found HTTP error response."""
+ return {'status': '405 METHOD NOT FOUND',
+ 'headers': [('Content-Type', 'text/plain')],
+ 'response': b'Method Not Found'}
+
+ def _unauthorized(self, message=None):
+ """Generate a unauthorized HTTP error response."""
+ if message is None:
+ message = 'Unauthorized'
+ message = packet.Packet.json.dumps(message)
+ return {'status': '401 UNAUTHORIZED',
+ 'headers': [('Content-Type', 'application/json')],
+ 'response': message.encode('utf-8')}
+
+ def _cors_allowed_origins(self, environ):
+ if self.cors_allowed_origins is None:
+ allowed_origins = []
+ if 'wsgi.url_scheme' in environ and 'HTTP_HOST' in environ:
+ allowed_origins.append('{scheme}://{host}'.format(
+ scheme=environ['wsgi.url_scheme'],
+ host=environ['HTTP_HOST']))
+ if 'HTTP_X_FORWARDED_PROTO' in environ or \
+ 'HTTP_X_FORWARDED_HOST' in environ:
+ scheme = environ.get(
+ 'HTTP_X_FORWARDED_PROTO',
+ environ['wsgi.url_scheme']).split(',')[0].strip()
+ allowed_origins.append('{scheme}://{host}'.format(
+ scheme=scheme, host=environ.get(
+ 'HTTP_X_FORWARDED_HOST',
+ environ['HTTP_HOST']).split(
+ ',')[0].strip()))
+ elif self.cors_allowed_origins == '*':
+ allowed_origins = None
+ elif isinstance(self.cors_allowed_origins, str):
+ allowed_origins = [self.cors_allowed_origins]
+ elif callable(self.cors_allowed_origins):
+ origin = environ.get('HTTP_ORIGIN')
+ try:
+ is_allowed = self.cors_allowed_origins(origin, environ)
+ except TypeError:
+ is_allowed = self.cors_allowed_origins(origin)
+ allowed_origins = [origin] if is_allowed else []
+ else:
+ if '*' in self.cors_allowed_origins:
+ allowed_origins = None
+ else:
+ allowed_origins = self.cors_allowed_origins
+ return allowed_origins
+
+ def _cors_headers(self, environ):
+ """Return the cross-origin-resource-sharing headers."""
+ if self.cors_allowed_origins == []:
+ # special case, CORS handling is completely disabled
+ return []
+ headers = []
+ allowed_origins = self._cors_allowed_origins(environ)
+ if 'HTTP_ORIGIN' in environ and \
+ (allowed_origins is None or environ['HTTP_ORIGIN'] in
+ allowed_origins):
+ headers = [('Access-Control-Allow-Origin', environ['HTTP_ORIGIN'])]
+ if environ['REQUEST_METHOD'] == 'OPTIONS':
+ headers += [('Access-Control-Allow-Methods', 'OPTIONS, GET, POST')]
+ if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in environ:
+ headers += [('Access-Control-Allow-Headers',
+ environ['HTTP_ACCESS_CONTROL_REQUEST_HEADERS'])]
+ if self.cors_credentials:
+ headers += [('Access-Control-Allow-Credentials', 'true')]
+ return headers
+
+ def _gzip(self, response):
+ """Apply gzip compression to a response."""
+ bytesio = io.BytesIO()
+ with gzip.GzipFile(fileobj=bytesio, mode='w') as gz:
+ gz.write(response)
+ return bytesio.getvalue()
+
+ def _deflate(self, response):
+ """Apply deflate compression to a response."""
+ return zlib.compress(response)
+
+ def _log_error_once(self, message, message_key):
+ """Log message with logging.ERROR level the first time, then log
+ with given level."""
+ if message_key not in self.log_message_keys:
+ self.logger.error(message + ' (further occurrences of this error '
+ 'will be logged with level INFO)')
+ self.log_message_keys.add(message_key)
+ else:
+ self.logger.info(message)
diff --git a/tapdown/lib/python3.11/site-packages/engineio/base_socket.py b/tapdown/lib/python3.11/site-packages/engineio/base_socket.py
new file mode 100644
index 0000000..6b5d7dc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/base_socket.py
@@ -0,0 +1,14 @@
+class BaseSocket:
+ upgrade_protocols = ['websocket']
+
+ def __init__(self, server, sid):
+ self.server = server
+ self.sid = sid
+ self.queue = self.server.create_queue()
+ self.last_ping = None
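+        # connection state flags: a socket starts out disconnected, may go
+        # through a polling-to-websocket upgrade, and ends up closed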
+ self.connected = False
+ self.upgrading = False
+ self.upgraded = False
+ self.closing = False
+ self.closed = False
+ self.session = {}
diff --git a/tapdown/lib/python3.11/site-packages/engineio/client.py b/tapdown/lib/python3.11/site-packages/engineio/client.py
new file mode 100644
index 0000000..c04e080
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/client.py
@@ -0,0 +1,632 @@
+from base64 import b64encode
+from engineio.json import JSONDecodeError
+import logging
+import queue
+import ssl
+import threading
+import time
+import urllib
+
+try:
+ import requests
+except ImportError: # pragma: no cover
+ requests = None
+try:
+ import websocket
+except ImportError: # pragma: no cover
+ websocket = None
+from . import base_client
+from . import exceptions
+from . import packet
+from . import payload
+
+default_logger = logging.getLogger('engineio.client')
+
+
+class Client(base_client.BaseClient):
+ """An Engine.IO client.
+
+ This class implements a fully compliant Engine.IO web client with support
+ for websocket and long-polling transports.
+
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors are logged even when
+ ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param request_timeout: A timeout in seconds for requests. The default is
+ 5 seconds.
+ :param http_session: an initialized ``requests.Session`` object to be used
+ when sending requests to the server. Use it if you
+ need to add special client options such as proxy
+ servers, SSL certificates, custom CA bundle, etc.
+ :param ssl_verify: ``True`` to verify SSL certificates, or ``False`` to
+ skip SSL certificate verification, allowing
+ connections to servers with self signed certificates.
+ The default is ``True``.
+ :param handle_sigint: Set to ``True`` to automatically handle disconnection
+ when the process is interrupted, or to ``False`` to
+ leave interrupt handling to the calling application.
+ Interrupt handling can only be enabled when the
+ client instance is created in the main thread.
+ :param websocket_extra_options: Dictionary containing additional keyword
+ arguments passed to
+ ``websocket.create_connection()``.
+    :param timestamp_requests: If ``True`` a timestamp is added to the query
+                               string of Engine.IO requests as a cache-busting
+                               measure. Set to ``False`` to disable.
+ """
+ def connect(self, url, headers=None, transports=None,
+ engineio_path='engine.io'):
+ """Connect to an Engine.IO server.
+
+ :param url: The URL of the Engine.IO server. It can include custom
+ query string parameters if required by the server.
+ :param headers: A dictionary with custom headers to send with the
+ connection request.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. If not
+ given, the polling transport is connected first,
+ then an upgrade to websocket is attempted.
+ :param engineio_path: The endpoint where the Engine.IO server is
+ installed. The default value is appropriate for
+ most cases.
+
+ Example usage::
+
+ eio = engineio.Client()
+ eio.connect('http://localhost:5000')
+ """
+ if self.state != 'disconnected':
+ raise ValueError('Client is not in a disconnected state')
+ valid_transports = ['polling', 'websocket']
+ if transports is not None:
+ if isinstance(transports, str):
+ transports = [transports]
+ transports = [transport for transport in transports
+ if transport in valid_transports]
+ if not transports:
+ raise ValueError('No valid transports provided')
+ self.transports = transports or valid_transports
+ return getattr(self, '_connect_' + self.transports[0])(
+ url, headers or {}, engineio_path)
+
+ def wait(self):
+ """Wait until the connection with the server ends.
+
+ Client applications can use this function to block the main thread
+ during the life of the connection.
+ """
+ if self.read_loop_task:
+ self.read_loop_task.join()
+
+ def send(self, data):
+ """Send a message to the server.
+
+ :param data: The data to send to the server. Data can be of type
+ ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
+ or ``dict``, the data will be serialized as JSON.
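+
+        Example usage (illustrative)::
+
+            eio.send('hello')
+            eio.send({'event': 'update', 'value': 42})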
+ """
+ self._send_packet(packet.Packet(packet.MESSAGE, data=data))
+
+ def disconnect(self, abort=False, reason=None):
+ """Disconnect from the server.
+
+        :param abort: If set to ``True``, do not wait for background tasks
+                      associated with the connection to end.
+        :param reason: The disconnect reason to report to the
+                       ``'disconnect'`` event handler. When not given,
+                       ``CLIENT_DISCONNECT`` is used.
+ """
+ if self.state == 'connected':
+ self._send_packet(packet.Packet(packet.CLOSE))
+ self.queue.put(None)
+ self.state = 'disconnecting'
+ self._trigger_event('disconnect',
+ reason or self.reason.CLIENT_DISCONNECT,
+ run_async=False)
+ if self.current_transport == 'websocket':
+ self.ws.close()
+ if not abort:
+ self.read_loop_task.join()
+ self.state = 'disconnected'
+ try:
+ base_client.connected_clients.remove(self)
+ except ValueError: # pragma: no cover
+ pass
+ self._reset()
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task.
+
+ This is a utility function that applications can use to start a
+ background task.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+ This function returns an object that represents the background task,
+ on which the ``join()`` method can be invoked to wait for the task to
+ complete.
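+
+        Example usage (an illustrative sketch; ``my_worker`` is assumed to
+        be defined by the application)::
+
+            task = eio.start_background_task(my_worker, 'some-argument')
+            task.join()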
+ """
+ th = threading.Thread(target=target, args=args, kwargs=kwargs,
+ daemon=True)
+ th.start()
+ return th
+
+ def sleep(self, seconds=0):
+ """Sleep for the requested amount of time."""
+ return time.sleep(seconds)
+
+ def create_queue(self, *args, **kwargs):
+ """Create a queue object."""
+ return queue.Queue(*args, **kwargs)
+
+ def get_queue_empty_exception(self):
+ """Return the queue empty exception raised by queues created by the
+ ``create_queue()`` method.
+ """
+ return queue.Empty
+
+ def create_event(self, *args, **kwargs):
+ """Create an event object."""
+ return threading.Event(*args, **kwargs)
+
+ def _reset(self):
+ super()._reset()
+ while True: # pragma: no cover
+ try:
+ self.queue.get_nowait()
+ self.queue.task_done()
+ except self.queue_empty:
+ break
+
+ def _connect_polling(self, url, headers, engineio_path):
+ """Establish a long-polling connection to the Engine.IO server."""
+ if requests is None: # pragma: no cover
+ # not installed
+ self.logger.error('requests package is not installed -- cannot '
+ 'send HTTP requests!')
+ return
+ self.base_url = self._get_engineio_url(url, engineio_path, 'polling')
+ self.logger.info('Attempting polling connection to ' + self.base_url)
+ r = self._send_request(
+ 'GET', self.base_url + self._get_url_timestamp(), headers=headers,
+ timeout=self.request_timeout)
+ if r is None or isinstance(r, str):
+ self._reset()
+ raise exceptions.ConnectionError(
+ r or 'Connection refused by the server')
+ if r.status_code < 200 or r.status_code >= 300:
+ self._reset()
+ try:
+ arg = r.json()
+ except JSONDecodeError:
+ arg = None
+ raise exceptions.ConnectionError(
+ 'Unexpected status code {} in server response'.format(
+ r.status_code), arg)
+ try:
+ p = payload.Payload(encoded_payload=r.content.decode('utf-8'))
+ except ValueError:
+ raise exceptions.ConnectionError(
+ 'Unexpected response from server') from None
+ open_packet = p.packets[0]
+ if open_packet.packet_type != packet.OPEN:
+ raise exceptions.ConnectionError(
+ 'OPEN packet not returned by server')
+ self.logger.info(
+ 'Polling connection accepted with ' + str(open_packet.data))
+ self.sid = open_packet.data['sid']
+ self.upgrades = open_packet.data['upgrades']
+ self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
+ self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
+ self.current_transport = 'polling'
+ self.base_url += '&sid=' + self.sid
+
+ self.state = 'connected'
+ base_client.connected_clients.append(self)
+ self._trigger_event('connect', run_async=False)
+
+ for pkt in p.packets[1:]:
+ self._receive_packet(pkt)
+
+ if 'websocket' in self.upgrades and 'websocket' in self.transports:
+ # attempt to upgrade to websocket
+ if self._connect_websocket(url, headers, engineio_path):
+ # upgrade to websocket succeeded, we're done here
+ return
+
+ # start background tasks associated with this client
+ self.write_loop_task = self.start_background_task(self._write_loop)
+ self.read_loop_task = self.start_background_task(
+ self._read_loop_polling)
+
+ def _connect_websocket(self, url, headers, engineio_path):
+ """Establish or upgrade to a WebSocket connection with the server."""
+ if websocket is None: # pragma: no cover
+ # not installed
+ self.logger.error('websocket-client package not installed, only '
+ 'polling transport is available')
+ return False
+ websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
+ if self.sid:
+ self.logger.info(
+ 'Attempting WebSocket upgrade to ' + websocket_url)
+ upgrade = True
+ websocket_url += '&sid=' + self.sid
+ else:
+ upgrade = False
+ self.base_url = websocket_url
+ self.logger.info(
+ 'Attempting WebSocket connection to ' + websocket_url)
+
+ # get cookies and other settings from the long-polling connection
+ # so that they are preserved when connecting to the WebSocket route
+ cookies = None
+ extra_options = {}
+ if self.http:
+ # cookies
+ cookies = '; '.join([f"{cookie.name}={cookie.value}"
+ for cookie in self.http.cookies])
+ for header, value in headers.items():
+ if header.lower() == 'cookie':
+ if cookies:
+ cookies += '; '
+ cookies += value
+ del headers[header]
+ break
+
+ # auth
+ if 'Authorization' not in headers and self.http.auth is not None:
+ if not isinstance(self.http.auth, tuple): # pragma: no cover
+ raise ValueError('Only basic authentication is supported')
+ basic_auth = '{}:{}'.format(
+ self.http.auth[0], self.http.auth[1]).encode('utf-8')
+ basic_auth = b64encode(basic_auth).decode('utf-8')
+ headers['Authorization'] = 'Basic ' + basic_auth
+
+ # cert
+ # this can be given as ('certfile', 'keyfile') or just 'certfile'
+ if isinstance(self.http.cert, tuple):
+ extra_options['sslopt'] = {
+ 'certfile': self.http.cert[0],
+ 'keyfile': self.http.cert[1]}
+ elif self.http.cert:
+ extra_options['sslopt'] = {'certfile': self.http.cert}
+
+ # proxies
+ if self.http.proxies:
+ proxy_url = None
+ if websocket_url.startswith('ws://'):
+ proxy_url = self.http.proxies.get(
+ 'ws', self.http.proxies.get('http'))
+ else: # wss://
+ proxy_url = self.http.proxies.get(
+ 'wss', self.http.proxies.get('https'))
+ if proxy_url:
+ parsed_url = urllib.parse.urlparse(
+ proxy_url if '://' in proxy_url
+ else 'scheme://' + proxy_url)
+ extra_options['http_proxy_host'] = parsed_url.hostname
+ extra_options['http_proxy_port'] = parsed_url.port
+ extra_options['http_proxy_auth'] = (
+ (parsed_url.username, parsed_url.password)
+ if parsed_url.username or parsed_url.password
+ else None)
+
+ # verify
+ if isinstance(self.http.verify, str):
+ if 'sslopt' in extra_options:
+ extra_options['sslopt']['ca_certs'] = self.http.verify
+ else:
+ extra_options['sslopt'] = {'ca_certs': self.http.verify}
+ elif not self.http.verify:
+ self.ssl_verify = False
+
+ if not self.ssl_verify:
+ if 'sslopt' in extra_options:
+ extra_options['sslopt'].update({"cert_reqs": ssl.CERT_NONE})
+ else:
+ extra_options['sslopt'] = {"cert_reqs": ssl.CERT_NONE}
+
+ # combine internally generated options with the ones supplied by the
+ # caller. The caller's options take precedence.
+ headers.update(self.websocket_extra_options.pop('header', {}))
+ extra_options['header'] = headers
+ extra_options['cookie'] = cookies
+ extra_options['enable_multithread'] = True
+ extra_options['timeout'] = self.request_timeout
+ extra_options.update(self.websocket_extra_options)
+ try:
+ ws = websocket.create_connection(
+ websocket_url + self._get_url_timestamp(), **extra_options)
+ except (ConnectionError, OSError, websocket.WebSocketException):
+ if upgrade:
+ self.logger.warning(
+ 'WebSocket upgrade failed: connection error')
+ return False
+ else:
+ raise exceptions.ConnectionError('Connection error')
+ if upgrade:
+ p = packet.Packet(packet.PING, data='probe').encode()
+ try:
+ ws.send(p)
+ except Exception as e: # pragma: no cover
+ self.logger.warning(
+ 'WebSocket upgrade failed: unexpected send exception: %s',
+ str(e))
+ return False
+ try:
+ p = ws.recv()
+ except Exception as e: # pragma: no cover
+ self.logger.warning(
+ 'WebSocket upgrade failed: unexpected recv exception: %s',
+ str(e))
+ return False
+ pkt = packet.Packet(encoded_packet=p)
+ if pkt.packet_type != packet.PONG or pkt.data != 'probe':
+ self.logger.warning(
+ 'WebSocket upgrade failed: no PONG packet')
+ return False
+ p = packet.Packet(packet.UPGRADE).encode()
+ try:
+ ws.send(p)
+ except Exception as e: # pragma: no cover
+ self.logger.warning(
+ 'WebSocket upgrade failed: unexpected send exception: %s',
+ str(e))
+ return False
+ self.current_transport = 'websocket'
+ self.logger.info('WebSocket upgrade was successful')
+ else:
+ try:
+ p = ws.recv()
+ except Exception as e: # pragma: no cover
+ raise exceptions.ConnectionError(
+ 'Unexpected recv exception: ' + str(e))
+ open_packet = packet.Packet(encoded_packet=p)
+ if open_packet.packet_type != packet.OPEN:
+ raise exceptions.ConnectionError('no OPEN packet')
+ self.logger.info(
+ 'WebSocket connection accepted with ' + str(open_packet.data))
+ self.sid = open_packet.data['sid']
+ self.upgrades = open_packet.data['upgrades']
+ self.ping_interval = int(open_packet.data['pingInterval']) / 1000.0
+ self.ping_timeout = int(open_packet.data['pingTimeout']) / 1000.0
+ self.current_transport = 'websocket'
+
+ self.state = 'connected'
+ base_client.connected_clients.append(self)
+ self._trigger_event('connect', run_async=False)
+ self.ws = ws
+ self.ws.settimeout(self.ping_interval + self.ping_timeout)
+
+ # start background tasks associated with this client
+ self.write_loop_task = self.start_background_task(self._write_loop)
+ self.read_loop_task = self.start_background_task(
+ self._read_loop_websocket)
+ return True
+
+ def _receive_packet(self, pkt):
+ """Handle incoming packets from the server."""
+ packet_name = packet.packet_names[pkt.packet_type] \
+ if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
+ self.logger.info(
+ 'Received packet %s data %s', packet_name,
+ pkt.data if not isinstance(pkt.data, bytes) else '')
+ if pkt.packet_type == packet.MESSAGE:
+ self._trigger_event('message', pkt.data, run_async=True)
+ elif pkt.packet_type == packet.PING:
+ self._send_packet(packet.Packet(packet.PONG, pkt.data))
+ elif pkt.packet_type == packet.CLOSE:
+ self.disconnect(abort=True, reason=self.reason.SERVER_DISCONNECT)
+ elif pkt.packet_type == packet.NOOP:
+ pass
+ else:
+ self.logger.error('Received unexpected packet of type %s',
+ pkt.packet_type)
+
+ def _send_packet(self, pkt):
+ """Queue a packet to be sent to the server."""
+ if self.state != 'connected':
+ return
+ self.queue.put(pkt)
+ self.logger.info(
+ 'Sending packet %s data %s',
+ packet.packet_names[pkt.packet_type],
+ pkt.data if not isinstance(pkt.data, bytes) else '')
+
+ def _send_request(
+ self, method, url, headers=None, body=None,
+ timeout=None): # pragma: no cover
+ if self.http is None:
+ self.http = requests.Session()
+ if not self.ssl_verify:
+ self.http.verify = False
+ try:
+ return self.http.request(method, url, headers=headers, data=body,
+ timeout=timeout)
+ except requests.exceptions.RequestException as exc:
+ self.logger.info('HTTP %s request to %s failed with error %s.',
+ method, url, exc)
+ return str(exc)
+
+ def _trigger_event(self, event, *args, **kwargs):
+ """Invoke an event handler."""
+ run_async = kwargs.pop('run_async', False)
+ if event in self.handlers:
+ if run_async:
+ return self.start_background_task(self.handlers[event], *args)
+ else:
+ try:
+ try:
+ return self.handlers[event](*args)
+ except TypeError:
+ if event == 'disconnect' and \
+ len(args) == 1: # pragma: no branch
+ # legacy disconnect events do not have a reason
+ # argument
+ return self.handlers[event]()
+ else: # pragma: no cover
+ raise
+ except:
+ self.logger.exception(event + ' handler error')
+
+ def _read_loop_polling(self):
+ """Read packets by polling the Engine.IO server."""
+ while self.state == 'connected' and self.write_loop_task:
+ self.logger.info(
+ 'Sending polling GET request to ' + self.base_url)
+ r = self._send_request(
+ 'GET', self.base_url + self._get_url_timestamp(),
+ timeout=max(self.ping_interval, self.ping_timeout) + 5)
+ if r is None or isinstance(r, str):
+ self.logger.warning(
+ r or 'Connection refused by the server, aborting')
+ self.queue.put(None)
+ break
+ if r.status_code < 200 or r.status_code >= 300:
+ self.logger.warning('Unexpected status code %s in server '
+ 'response, aborting', r.status_code)
+ self.queue.put(None)
+ break
+ try:
+ p = payload.Payload(encoded_payload=r.content.decode('utf-8'))
+ except ValueError:
+ self.logger.warning(
+ 'Unexpected packet from server, aborting')
+ self.queue.put(None)
+ break
+ for pkt in p.packets:
+ self._receive_packet(pkt)
+
+ if self.write_loop_task: # pragma: no branch
+ self.logger.info('Waiting for write loop task to end')
+ self.write_loop_task.join()
+ if self.state == 'connected':
+ self._trigger_event('disconnect', self.reason.TRANSPORT_ERROR,
+ run_async=False)
+ try:
+ base_client.connected_clients.remove(self)
+ except ValueError: # pragma: no cover
+ pass
+ self._reset()
+ self.logger.info('Exiting read loop task')
+
+ def _read_loop_websocket(self):
+ """Read packets from the Engine.IO WebSocket connection."""
+ while self.state == 'connected':
+ p = None
+ try:
+ p = self.ws.recv()
+ if len(p) == 0 and not self.ws.connected: # pragma: no cover
+ # websocket client can return an empty string after close
+ raise websocket.WebSocketConnectionClosedException()
+ except websocket.WebSocketTimeoutException:
+ self.logger.warning(
+ 'Server has stopped communicating, aborting')
+ self.queue.put(None)
+ break
+ except websocket.WebSocketConnectionClosedException:
+ self.logger.warning(
+ 'WebSocket connection was closed, aborting')
+ self.queue.put(None)
+ break
+ except Exception as e: # pragma: no cover
+ if type(e) is OSError and e.errno == 9:
+ self.logger.info(
+ 'WebSocket connection is closing, aborting')
+ else:
+ self.logger.info(
+ 'Unexpected error receiving packet: "%s", aborting',
+ str(e))
+ self.queue.put(None)
+ break
+ try:
+ pkt = packet.Packet(encoded_packet=p)
+ except Exception as e: # pragma: no cover
+ self.logger.info(
+ 'Unexpected error decoding packet: "%s", aborting', str(e))
+ self.queue.put(None)
+ break
+ self._receive_packet(pkt)
+
+ if self.write_loop_task: # pragma: no branch
+ self.logger.info('Waiting for write loop task to end')
+ self.write_loop_task.join()
+ if self.state == 'connected':
+ self._trigger_event('disconnect', self.reason.TRANSPORT_ERROR,
+ run_async=False)
+ try:
+ base_client.connected_clients.remove(self)
+ except ValueError: # pragma: no cover
+ pass
+ self._reset()
+ self.logger.info('Exiting read loop task')
+
+ def _write_loop(self):
+ """This background task sends packages to the server as they are
+ pushed to the send queue.
+ """
+ while self.state == 'connected':
+ # to simplify the timeout handling, use the maximum of the
+ # ping interval and ping timeout as timeout, with an extra 5
+ # seconds grace period
+ timeout = max(self.ping_interval, self.ping_timeout) + 5
+ packets = None
+ try:
+ packets = [self.queue.get(timeout=timeout)]
+ except self.queue_empty:
+ self.logger.error('packet queue is empty, aborting')
+ break
+ if packets == [None]:
+ self.queue.task_done()
+ packets = []
+ else:
+ while True:
+ try:
+ packets.append(self.queue.get(block=False))
+ except self.queue_empty:
+ break
+ if packets[-1] is None:
+ packets = packets[:-1]
+ self.queue.task_done()
+ break
+ if not packets:
+ # empty packet list returned -> connection closed
+ break
+ if self.current_transport == 'polling':
+ p = payload.Payload(packets=packets)
+ r = self._send_request(
+ 'POST', self.base_url, body=p.encode(),
+ headers={'Content-Type': 'text/plain'},
+ timeout=self.request_timeout)
+ for pkt in packets:
+ self.queue.task_done()
+ if r is None or isinstance(r, str):
+ self.logger.warning(
+ r or 'Connection refused by the server, aborting')
+ break
+ if r.status_code < 200 or r.status_code >= 300:
+ self.logger.warning('Unexpected status code %s in server '
+ 'response, aborting', r.status_code)
+ self.write_loop_task = None
+ break
+ else:
+ # websocket
+ try:
+ for pkt in packets:
+ encoded_packet = pkt.encode()
+ if pkt.binary:
+ self.ws.send_binary(encoded_packet)
+ else:
+ self.ws.send(encoded_packet)
+ self.queue.task_done()
+ except (websocket.WebSocketConnectionClosedException,
+ BrokenPipeError, OSError):
+ self.logger.warning(
+ 'WebSocket connection was closed, aborting')
+ break
+ self.logger.info('Exiting write loop task')
diff --git a/tapdown/lib/python3.11/site-packages/engineio/exceptions.py b/tapdown/lib/python3.11/site-packages/engineio/exceptions.py
new file mode 100644
index 0000000..fb0b3e0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/exceptions.py
@@ -0,0 +1,22 @@
+class EngineIOError(Exception):
+ pass
+
+
+class ContentTooLongError(EngineIOError):
+ pass
+
+
+class UnknownPacketError(EngineIOError):
+ pass
+
+
+class QueueEmpty(EngineIOError):
+ pass
+
+
+class SocketIsClosedError(EngineIOError):
+ pass
+
+
+class ConnectionError(EngineIOError):
+ pass
diff --git a/tapdown/lib/python3.11/site-packages/engineio/json.py b/tapdown/lib/python3.11/site-packages/engineio/json.py
new file mode 100644
index 0000000..b612556
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/json.py
@@ -0,0 +1,16 @@
+"""JSON-compatible module with sane defaults."""
+
+from json import * # noqa: F401, F403
+from json import loads as original_loads
+
+
+def _safe_int(s):
+ if len(s) > 100:
+ raise ValueError('Integer is too large')
+ return int(s)
+
+
+def loads(*args, **kwargs):
+ if 'parse_int' not in kwargs: # pragma: no cover
+ kwargs['parse_int'] = _safe_int
+ return original_loads(*args, **kwargs)
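+
+
+# Example (an illustrative sketch): integers longer than 100 digits are
+# rejected, which guards against maliciously large numbers in untrusted
+# payloads:
+#
+#     from engineio import json
+#     json.loads('{"n": 7}')    # {'n': 7}
+#     json.loads('9' * 101)     # raises ValueError('Integer is too large')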
diff --git a/tapdown/lib/python3.11/site-packages/engineio/middleware.py b/tapdown/lib/python3.11/site-packages/engineio/middleware.py
new file mode 100644
index 0000000..0e34fb0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/middleware.py
@@ -0,0 +1,86 @@
+import os
+from engineio.static_files import get_static_file
+
+
+class WSGIApp:
+ """WSGI application middleware for Engine.IO.
+
+ This middleware dispatches traffic to an Engine.IO application. It can
+ also serve a list of static files to the client, or forward unrelated
+ HTTP traffic to another WSGI application.
+
+ :param engineio_app: The Engine.IO server. Must be an instance of the
+ ``engineio.Server`` class.
+ :param wsgi_app: The WSGI app that receives all other traffic.
+ :param static_files: A dictionary with static file mapping rules. See the
+ documentation for details on this argument.
+ :param engineio_path: The endpoint where the Engine.IO application should
+ be installed. The default value is appropriate for
+ most cases.
+
+ Example usage::
+
+ import engineio
+ import eventlet
+
+ eio = engineio.Server()
+ app = engineio.WSGIApp(eio, static_files={
+ '/': {'content_type': 'text/html', 'filename': 'index.html'},
+ '/index.html': {'content_type': 'text/html',
+ 'filename': 'index.html'},
+ })
+ eventlet.wsgi.server(eventlet.listen(('', 8000)), app)
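+
+    Traffic that does not belong to Engine.IO can be forwarded to another
+    WSGI application (an illustrative sketch; ``other_wsgi_app`` is assumed
+    to be defined elsewhere)::
+
+        app = engineio.WSGIApp(eio, other_wsgi_app)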
+ """
+ def __init__(self, engineio_app, wsgi_app=None, static_files=None,
+ engineio_path='engine.io'):
+ self.engineio_app = engineio_app
+ self.wsgi_app = wsgi_app
+ self.engineio_path = engineio_path
+ if not self.engineio_path.startswith('/'):
+ self.engineio_path = '/' + self.engineio_path
+ if not self.engineio_path.endswith('/'):
+ self.engineio_path += '/'
+ self.static_files = static_files or {}
+
+ def __call__(self, environ, start_response):
+ if 'gunicorn.socket' in environ:
+ # gunicorn saves the socket under environ['gunicorn.socket'], while
+ # eventlet saves it under environ['eventlet.input']. Eventlet also
+            # stores the socket inside a wrapper class, while gunicorn writes it
+ # directly into the environment. To give eventlet's WebSocket
+ # module access to this socket when running under gunicorn, here we
+ # copy the socket to the eventlet format.
+ class Input:
+ def __init__(self, socket):
+ self.socket = socket
+
+ def get_socket(self):
+ return self.socket
+
+ environ['eventlet.input'] = Input(environ['gunicorn.socket'])
+ path = environ['PATH_INFO']
+ if path is not None and path.startswith(self.engineio_path):
+ return self.engineio_app.handle_request(environ, start_response)
+ else:
+ static_file = get_static_file(path, self.static_files) \
+ if self.static_files else None
+ if static_file and os.path.exists(static_file['filename']):
+ start_response(
+ '200 OK',
+ [('Content-Type', static_file['content_type'])])
+ with open(static_file['filename'], 'rb') as f:
+ return [f.read()]
+ elif self.wsgi_app is not None:
+ return self.wsgi_app(environ, start_response)
+ return self.not_found(start_response)
+
+ def not_found(self, start_response):
+ start_response("404 Not Found", [('Content-Type', 'text/plain')])
+ return [b'Not Found']
+
+
+class Middleware(WSGIApp):
+ """This class has been renamed to ``WSGIApp`` and is now deprecated."""
+ def __init__(self, engineio_app, wsgi_app=None,
+ engineio_path='engine.io'):
+ super().__init__(engineio_app, wsgi_app, engineio_path=engineio_path)
diff --git a/tapdown/lib/python3.11/site-packages/engineio/packet.py b/tapdown/lib/python3.11/site-packages/engineio/packet.py
new file mode 100644
index 0000000..40bb6df
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/packet.py
@@ -0,0 +1,82 @@
+import base64
+from engineio import json as _json
+
+(OPEN, CLOSE, PING, PONG, MESSAGE, UPGRADE, NOOP) = (0, 1, 2, 3, 4, 5, 6)
+packet_names = ['OPEN', 'CLOSE', 'PING', 'PONG', 'MESSAGE', 'UPGRADE', 'NOOP']
+
+binary_types = (bytes, bytearray)
+
+
+class Packet:
+ """Engine.IO packet."""
+
+ json = _json
+
+ def __init__(self, packet_type=NOOP, data=None, encoded_packet=None):
+ self.packet_type = packet_type
+ self.data = data
+ self.encode_cache = None
+ if isinstance(data, str):
+ self.binary = False
+ elif isinstance(data, binary_types):
+ self.binary = True
+ else:
+ self.binary = False
+ if self.binary and self.packet_type != MESSAGE:
+ raise ValueError('Binary packets can only be of type MESSAGE')
+ if encoded_packet is not None:
+ self.decode(encoded_packet)
+
+ def encode(self, b64=False):
+ """Encode the packet for transmission.
+
+ Note: as a performance optimization, subsequent calls to this method
+ will return a cached encoded packet, even if the data has changed.
+ """
+ if self.encode_cache:
+ return self.encode_cache
+ if self.binary:
+ if b64:
+ encoded_packet = 'b' + base64.b64encode(self.data).decode(
+ 'utf-8')
+ else:
+ encoded_packet = self.data
+ else:
+ encoded_packet = str(self.packet_type)
+ if isinstance(self.data, str):
+ encoded_packet += self.data
+ elif isinstance(self.data, dict) or isinstance(self.data, list):
+ encoded_packet += self.json.dumps(self.data,
+ separators=(',', ':'))
+ elif self.data is not None:
+ encoded_packet += str(self.data)
+ self.encode_cache = encoded_packet
+ return encoded_packet
+
+ def decode(self, encoded_packet):
+ """Decode a transmitted package."""
+ self.binary = isinstance(encoded_packet, binary_types)
+ if not self.binary and len(encoded_packet) == 0:
+ raise ValueError('Invalid empty packet received')
+ b64 = not self.binary and encoded_packet[0] == 'b'
+ if b64:
+ self.binary = True
+ self.packet_type = MESSAGE
+ self.data = base64.b64decode(encoded_packet[1:])
+ else:
+ if self.binary and not isinstance(encoded_packet, bytes):
+ encoded_packet = bytes(encoded_packet)
+ if self.binary:
+ self.packet_type = MESSAGE
+ self.data = encoded_packet
+ else:
+ self.packet_type = int(encoded_packet[0])
+ try:
+ if encoded_packet[1].isnumeric():
+ # do not allow integer payloads, see
+ # github.com/miguelgrinberg/python-engineio/issues/75
+ # for background on this decision
+ raise ValueError
+ self.data = self.json.loads(encoded_packet[1:])
+ except (ValueError, IndexError):
+ self.data = encoded_packet[1:]
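+
+
+# Example (an illustrative sketch) of the text encoding: the packet type
+# digit is prepended to the payload, and dict or list data is serialized
+# as compact JSON:
+#
+#     Packet(MESSAGE, data='hello').encode()          # '4hello'
+#     Packet(MESSAGE, data={'a': 1}).encode()         # '4{"a":1}'
+#     Packet(encoded_packet='4hello').data            # 'hello'
+#     Packet(MESSAGE, data=b'\x01').encode(b64=True)  # 'bAQ=='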
diff --git a/tapdown/lib/python3.11/site-packages/engineio/payload.py b/tapdown/lib/python3.11/site-packages/engineio/payload.py
new file mode 100644
index 0000000..775241b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/payload.py
@@ -0,0 +1,46 @@
+import urllib
+
+from . import packet
+
+
+class Payload:
+ """Engine.IO payload."""
+ max_decode_packets = 16
+
+ def __init__(self, packets=None, encoded_payload=None):
+ self.packets = packets or []
+ if encoded_payload is not None:
+ self.decode(encoded_payload)
+
+ def encode(self, jsonp_index=None):
+ """Encode the payload for transmission."""
+ encoded_payload = ''
+ for pkt in self.packets:
+ if encoded_payload:
+ encoded_payload += '\x1e'
+ encoded_payload += pkt.encode(b64=True)
+ if jsonp_index is not None:
+ encoded_payload = '___eio[' + \
+ str(jsonp_index) + \
+ ']("' + \
+ encoded_payload.replace('"', '\\"') + \
+ '");'
+ return encoded_payload
+
+ def decode(self, encoded_payload):
+ """Decode a transmitted payload."""
+ self.packets = []
+
+ if len(encoded_payload) == 0:
+ return
+
+ # JSONP POST payload starts with 'd='
+ if encoded_payload.startswith('d='):
+ encoded_payload = urllib.parse.parse_qs(
+ encoded_payload)['d'][0]
+
+ encoded_packets = encoded_payload.split('\x1e')
+ if len(encoded_packets) > self.max_decode_packets:
+ raise ValueError('Too many packets in payload')
+ self.packets = [packet.Packet(encoded_packet=encoded_packet)
+ for encoded_packet in encoded_packets]
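+
+
+# Example (an illustrative sketch): encoded packets are joined with the
+# 0x1e record separator character:
+#
+#     p = Payload(packets=[packet.Packet(packet.MESSAGE, data='hi'),
+#                          packet.Packet(packet.PING)])
+#     p.encode()    # '4hi\x1e2'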
diff --git a/tapdown/lib/python3.11/site-packages/engineio/server.py b/tapdown/lib/python3.11/site-packages/engineio/server.py
new file mode 100644
index 0000000..59f690c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/server.py
@@ -0,0 +1,503 @@
+import logging
+import urllib
+
+from . import base_server
+from . import exceptions
+from . import packet
+from . import socket
+
+default_logger = logging.getLogger('engineio.server')
+
+
+class Server(base_server.BaseServer):
+ """An Engine.IO server.
+
+ This class implements a fully compliant Engine.IO web server with support
+ for websocket and long-polling transports.
+
+ :param async_mode: The asynchronous model to use. See the Deployment
+ section in the documentation for a description of the
+ available options. Valid async modes are "threading",
+ "eventlet", "gevent" and "gevent_uwsgi". If this
+ argument is not given, "eventlet" is tried first, then
+ "gevent_uwsgi", then "gevent", and finally "threading".
+ The first async mode that has all its dependencies
+ installed is the one that is chosen.
+ :param ping_interval: The interval in seconds at which the server pings
+ the client. The default is 25 seconds. For advanced
+ control, a two element tuple can be given, where
+ the first number is the ping interval and the second
+ is a grace period added by the server.
+ :param ping_timeout: The time in seconds that the client waits for the
+ server to respond before disconnecting. The default
+ is 20 seconds.
+ :param max_http_buffer_size: The maximum size that is accepted for incoming
+ messages. The default is 1,000,000 bytes. In
+ spite of its name, the value set in this
+ argument is enforced for HTTP long-polling and
+ WebSocket connections.
+ :param allow_upgrades: Whether to allow transport upgrades or not. The
+ default is ``True``.
+    :param http_compression: Whether to compress packets when using the
+ polling transport. The default is ``True``.
+ :param compression_threshold: Only compress messages when their byte size
+ is greater than this value. The default is
+ 1024 bytes.
+ :param cookie: If set to a string, it is the name of the HTTP cookie the
+                   server sends back to the client containing the client
+ session id. If set to a dictionary, the ``'name'`` key
+ contains the cookie name and other keys define cookie
+ attributes, where the value of each attribute can be a
+ string, a callable with no arguments, or a boolean. If set
+ to ``None`` (the default), a cookie is not sent to the
+ client.
+ :param cors_allowed_origins: Origin or list of origins that are allowed to
+ connect to this server. Only the same origin
+ is allowed by default. Set this argument to
+ ``'*'`` or ``['*']`` to allow all origins, or
+ to ``[]`` to disable CORS handling.
+ :param cors_credentials: Whether credentials (cookies, authentication) are
+ allowed in requests to this server. The default
+ is ``True``.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors are logged even when
+ ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions.
+ :param async_handlers: If set to ``True``, run message event handlers in
+ non-blocking threads. To run handlers synchronously,
+ set to ``False``. The default is ``True``.
+ :param monitor_clients: If set to ``True``, a background task will ensure
+ inactive clients are closed. Set to ``False`` to
+ disable the monitoring task (not recommended). The
+ default is ``True``.
+ :param transports: The list of allowed transports. Valid transports
+ are ``'polling'`` and ``'websocket'``. Defaults to
+ ``['polling', 'websocket']``.
+ :param kwargs: Reserved for future extensions, any additional parameters
+ given as keyword arguments will be silently ignored.
+ """
+ def send(self, sid, data):
+ """Send a message to a client.
+
+ :param sid: The session id of the recipient client.
+ :param data: The data to send to the client. Data can be of type
+ ``str``, ``bytes``, ``list`` or ``dict``. If a ``list``
+ or ``dict``, the data will be serialized as JSON.
+ """
+ self.send_packet(sid, packet.Packet(packet.MESSAGE, data=data))
+
+ def send_packet(self, sid, pkt):
+ """Send a raw packet to a client.
+
+ :param sid: The session id of the recipient client.
+ :param pkt: The packet to send to the client.
+ """
+ try:
+ socket = self._get_socket(sid)
+ except KeyError:
+ # the socket is not available
+ self.logger.warning('Cannot send to sid %s', sid)
+ return
+ socket.send(pkt)
+
+ def get_session(self, sid):
+ """Return the user session for a client.
+
+ :param sid: The session id of the client.
+
+ The return value is a dictionary. Modifications made to this
+ dictionary are not guaranteed to be preserved unless
+ ``save_session()`` is called, or when the ``session`` context manager
+ is used.
+ """
+ socket = self._get_socket(sid)
+ return socket.session
+
+ def save_session(self, sid, session):
+ """Store the user session for a client.
+
+ :param sid: The session id of the client.
+ :param session: The session dictionary.
+ """
+ socket = self._get_socket(sid)
+ socket.session = session
+
+ def session(self, sid):
+ """Return the user session for a client with context manager syntax.
+
+ :param sid: The session id of the client.
+
+ This is a context manager that returns the user session dictionary for
+ the client. Any changes that are made to this dictionary inside the
+ context manager block are saved back to the session. Example usage::
+
+ @eio.on('connect')
+ def on_connect(sid, environ):
+ username = authenticate_user(environ)
+ if not username:
+ return False
+ with eio.session(sid) as session:
+ session['username'] = username
+
+ @eio.on('message')
+ def on_message(sid, msg):
+ with eio.session(sid) as session:
+ print('received message from ', session['username'])
+ """
+ class _session_context_manager:
+ def __init__(self, server, sid):
+ self.server = server
+ self.sid = sid
+ self.session = None
+
+ def __enter__(self):
+ self.session = self.server.get_session(sid)
+ return self.session
+
+ def __exit__(self, *args):
+ self.server.save_session(sid, self.session)
+
+ return _session_context_manager(self, sid)
+
+ def disconnect(self, sid=None):
+ """Disconnect a client.
+
+ :param sid: The session id of the client to close. If this parameter
+ is not given, then all clients are closed.
+ """
+ if sid is not None:
+ try:
+ socket = self._get_socket(sid)
+ except KeyError: # pragma: no cover
+ # the socket was already closed or gone
+ pass
+ else:
+ socket.close(reason=self.reason.SERVER_DISCONNECT)
+ if sid in self.sockets: # pragma: no cover
+ del self.sockets[sid]
+ else:
+ for client in self.sockets.copy().values():
+ client.close(reason=self.reason.SERVER_DISCONNECT)
+ self.sockets = {}
+
+ def handle_request(self, environ, start_response):
+ """Handle an HTTP request from the client.
+
+ This is the entry point of the Engine.IO application, using the same
+ interface as a WSGI application. For the typical usage, this function
+ is invoked by the :class:`Middleware` instance, but it can be invoked
+ directly when the middleware is not used.
+
+ :param environ: The WSGI environment.
+ :param start_response: The WSGI ``start_response`` function.
+
+ This function returns the HTTP response body to deliver to the client
+ as a byte sequence.
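+
+        Example usage (an illustrative sketch of invoking the server
+        directly, without the ``WSGIApp`` middleware)::
+
+            def application(environ, start_response):
+                if environ['PATH_INFO'].startswith('/engine.io/'):
+                    return eio.handle_request(environ, start_response)
+                start_response('404 Not Found',
+                               [('Content-Type', 'text/plain')])
+                return [b'Not Found']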
+ """
+ if self.cors_allowed_origins != []:
+ # Validate the origin header if present
+ # This is important for WebSocket more than for HTTP, since
+ # browsers only apply CORS controls to HTTP.
+ origin = environ.get('HTTP_ORIGIN')
+ if origin:
+ allowed_origins = self._cors_allowed_origins(environ)
+ if allowed_origins is not None and origin not in \
+ allowed_origins:
+ self._log_error_once(
+ origin + ' is not an accepted origin.', 'bad-origin')
+ r = self._bad_request('Not an accepted origin.')
+ start_response(r['status'], r['headers'])
+ return [r['response']]
+
+ method = environ['REQUEST_METHOD']
+ query = urllib.parse.parse_qs(environ.get('QUERY_STRING', ''))
+ jsonp = False
+ jsonp_index = None
+
+ # make sure the client uses an allowed transport
+ transport = query.get('transport', ['polling'])[0]
+ if transport not in self.transports:
+ self._log_error_once('Invalid transport', 'bad-transport')
+ r = self._bad_request('Invalid transport')
+ start_response(r['status'], r['headers'])
+ return [r['response']]
+
+ # make sure the client speaks a compatible Engine.IO version
+ sid = query['sid'][0] if 'sid' in query else None
+ if sid is None and query.get('EIO') != ['4']:
+ self._log_error_once(
+ 'The client is using an unsupported version of the Socket.IO '
+ 'or Engine.IO protocols', 'bad-version')
+ r = self._bad_request(
+ 'The client is using an unsupported version of the Socket.IO '
+ 'or Engine.IO protocols')
+ start_response(r['status'], r['headers'])
+ return [r['response']]
+
+ if 'j' in query:
+ jsonp = True
+ try:
+ jsonp_index = int(query['j'][0])
+ except (ValueError, KeyError, IndexError):
+ # Invalid JSONP index number
+ pass
+
+ if jsonp and jsonp_index is None:
+ self._log_error_once('Invalid JSONP index number',
+ 'bad-jsonp-index')
+ r = self._bad_request('Invalid JSONP index number')
+ elif method == 'GET':
+ upgrade_header = environ.get('HTTP_UPGRADE').lower() \
+ if 'HTTP_UPGRADE' in environ else None
+ if sid is None:
+ # transport must be one of 'polling' or 'websocket'.
+ # if 'websocket', the HTTP_UPGRADE header must match.
+ if transport == 'polling' \
+ or transport == upgrade_header == 'websocket':
+ r = self._handle_connect(environ, start_response,
+ transport, jsonp_index)
+ else:
+ self._log_error_once('Invalid websocket upgrade',
+ 'bad-upgrade')
+ r = self._bad_request('Invalid websocket upgrade')
+ else:
+ if sid not in self.sockets:
+ self._log_error_once(f'Invalid session {sid}', 'bad-sid')
+ r = self._bad_request(f'Invalid session {sid}')
+ else:
+ try:
+ socket = self._get_socket(sid)
+ except KeyError as e: # pragma: no cover
+ self._log_error_once(f'{e} {sid}', 'bad-sid')
+ r = self._bad_request(f'{e} {sid}')
+ else:
+ if self.transport(sid) != transport and \
+ transport != upgrade_header:
+ self._log_error_once(
+ f'Invalid transport for session {sid}',
+ 'bad-transport')
+ r = self._bad_request('Invalid transport')
+ else:
+ try:
+ packets = socket.handle_get_request(
+ environ, start_response)
+ if isinstance(packets, list):
+ r = self._ok(packets,
+ jsonp_index=jsonp_index)
+ else:
+ r = packets
+ except exceptions.EngineIOError:
+ if sid in self.sockets: # pragma: no cover
+ self.disconnect(sid)
+ r = self._bad_request()
+ if sid in self.sockets and \
+ self.sockets[sid].closed:
+ del self.sockets[sid]
+ elif method == 'POST':
+ if sid is None or sid not in self.sockets:
+ self._log_error_once(f'Invalid session {sid}', 'bad-sid')
+ r = self._bad_request(f'Invalid session {sid}')
+ else:
+ socket = self._get_socket(sid)
+ try:
+ socket.handle_post_request(environ)
+ r = self._ok(jsonp_index=jsonp_index)
+ except exceptions.EngineIOError:
+ if sid in self.sockets: # pragma: no cover
+ self.disconnect(sid)
+ r = self._bad_request()
+ except: # pragma: no cover
+ # for any other unexpected errors, we log the error
+ # and keep going
+ self.logger.exception('post request handler error')
+ r = self._ok(jsonp_index=jsonp_index)
+ elif method == 'OPTIONS':
+ r = self._ok()
+ else:
+ self.logger.warning('Method %s not supported', method)
+ r = self._method_not_found()
+
+ if not isinstance(r, dict):
+ return r
+ if self.http_compression and \
+ len(r['response']) >= self.compression_threshold:
+ encodings = [e.split(';')[0].strip() for e in
+ environ.get('HTTP_ACCEPT_ENCODING', '').split(',')]
+ for encoding in encodings:
+ if encoding in self.compression_methods:
+ r['response'] = \
+ getattr(self, '_' + encoding)(r['response'])
+ r['headers'] += [('Content-Encoding', encoding)]
+ break
+ cors_headers = self._cors_headers(environ)
+ start_response(r['status'], r['headers'] + cors_headers)
+ return [r['response']]
+
+ def shutdown(self):
+ """Stop Socket.IO background tasks.
+
+ This method stops background activity initiated by the Socket.IO
+ server. It must be called before shutting down the web server.
+ """
+ self.logger.info('Socket.IO is shutting down')
+ if self.service_task_event: # pragma: no cover
+ self.service_task_event.set()
+ self.service_task_handle.join()
+ self.service_task_handle = None
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+ This function returns an object that represents the background task,
+        on which the ``join()`` method can be invoked to wait for the task to
+ complete.
+ """
+ th = self._async['thread'](target=target, args=args, kwargs=kwargs)
+ th.start()
+ return th # pragma: no cover
+
+ def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+ """
+ return self._async['sleep'](seconds)
+
+ def _handle_connect(self, environ, start_response, transport,
+ jsonp_index=None):
+ """Handle a client connection request."""
+ if self.start_service_task:
+ # start the service task to monitor connected clients
+ self.start_service_task = False
+ self.service_task_handle = self.start_background_task(
+ self._service_task)
+
+ sid = self.generate_id()
+ s = socket.Socket(self, sid)
+ self.sockets[sid] = s
+
+ pkt = packet.Packet(packet.OPEN, {
+ 'sid': sid,
+ 'upgrades': self._upgrades(sid, transport),
+ 'pingTimeout': int(self.ping_timeout * 1000),
+ 'pingInterval': int(
+ self.ping_interval + self.ping_interval_grace_period) * 1000,
+ 'maxPayload': self.max_http_buffer_size,
+ })
+ s.send(pkt)
+ s.schedule_ping()
+
+ # NOTE: some sections below are marked as "no cover" to workaround
+ # what seems to be a bug in the coverage package. All the lines below
+ # are covered by tests, but some are not reported as such for some
+ # reason
+ ret = self._trigger_event('connect', sid, environ, run_async=False)
+ if ret is not None and ret is not True: # pragma: no cover
+ del self.sockets[sid]
+ self.logger.warning('Application rejected connection')
+ return self._unauthorized(ret or None)
+
+ if transport == 'websocket': # pragma: no cover
+ ret = s.handle_get_request(environ, start_response)
+ if s.closed and sid in self.sockets:
+ # websocket connection ended, so we are done
+ del self.sockets[sid]
+ return ret
+ else: # pragma: no cover
+ s.connected = True
+ headers = None
+ if self.cookie:
+ if isinstance(self.cookie, dict):
+ headers = [(
+ 'Set-Cookie',
+ self._generate_sid_cookie(sid, self.cookie)
+ )]
+ else:
+ headers = [(
+ 'Set-Cookie',
+ self._generate_sid_cookie(sid, {
+ 'name': self.cookie, 'path': '/', 'SameSite': 'Lax'
+ })
+ )]
+ try:
+ return self._ok(s.poll(), headers=headers,
+ jsonp_index=jsonp_index)
+ except exceptions.QueueEmpty:
+ return self._bad_request()
+
+ def _trigger_event(self, event, *args, **kwargs):
+ """Invoke an event handler."""
+ run_async = kwargs.pop('run_async', False)
+ if event in self.handlers:
+ def run_handler():
+ try:
+ try:
+ return self.handlers[event](*args)
+ except TypeError:
+ if event == 'disconnect' and \
+ len(args) == 2: # pragma: no branch
+ # legacy disconnect events do not have a reason
+ # argument
+ return self.handlers[event](args[0])
+ else: # pragma: no cover
+ raise
+ except:
+ self.logger.exception(event + ' handler error')
+ if event == 'connect':
+ # if connect handler raised error we reject the
+ # connection
+ return False
+
+ if run_async:
+ return self.start_background_task(run_handler)
+ else:
+ return run_handler()
+
+ def _service_task(self): # pragma: no cover
+ """Monitor connected clients and clean up those that time out."""
+ self.service_task_event = self.create_event()
+ while not self.service_task_event.is_set():
+ if len(self.sockets) == 0:
+ # nothing to do
+ if self.service_task_event.wait(timeout=self.ping_timeout):
+ break
+ continue
+
+ # go through the entire client list in a ping interval cycle
+ sleep_interval = float(self.ping_timeout) / len(self.sockets)
+
+ try:
+ # iterate over the current clients
+ for s in self.sockets.copy().values():
+ if s.closed:
+ try:
+ del self.sockets[s.sid]
+ except KeyError:
+ # the socket could have also been removed by
+ # the _get_socket() method from another thread
+ pass
+ elif not s.closing:
+ s.check_ping_timeout()
+ if self.service_task_event.wait(timeout=sleep_interval):
+ raise KeyboardInterrupt()
+ except (SystemExit, KeyboardInterrupt):
+ self.logger.info('service task canceled')
+ break
+ except:
+ # an unexpected exception has occurred, log it and continue
+ self.logger.exception('service task exception')
diff --git a/tapdown/lib/python3.11/site-packages/engineio/socket.py b/tapdown/lib/python3.11/site-packages/engineio/socket.py
new file mode 100644
index 0000000..26bb94b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/socket.py
@@ -0,0 +1,256 @@
+import sys
+import time
+
+from . import base_socket
+from . import exceptions
+from . import packet
+from . import payload
+
+
+class Socket(base_socket.BaseSocket):
+ """An Engine.IO socket."""
+ def poll(self):
+ """Wait for packets to send to the client."""
+ queue_empty = self.server.get_queue_empty_exception()
+ try:
+ packets = [self.queue.get(
+ timeout=self.server.ping_interval + self.server.ping_timeout)]
+ self.queue.task_done()
+ except queue_empty:
+ raise exceptions.QueueEmpty()
+ if packets == [None]:
+ return []
+ while True:
+ try:
+ pkt = self.queue.get(block=False)
+ self.queue.task_done()
+ if pkt is None:
+ self.queue.put(None)
+ break
+ packets.append(pkt)
+ except queue_empty:
+ break
+ return packets
+
+ def receive(self, pkt):
+ """Receive packet from the client."""
+ packet_name = packet.packet_names[pkt.packet_type] \
+ if pkt.packet_type < len(packet.packet_names) else 'UNKNOWN'
+ self.server.logger.info('%s: Received packet %s data %s',
+ self.sid, packet_name,
+ pkt.data if not isinstance(pkt.data, bytes)
+ else '')
+ if pkt.packet_type == packet.PONG:
+ self.schedule_ping()
+ elif pkt.packet_type == packet.MESSAGE:
+ self.server._trigger_event('message', self.sid, pkt.data,
+ run_async=self.server.async_handlers)
+ elif pkt.packet_type == packet.UPGRADE:
+ self.send(packet.Packet(packet.NOOP))
+ elif pkt.packet_type == packet.CLOSE:
+ self.close(wait=False, abort=True,
+ reason=self.server.reason.CLIENT_DISCONNECT)
+ else:
+ raise exceptions.UnknownPacketError()
+
+ def check_ping_timeout(self):
+ """Make sure the client is still responding to pings."""
+ if self.closed:
+ raise exceptions.SocketIsClosedError()
+ if self.last_ping and \
+ time.time() - self.last_ping > self.server.ping_timeout:
+ self.server.logger.info('%s: Client is gone, closing socket',
+ self.sid)
+            # Passing abort=False here causes close() to write a CLOSE
+            # packet, which updates half-open sockets to their correct
+            # disconnected state.
+ self.close(wait=False, abort=False,
+ reason=self.server.reason.PING_TIMEOUT)
+ return False
+ return True
+
+ def send(self, pkt):
+ """Send a packet to the client."""
+ if not self.check_ping_timeout():
+ return
+ else:
+ self.queue.put(pkt)
+ self.server.logger.info('%s: Sending packet %s data %s',
+ self.sid, packet.packet_names[pkt.packet_type],
+ pkt.data if not isinstance(pkt.data, bytes)
+ else '')
+
+ def handle_get_request(self, environ, start_response):
+ """Handle a long-polling GET request from the client."""
+ connections = [
+ s.strip()
+ for s in environ.get('HTTP_CONNECTION', '').lower().split(',')]
+ transport = environ.get('HTTP_UPGRADE', '').lower()
+ if 'upgrade' in connections and transport in self.upgrade_protocols:
+ self.server.logger.info('%s: Received request to upgrade to %s',
+ self.sid, transport)
+ return getattr(self, '_upgrade_' + transport)(environ,
+ start_response)
+ if self.upgrading or self.upgraded:
+ # we are upgrading to WebSocket, do not return any more packets
+ # through the polling endpoint
+ return [packet.Packet(packet.NOOP)]
+ try:
+ packets = self.poll()
+ except exceptions.QueueEmpty:
+ exc = sys.exc_info()
+ self.close(wait=False, reason=self.server.reason.TRANSPORT_ERROR)
+ raise exc[1].with_traceback(exc[2])
+ return packets
+
+ def handle_post_request(self, environ):
+ """Handle a long-polling POST request from the client."""
+ length = int(environ.get('CONTENT_LENGTH', '0'))
+ if length > self.server.max_http_buffer_size:
+ raise exceptions.ContentTooLongError()
+ else:
+ body = environ['wsgi.input'].read(length).decode('utf-8')
+ p = payload.Payload(encoded_payload=body)
+ for pkt in p.packets:
+ self.receive(pkt)
+
+ def close(self, wait=True, abort=False, reason=None):
+ """Close the socket connection."""
+ if not self.closed and not self.closing:
+ self.closing = True
+ self.server._trigger_event(
+ 'disconnect', self.sid,
+ reason or self.server.reason.SERVER_DISCONNECT,
+ run_async=False)
+ if not abort:
+ self.send(packet.Packet(packet.CLOSE))
+ self.closed = True
+ self.queue.put(None)
+ if wait:
+ self.queue.join()
+
+ def schedule_ping(self):
+ self.server.start_background_task(self._send_ping)
+
+ def _send_ping(self):
+ self.last_ping = None
+ self.server.sleep(self.server.ping_interval)
+ if not self.closing and not self.closed:
+ self.last_ping = time.time()
+ self.send(packet.Packet(packet.PING))
+
+ def _upgrade_websocket(self, environ, start_response):
+ """Upgrade the connection from polling to websocket."""
+ if self.upgraded:
+ raise OSError('Socket has been upgraded already')
+ if self.server._async['websocket'] is None:
+ # the selected async mode does not support websocket
+ return self.server._bad_request()
+ ws = self.server._async['websocket'](
+ self._websocket_handler, self.server)
+ return ws(environ, start_response)
+
+ def _websocket_handler(self, ws):
+ """Engine.IO handler for websocket transport."""
+ def websocket_wait():
+ data = ws.wait()
+ if data and len(data) > self.server.max_http_buffer_size:
+ raise ValueError('packet is too large')
+ return data
+
+ # try to set a socket timeout matching the configured ping interval
+ # and timeout
+ for attr in ['_sock', 'socket']: # pragma: no cover
+ if hasattr(ws, attr) and hasattr(getattr(ws, attr), 'settimeout'):
+ getattr(ws, attr).settimeout(
+ self.server.ping_interval + self.server.ping_timeout)
+
+ if self.connected:
+ # the socket was already connected, so this is an upgrade
+ self.upgrading = True # hold packet sends during the upgrade
+
+ pkt = websocket_wait()
+ decoded_pkt = packet.Packet(encoded_packet=pkt)
+ if decoded_pkt.packet_type != packet.PING or \
+ decoded_pkt.data != 'probe':
+ self.server.logger.info(
+ '%s: Failed websocket upgrade, no PING packet', self.sid)
+ self.upgrading = False
+ return []
+ ws.send(packet.Packet(packet.PONG, data='probe').encode())
+ self.queue.put(packet.Packet(packet.NOOP)) # end poll
+
+ pkt = websocket_wait()
+ decoded_pkt = packet.Packet(encoded_packet=pkt)
+ if decoded_pkt.packet_type != packet.UPGRADE:
+ self.upgraded = False
+ self.server.logger.info(
+ ('%s: Failed websocket upgrade, expected UPGRADE packet, '
+ 'received %s instead.'),
+ self.sid, pkt)
+ self.upgrading = False
+ return []
+ self.upgraded = True
+ self.upgrading = False
+ else:
+ self.connected = True
+ self.upgraded = True
+
+ # start separate writer thread
+ def writer():
+ while True:
+ packets = None
+ try:
+ packets = self.poll()
+ except exceptions.QueueEmpty:
+ break
+ if not packets:
+ # empty packet list returned -> connection closed
+ break
+ try:
+ for pkt in packets:
+ ws.send(pkt.encode())
+ except:
+ break
+ ws.close()
+
+ writer_task = self.server.start_background_task(writer)
+
+ self.server.logger.info(
+ '%s: Upgrade to websocket successful', self.sid)
+
+ while True:
+ p = None
+ try:
+ p = websocket_wait()
+ except Exception as e:
+ # if the socket is already closed, we can assume this is a
+ # downstream error of that
+ if not self.closed: # pragma: no cover
+ self.server.logger.info(
+ '%s: Unexpected error "%s", closing connection',
+ self.sid, str(e))
+ break
+ if p is None:
+ # connection closed by client
+ break
+ pkt = packet.Packet(encoded_packet=p)
+ try:
+ self.receive(pkt)
+ except exceptions.UnknownPacketError: # pragma: no cover
+ pass
+ except exceptions.SocketIsClosedError: # pragma: no cover
+ self.server.logger.info('Receive error -- socket is closed')
+ break
+ except: # pragma: no cover
+ # if we get an unexpected exception we log the error and exit
+ # the connection properly
+ self.server.logger.exception('Unknown receive error')
+ break
+
+ self.queue.put(None) # unlock the writer task so that it can exit
+ writer_task.join()
+ self.close(wait=False, abort=True,
+ reason=self.server.reason.TRANSPORT_CLOSE)
+
+ return []
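+
+# Upgrade handshake summary (derived from _websocket_handler above): the
+# client opens a websocket and sends a PING packet carrying "probe"; the
+# server replies with PONG "probe" and queues a NOOP to end the long-poll;
+# the client then sends UPGRADE, after which all traffic moves to the
+# websocket reader loop and writer task.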
diff --git a/tapdown/lib/python3.11/site-packages/engineio/static_files.py b/tapdown/lib/python3.11/site-packages/engineio/static_files.py
new file mode 100644
index 0000000..77c8915
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/engineio/static_files.py
@@ -0,0 +1,60 @@
+content_types = {
+ 'css': 'text/css',
+ 'gif': 'image/gif',
+ 'html': 'text/html',
+ 'jpg': 'image/jpeg',
+ 'js': 'application/javascript',
+ 'json': 'application/json',
+ 'png': 'image/png',
+ 'txt': 'text/plain',
+}
+
+
+def get_static_file(path, static_files):
+ """Return the local filename and content type for the requested static
+ file URL.
+
+ :param path: the path portion of the requested URL.
+ :param static_files: a static file configuration dictionary.
+
+ This function returns a dictionary with two keys, "filename" and
+ "content_type". If the requested URL does not match any static file, the
+ return value is None.
+ """
+ extra_path = ''
+ if path in static_files:
+ f = static_files[path]
+ else:
+ f = None
+ while path != '':
+ path, last = path.rsplit('/', 1)
+ extra_path = '/' + last + extra_path
+ if path in static_files:
+ f = static_files[path]
+ break
+ elif path + '/' in static_files:
+ f = static_files[path + '/']
+ break
+ if f:
+ if isinstance(f, str):
+ f = {'filename': f}
+ else:
+ f = f.copy() # in case it is mutated below
+ if f['filename'].endswith('/') and extra_path.startswith('/'):
+ extra_path = extra_path[1:]
+ f['filename'] += extra_path
+ if f['filename'].endswith('/'):
+ if '' in static_files:
+ if isinstance(static_files[''], str):
+ f['filename'] += static_files['']
+ else:
+ f['filename'] += static_files['']['filename']
+ if 'content_type' in static_files['']:
+ f['content_type'] = static_files['']['content_type']
+ else:
+ f['filename'] += 'index.html'
+ if 'content_type' not in f:
+ ext = f['filename'].rsplit('.')[-1]
+ f['content_type'] = content_types.get(
+ ext, 'application/octet-stream')
+ return f
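+
+
+# Illustrative sketch (not part of the engineio API; paths are hypothetical):
+# a static file configuration and the lookups it enables.
+#
+#     static_files = {
+#         '/': 'latency.html',             # exact match
+#         '/static': '/var/www/static',    # prefix match, remainder appended
+#         '': {'filename': 'index.html',   # default for directory requests
+#              'content_type': 'text/html'},
+#     }
+#     get_static_file('/static/js/app.js', static_files)
+#     # -> {'filename': '/var/www/static/js/app.js',
+#     #     'content_type': 'application/javascript'}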
diff --git a/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/INSTALLER b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/METADATA b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/METADATA
new file mode 100644
index 0000000..25ce6f6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/METADATA
@@ -0,0 +1,129 @@
+Metadata-Version: 2.4
+Name: eventlet
+Version: 0.40.3
+Summary: Highly concurrent networking library
+Project-URL: Homepage, https://github.com/eventlet/eventlet
+Project-URL: History, https://github.com/eventlet/eventlet/blob/master/NEWS
+Project-URL: Tracker, https://github.com/eventlet/eventlet/issues
+Project-URL: Source, https://github.com/eventlet/eventlet
+Project-URL: Documentation, https://eventlet.readthedocs.io/
+Author-email: Sergey Shepelev , Jakub Stasiak , Tim Burke , Nat Goodspeed , Itamar Turner-Trauring , Hervé Beraud
+License: MIT
+License-File: AUTHORS
+License-File: LICENSE
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.9
+Requires-Dist: dnspython>=1.15.0
+Requires-Dist: greenlet>=1.0
+Provides-Extra: dev
+Requires-Dist: black; extra == 'dev'
+Requires-Dist: build; extra == 'dev'
+Requires-Dist: commitizen; extra == 'dev'
+Requires-Dist: isort; extra == 'dev'
+Requires-Dist: pip-tools; extra == 'dev'
+Requires-Dist: pre-commit; extra == 'dev'
+Requires-Dist: twine; extra == 'dev'
+Description-Content-Type: text/x-rst
+
+Warning
+=======
+
+**New usages of eventlet are now heavily discouraged! Please read the
+following.**
+
+Eventlet was created almost 18 years ago, at a time when async
+features were absent from the CPython stdlib. Both eventlet and CPython
+have evolved since then, but in recent years eventlet's maintenance
+activity has decreased, leading to a growing gap between eventlet and
+the CPython implementation.
+
+This gap is now too wide and can lead to unexpected side effects and
+bugs in your applications.
+
+Eventlet now follows a new maintenance policy. **Only maintenance for
+stability and bug fixing** will be provided. **No new features will be
+accepted**, except those related to the asyncio migration. **Use in new
+projects is discouraged**. **Our goal is to plan the retirement of
+eventlet** and to give you ways to move away from it.
+
+If you are looking for a library to manage async network programming,
+and you do not yet use eventlet, we encourage you to use `asyncio`_,
+the official async library of the CPython stdlib.
+
+If you already use eventlet, we hope to enable migration to asyncio for some use
+cases; see `Migrating off of Eventlet`_. Only new features related to the migration
+solution will be accepted.
+
+If you have questions about the maintenance goals or the migration,
+do not hesitate to `open a new issue`_; we will be happy to answer
+them.
+
+.. _asyncio: https://docs.python.org/3/library/asyncio.html
+.. _open a new issue: https://github.com/eventlet/eventlet/issues/new
+.. _Migrating off of Eventlet: https://eventlet.readthedocs.io/en/latest/asyncio/migration.html#migration-guide
+
+Eventlet
+========
+
+.. image:: https://img.shields.io/pypi/v/eventlet
+ :target: https://pypi.org/project/eventlet/
+
+.. image:: https://img.shields.io/github/actions/workflow/status/eventlet/eventlet/test.yaml?branch=master
+ :target: https://github.com/eventlet/eventlet/actions?query=workflow%3Atest+branch%3Amaster
+
+.. image:: https://codecov.io/gh/eventlet/eventlet/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/eventlet/eventlet
+
+
+Eventlet is a concurrent networking library for Python that allows you to change how you run your code, not how you write it.
+
+It uses epoll or libevent for highly scalable non-blocking I/O. Coroutines ensure that the developer uses a blocking style of programming that is similar to threading, but provide the benefits of non-blocking I/O. The event dispatch is implicit, which means you can easily use Eventlet from the Python interpreter, or as a small part of a larger application.
+
+It's easy to get started using Eventlet, and easy to convert existing
+applications to use it. Start off by looking at the `examples`_,
+`common design patterns`_, and the list of `basic API primitives`_.
+
+.. _examples: https://eventlet.readthedocs.io/en/latest/examples.html
+.. _common design patterns: https://eventlet.readthedocs.io/en/latest/design_patterns.html
+.. _basic API primitives: https://eventlet.readthedocs.io/en/latest/basic_usage.html
+
+
+Getting Eventlet
+================
+
+The easiest way to get Eventlet is to use pip::
+
+ pip install -U eventlet
+
+To install the latest development version once::
+
+ pip install -U https://github.com/eventlet/eventlet/archive/master.zip
+
+
+Building the Docs Locally
+=========================
+
+To build a complete set of HTML documentation::
+
+ tox -e docs
+
+The built html files can be found in doc/build/html afterward.
+
+Supported Python versions
+=========================
+
+Python 3.9-3.13 are currently supported.
diff --git a/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/RECORD b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/RECORD
new file mode 100644
index 0000000..edeadb0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/RECORD
@@ -0,0 +1,199 @@
+eventlet-0.40.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+eventlet-0.40.3.dist-info/METADATA,sha256=z8Yz4D_aLs7c0vFY7lMiBNWjRZ6QAhG6Q7vdOJHUa0c,5404
+eventlet-0.40.3.dist-info/RECORD,,
+eventlet-0.40.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+eventlet-0.40.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+eventlet-0.40.3.dist-info/licenses/AUTHORS,sha256=v3feCO6nQpkhl0T4SMRigKJJk8w4LEOmWY71Je9gvhg,6267
+eventlet-0.40.3.dist-info/licenses/LICENSE,sha256=vOygSX96gUdRFr_0E4cz-yAGC2sitnHmV7YVioYGVuI,1254
+eventlet/__init__.py,sha256=MxZDsg2iH6ceyMSGifwXnLT9QHhhbHJi8Tr2ukxcPMc,2668
+eventlet/__pycache__/__init__.cpython-311.pyc,,
+eventlet/__pycache__/_version.cpython-311.pyc,,
+eventlet/__pycache__/asyncio.cpython-311.pyc,,
+eventlet/__pycache__/backdoor.cpython-311.pyc,,
+eventlet/__pycache__/convenience.cpython-311.pyc,,
+eventlet/__pycache__/corolocal.cpython-311.pyc,,
+eventlet/__pycache__/coros.cpython-311.pyc,,
+eventlet/__pycache__/dagpool.cpython-311.pyc,,
+eventlet/__pycache__/db_pool.cpython-311.pyc,,
+eventlet/__pycache__/debug.cpython-311.pyc,,
+eventlet/__pycache__/event.cpython-311.pyc,,
+eventlet/__pycache__/greenpool.cpython-311.pyc,,
+eventlet/__pycache__/greenthread.cpython-311.pyc,,
+eventlet/__pycache__/lock.cpython-311.pyc,,
+eventlet/__pycache__/patcher.cpython-311.pyc,,
+eventlet/__pycache__/pools.cpython-311.pyc,,
+eventlet/__pycache__/queue.cpython-311.pyc,,
+eventlet/__pycache__/semaphore.cpython-311.pyc,,
+eventlet/__pycache__/timeout.cpython-311.pyc,,
+eventlet/__pycache__/tpool.cpython-311.pyc,,
+eventlet/__pycache__/websocket.cpython-311.pyc,,
+eventlet/__pycache__/wsgi.cpython-311.pyc,,
+eventlet/_version.py,sha256=w48bRxDhf2PRIf2hVYQ83HF60QXVWaxag-pofg_I6WE,706
+eventlet/asyncio.py,sha256=X-eMizlIBJ7z1nQqkZVPQynBgBiYmeIQxqnShe-P4v0,1723
+eventlet/backdoor.py,sha256=Rl0YQMNGRh6Htn5RlcrvgNDyGZ_X8B4rRsqkne0kOFA,4043
+eventlet/convenience.py,sha256=dF_ntllWDM09s-y2hoo987ijEVUK80AEqkto-3FN5aY,7158
+eventlet/corolocal.py,sha256=FbStAfAkBixRiFJaJb8On3RbaXEVx0f25BsFL9AyKTg,1733
+eventlet/coros.py,sha256=0wub8j1GlVX19driNRwzsDeBhINWXHqOBKb0PEqVJ2s,2030
+eventlet/dagpool.py,sha256=SHtsmYkvvo1hVcEejfJYVVQ7mS8lSnR5opAHBwOCX_U,26180
+eventlet/db_pool.py,sha256=fucoCrf2cqGc-uL5IYrQJYAznj61DDWatmY2OMNCMbY,15514
+eventlet/debug.py,sha256=ZKY0yy2GQF6eFVcaXo0bWog1TJ_UcomCgoEjzO3dy-c,8393
+eventlet/event.py,sha256=SmfhkdHozkG2TkKrob-r3lPfSYKKgnmYtRMJxjXW35M,7496
+eventlet/green/BaseHTTPServer.py,sha256=kAwWSvHTKqm-Y-5dtGAVXY84kMFSfeBcT7ucwKx8MXg,302
+eventlet/green/CGIHTTPServer.py,sha256=g6IUEF1p4q7kpAaKVhsqo0L1f8acl_X-_gX0ynP4Y50,466
+eventlet/green/MySQLdb.py,sha256=sTanY41h3vqnh6tum-wYucOgkFqHJBIthtsOjA_qbLw,1196
+eventlet/green/OpenSSL/SSL.py,sha256=1hFS2eB30LGZDgbLTrCMH7htDbRreBVLtXgNmiJ50tk,4534
+eventlet/green/OpenSSL/__init__.py,sha256=h3kX23byJXMSl1rEhBf1oPo5D9LLqmXjWngXmaHpON0,246
+eventlet/green/OpenSSL/__pycache__/SSL.cpython-311.pyc,,
+eventlet/green/OpenSSL/__pycache__/__init__.cpython-311.pyc,,
+eventlet/green/OpenSSL/__pycache__/crypto.cpython-311.pyc,,
+eventlet/green/OpenSSL/__pycache__/tsafe.cpython-311.pyc,,
+eventlet/green/OpenSSL/__pycache__/version.cpython-311.pyc,,
+eventlet/green/OpenSSL/crypto.py,sha256=dcnjSGP6K274eAxalZEOttUZ1djAStBnbRH-wGBSJu4,29
+eventlet/green/OpenSSL/tsafe.py,sha256=DuY1rHdT2R0tiJkD13ECj-IU7_v-zQKjhTsK6CG8UEM,28
+eventlet/green/OpenSSL/version.py,sha256=3Ti2k01zP3lM6r0YuLbLS_QReJBEHaTJt5k0dNdXtI4,49
+eventlet/green/Queue.py,sha256=CsIn5cEJtbge-kTLw2xSFzjNkq5udUY1vyVrf5AS9WM,789
+eventlet/green/SimpleHTTPServer.py,sha256=O8A3gRYO48q3jVxIslyyaLYgjvTJqiHtGAJZPydEZRs,232
+eventlet/green/SocketServer.py,sha256=w1Ge_Zhp-Dm2hG2t06GscLgd7gXZyCg55e45kba28yY,323
+eventlet/green/__init__.py,sha256=upnrKC57DQQBDNvpxXf_IhDapQ6NtEt2hgxIs1pZDao,84
+eventlet/green/__pycache__/BaseHTTPServer.cpython-311.pyc,,
+eventlet/green/__pycache__/CGIHTTPServer.cpython-311.pyc,,
+eventlet/green/__pycache__/MySQLdb.cpython-311.pyc,,
+eventlet/green/__pycache__/Queue.cpython-311.pyc,,
+eventlet/green/__pycache__/SimpleHTTPServer.cpython-311.pyc,,
+eventlet/green/__pycache__/SocketServer.cpython-311.pyc,,
+eventlet/green/__pycache__/__init__.cpython-311.pyc,,
+eventlet/green/__pycache__/_socket_nodns.cpython-311.pyc,,
+eventlet/green/__pycache__/asynchat.cpython-311.pyc,,
+eventlet/green/__pycache__/asyncore.cpython-311.pyc,,
+eventlet/green/__pycache__/builtin.cpython-311.pyc,,
+eventlet/green/__pycache__/ftplib.cpython-311.pyc,,
+eventlet/green/__pycache__/httplib.cpython-311.pyc,,
+eventlet/green/__pycache__/os.cpython-311.pyc,,
+eventlet/green/__pycache__/profile.cpython-311.pyc,,
+eventlet/green/__pycache__/select.cpython-311.pyc,,
+eventlet/green/__pycache__/selectors.cpython-311.pyc,,
+eventlet/green/__pycache__/socket.cpython-311.pyc,,
+eventlet/green/__pycache__/ssl.cpython-311.pyc,,
+eventlet/green/__pycache__/subprocess.cpython-311.pyc,,
+eventlet/green/__pycache__/thread.cpython-311.pyc,,
+eventlet/green/__pycache__/threading.cpython-311.pyc,,
+eventlet/green/__pycache__/time.cpython-311.pyc,,
+eventlet/green/__pycache__/urllib2.cpython-311.pyc,,
+eventlet/green/__pycache__/zmq.cpython-311.pyc,,
+eventlet/green/_socket_nodns.py,sha256=Oc-5EYs3AST-0HH4Hpi24t2tLp_CrzRX3jDFHN_rPH4,795
+eventlet/green/asynchat.py,sha256=IxG7yS4UNv2z8xkbtlnyGrAGpaXIjYGpyxtXjmcgWrI,291
+eventlet/green/asyncore.py,sha256=aKGWNcWSKUJhWS5fC5i9SrcIWyPuHQxaQKks8yw_m50,345
+eventlet/green/builtin.py,sha256=eLrJZgTDwhIFN-Sor8jWjm-D-OLqQ69GDqvjIZHK9As,1013
+eventlet/green/ftplib.py,sha256=d23VMcAPqw7ZILheDJmueM8qOlWHnq0WFjjSgWouRdA,307
+eventlet/green/http/__init__.py,sha256=X0DA5WqAuctSblh2tBviwW5ob1vnVcW6uiT9INsH_1o,8738
+eventlet/green/http/__pycache__/__init__.cpython-311.pyc,,
+eventlet/green/http/__pycache__/client.cpython-311.pyc,,
+eventlet/green/http/__pycache__/cookiejar.cpython-311.pyc,,
+eventlet/green/http/__pycache__/cookies.cpython-311.pyc,,
+eventlet/green/http/__pycache__/server.cpython-311.pyc,,
+eventlet/green/http/client.py,sha256=9aa0jGR4KUd6B-sUrtOKEDQ4tYM8Xr9YBwxkT68obss,59137
+eventlet/green/http/cookiejar.py,sha256=3fB9nFaHOriwgAhASKotuoksOxbKnfGo3N69wiQYzjo,79435
+eventlet/green/http/cookies.py,sha256=2XAyogPiyysieelxS7KjOzXQHAXezQmAiEKesh3L4MQ,24189
+eventlet/green/http/server.py,sha256=jHfdMtiF8_WQHahLCEspBHpm2cCm7wmBKbBRByn7vQs,46596
+eventlet/green/httplib.py,sha256=T9_QVRLiJVBQlVexvnYvf4PXYAZdjclwLzqoX1fbJ38,390
+eventlet/green/os.py,sha256=UAlVogW-ZO2ha5ftCs199RtSz3MV3pgTQB_R_VVTb9Q,3774
+eventlet/green/profile.py,sha256=D7ij2c7MVLqXbjXoZtqTkVFP7bMspmNEr34XYYw8tfM,9514
+eventlet/green/select.py,sha256=wgmGGfUQYg8X8Ov6ayRAikt6v3o-uPL-wPARk-ihqhE,2743
+eventlet/green/selectors.py,sha256=C_aeln-t0FsMG2WosmkIBhGst0KfKglcaJG8U50pxQM,948
+eventlet/green/socket.py,sha256=np5_HqSjA4_y_kYKdSFyHQN0vjzLW_qi_oLFH8bB0T0,1918
+eventlet/green/ssl.py,sha256=BU4mKN5sBnyp6gb7AhCgTYWtl2N9as1ANt9PFFfx94M,19417
+eventlet/green/subprocess.py,sha256=Y7UX-_D-L6LIzM6NNwKyBn1sgcfsOUr8e0Lka26367s,5575
+eventlet/green/thread.py,sha256=QvqpW7sVlCTm4clZoSO4Q_leqLK-sUYkWZ1V7WWmy8U,4964
+eventlet/green/threading.py,sha256=m0XSuVJU-jOcGeJAAqsujznCLVprXr6EbzTlrPv3p6Q,3903
+eventlet/green/time.py,sha256=1W7BKbGrfTI1v2-pDnBvzBn01tbQ8zwyqz458BFrjt0,240
+eventlet/green/urllib/__init__.py,sha256=hjlirvvvuVKMnugnX9PVW6-9zy6E_q85hqvXunAjpqU,164
+eventlet/green/urllib/__pycache__/__init__.cpython-311.pyc,,
+eventlet/green/urllib/__pycache__/error.cpython-311.pyc,,
+eventlet/green/urllib/__pycache__/parse.cpython-311.pyc,,
+eventlet/green/urllib/__pycache__/request.cpython-311.pyc,,
+eventlet/green/urllib/__pycache__/response.cpython-311.pyc,,
+eventlet/green/urllib/error.py,sha256=xlpHJIa8U4QTFolAa3NEy5gEVj_nM3oF2bB-FvdhCQg,157
+eventlet/green/urllib/parse.py,sha256=uJ1R4rbgqlQgINjKm_-oTxveLvCR9anu7U0i7aRS87k,83
+eventlet/green/urllib/request.py,sha256=Z4VR5X776Po-DlOqcA46-T51avbtepo20SMQGkac--M,1611
+eventlet/green/urllib/response.py,sha256=ytsGn0pXE94tlZh75hl9X1cFGagjGNBWm6k_PRXOBmM,86
+eventlet/green/urllib2.py,sha256=Su3dEhDc8VsKK9PqhIXwgFVOOHVI37TTXU_beqzvg44,488
+eventlet/green/zmq.py,sha256=xd88Ao4zuq-a6g8RV6_GLOPgZGC9w6OtQeKJ7AhgY4k,18018
+eventlet/greenio/__init__.py,sha256=d6_QQqaEAPBpE2vNjU-rHWXmZ94emYuwKjclF3XT2gs,88
+eventlet/greenio/__pycache__/__init__.cpython-311.pyc,,
+eventlet/greenio/__pycache__/base.cpython-311.pyc,,
+eventlet/greenio/__pycache__/py3.cpython-311.pyc,,
+eventlet/greenio/base.py,sha256=jPUtjDABa9yMhSkBIHpBHLu3fYOxBHIMXxvBvPJlLGo,17122
+eventlet/greenio/py3.py,sha256=-Gm-n6AYCyKDwDhWm64cZMtthM1pzEXcWa3ZfjD_aiI,6791
+eventlet/greenpool.py,sha256=-Cyi27l0ds8YRXwedUiFsfoyRl8uulHkrek-bukRdL8,9734
+eventlet/greenthread.py,sha256=x7NK66otGsSDYWMRMSFMI6blMUTZlNbRUUdH1k8UtbI,13370
+eventlet/hubs/__init__.py,sha256=i9S4ki1aiTJqLxAkDg16xjWX951Rwk2G8SfoQbzLWEs,6013
+eventlet/hubs/__pycache__/__init__.cpython-311.pyc,,
+eventlet/hubs/__pycache__/asyncio.cpython-311.pyc,,
+eventlet/hubs/__pycache__/epolls.cpython-311.pyc,,
+eventlet/hubs/__pycache__/hub.cpython-311.pyc,,
+eventlet/hubs/__pycache__/kqueue.cpython-311.pyc,,
+eventlet/hubs/__pycache__/poll.cpython-311.pyc,,
+eventlet/hubs/__pycache__/pyevent.cpython-311.pyc,,
+eventlet/hubs/__pycache__/selects.cpython-311.pyc,,
+eventlet/hubs/__pycache__/timer.cpython-311.pyc,,
+eventlet/hubs/asyncio.py,sha256=8PsWA55Pj8U855fYD1N1JBLxfOxvyy2OBkFuUaKYAiA,5961
+eventlet/hubs/epolls.py,sha256=IkY-yX7shRxVO5LQ8Ysv5FiH6g-XW0XKhtyvorrRFlg,1018
+eventlet/hubs/hub.py,sha256=JcfZBQfFuo0dk_PpqKDcIf_9K_Kzzf0vGBxCqOTIy_E,17604
+eventlet/hubs/kqueue.py,sha256=-jOGtjNHcJAeIDfZYzFB8ZZeIfYAf4tssHuK_A9Qt1o,3420
+eventlet/hubs/poll.py,sha256=qn0qQdvmvKMCQRHr6arvyI027TDVRM1G_kjhx5biLrk,3895
+eventlet/hubs/pyevent.py,sha256=PtImWgRlaH9NmglMcAw5BnqYrTnVoy-4VjfRHUSdvyo,156
+eventlet/hubs/selects.py,sha256=13R8ueir1ga8nFapuqnjFEpRbsRcda4V1CpNhUwtKt8,1984
+eventlet/hubs/timer.py,sha256=Uvo5gxjptEyCtTaeb_X7SpaIvATqLb6ehWX_33Y242c,3185
+eventlet/lock.py,sha256=GGrKyItc5a0ANCrB2eS7243g_BiHVAS_ufjy1eWE7Es,1229
+eventlet/patcher.py,sha256=cMuVlnYIOEPuIe_npl7q3P1H-Bfh7iwuvEaJaOr1VB4,26890
+eventlet/pools.py,sha256=3JPSudnQP3M-FD0ihc17zS7NPaQZ4cXwwmf1qDDJKuU,6244
+eventlet/queue.py,sha256=iA9lG-oiMePgYYNnspubTBu4xbaoyaSSWYa_cL5Q7-Q,18394
+eventlet/semaphore.py,sha256=F6aIp2d5uuvYJPTmRAwt9U8sfDIjlT259MtDWKp4SHY,12163
+eventlet/support/__init__.py,sha256=Gkqs5h-VXQZc73NIkBXps45uuFdRLrXvme4DNwY3Y3k,1764
+eventlet/support/__pycache__/__init__.cpython-311.pyc,,
+eventlet/support/__pycache__/greendns.cpython-311.pyc,,
+eventlet/support/__pycache__/greenlets.cpython-311.pyc,,
+eventlet/support/__pycache__/psycopg2_patcher.cpython-311.pyc,,
+eventlet/support/__pycache__/pylib.cpython-311.pyc,,
+eventlet/support/__pycache__/stacklesspypys.cpython-311.pyc,,
+eventlet/support/__pycache__/stacklesss.cpython-311.pyc,,
+eventlet/support/greendns.py,sha256=X1w1INSzAudrdPIVg19MARRmc5o1pkzM4C-gQgWU0Z8,35489
+eventlet/support/greenlets.py,sha256=1mxaAJJlZYSBgoWM1EL9IvbtMHTo61KokzScSby1Qy8,133
+eventlet/support/psycopg2_patcher.py,sha256=Rzm9GYS7PmrNpKAw04lqJV7KPcxLovnaCUI8CXE328A,2272
+eventlet/support/pylib.py,sha256=EvZ1JZEX3wqWtzfga5HeVL-sLLb805_f_ywX2k5BDHo,274
+eventlet/support/stacklesspypys.py,sha256=6BwZcnsCtb1m4wdK6GygoiPvYV03v7P7YlBxPIE6Zns,275
+eventlet/support/stacklesss.py,sha256=hxen8xtqrHS-bMPP3ThiqRCutNeNlQHjzmW-1DzE0JM,1851
+eventlet/timeout.py,sha256=mFW8oEj3wxSFQQhXOejdtOyWYaqFgRK82ccfz5fojQ4,6644
+eventlet/tpool.py,sha256=2EXw7sNqfRo7aBPOUxhOV3bHWgmbIoIQyyb9SGAQLQY,10573
+eventlet/websocket.py,sha256=b_D4u3NQ04XVLSp_rZ-jApFY0THBsG03z8rcDsKTYjk,34535
+eventlet/wsgi.py,sha256=CjQjjSQsfk95NonoQwu2ykezALX5umDUYEmZXkP3hXM,42360
+eventlet/zipkin/README.rst,sha256=xmt_Mmbtl3apFwYzgrWOtaQdM46AdT1MV11N-dwrLsA,3866
+eventlet/zipkin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+eventlet/zipkin/__pycache__/__init__.cpython-311.pyc,,
+eventlet/zipkin/__pycache__/api.cpython-311.pyc,,
+eventlet/zipkin/__pycache__/client.cpython-311.pyc,,
+eventlet/zipkin/__pycache__/greenthread.cpython-311.pyc,,
+eventlet/zipkin/__pycache__/http.cpython-311.pyc,,
+eventlet/zipkin/__pycache__/log.cpython-311.pyc,,
+eventlet/zipkin/__pycache__/patcher.cpython-311.pyc,,
+eventlet/zipkin/__pycache__/wsgi.cpython-311.pyc,,
+eventlet/zipkin/_thrift/README.rst,sha256=5bZ4doepGQlXdemHzPfvcobc5C0Mwa0lxzuAn_Dm3LY,233
+eventlet/zipkin/_thrift/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+eventlet/zipkin/_thrift/__pycache__/__init__.cpython-311.pyc,,
+eventlet/zipkin/_thrift/zipkinCore.thrift,sha256=zbV8L5vQUXNngVbI1eXR2gAgenmWRyPGzf7QEb2_wNU,2121
+eventlet/zipkin/_thrift/zipkinCore/__init__.py,sha256=YFcZTT8Cm-6Y4oTiCaqq0DT1lw2W09WqoEc5_pTAwW0,34
+eventlet/zipkin/_thrift/zipkinCore/__pycache__/__init__.cpython-311.pyc,,
+eventlet/zipkin/_thrift/zipkinCore/__pycache__/constants.cpython-311.pyc,,
+eventlet/zipkin/_thrift/zipkinCore/__pycache__/ttypes.cpython-311.pyc,,
+eventlet/zipkin/_thrift/zipkinCore/constants.py,sha256=cbgWT_mN04BRZbyzjr1LzT40xvotzFyz-vbYp8Q_klo,275
+eventlet/zipkin/_thrift/zipkinCore/ttypes.py,sha256=94RG3YtkmpeMmJ-EvKiwnYUtovYlfjrRVnh6sI27cJ0,13497
+eventlet/zipkin/api.py,sha256=K9RdTr68ifYVQ28IhQZSOTC82E2y7P_cjIw28ykWJg8,5467
+eventlet/zipkin/client.py,sha256=hT6meeP8pM5WDWi-zDt8xXDLwjpfM1vaJ2DRju8MA9I,1691
+eventlet/zipkin/example/ex1.png,sha256=tMloQ9gWouUjGhHWTBzzuPQ308JdUtrVFd2ClXHRIBg,53179
+eventlet/zipkin/example/ex2.png,sha256=AAIYZig2qVz6RVTj8nlIKju0fYT3DfP-F28LLwYIxwI,40482
+eventlet/zipkin/example/ex3.png,sha256=xc4J1WOjKCeAYr4gRSFFggJbHMEk-_C9ukmAKXTEfuk,73175
+eventlet/zipkin/greenthread.py,sha256=ify1VnsJmrFneAwfPl6QE8kgHIPJE5fAE9Ks9wQzeVI,843
+eventlet/zipkin/http.py,sha256=qe_QMKI9GAV7HDZ6z1k_8rgEbICpCsqa80EdjQLG5Uk,666
+eventlet/zipkin/log.py,sha256=jElBHT8H3_vs9T3r8Q-JG30xyajQ7u6wNGWmmMPQ4AA,337
+eventlet/zipkin/patcher.py,sha256=t1g5tXcbuEvNix3ICtZyuIWaJKQtUHJ5ZUqsi14j9Dc,1388
+eventlet/zipkin/wsgi.py,sha256=IT3d_j2DKRTALf5BRr7IPqWbFwfxH0VUIQ_EyItWfp4,2268
diff --git a/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/REQUESTED b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/WHEEL b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/WHEEL
new file mode 100644
index 0000000..12228d4
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.27.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/licenses/AUTHORS b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/licenses/AUTHORS
new file mode 100644
index 0000000..a976907
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/licenses/AUTHORS
@@ -0,0 +1,189 @@
+Maintainer (i.e., Who To Hassle If You Find Bugs)
+-------------------------------------------------
+
+The current maintainer(s) are volunteers with unrelated jobs.
+We can only pay sporadic attention to responding to your issue and pull request submissions.
+Your patience is greatly appreciated!
+
+Active maintainers
+~~~~~~~~~~~~~~~~~~
+
+* Itamar Turner-Trauring https://github.com/itamarst
+* Tim Burke https://github.com/tipabu
+* Hervé Beraud https://github.com/4383
+
+Less active maintainers
+~~~~~~~~~~~~~~~~~~~~~~~
+
+* Sergey Shepelev https://github.com/temoto
+* Jakub Stasiak https://github.com/jstasiak
+* Nat Goodspeed https://github.com/nat-goodspeed
+
+Original Authors
+----------------
+* Bob Ippolito
+* Donovan Preston
+
+Contributors
+------------
+* AG Projects
+* Chris AtLee
+* R\. Tyler Ballance
+* Denis Bilenko
+* Mike Barton
+* Patrick Carlisle
+* Ben Ford
+* Andrew Godwin
+* Brantley Harris
+* Gregory Holt
+* Joe Malicki
+* Chet Murthy
+* Eugene Oden
+* radix
+* Scott Robinson
+* Tavis Rudd
+* Sergey Shepelev
+* Chuck Thier
+* Nick V
+* Daniele Varrazzo
+* Ryan Williams
+* Geoff Salmon
+* Edward George
+* Floris Bruynooghe
+* Paul Oppenheim
+* Jakub Stasiak
+* Aldona Majorek
+* Victor Sergeyev
+* David Szotten
+* Victor Stinner
+* Samuel Merritt
+* Eric Urban
+* Miguel Grinberg
+* Tuomo Kriikkula
+
+Linden Lab Contributors
+-----------------------
+* John Beisley
+* Tess Chu
+* Nat Goodspeed
+* Dave Kaprielian
+* Kartic Krishnamurthy
+* Bryan O'Sullivan
+* Kent Quirk
+* Ryan Williams
+
+Thanks To
+---------
+* AdamKG, giving the hint that invalid argument errors were introduced post-0.9.0
+* Luke Tucker, bug report regarding wsgi + webob
+* Taso Du Val, reproing an exception squelching bug, saving children's lives ;-)
+* Luci Stanescu, for reporting twisted hub bug
+* Marcus Cavanaugh, for test case code that has been incredibly useful in tracking down bugs
+* Brian Brunswick, for many helpful questions and suggestions on the mailing list
+* Cesar Alaniz, for uncovering bugs of great import
+* the grugq, for contributing patches, suggestions, and use cases
+* Ralf Schmitt, for wsgi/webob incompatibility bug report and suggested fix
+* Benoit Chesneau, bug report on green.os and patch to fix it
+* Slant, better iterator implementation in tpool
+* Ambroff, nice pygtk hub example
+* Michael Carter, websocket patch to improve location handling
+* Marcin Bachry, nice repro of a bug and good diagnosis leading to the fix
+* David Ziegler, reporting issue #53
+* Favo Yang, twisted hub patch
+* Schmir, patch that fixes readline method with chunked encoding in wsgi.py, advice on patcher
+* Slide, for open-sourcing gogreen
+* Holger Krekel, websocket example small fix
+* mikepk, debugging MySQLdb/tpool issues
+* Malcolm Cleaton, patch for Event exception handling
+* Alexey Borzenkov, for finding and fixing issues with Windows error detection (#66, #69), reducing dependencies in zeromq hub (#71)
+* Anonymous, finding and fixing error in websocket chat example (#70)
+* Edward George, finding and fixing an issue in the [e]poll hubs (#74), and in convenience (#86)
+* Ruijun Luo, figuring out incorrect openssl import for wrap_ssl (#73)
+* rfk, patch to get green zmq to respect noblock flag.
+* Soren Hansen, finding and fixing issue in subprocess (#77)
+* Stefano Rivera, making tests pass in absence of postgres (#78)
+* Joshua Kwan, fixing busy-wait in eventlet.green.ssl.
+* Nick Vatamaniuc, Windows SO_REUSEADDR patch (#83)
+* Clay Gerrard, wsgi handle socket closed by client (#95)
+* Eric Windisch, zmq getsockopt(EVENTS) wake correct threads (pull request 22)
+* Raymond Lu, fixing busy-wait in eventlet.green.ssl.socket.sendall()
+* Thomas Grainger, webcrawler example small fix, "requests" library import bug report, Travis integration
+* Peter Portante, save syscalls in socket.dup(), environ[REMOTE_PORT] in wsgi
+* Peter Skirko, fixing socket.settimeout(0) bug
+* Derk Tegeler, Pre-cache proxied GreenSocket methods (Bitbucket #136)
+* David Malcolm, optional "timeout" argument to the subprocess module (Bitbucket #89)
+* David Goetz, wsgi: Allow minimum_chunk_size to be overridden on a per-request basis
+* Dmitry Orlov, websocket: accept Upgrade: websocket (lowercase)
+* Zhang Hua, profile: accumulate results between runs (Bitbucket #162)
+* Astrum Kuo, python3 compatibility fixes; greenthread.unlink() method
+* Davanum Srinivas, Python3 compatibility fixes
+* Dmitriy Kruglyak, PyPy 2.3 compatibility fix
+* Jan Grant, Michael Kerrin, second simultaneous read (GH-94)
+* Simon Jagoe, Python3 octal literal fix
+* Tushar Gohad, wsgi: Support optional headers w/ "100 Continue" responses
+* raylu, fixing operator precedence bug in eventlet.wsgi
+* Christoph Gysin, PEP 8 conformance
+* Andrey Gubarev
+* Corey Wright
+* Deva
+* Johannes Erdfelt
+* Kevin
+* QthCN
+* Steven Hardy
+* Stuart McLaren
+* Tomaz Muraus
+* ChangBo Guo(gcb), fixing typos in the documentation (GH-194)
+* Marc Abramowitz, fixing the README so it renders correctly on PyPI (GH-183)
+* Shaun Stanworth, equal chance to acquire semaphore from different greenthreads (GH-136)
+* Lior Neudorfer, Make sure SSL retries are done using the exact same data buffer
+* Sean Dague, wsgi: Provide python logging compatibility
+* Tim Simmons, Use _socket_nodns and select in dnspython support
+* Antonio Cuni, fix fd double close on PyPy
+* Seyeong Kim
+* Ihar Hrachyshka
+* Janusz Harkot
+* Fukuchi Daisuke
+* Ramakrishnan G
+* ashutosh-mishra
+* Azhar Hussain
+* Josh VanderLinden
+* Levente Polyak
+* Phus Lu
+* Collin Stocks, fixing eventlet.green.urllib2.urlopen() so it accepts cafile, capath, or cadefault arguments
+* Alexis Lee
+* Steven Erenst
+* Piët Delport
+* Alex Villacís Lasso
+* Yashwardhan Singh
+* Tim Burke
+* Ondřej Nový
+* Jarrod Johnson
+* Whitney Young
+* Matthew D. Pagel
+* Matt Yule-Bennett
+* Artur Stawiarski
+* Tal Wrii
+* Roman Podoliaka
+* Gevorg Davoian
+* Ondřej Kobližek
+* Yuichi Bando
+* Feng
+* Aayush Kasurde
+* Linbing
+* Geoffrey Thomas
+* Costas Christofi, adding permessage-deflate websocket extension support
+* Peter Kovary, adding permessage-deflate websocket extension support
+* Konstantin Enchant
+* James Page
+* Stefan Nica
+* Haikel Guemar
+* Miguel Grinberg
+* Chris Kerr
+* Anthony Sottile
+* Quan Tian
+* orishoshan
+* Matt Bennett
+* Ralf Haferkamp
+* Jake Tesler
+* Aayush Kasurde
+* Psycho Mantys, patch for exception handling on ReferenceError
diff --git a/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/licenses/LICENSE b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/licenses/LICENSE
new file mode 100644
index 0000000..2ddd0d9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet-0.40.3.dist-info/licenses/LICENSE
@@ -0,0 +1,23 @@
+Unless otherwise noted, the files in Eventlet are under the following MIT license:
+
+Copyright (c) 2005-2006, Bob Ippolito
+Copyright (c) 2007-2010, Linden Research, Inc.
+Copyright (c) 2008-2010, Eventlet Contributors (see AUTHORS)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/__init__.py
new file mode 100644
index 0000000..01773c5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/__init__.py
@@ -0,0 +1,88 @@
+import os
+import sys
+import warnings
+
+
+from eventlet import convenience
+from eventlet import event
+from eventlet import greenpool
+from eventlet import greenthread
+from eventlet import patcher
+from eventlet import queue
+from eventlet import semaphore
+from eventlet import support
+from eventlet import timeout
+# NOTE(hberaud): Versions are now managed by hatch and version control.
+# hatch has a build hook which generates the version file, however,
+# if the project is installed in editable mode then the _version.py file
+# will not be updated unless the package is reinstalled (or locally rebuilt).
+# For further details, please read:
+# https://github.com/ofek/hatch-vcs#build-hook
+# https://github.com/maresb/hatch-vcs-footgun-example
+try:
+ from eventlet._version import __version__
+except ImportError:
+ __version__ = "0.0.0"
+import greenlet
+
+# Force the monotonic library search as early as possible.
+# Helpful when CPython < 3.5 on Linux would block in `os.waitpid(-1)` before
+# the first use of the hub.
+# Example: gunicorn
+# https://github.com/eventlet/eventlet/issues/401#issuecomment-327500352
+try:
+ import monotonic
+ del monotonic
+except ImportError:
+ pass
+
+connect = convenience.connect
+listen = convenience.listen
+serve = convenience.serve
+StopServe = convenience.StopServe
+wrap_ssl = convenience.wrap_ssl
+
+Event = event.Event
+
+GreenPool = greenpool.GreenPool
+GreenPile = greenpool.GreenPile
+
+sleep = greenthread.sleep
+spawn = greenthread.spawn
+spawn_n = greenthread.spawn_n
+spawn_after = greenthread.spawn_after
+kill = greenthread.kill
+
+import_patched = patcher.import_patched
+monkey_patch = patcher.monkey_patch
+
+Queue = queue.Queue
+
+Semaphore = semaphore.Semaphore
+CappedSemaphore = semaphore.CappedSemaphore
+BoundedSemaphore = semaphore.BoundedSemaphore
+
+Timeout = timeout.Timeout
+with_timeout = timeout.with_timeout
+wrap_is_timeout = timeout.wrap_is_timeout
+is_timeout = timeout.is_timeout
+
+getcurrent = greenlet.greenlet.getcurrent
+
+# deprecated
+TimeoutError, exc_after, call_after_global = (
+ support.wrap_deprecated(old, new)(fun) for old, new, fun in (
+ ('TimeoutError', 'Timeout', Timeout),
+ ('exc_after', 'greenthread.exc_after', greenthread.exc_after),
+ ('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global),
+ ))
+
+
+if hasattr(os, "register_at_fork"):
+ def _warn_on_fork():
+ import warnings
+ warnings.warn(
+ "Using fork() is a bad idea, and there is no guarantee eventlet will work." +
+ " See https://eventlet.readthedocs.io/en/latest/fork.html for more details.",
+ DeprecationWarning
+ )
+ os.register_at_fork(before=_warn_on_fork)
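+
+# Illustrative sketch of the re-exported top-level API (a minimal example,
+# not taken from the eventlet docs):
+#
+#     import eventlet
+#     eventlet.monkey_patch()              # patch the stdlib early
+#     gt = eventlet.spawn(lambda: 40 + 2)  # run in a green thread
+#     assert gt.wait() == 42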
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/_version.py b/tapdown/lib/python3.11/site-packages/eventlet/_version.py
new file mode 100644
index 0000000..204a16a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/_version.py
@@ -0,0 +1,34 @@
+# file generated by setuptools-scm
+# don't change, don't track in version control
+
+__all__ = [
+ "__version__",
+ "__version_tuple__",
+ "version",
+ "version_tuple",
+ "__commit_id__",
+ "commit_id",
+]
+
+TYPE_CHECKING = False
+if TYPE_CHECKING:
+ from typing import Tuple
+ from typing import Union
+
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
+ COMMIT_ID = Union[str, None]
+else:
+ VERSION_TUPLE = object
+ COMMIT_ID = object
+
+version: str
+__version__: str
+__version_tuple__: VERSION_TUPLE
+version_tuple: VERSION_TUPLE
+commit_id: COMMIT_ID
+__commit_id__: COMMIT_ID
+
+__version__ = version = '0.40.3'
+__version_tuple__ = version_tuple = (0, 40, 3)
+
+__commit_id__ = commit_id = None
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/asyncio.py b/tapdown/lib/python3.11/site-packages/eventlet/asyncio.py
new file mode 100644
index 0000000..b9eca92
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/asyncio.py
@@ -0,0 +1,57 @@
+"""
+Asyncio compatibility functions.
+"""
+import asyncio
+
+from greenlet import GreenletExit
+
+from .greenthread import spawn, getcurrent
+from .event import Event
+from .hubs import get_hub
+from .hubs.asyncio import Hub as AsyncioHub
+
+__all__ = ["spawn_for_awaitable"]
+
+
+def spawn_for_awaitable(coroutine):
+ """
+ Take a coroutine or some other object that can be awaited
+ (``asyncio.Future``, ``asyncio.Task``), and turn it into a ``GreenThread``.
+
+ Known limitations:
+
+    * The coroutine/future/etc. does not run in its own
+      greenlet/``GreenThread``.
+ * As a result, things like ``eventlet.Lock``
+ won't work correctly inside ``async`` functions, thread ids aren't
+ meaningful, and so on.
+ """
+ if not isinstance(get_hub(), AsyncioHub):
+ raise RuntimeError(
+ "This API only works with eventlet's asyncio hub. "
+ + "To use it, set an EVENTLET_HUB=asyncio environment variable."
+ )
+
+ def _run():
+ # Convert the coroutine/Future/Task we're wrapping into a Future.
+ future = asyncio.ensure_future(coroutine, loop=asyncio.get_running_loop())
+
+ # Ensure killing the GreenThread cancels the Future:
+ def _got_result(gthread):
+ try:
+ gthread.wait()
+ except GreenletExit:
+ future.cancel()
+
+ getcurrent().link(_got_result)
+
+ # Wait until the Future has a result.
+ has_result = Event()
+ future.add_done_callback(lambda _: has_result.send(True))
+ has_result.wait()
+ # Return the result of the Future (or raise an exception if it had an
+ # exception).
+ return future.result()
+
+ # Start a GreenThread:
+ return spawn(_run)
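+
+
+# Illustrative usage sketch (assumes the asyncio hub is active, e.g. via
+# EVENTLET_HUB=asyncio; `fetch` is a hypothetical coroutine):
+#
+#     async def fetch():
+#         await asyncio.sleep(0.1)
+#         return "done"
+#
+#     gthread = spawn_for_awaitable(fetch())
+#     result = gthread.wait()  # blocks this greenthread, not the event loop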
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/backdoor.py b/tapdown/lib/python3.11/site-packages/eventlet/backdoor.py
new file mode 100644
index 0000000..3f3887f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/backdoor.py
@@ -0,0 +1,140 @@
+from code import InteractiveConsole
+import errno
+import socket
+import sys
+
+import eventlet
+from eventlet import hubs
+from eventlet.support import greenlets, get_errno
+
+try:
+ sys.ps1
+except AttributeError:
+ sys.ps1 = '>>> '
+try:
+ sys.ps2
+except AttributeError:
+ sys.ps2 = '... '
+
+
+class FileProxy:
+ def __init__(self, f):
+ self.f = f
+
+ def isatty(self):
+ return True
+
+ def flush(self):
+ pass
+
+ def write(self, data, *a, **kw):
+ try:
+ self.f.write(data, *a, **kw)
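+    # Ping lifecycle summary (derived from the two methods above): the server
+    # sends a PING every ping_interval; each PONG received in receive()
+    # schedules the next ping, and check_ping_timeout() closes the socket
+    # when no PONG has arrived within ping_timeout of the last PING.
+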
+ self.f.flush()
+ except OSError as e:
+ if get_errno(e) != errno.EPIPE:
+ raise
+
+ def readline(self, *a):
+ return self.f.readline(*a).replace('\r\n', '\n')
+
+ def __getattr__(self, attr):
+ return getattr(self.f, attr)
+
+
+# @@tavis: the `locals` args below mask the built-in function. Should
+# be renamed.
+class SocketConsole(greenlets.greenlet):
+ def __init__(self, desc, hostport, locals):
+ self.hostport = hostport
+ self.locals = locals
+ # mangle the socket
+ self.desc = FileProxy(desc)
+ greenlets.greenlet.__init__(self)
+
+ def run(self):
+ try:
+ console = InteractiveConsole(self.locals)
+ console.interact()
+ finally:
+ self.switch_out()
+ self.finalize()
+
+ def switch(self, *args, **kw):
+ self.saved = sys.stdin, sys.stderr, sys.stdout
+ sys.stdin = sys.stdout = sys.stderr = self.desc
+ greenlets.greenlet.switch(self, *args, **kw)
+
+ def switch_out(self):
+ sys.stdin, sys.stderr, sys.stdout = self.saved
+
+ def finalize(self):
+ # restore the state of the socket
+ self.desc = None
+ if len(self.hostport) >= 2:
+ host = self.hostport[0]
+ port = self.hostport[1]
+ print("backdoor closed to %s:%s" % (host, port,))
+ else:
+ print('backdoor closed')
+
+
+def backdoor_server(sock, locals=None):
+ """ Blocking function that runs a backdoor server on the socket *sock*,
+ accepting connections and running backdoor consoles for each client that
+ connects.
+
+ The *locals* argument is a dictionary that will be included in the locals()
+ of the interpreters. It can be convenient to stick important application
+ variables in here.
+ """
+ listening_on = sock.getsockname()
+ if sock.family == socket.AF_INET:
+ # Expand result to IP + port
+ listening_on = '%s:%s' % listening_on
+ elif sock.family == socket.AF_INET6:
+ ip, port, _, _ = listening_on
+ listening_on = '%s:%s' % (ip, port,)
+ # No action needed if sock.family == socket.AF_UNIX
+
+ print("backdoor server listening on %s" % (listening_on,))
+ try:
+ while True:
+ socketpair = None
+ try:
+ socketpair = sock.accept()
+ backdoor(socketpair, locals)
+ except OSError as e:
+ # Broken pipe means it was shutdown
+ if get_errno(e) != errno.EPIPE:
+ raise
+ finally:
+ if socketpair:
+ socketpair[0].close()
+ finally:
+ sock.close()
+
+
+def backdoor(conn_info, locals=None):
+ """Sets up an interactive console on a socket with a single connected
+ client. This does not block the caller, as it spawns a new greenlet to
+ handle the console. This is meant to be called from within an accept loop
+ (such as backdoor_server).
+ """
+ conn, addr = conn_info
+ if conn.family == socket.AF_INET:
+ host, port = addr
+ print("backdoor to %s:%s" % (host, port))
+ elif conn.family == socket.AF_INET6:
+ host, port, _, _ = addr
+ print("backdoor to %s:%s" % (host, port))
+ else:
+ print('backdoor opened')
+ fl = conn.makefile("rw")
+ console = SocketConsole(fl, addr, locals)
+ hub = hubs.get_hub()
+ hub.schedule_call_global(0, console.switch)
+
+
+if __name__ == '__main__':
+ backdoor_server(eventlet.listen(('127.0.0.1', 9000)), {})
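+
+# Illustrative sketch (a minimal embedding, not from the eventlet docs): run
+# the backdoor alongside an application by giving it its own greenthread;
+# `app_state` is a hypothetical object you want to inspect from the console.
+#
+#     eventlet.spawn(backdoor_server,
+#                    eventlet.listen(('localhost', 3000)),
+#                    locals={'app_state': app_state})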
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/convenience.py b/tapdown/lib/python3.11/site-packages/eventlet/convenience.py
new file mode 100644
index 0000000..4d286aa
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/convenience.py
@@ -0,0 +1,190 @@
+import sys
+import warnings
+
+from eventlet import greenpool
+from eventlet import greenthread
+from eventlet import support
+from eventlet.green import socket
+from eventlet.support import greenlets as greenlet
+
+
+def connect(addr, family=socket.AF_INET, bind=None):
+ """Convenience function for opening client sockets.
+
+ :param addr: Address of the server to connect to. For TCP sockets, this is a (host, port) tuple.
+ :param family: Socket family, optional. See :mod:`socket` documentation for available families.
+ :param bind: Local address to bind to, optional.
+ :return: The connected green socket object.
+ """
+ sock = socket.socket(family, socket.SOCK_STREAM)
+ if bind is not None:
+ sock.bind(bind)
+ sock.connect(addr)
+ return sock
+
+
+class ReuseRandomPortWarning(Warning):
+ pass
+
+
+class ReusePortUnavailableWarning(Warning):
+ pass
+
+
+def listen(addr, family=socket.AF_INET, backlog=50, reuse_addr=True, reuse_port=None):
+ """Convenience function for opening server sockets. This
+ socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.
+
+ Sets SO_REUSEADDR on the socket to save on annoyance.
+
+ :param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
+ :param family: Socket family, optional. See :mod:`socket` documentation for available families.
+ :param backlog:
+
+ The maximum number of queued connections. Should be at least 1; the maximum
+ value is system-dependent.
+
+ :return: The listening green socket object.
+ """
+ sock = socket.socket(family, socket.SOCK_STREAM)
+ if reuse_addr and sys.platform[:3] != 'win':
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ if family in (socket.AF_INET, socket.AF_INET6) and addr[1] == 0:
+ if reuse_port:
+ warnings.warn(
+ '''listen on random port (0) with SO_REUSEPORT is dangerous.
+ Double check your intent.
+ Example problem: https://github.com/eventlet/eventlet/issues/411''',
+ ReuseRandomPortWarning, stacklevel=3)
+ elif reuse_port is None:
+ reuse_port = True
+ if reuse_port and hasattr(socket, 'SO_REUSEPORT'):
+ # NOTE(zhengwei): linux kernel >= 3.9
+ try:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
+ # OSError is enough on Python 3+
+ except OSError as ex:
+ if support.get_errno(ex) in (22, 92):
+ # A famous platform defines unsupported socket option.
+ # https://github.com/eventlet/eventlet/issues/380
+ # https://github.com/eventlet/eventlet/issues/418
+ warnings.warn(
+ '''socket.SO_REUSEPORT is defined but not supported.
+ On Windows: known bug, wontfix.
+ On other systems: please comment in the issue linked below.
+ More information: https://github.com/eventlet/eventlet/issues/380''',
+ ReusePortUnavailableWarning, stacklevel=3)
+
+ sock.bind(addr)
+ sock.listen(backlog)
+ return sock
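+
+# Illustrative sketch (port 6000 is arbitrary): the client/server pair these
+# helpers create; connect() completes against the listen backlog, so a
+# single greenthread can run this straight through.
+#
+#     server = listen(('localhost', 6000))
+#     client = connect(('localhost', 6000))
+#     conn, addr = server.accept()
+#     client.sendall(b'ping')
+#     assert conn.recv(4) == b'ping'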
+
+
+class StopServe(Exception):
+ """Exception class used for quitting :func:`~eventlet.serve` gracefully."""
+ pass
+
+
+def _stop_checker(t, server_gt, conn):
+ try:
+ try:
+ t.wait()
+ finally:
+ conn.close()
+ except greenlet.GreenletExit:
+ pass
+ except Exception:
+ greenthread.kill(server_gt, *sys.exc_info())
+
+
+def serve(sock, handle, concurrency=1000):
+ """Runs a server on the supplied socket. Calls the function *handle* in a
+ separate greenthread for every incoming client connection. *handle* takes
+ two arguments: the client socket object, and the client address::
+
+ def myhandle(client_sock, client_addr):
+ print("client connected", client_addr)
+
+ eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)
+
+ Returning from *handle* closes the client socket.
+
+ :func:`serve` blocks the calling greenthread; it won't return until
+ the server completes. If you desire an immediate return,
+ spawn a new greenthread for :func:`serve`.
+
+ Any uncaught exceptions raised in *handle* are raised as exceptions
+ from :func:`serve`, terminating the server, so be sure to be aware of the
+ exceptions your application can raise. The return value of *handle* is
+ ignored.
+
+ Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
+ server -- that's the only way to get the server() function to return rather
+ than raise.
+
+ The value in *concurrency* controls the maximum number of
+ greenthreads that will be open at any time handling requests. When
+ the server hits the concurrency limit, it stops accepting new
+ connections until the existing ones complete.
+ """
+ pool = greenpool.GreenPool(concurrency)
+ server_gt = greenthread.getcurrent()
+
+ while True:
+ try:
+ conn, addr = sock.accept()
+ gt = pool.spawn(handle, conn, addr)
+ gt.link(_stop_checker, server_gt, conn)
+ conn, addr, gt = None, None, None
+ except StopServe:
+ return
+
+
+def wrap_ssl(sock, *a, **kw):
+ """Convenience function for converting a regular socket into an
+ SSL socket. Has the same interface as :func:`ssl.wrap_socket`,
+    but can also use PyOpenSSL. Note, however, that it ignores the
+ `cert_reqs`, `ssl_version`, `ca_certs`, `do_handshake_on_connect`,
+ and `suppress_ragged_eofs` arguments when using PyOpenSSL.
+
+ The preferred idiom is to call wrap_ssl directly on the creation
+ method, e.g., ``wrap_ssl(connect(addr))`` or
+ ``wrap_ssl(listen(addr), server_side=True)``. This way there is
+ no "naked" socket sitting around to accidentally corrupt the SSL
+ session.
+
+    :return: Green SSL object.
+ """
+ return wrap_ssl_impl(sock, *a, **kw)
+
+
+try:
+ from eventlet.green import ssl
+ wrap_ssl_impl = ssl.wrap_socket
+except ImportError:
+ # trying PyOpenSSL
+ try:
+ from eventlet.green.OpenSSL import SSL
+ except ImportError:
+ def wrap_ssl_impl(*a, **kw):
+ raise ImportError(
+ "To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.7 or later.")
+ else:
+ def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False,
+ cert_reqs=None, ssl_version=None, ca_certs=None,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True, ciphers=None):
+ # theoretically the ssl_version could be respected in this line
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ if certfile is not None:
+ context.use_certificate_file(certfile)
+ if keyfile is not None:
+ context.use_privatekey_file(keyfile)
+ context.set_verify(SSL.VERIFY_NONE, lambda *x: True)
+
+ connection = SSL.Connection(context, sock)
+ if server_side:
+ connection.set_accept_state()
+ else:
+ connection.set_connect_state()
+ return connection
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/corolocal.py b/tapdown/lib/python3.11/site-packages/eventlet/corolocal.py
new file mode 100644
index 0000000..73b10b6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/corolocal.py
@@ -0,0 +1,53 @@
+import weakref
+
+from eventlet import greenthread
+
+__all__ = ['get_ident', 'local']
+
+
+def get_ident():
+ """ Returns ``id()`` of current greenlet. Useful for debugging."""
+ return id(greenthread.getcurrent())
+
+
+# the entire purpose of this class is to store off the constructor
+# arguments in a local variable without calling __init__ directly
+class _localbase:
+ __slots__ = '_local__args', '_local__greens'
+
+ def __new__(cls, *args, **kw):
+ self = object.__new__(cls)
+ object.__setattr__(self, '_local__args', (args, kw))
+ object.__setattr__(self, '_local__greens', weakref.WeakKeyDictionary())
+ if (args or kw) and (cls.__init__ is object.__init__):
+ raise TypeError("Initialization arguments are not supported")
+ return self
+
+
+def _patch(thrl):
+ greens = object.__getattribute__(thrl, '_local__greens')
+ # until we can store the localdict on greenlets themselves,
+ # we store it in _local__greens on the local object
+ cur = greenthread.getcurrent()
+ if cur not in greens:
+ # must be the first time we've seen this greenlet, call __init__
+ greens[cur] = {}
+ cls = type(thrl)
+ if cls.__init__ is not object.__init__:
+ args, kw = object.__getattribute__(thrl, '_local__args')
+ thrl.__init__(*args, **kw)
+ object.__setattr__(thrl, '__dict__', greens[cur])
+
+
+class local(_localbase):
+ def __getattribute__(self, attr):
+ _patch(self)
+ return object.__getattribute__(self, attr)
+
+ def __setattr__(self, attr, value):
+ _patch(self)
+ return object.__setattr__(self, attr, value)
+
+ def __delattr__(self, attr):
+ _patch(self)
+ return object.__delattr__(self, attr)
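+
+
+# Illustrative sketch (`worker` and `n` are hypothetical names): each
+# greenthread sees its own attribute namespace, mirroring threading.local
+# but keyed on the current greenlet.
+#
+#     import eventlet
+#     data = local()
+#
+#     def worker(n):
+#         data.n = n             # private to this greenthread
+#         eventlet.sleep(0)      # yield to other workers
+#         assert data.n == n     # still our own value
+#
+#     threads = [eventlet.spawn(worker, i) for i in range(3)]
+#     [gt.wait() for gt in threads]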
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/coros.py b/tapdown/lib/python3.11/site-packages/eventlet/coros.py
new file mode 100644
index 0000000..fbd7e99
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/coros.py
@@ -0,0 +1,59 @@
+from eventlet import event as _event
+
+
+class metaphore:
+ """This is sort of an inverse semaphore: a counter that starts at 0 and
+ waits only if nonzero. It's used to implement a "wait for all" scenario.
+
+ >>> from eventlet import coros, spawn_n
+ >>> count = coros.metaphore()
+ >>> count.wait()
+ >>> def decrementer(count, id):
+ ... print("{0} decrementing".format(id))
+ ... count.dec()
+ ...
+ >>> _ = spawn_n(decrementer, count, 'A')
+ >>> _ = spawn_n(decrementer, count, 'B')
+ >>> count.inc(2)
+ >>> count.wait()
+ A decrementing
+ B decrementing
+ """
+
+ def __init__(self):
+ self.counter = 0
+ self.event = _event.Event()
+ # send() right away, else we'd wait on the default 0 count!
+ self.event.send()
+
+ def inc(self, by=1):
+ """Increment our counter. If this transitions the counter from zero to
+ nonzero, make any subsequent :meth:`wait` call wait.
+ """
+ assert by > 0
+ self.counter += by
+ if self.counter == by:
+ # If we just incremented self.counter by 'by', and the new count
+ # equals 'by', then the old value of self.counter was 0.
+ # Transitioning from 0 to a nonzero value means wait() must
+ # actually wait.
+ self.event.reset()
+
+ def dec(self, by=1):
+ """Decrement our counter. If this transitions the counter from nonzero
+ to zero, a current or subsequent wait() call need no longer wait.
+ """
+ assert by > 0
+ self.counter -= by
+ if self.counter <= 0:
+ # Don't leave self.counter < 0, that will screw things up in
+ # future calls.
+ self.counter = 0
+ # Transitioning from nonzero to 0 means wait() need no longer wait.
+ self.event.send()
+
+ def wait(self):
+ """Suspend the caller only if our count is nonzero. In that case,
+ resume the caller once the count decrements to zero again.
+ """
+ self.event.wait()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/dagpool.py b/tapdown/lib/python3.11/site-packages/eventlet/dagpool.py
new file mode 100644
index 0000000..47d13a8
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/dagpool.py
@@ -0,0 +1,601 @@
+# @file dagpool.py
+# @author Nat Goodspeed
+# @date 2016-08-08
+# @brief Provide DAGPool class
+
+from eventlet.event import Event
+from eventlet import greenthread
+import collections
+
+
+# value distinguished from any other Python value including None
+_MISSING = object()
+
+
+class Collision(Exception):
+ """
+ DAGPool raises Collision when you try to launch two greenthreads with the
+ same key, or post() a result for a key corresponding to a greenthread, or
+ post() twice for the same key. As with KeyError, str(collision) names the
+ key in question.
+ """
+ pass
+
+
+class PropagateError(Exception):
+ """
+ When a DAGPool greenthread terminates with an exception instead of
+ returning a result, attempting to retrieve its value raises
+ PropagateError.
+
+ Attributes:
+
+ key
+ the key of the greenthread which raised the exception
+
+ exc
+ the exception object raised by the greenthread
+ """
+ def __init__(self, key, exc):
+ # initialize base class with a reasonable string message
+ msg = "PropagateError({}): {}: {}" \
+ .format(key, exc.__class__.__name__, exc)
+ super().__init__(msg)
+ self.msg = msg
+ # Unless we set args, this is unpickleable:
+ # https://bugs.python.org/issue1692335
+ self.args = (key, exc)
+ self.key = key
+ self.exc = exc
+
+ def __str__(self):
+ return self.msg
+
+
+class DAGPool:
+ """
+ A DAGPool is a pool that constrains greenthreads, not by max concurrency,
+ but by data dependencies.
+
+ This is a way to implement general DAG dependencies. A simple dependency
+ tree (flowing in either direction) can straightforwardly be implemented
+ using recursion and (e.g.)
+ :meth:`GreenThread.imap()`.
+ What gets complicated is when a given node depends on several other nodes
+ as well as contributing to several other nodes.
+
+ With DAGPool, you concurrently launch all applicable greenthreads; each
+ will proceed as soon as it has all required inputs. The DAG is implicit in
+ which items are required by each greenthread.
+
+ Each greenthread is launched in a DAGPool with a key: any value that can
+ serve as a Python dict key. The caller also specifies an iterable of other
+ keys on which this greenthread depends. This iterable may be empty.
+
+ The greenthread callable must accept (key, results), where:
+
+ key
+ is its own key
+
+ results
+ is an iterable of (key, value) pairs.
+
+ A newly-launched DAGPool greenthread is entered immediately, and can
+ perform any necessary setup work. At some point it will iterate over the
+ (key, value) pairs from the passed 'results' iterable. Doing so blocks the
+ greenthread until a value is available for each of the keys specified in
+ its initial dependencies iterable. These (key, value) pairs are delivered
+ in chronological order, *not* the order in which they are initially
+ specified: each value will be delivered as soon as it becomes available.
+
+ The value returned by a DAGPool greenthread becomes the value for its
+ key, which unblocks any other greenthreads waiting on that key.
+
+ If a DAGPool greenthread terminates with an exception instead of returning
+ a value, attempting to retrieve the value raises :class:`PropagateError`,
+ which binds the key of the original greenthread and the original
+ exception. Unless the greenthread attempting to retrieve the value handles
+ PropagateError, that exception will in turn be wrapped in a PropagateError
+ of its own, and so forth. The code that ultimately handles PropagateError
+ can follow the chain of PropagateError.exc attributes to discover the flow
+ of that exception through the DAG of greenthreads.
+
+ External greenthreads may also interact with a DAGPool. See :meth:`wait_each`,
+ :meth:`waitall`, :meth:`post`.
+
+ It is not recommended to constrain external DAGPool producer greenthreads
+ in a :class:`GreenPool`: it may be hard to
+ provably avoid deadlock.
+
+ .. automethod:: __init__
+ .. automethod:: __getitem__
+ """
+
+ _Coro = collections.namedtuple("_Coro", ("greenthread", "pending"))
+
+ def __init__(self, preload={}):
+ """
+ DAGPool can be prepopulated with an initial dict or iterable of (key,
+ value) pairs. These (key, value) pairs are of course immediately
+ available for any greenthread that depends on any of those keys.
+ """
+ try:
+ # If a dict is passed, copy it. Don't risk a subsequent
+ # modification to passed dict affecting our internal state.
+ iteritems = preload.items()
+ except AttributeError:
+ # Not a dict, just an iterable of (key, value) pairs
+ iteritems = preload
+
+ # Load the initial dict
+ self.values = dict(iteritems)
+
+ # track greenthreads
+ self.coros = {}
+
+ # The key to blocking greenthreads is the Event.
+ self.event = Event()
+
+ def waitall(self):
+ """
+ waitall() blocks the calling greenthread until there is a value for
+ every DAGPool greenthread launched by :meth:`spawn`. It returns a dict
+ containing all *preload* data, all data from
+ :meth:`post` and all values returned by spawned greenthreads.
+
+ See also :meth:`wait`.
+ """
+ # waitall() is an alias for compatibility with GreenPool
+ return self.wait()
+
+ def wait(self, keys=_MISSING):
+ """
+ *keys* is an optional iterable of keys. If you omit the argument, it
+ waits for all the keys from *preload* data, from
+ :meth:`post` calls and from :meth:`spawn` calls: in other words, all
+ the keys of which this DAGPool is aware.
+
+ wait() blocks the calling greenthread until all of the relevant keys
+ have values. wait() returns a dict whose keys are the relevant keys,
+ and whose values come from the *preload* data, from values returned by
+ DAGPool greenthreads or from :meth:`post` calls.
+
+ If a DAGPool greenthread terminates with an exception, wait() will
+ raise :class:`PropagateError` wrapping that exception. If more than
+ one greenthread terminates with an exception, it is indeterminate
+ which one wait() will raise.
+
+ If an external greenthread posts a :class:`PropagateError` instance,
+ wait() will raise that PropagateError. If more than one greenthread
+ posts PropagateError, it is indeterminate which one wait() will raise.
+
+ See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
+ """
+ # This is mostly redundant with wait_each() functionality.
+ return dict(self.wait_each(keys))
+
+ def wait_each(self, keys=_MISSING):
+ """
+ *keys* is an optional iterable of keys. If you omit the argument, it
+ waits for all the keys from *preload* data, from
+ :meth:`post` calls and from :meth:`spawn` calls: in other words, all
+ the keys of which this DAGPool is aware.
+
+ wait_each() is a generator producing (key, value) pairs as a value
+ becomes available for each requested key. wait_each() blocks the
+ calling greenthread until the next value becomes available. If the
+ DAGPool was prepopulated with values for any of the relevant keys, of
+ course those can be delivered immediately without waiting.
+
+ Delivery order is intentionally decoupled from the initial sequence of
+ keys: each value is delivered as soon as it becomes available. If
+ multiple keys are available at the same time, wait_each() delivers
+ each of the ready ones in arbitrary order before blocking again.
+
+ The DAGPool does not distinguish between a value returned by one of
+ its own greenthreads and one provided by a :meth:`post` call or *preload* data.
+
+ The wait_each() generator terminates (raises StopIteration) when all
+ specified keys have been delivered. Thus, typical usage might be:
+
+ ::
+
+ for key, value in dagpool.wait_each(keys):
+ # process this ready key and value
+ # continue processing now that we've gotten values for all keys
+
+ By implication, if you pass wait_each() an empty iterable of keys, it
+ returns immediately without yielding anything.
+
+ If the value to be delivered is a :class:`PropagateError` exception object, the
+ generator raises that PropagateError instead of yielding it.
+
+ See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
+ """
+ # Build a local set() and then call _wait_each().
+ return self._wait_each(self._get_keyset_for_wait_each(keys))
+
+ def wait_each_success(self, keys=_MISSING):
+ """
+ wait_each_success() filters results so that only success values are
+ yielded. In other words, unlike :meth:`wait_each`, wait_each_success()
+ will not raise :class:`PropagateError`. Not every provided (or
+ defaulted) key will necessarily be represented, though naturally the
+ generator will not finish until all have completed.
+
+ In all other respects, wait_each_success() behaves like :meth:`wait_each`.
+ """
+ for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
+ if not isinstance(value, PropagateError):
+ yield key, value
+
+ def wait_each_exception(self, keys=_MISSING):
+ """
+ wait_each_exception() filters results so that only exceptions are
+ yielded. Not every provided (or defaulted) key will necessarily be
+ represented, though naturally the generator will not finish until
+ all have completed.
+
+ Unlike other DAGPool methods, wait_each_exception() simply yields
+ :class:`PropagateError` instances as values rather than raising them.
+
+ In all other respects, wait_each_exception() behaves like :meth:`wait_each`.
+ """
+ for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
+ if isinstance(value, PropagateError):
+ yield key, value
+
+ def _get_keyset_for_wait_each(self, keys):
+ """
+ wait_each(), wait_each_success() and wait_each_exception() promise
+ that if you pass an iterable of keys, the method will wait for results
+ from those keys -- but if you omit the keys argument, the method will
+ wait for results from all known keys. This helper implements that
+ distinction, returning a set() of the relevant keys.
+ """
+ if keys is not _MISSING:
+ return set(keys)
+ else:
+ # keys arg omitted -- use all the keys we know about
+ return set(self.coros.keys()) | set(self.values.keys())
+
+ def _wait_each(self, pending):
+ """
+ When _wait_each() encounters a value of PropagateError, it raises it.
+
+ In all other respects, _wait_each() behaves like _wait_each_raw().
+ """
+ for key, value in self._wait_each_raw(pending):
+ yield key, self._value_or_raise(value)
+
+ @staticmethod
+ def _value_or_raise(value):
+ # Most methods attempting to deliver PropagateError should raise that
+ # instead of simply returning it.
+ if isinstance(value, PropagateError):
+ raise value
+ return value
+
+ def _wait_each_raw(self, pending):
+ """
+ pending is a set() of keys for which we intend to wait. THIS SET WILL
+ BE DESTRUCTIVELY MODIFIED: as each key acquires a value, that key will
+ be removed from the passed 'pending' set.
+
+ _wait_each_raw() does not treat a PropagateError instance specially:
+ it will be yielded to the caller like any other value.
+
+ In all other respects, _wait_each_raw() behaves like wait_each().
+ """
+ while True:
+ # Before even waiting, show caller any (key, value) pairs that
+ # are already available. Copy 'pending' because we want to be able
+ # to remove items from the original set while iterating.
+ for key in pending.copy():
+ value = self.values.get(key, _MISSING)
+ if value is not _MISSING:
+ # found one, it's no longer pending
+ pending.remove(key)
+ yield (key, value)
+
+ if not pending:
+ # Once we've yielded all the caller's keys, done.
+ break
+
+ # There are still more keys pending, so wait.
+ self.event.wait()
+
+ def spawn(self, key, depends, function, *args, **kwds):
+ """
+ Launch the passed *function(key, results, ...)* as a greenthread,
+ passing it:
+
+ - the specified *key*
+ - an iterable of (key, value) pairs
+ - whatever other positional args or keywords you specify.
+
+ Iterating over the *results* iterable behaves like calling
+ :meth:`wait_each(depends)`.
+
+ Returning from *function()* behaves like
+ :meth:`post(key, return_value)`.
+
+ If *function()* terminates with an exception, that exception is wrapped
+ in :class:`PropagateError` with the greenthread's *key* and (effectively) posted
+ as the value for that key. Attempting to retrieve that value will
+ raise that PropagateError.
+
+ Thus, if the greenthread with key 'a' terminates with an exception,
+ and greenthread 'b' depends on 'a', when greenthread 'b' attempts to
+ iterate through its *results* argument, it will encounter
+ PropagateError. So by default, an uncaught exception will propagate
+ through all the downstream dependencies.
+
+ If you pass :meth:`spawn` a key already passed to spawn() or :meth:`post`, spawn()
+ raises :class:`Collision`.
+ """
+ if key in self.coros or key in self.values:
+ raise Collision(key)
+
+ # The order is a bit tricky. First construct the set() of keys.
+ pending = set(depends)
+ # It's important that we pass to _wait_each() the same 'pending' set()
+ # that we store in self.coros for this key. The generator-iterator
+ # returned by _wait_each() becomes the function's 'results' iterable.
+ newcoro = greenthread.spawn(self._wrapper, function, key,
+ self._wait_each(pending),
+ *args, **kwds)
+ # Also capture the same (!) set in the new _Coro object for this key.
+ # We must be able to observe ready keys being removed from the set.
+ self.coros[key] = self._Coro(newcoro, pending)
+
+ def _wrapper(self, function, key, results, *args, **kwds):
+ """
+ This wrapper runs the top-level function in a DAGPool greenthread,
+ posting its return value (or PropagateError) to the DAGPool.
+ """
+ try:
+ # call our passed function
+ result = function(key, results, *args, **kwds)
+ except Exception as err:
+ # Wrap any exception it may raise in a PropagateError.
+ result = PropagateError(key, err)
+ finally:
+ # function() has returned (or terminated with an exception). We no
+ # longer need to track this greenthread in self.coros. Remove it
+ # first so post() won't complain about a running greenthread.
+ del self.coros[key]
+
+ try:
+ # as advertised, try to post() our return value
+ self.post(key, result)
+ except Collision:
+ # if we've already post()ed a result, oh well
+ pass
+
+ # also, in case anyone cares...
+ return result
+
+ def spawn_many(self, depends, function, *args, **kwds):
+ """
+ spawn_many() accepts a single *function* whose parameters are the same
+ as for :meth:`spawn`.
+
+ The difference is that spawn_many() accepts a dependency dict
+ *depends*. A new greenthread is spawned for each key in the dict. That
+ dict key's value should be an iterable of other keys on which this
+ greenthread depends.
+
+ If the *depends* dict contains any key already passed to :meth:`spawn`
+ or :meth:`post`, spawn_many() raises :class:`Collision`. It is
+ indeterminate how many of the other keys in *depends* will have
+ successfully spawned greenthreads.
+ """
+ # Iterate over 'depends' items, relying on self.spawn() not to
+ # context-switch so no one can modify 'depends' along the way.
+ for key, deps in depends.items():
+ self.spawn(key, deps, function, *args, **kwds)
+
+ def kill(self, key):
+ """
+ Kill the greenthread that was spawned with the specified *key*.
+
+ If no such greenthread was spawned, raise KeyError.
+ """
+ # let KeyError, if any, propagate
+ self.coros[key].greenthread.kill()
+ # once killed, remove it
+ del self.coros[key]
+
+ def post(self, key, value, replace=False):
+ """
+ post(key, value) stores the passed *value* for the passed *key*. It
+ then causes each greenthread blocked on its results iterable, or on
+ :meth:`wait_each(keys)`, to check for new values.
+ A waiting greenthread might not literally resume on every single
+ post() of a relevant key, but the first post() of a relevant key
+ ensures that it will resume eventually, and when it does it will catch
+ up with all relevant post() calls.
+
+ Calling post(key, value) when there is a running greenthread with that
+ same *key* raises :class:`Collision`. If you must post(key, value) instead of
+ letting the greenthread run to completion, you must first call
+ :meth:`kill(key)`.
+
+ The DAGPool implicitly post()s the return value from each of its
+ greenthreads. But a greenthread may explicitly post() a value for its
+ own key, which will cause its return value to be discarded.
+
+ Calling post(key, value, replace=False) (the default *replace*) when a
+ value for that key has already been posted, by any means, raises
+ :class:`Collision`.
+
+ Calling post(key, value, replace=True) when a value for that key has
+ already been posted, by any means, replaces the previously-stored
+ value. However, that may make it complicated to reason about the
+ behavior of greenthreads waiting on that key.
+
+ After a post(key, value1) followed by post(key, value2, replace=True),
+ it is unspecified which pending :meth:`wait_each([key...])`
+ calls (or greenthreads iterating over *results* involving that key)
+ will observe *value1* versus *value2*. It is guaranteed that
+ subsequent wait_each([key...]) calls (or greenthreads spawned after
+ that point) will observe *value2*.
+
+ A successful call to
+ post(key, :class:`PropagateError(key, ExceptionSubclass)`)
+ ensures that any subsequent attempt to retrieve that key's value will
+ raise that PropagateError instance.
+ """
+ # First, check if we're trying to post() to a key with a running
+ # greenthread.
+ # A DAGPool greenthread is explicitly permitted to post() to its
+ # OWN key.
+ coro = self.coros.get(key, _MISSING)
+ if coro is not _MISSING and coro.greenthread is not greenthread.getcurrent():
+ # oh oh, trying to post a value for running greenthread from
+ # some other greenthread
+ raise Collision(key)
+
+ # Here, either we're posting a value for a key with no greenthread or
+ # we're posting from that greenthread itself.
+
+ # Has somebody already post()ed a value for this key?
+ # Unless replace == True, this is a problem.
+ if key in self.values and not replace:
+ raise Collision(key)
+
+ # Either we've never before posted a value for this key, or we're
+ # posting with replace == True.
+
+ # update our database
+ self.values[key] = value
+ # and wake up pending waiters
+ self.event.send()
+ # The comment in Event.reset() says: "it's better to create a new
+ # event rather than reset an old one". Okay, fine. We do want to be
+ # able to support new waiters, so create a new Event.
+ self.event = Event()
+
+ def __getitem__(self, key):
+ """
+ __getitem__(key) (aka dagpool[key]) blocks until *key* has a value,
+ then delivers that value.
+ """
+ # This is a degenerate case of wait_each(). Construct a tuple
+ # containing only this 'key'. wait_each() will yield exactly one (key,
+ # value) pair. Return just its value.
+ for _, value in self.wait_each((key,)):
+ return value
+
+ def get(self, key, default=None):
+ """
+ get() returns the value for *key*. If *key* does not yet have a value,
+ get() returns *default*.
+ """
+ return self._value_or_raise(self.values.get(key, default))
+
+ def keys(self):
+ """
+ Return a snapshot tuple of keys for which we currently have values.
+ """
+ # Explicitly return a copy rather than an iterator: don't assume our
+ # caller will finish iterating before new values are posted.
+ return tuple(self.values.keys())
+
+ def items(self):
+ """
+ Return a snapshot tuple of currently-available (key, value) pairs.
+ """
+ # Don't assume our caller will finish iterating before new values are
+ # posted.
+ return tuple((key, self._value_or_raise(value))
+ for key, value in self.values.items())
+
+ def running(self):
+ """
+ Return number of running DAGPool greenthreads. This includes
+ greenthreads blocked while iterating through their *results* iterable,
+ that is, greenthreads waiting on values from other keys.
+ """
+ return len(self.coros)
+
+ def running_keys(self):
+ """
+ Return keys for running DAGPool greenthreads. This includes
+ greenthreads blocked while iterating through their *results* iterable,
+ that is, greenthreads waiting on values from other keys.
+ """
+ # return snapshot; don't assume caller will finish iterating before we
+ # next modify self.coros
+ return tuple(self.coros.keys())
+
+ def waiting(self):
+ """
+ Return number of waiting DAGPool greenthreads, that is, greenthreads
+ still waiting on values from other keys. This explicitly does *not*
+ include external greenthreads waiting on :meth:`wait`,
+ :meth:`waitall`, :meth:`wait_each`.
+ """
+ # n.b. if Event would provide a count of its waiters, we could say
+ # something about external greenthreads as well.
+ # The logic to determine this count is exactly the same as the general
+ # waiting_for() call.
+ return len(self.waiting_for())
+
+ # Use _MISSING instead of None as the default 'key' param so we can permit
+ # None as a supported key.
+ def waiting_for(self, key=_MISSING):
+ """
+ waiting_for(key) returns a set() of the keys for which the DAGPool
+ greenthread spawned with that *key* is still waiting. If you pass a
+ *key* for which no greenthread was spawned, waiting_for() raises
+ KeyError.
+
+ waiting_for() without argument returns a dict. Its keys are the keys
+ of DAGPool greenthreads still waiting on one or more values. In the
+ returned dict, the value of each such key is the set of other keys for
+ which that greenthread is still waiting.
+
+ This method allows diagnosing a "hung" DAGPool. If certain
+ greenthreads are making no progress, it's possible that they are
+ waiting on keys for which there is no greenthread and no :meth:`post` data.
+ """
+ # We may have greenthreads whose 'pending' entry indicates they're
+ # waiting on some keys even though values have now been posted for
+ # some or all of those keys, because those greenthreads have not yet
+ # regained control since values were posted. So make a point of
+ # excluding values that are now available.
+ available = set(self.values.keys())
+
+ if key is not _MISSING:
+ # waiting_for(key) is semantically different than waiting_for().
+ # It's just that they both seem to want the same method name.
+ coro = self.coros.get(key, _MISSING)
+ if coro is _MISSING:
+ # Hmm, no running greenthread with this key. But was there
+ # EVER a greenthread with this key? If not, let KeyError
+ # propagate.
+ self.values[key]
+ # Oh good, there's a value for this key. Either the
+ # greenthread finished, or somebody posted a value. Just say
+ # the greenthread isn't waiting for anything.
+ return set()
+ else:
+ # coro is the _Coro for the running greenthread with the
+ # specified key.
+ return coro.pending - available
+
+ # This is a waiting_for() call, i.e. a general query rather than for a
+ # specific key.
+
+ # Start by iterating over (key, coro) pairs in self.coros. Generate
+ # (key, pending) pairs in which 'pending' is the set of keys on which
+ # the greenthread believes it's waiting, minus the set of keys that
+ # are now available. Filter out any pair in which 'pending' is empty,
+ # that is, that greenthread will be unblocked next time it resumes.
+ # Make a dict from those pairs.
+ return {key: pending
+ for key, pending in ((key, (coro.pending - available))
+ for key, coro in self.coros.items())
+ if pending}
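+
+# Illustrative sketch (not part of the original module): a three-node DAG
+# in which 'c' consumes the results of 'a' and 'b'.
+#
+#     from eventlet.dagpool import DAGPool
+#
+#     def leaf(key, results):
+#         return key.upper()      # no dependencies; 'results' yields nothing
+#
+#     def join(key, results):
+#         return '+'.join(value for _, value in results)
+#
+#     pool = DAGPool()
+#     pool.spawn('a', (), leaf)
+#     pool.spawn('b', (), leaf)
+#     pool.spawn('c', ('a', 'b'), join)
+#     print(pool['c'])            # 'A+B' or 'B+A', depending on arrival order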
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/db_pool.py b/tapdown/lib/python3.11/site-packages/eventlet/db_pool.py
new file mode 100644
index 0000000..7deb993
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/db_pool.py
@@ -0,0 +1,460 @@
+from collections import deque
+from contextlib import contextmanager
+import sys
+import time
+
+from eventlet.pools import Pool
+from eventlet import timeout
+from eventlet import hubs
+from eventlet.hubs.timer import Timer
+from eventlet.greenthread import GreenThread
+
+
+_MISSING = object()
+
+
+class ConnectTimeout(Exception):
+ pass
+
+
+def cleanup_rollback(conn):
+ conn.rollback()
+
+
+class BaseConnectionPool(Pool):
+ def __init__(self, db_module,
+ min_size=0, max_size=4,
+ max_idle=10, max_age=30,
+ connect_timeout=5,
+ cleanup=cleanup_rollback,
+ *args, **kwargs):
+ """
+ Constructs a pool with at least *min_size* connections and at most
+ *max_size* connections. Uses *db_module* to construct new connections.
+
+ The *max_idle* parameter determines how long pooled connections can
+ remain idle, in seconds. After *max_idle* seconds have elapsed
+ without the connection being used, the pool closes the connection.
+
+ *max_age* is how long any particular connection is allowed to live.
+ Connections that have been open for longer than *max_age* seconds are
+ closed, regardless of idle time. If *max_age* is 0, all connections are
+ closed on return to the pool, reducing it to a concurrency limiter.
+
+ *connect_timeout* is the duration in seconds that the pool will wait
+ before timing out on connect() to the database. If triggered, the
+ timeout will raise a ConnectTimeout from get().
+
+ The remainder of the arguments are used as parameters to the
+ *db_module*'s connection constructor.
+ """
+ assert(db_module)
+ self._db_module = db_module
+ self._args = args
+ self._kwargs = kwargs
+ self.max_idle = max_idle
+ self.max_age = max_age
+ self.connect_timeout = connect_timeout
+ self._expiration_timer = None
+ self.cleanup = cleanup
+ super().__init__(min_size=min_size, max_size=max_size, order_as_stack=True)
+
+ def _schedule_expiration(self):
+ """Sets up a timer that will call _expire_old_connections when the
+ oldest connection currently in the free pool is ready to expire. This
+ is the earliest possible time that a connection could expire, thus, the
+ timer will be running as infrequently as possible without missing a
+ possible expiration.
+
+ If this function is called when a timer is already scheduled, it does
+ nothing.
+
+ If max_age or max_idle is 0, _schedule_expiration likewise does nothing.
+ """
+ if self.max_age == 0 or self.max_idle == 0:
+ # expiration is unnecessary because all connections will be expired
+ # on put
+ return
+
+ if (self._expiration_timer is not None
+ and not getattr(self._expiration_timer, 'called', False)):
+ # the next timer is already scheduled
+ return
+
+ try:
+ now = time.time()
+ self._expire_old_connections(now)
+ # the last item in the list, because of the stack ordering,
+ # is going to be the most-idle
+ idle_delay = (self.free_items[-1][0] - now) + self.max_idle
+ oldest = min([t[1] for t in self.free_items])
+ age_delay = (oldest - now) + self.max_age
+
+ next_delay = min(idle_delay, age_delay)
+ except (IndexError, ValueError):
+ # no free items, unschedule ourselves
+ self._expiration_timer = None
+ return
+
+ if next_delay > 0:
+ # set up a continuous self-calling loop
+ self._expiration_timer = Timer(next_delay, GreenThread(hubs.get_hub().greenlet).switch,
+ self._schedule_expiration, [], {})
+ self._expiration_timer.schedule()
+
+ def _expire_old_connections(self, now):
+ """Iterates through the open connections contained in the pool, closing
+ ones that have remained idle for longer than max_idle seconds, or have
+ been in existence for longer than max_age seconds.
+
+ *now* is the current time, as returned by time.time().
+ """
+ original_count = len(self.free_items)
+ expired = [
+ conn
+ for last_used, created_at, conn in self.free_items
+ if self._is_expired(now, last_used, created_at)]
+
+ new_free = [
+ (last_used, created_at, conn)
+ for last_used, created_at, conn in self.free_items
+ if not self._is_expired(now, last_used, created_at)]
+ self.free_items.clear()
+ self.free_items.extend(new_free)
+
+ # adjust the current size counter to account for expired
+ # connections
+ self.current_size -= original_count - len(self.free_items)
+
+ for conn in expired:
+ self._safe_close(conn, quiet=True)
+
+ def _is_expired(self, now, last_used, created_at):
+ """Returns true and closes the connection if it's expired.
+ """
+ if (self.max_idle <= 0 or self.max_age <= 0
+ or now - last_used > self.max_idle
+ or now - created_at > self.max_age):
+ return True
+ return False
+
+ def _unwrap_connection(self, conn):
+ """If the connection was wrapped by a subclass of
+ BaseConnectionWrapper and is still functional (as determined
+ by the __nonzero__, or __bool__ in python3, method), returns
+ the unwrapped connection. If anything goes wrong with this
+ process, returns None.
+ """
+ base = None
+ try:
+ if conn:
+ base = conn._base
+ conn._destroy()
+ else:
+ base = None
+ except AttributeError:
+ pass
+ return base
+
+ def _safe_close(self, conn, quiet=False):
+ """Closes the (already unwrapped) connection, squelching any
+ exceptions.
+ """
+ try:
+ conn.close()
+ except AttributeError:
+ pass # conn is None, or junk
+ except Exception:
+ if not quiet:
+ print("Connection.close raised: %s" % (sys.exc_info()[1]))
+
+ def get(self):
+ conn = super().get()
+
+ # None is a flag value that means that put got called with
+ # something it couldn't use
+ if conn is None:
+ try:
+ conn = self.create()
+ except Exception:
+ # unconditionally free up the slot by adjusting the size
+ # counter directly: even if there are waiters, doing a full
+ # put would incur a greenlet switch and thus lose the
+ # exception stack
+ self.current_size -= 1
+ raise
+
+ # if the call to get() draws from the free pool, it will come
+ # back as a tuple
+ if isinstance(conn, tuple):
+ _last_used, created_at, conn = conn
+ else:
+ created_at = time.time()
+
+ # wrap the connection so the consumer can call close() safely
+ wrapped = PooledConnectionWrapper(conn, self)
+ # annotating the wrapper so that when it gets put in the pool
+ # again, we'll know how old it is
+ wrapped._db_pool_created_at = created_at
+ return wrapped
+
+ def put(self, conn, cleanup=_MISSING):
+ created_at = getattr(conn, '_db_pool_created_at', 0)
+ now = time.time()
+ conn = self._unwrap_connection(conn)
+
+ if self._is_expired(now, now, created_at):
+ self._safe_close(conn, quiet=False)
+ conn = None
+ elif cleanup is not None:
+ if cleanup is _MISSING:
+ cleanup = self.cleanup
+ # by default, call rollback in case the connection is in the middle
+ # of a transaction. However, rollback has performance implications
+ # so optionally do nothing or call something else like ping
+ try:
+ if conn:
+ cleanup(conn)
+ except Exception as e:
+ # we don't care what the exception was, we just know the
+ # connection is dead
+ print("WARNING: cleanup %s raised: %s" % (cleanup, e))
+ conn = None
+ except:
+ conn = None
+ raise
+
+ if conn is not None:
+ super().put((now, created_at, conn))
+ else:
+ # wake up any waiters with a flag value that indicates
+ # they need to manufacture a connection
+ if self.waiting() > 0:
+ super().put(None)
+ else:
+ # no waiters -- just change the size
+ self.current_size -= 1
+ self._schedule_expiration()
+
+ @contextmanager
+ def item(self, cleanup=_MISSING):
+ conn = self.get()
+ try:
+ yield conn
+ finally:
+ self.put(conn, cleanup=cleanup)
+
+ def clear(self):
+ """Close all connections that this pool still holds a reference to,
+ and removes all references to them.
+ """
+ if self._expiration_timer:
+ self._expiration_timer.cancel()
+ free_items, self.free_items = self.free_items, deque()
+ for item in free_items:
+ # Free items created using min_size>0 are not tuples.
+ conn = item[2] if isinstance(item, tuple) else item
+ self._safe_close(conn, quiet=True)
+ self.current_size -= 1
+
+ def __del__(self):
+ self.clear()
+
+
+class TpooledConnectionPool(BaseConnectionPool):
+ """A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
+ connections.
+ """
+
+ def create(self):
+ now = time.time()
+ return now, now, self.connect(
+ self._db_module, self.connect_timeout, *self._args, **self._kwargs)
+
+ @classmethod
+ def connect(cls, db_module, connect_timeout, *args, **kw):
+ t = timeout.Timeout(connect_timeout, ConnectTimeout())
+ try:
+ from eventlet import tpool
+ conn = tpool.execute(db_module.connect, *args, **kw)
+ return tpool.Proxy(conn, autowrap_names=('cursor',))
+ finally:
+ t.cancel()
+
+
+class RawConnectionPool(BaseConnectionPool):
+ """A pool which gives out plain database connections.
+ """
+
+ def create(self):
+ now = time.time()
+ return now, now, self.connect(
+ self._db_module, self.connect_timeout, *self._args, **self._kwargs)
+
+ @classmethod
+ def connect(cls, db_module, connect_timeout, *args, **kw):
+ t = timeout.Timeout(connect_timeout, ConnectTimeout())
+ try:
+ return db_module.connect(*args, **kw)
+ finally:
+ t.cancel()
+
+
+# default connection pool is the tpool one
+ConnectionPool = TpooledConnectionPool
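+
+# Illustrative sketch (not part of the original module): typical pool usage,
+# assuming a DB-API module such as pymysql is installed; the connection
+# arguments below are hypothetical and passed through to pymysql.connect().
+#
+#     import pymysql
+#     from eventlet.db_pool import ConnectionPool
+#
+#     pool = ConnectionPool(pymysql, max_size=4,
+#                           host='localhost', user='app',
+#                           password='s3cret', db='test')
+#     with pool.item() as conn:
+#         cur = conn.cursor()
+#         cur.execute("SELECT 1")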
+
+
+class GenericConnectionWrapper:
+ def __init__(self, baseconn):
+ self._base = baseconn
+
+ # Proxy all method calls to self._base
+ # FIXME: remove repetition; options to consider:
+ # * for name in (...):
+ # setattr(class, name, lambda self, *a, **kw: getattr(self._base, name)(*a, **kw))
+ # * def __getattr__(self, name): if name in (...): return getattr(self._base, name)
+ # * other?
+ def __enter__(self):
+ return self._base.__enter__()
+
+ def __exit__(self, exc, value, tb):
+ return self._base.__exit__(exc, value, tb)
+
+ def __repr__(self):
+ return self._base.__repr__()
+
+ _proxy_funcs = (
+ 'affected_rows',
+ 'autocommit',
+ 'begin',
+ 'change_user',
+ 'character_set_name',
+ 'close',
+ 'commit',
+ 'cursor',
+ 'dump_debug_info',
+ 'errno',
+ 'error',
+ 'errorhandler',
+ 'get_server_info',
+ 'insert_id',
+ 'literal',
+ 'ping',
+ 'query',
+ 'rollback',
+ 'select_db',
+ 'server_capabilities',
+ 'set_character_set',
+ 'set_isolation_level',
+ 'set_server_option',
+ 'set_sql_mode',
+ 'show_warnings',
+ 'shutdown',
+ 'sqlstate',
+ 'stat',
+ 'store_result',
+ 'string_literal',
+ 'thread_id',
+ 'use_result',
+ 'warning_count',
+ )
+
+
+for _proxy_fun in GenericConnectionWrapper._proxy_funcs:
+ # excess wrapper for early binding (closure by value)
+ def _wrapper(_proxy_fun=_proxy_fun):
+ def _proxy_method(self, *args, **kwargs):
+ return getattr(self._base, _proxy_fun)(*args, **kwargs)
+ _proxy_method.func_name = _proxy_fun
+ _proxy_method.__name__ = _proxy_fun
+ _proxy_method.__qualname__ = 'GenericConnectionWrapper.' + _proxy_fun
+ return _proxy_method
+ setattr(GenericConnectionWrapper, _proxy_fun, _wrapper(_proxy_fun))
+del GenericConnectionWrapper._proxy_funcs
+del _proxy_fun
+del _wrapper
+
+
+class PooledConnectionWrapper(GenericConnectionWrapper):
+ """A connection wrapper where:
+ - the close method returns the connection to the pool instead of closing it directly
+ - ``bool(conn)`` returns a reasonable value
+ - returns itself to the pool if it gets garbage collected
+ """
+
+ def __init__(self, baseconn, pool):
+ super().__init__(baseconn)
+ self._pool = pool
+
+ def __nonzero__(self):
+ return (hasattr(self, '_base') and bool(self._base))
+
+ __bool__ = __nonzero__
+
+ def _destroy(self):
+ self._pool = None
+ try:
+ del self._base
+ except AttributeError:
+ pass
+
+ def close(self):
+ """Return the connection to the pool, and remove the
+ reference to it so that you can't use it again through this
+ wrapper object.
+ """
+ if self and self._pool:
+ self._pool.put(self)
+ self._destroy()
+
+ def __del__(self):
+ return # this causes some issues if __del__ is called in the
+ # main coroutine, so for now this is disabled
+ # self.close()
+
+
+class DatabaseConnector:
+ """
+ This is an object which will maintain a collection of database
+ connection pools on a per-host basis.
+ """
+
+ def __init__(self, module, credentials,
+ conn_pool=None, *args, **kwargs):
+ """constructor
+ *module*
+ Database module to use.
+ *credentials*
+ Mapping of hostname to connect arguments (e.g. username and password)
+ """
+ assert(module)
+ self._conn_pool_class = conn_pool
+ if self._conn_pool_class is None:
+ self._conn_pool_class = ConnectionPool
+ self._module = module
+ self._args = args
+ self._kwargs = kwargs
+ # this is a map of hostname to username/password
+ self._credentials = credentials
+ self._databases = {}
+
+ def credentials_for(self, host):
+ if host in self._credentials:
+ return self._credentials[host]
+ else:
+ return self._credentials.get('default', None)
+
+ def get(self, host, dbname):
+ """Returns a ConnectionPool to the target host and schema.
+ """
+ key = (host, dbname)
+ if key not in self._databases:
+ new_kwargs = self._kwargs.copy()
+ new_kwargs['db'] = dbname
+ new_kwargs['host'] = host
+ new_kwargs.update(self.credentials_for(host))
+ dbpool = self._conn_pool_class(
+ self._module, *self._args, **new_kwargs)
+ self._databases[key] = dbpool
+
+ return self._databases[key]
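+
+# Illustrative sketch (not part of the original module): one connector
+# maintaining pools for several hosts; hostnames and credentials here are
+# hypothetical.
+#
+#     import pymysql
+#
+#     credentials = {
+#         'db1.example.com': {'user': 'app', 'password': 's3cret'},
+#         'default': {'user': 'guest', 'password': ''},
+#     }
+#     connector = DatabaseConnector(pymysql, credentials)
+#     pool = connector.get('db1.example.com', 'inventory')
+#     with pool.item() as conn:
+#         conn.cursor().execute("SELECT 1")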
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/debug.py b/tapdown/lib/python3.11/site-packages/eventlet/debug.py
new file mode 100644
index 0000000..f78e2f8
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/debug.py
@@ -0,0 +1,222 @@
+"""The debug module contains utilities and functions for better
+debugging Eventlet-powered applications."""
+
+import os
+import sys
+import linecache
+import re
+import inspect
+
+__all__ = ['spew', 'unspew', 'format_hub_listeners', 'format_hub_timers',
+ 'hub_listener_stacks', 'hub_exceptions', 'tpool_exceptions',
+ 'hub_prevent_multiple_readers', 'hub_timer_stacks',
+ 'hub_blocking_detection', 'format_asyncio_info',
+ 'format_threads_info']
+
+_token_splitter = re.compile(r'\W+')
+
+
+class Spew:
+
+ def __init__(self, trace_names=None, show_values=True):
+ self.trace_names = trace_names
+ self.show_values = show_values
+
+ def __call__(self, frame, event, arg):
+ if event == 'line':
+ lineno = frame.f_lineno
+ if '__file__' in frame.f_globals:
+ filename = frame.f_globals['__file__']
+ if (filename.endswith('.pyc') or
+ filename.endswith('.pyo')):
+ filename = filename[:-1]
+ name = frame.f_globals['__name__']
+ line = linecache.getline(filename, lineno)
+ else:
+ name = '[unknown]'
+ try:
+ src, offset = inspect.getsourcelines(frame)
+ # The first line is line 1
+ # But 0 may be returned when executing module-level code
+ if offset == 0:
+ offset = 1
+ line = src[lineno - offset]
+ except OSError:
+ line = 'Unknown code named [%s]. VM instruction #%d' % (
+ frame.f_code.co_name, frame.f_lasti)
+ if self.trace_names is None or name in self.trace_names:
+ print('%s:%s: %s' % (name, lineno, line.rstrip()))
+ if not self.show_values:
+ return self
+ details = []
+ tokens = _token_splitter.split(line)
+ for tok in tokens:
+ if tok in frame.f_globals:
+ details.append('%s=%r' % (tok, frame.f_globals[tok]))
+ if tok in frame.f_locals:
+ details.append('%s=%r' % (tok, frame.f_locals[tok]))
+ if details:
+ print("\t%s" % ' '.join(details))
+ return self
+
+
+def spew(trace_names=None, show_values=False):
+ """Install a trace hook which writes incredibly detailed logs
+ about what code is being executed to stdout.
+ """
+ sys.settrace(Spew(trace_names, show_values))
+
+
+def unspew():
+ """Remove the trace hook installed by spew.
+ """
+ sys.settrace(None)
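+
+# Illustrative sketch (not part of the original module): tracing only a
+# single module with spew(); 'myapp.worker' and run_workload() are
+# hypothetical.
+#
+#     from eventlet import debug
+#
+#     debug.spew(trace_names=['myapp.worker'])
+#     run_workload()   # the code under study
+#     debug.unspew()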
+
+
+def format_hub_listeners():
+ """ Returns a formatted string of the current listeners on the current
+ hub. This can be useful in determining what's going on in the event system,
+ especially when used in conjunction with :func:`hub_listener_stacks`.
+ """
+ from eventlet import hubs
+ hub = hubs.get_hub()
+ result = ['READERS:']
+ for l in hub.get_readers():
+ result.append(repr(l))
+ result.append('WRITERS:')
+ for l in hub.get_writers():
+ result.append(repr(l))
+ return os.linesep.join(result)
+
+
+def format_asyncio_info():
+ """ Returns a formatted string of the asyncio info.
+ This can be useful in determining what's going on in the asyncio event
+ loop system, especially when used in conjunction with the asyncio hub.
+ """
+ import asyncio
+ tasks = asyncio.all_tasks()
+ result = ['TASKS:']
+ result.append(repr(tasks))
+ result.append(f'EVENTLOOP: {asyncio.events.get_event_loop()}')
+ return os.linesep.join(result)
+
+
+def format_threads_info():
+ """ Returns a formatted string of the threads info.
+ This can be useful in determining what's going on with created threads,
+ especially when used in conjunction with greenlet
+ """
+ import threading
+ threads = threading._active
+ result = ['THREADS:']
+ result.append(repr(threads))
+ return os.linesep.join(result)
+
+
+def format_hub_timers():
+ """ Returns a formatted string of the current timers on the current
+ hub. This can be useful in determining what's going on in the event system,
+ especially when used in conjunction with :func:`hub_timer_stacks`.
+ """
+ from eventlet import hubs
+ hub = hubs.get_hub()
+ result = ['TIMERS:']
+ for l in hub.timers:
+ result.append(repr(l))
+ return os.linesep.join(result)
+
+
+def hub_listener_stacks(state=False):
+ """Toggles whether or not the hub records the stack when clients register
+ listeners on file descriptors. This can be useful when trying to figure
+ out what the hub is up to at any given moment. To inspect the stacks
+ of the current listeners, call :func:`format_hub_listeners` at critical
+ junctures in the application logic.
+ """
+ from eventlet import hubs
+ hubs.get_hub().set_debug_listeners(state)
+
+
+def hub_timer_stacks(state=False):
+ """Toggles whether or not the hub records the stack when timers are set.
+ To inspect the stacks of the current timers, call :func:`format_hub_timers`
+ at critical junctures in the application logic.
+ """
+ from eventlet.hubs import timer
+ timer._g_debug = state
+
+
+def hub_prevent_multiple_readers(state=True):
+ """Toggle prevention of multiple greenlets reading from a socket
+
+ When multiple greenlets read from the same socket it is often hard
+ to predict which greenlet will receive what data. To achieve
+ resource sharing consider using ``eventlet.pools.Pool`` instead.
+
+ Note that this is a debugging convenience, not a feature intended
+ for use in production code.
+
+ **If you really know what you are doing** you can set the state to
+ ``False`` to stop the hub from protecting against this mistake.
+ Otherwise we strongly discourage disabling the protection; at the
+ very least, do so with great care.
+
+ Be aware that disabling this prevention applies to your entire
+ process, not only to the context where you find it useful, so it may
+ have significant unexpected side effects and can cause race
+ conditions between your sockets and your I/O in general.
+
+ Note also that this debug convenience is not supported by the
+ asyncio hub, which is the official path for migrating off of
+ eventlet; relying on it will block that migration.
+ """
+ from eventlet.hubs import hub, get_hub
+ from eventlet.hubs import asyncio
+ if not state and isinstance(get_hub(), asyncio.Hub):
+ raise RuntimeError("Multiple readers are not yet supported by asyncio hub")
+ hub.g_prevent_multiple_readers = state
+
+
+def hub_exceptions(state=True):
+ """Toggles whether the hub prints exceptions that are raised from its
+ timers. This can be useful to see how greenthreads are terminating.
+ """
+ from eventlet import hubs
+ hubs.get_hub().set_timer_exceptions(state)
+ from eventlet import greenpool
+ greenpool.DEBUG = state
+
+
+def tpool_exceptions(state=False):
+ """Toggles whether tpool itself prints exceptions that are raised from
+ functions that are executed in it, in addition to raising them like
+ it normally does."""
+ from eventlet import tpool
+ tpool.QUIET = not state
+
+
+def hub_blocking_detection(state=False, resolution=1):
+ """Toggles whether Eventlet makes an effort to detect blocking
+ behavior in an application.
+
+ It does this by telling the kernel to raise a SIGALARM after a
+ short timeout, and clearing the timeout every time the hub
+ greenlet is resumed. Therefore, any code that runs for a long
+ time without yielding to the hub will get interrupted by the
+ blocking detector (don't use it in production!).
+
+ The *resolution* argument governs how long the SIGALARM timeout
+ waits in seconds. The implementation uses :func:`signal.setitimer`
+ and can be specified as a floating-point value.
+ The shorter the resolution, the greater the chance of false
+ positives.
+ """
+ from eventlet import hubs
+ assert resolution > 0
+ hubs.get_hub().debug_blocking = state
+ hubs.get_hub().debug_blocking_resolution = resolution
+ if not state:
+ hubs.get_hub().block_detect_post()
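+
+# Illustrative sketch (not part of the original module): enabling blocking
+# detection at a finer resolution for the duration of a debugging session.
+#
+#     from eventlet import debug
+#
+#     debug.hub_blocking_detection(True, resolution=0.1)
+#     exercise_suspect_code()   # hypothetical code path under suspicion
+#     debug.hub_blocking_detection(False)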
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/event.py b/tapdown/lib/python3.11/site-packages/eventlet/event.py
new file mode 100644
index 0000000..122bd5d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/event.py
@@ -0,0 +1,218 @@
+from eventlet import hubs
+from eventlet.support import greenlets as greenlet
+
+__all__ = ['Event']
+
+
+class NOT_USED:
+ def __repr__(self):
+ return 'NOT_USED'
+
+
+NOT_USED = NOT_USED()
+
+
+class Event:
+ """An abstraction where an arbitrary number of coroutines
+ can wait for one event from another.
+
+ Events are similar to a Queue that can only hold one item, but differ
+ in two important ways:
+
+ 1. calling :meth:`send` never unschedules the current greenthread
+ 2. :meth:`send` can only be called once; create a new event to send again.
+
+ They are good for communicating results between coroutines, and
+ are the basis for how
+ :meth:`GreenThread.wait() `
+ is implemented.
+
+ >>> from eventlet import event
+ >>> import eventlet
+ >>> evt = event.Event()
+ >>> def baz(b):
+ ... evt.send(b + 1)
+ ...
+ >>> _ = eventlet.spawn_n(baz, 3)
+ >>> evt.wait()
+ 4
+ """
+ _result = None
+ _exc = None
+
+ def __init__(self):
+ self._waiters = set()
+ self.reset()
+
+ def __str__(self):
+ params = (self.__class__.__name__, hex(id(self)),
+ self._result, self._exc, len(self._waiters))
+ return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params
+
+ def reset(self):
+ # this is kind of a misfeature and doesn't work perfectly well,
+ # it's better to create a new event rather than reset an old one
+ # removing documentation so that we don't get new use cases for it
+ assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
+ self._result = NOT_USED
+ self._exc = None
+
+ def ready(self):
+ """ Return true if the :meth:`wait` call will return immediately.
+ Used to avoid waiting for things that might take a while to time out.
+ For example, you can put a bunch of events into a list, and then visit
+ them all repeatedly, calling :meth:`ready` until one returns ``True``,
+ and then you can :meth:`wait` on that one."""
+ return self._result is not NOT_USED
+
+ def has_exception(self):
+ return self._exc is not None
+
+ def has_result(self):
+ return self._result is not NOT_USED and self._exc is None
+
+ def poll(self, notready=None):
+ if self.ready():
+ return self.wait()
+ return notready
+
+ # QQQ make it return tuple (type, value, tb) instead of raising
+ # because
+ # 1) "poll" does not imply raising
+ # 2) it's better not to screw up caller's sys.exc_info() by default
+ # (e.g. if the caller wants to call the function in except or finally)
+ def poll_exception(self, notready=None):
+ if self.has_exception():
+ return self.wait()
+ return notready
+
+ def poll_result(self, notready=None):
+ if self.has_result():
+ return self.wait()
+ return notready
+
+ def wait(self, timeout=None):
+ """Wait until another coroutine calls :meth:`send`.
+ Returns the value the other coroutine passed to :meth:`send`.
+
+ >>> import eventlet
+ >>> evt = eventlet.Event()
+ >>> def wait_on():
+ ... retval = evt.wait()
+ ... print("waited for {0}".format(retval))
+ >>> _ = eventlet.spawn(wait_on)
+ >>> evt.send('result')
+ >>> eventlet.sleep(0)
+ waited for result
+
+ Returns immediately if the event has already occurred.
+
+ >>> evt.wait()
+ 'result'
+
+ When the timeout argument is present and not None, it should be a floating point number
+ specifying a timeout for the operation in seconds (or fractions thereof).
+ """
+ current = greenlet.getcurrent()
+ if self._result is NOT_USED:
+ hub = hubs.get_hub()
+ self._waiters.add(current)
+ timer = None
+ if timeout is not None:
+ timer = hub.schedule_call_local(timeout, self._do_send, None, None, current)
+ try:
+ result = hub.switch()
+ if timer is not None:
+ timer.cancel()
+ return result
+ finally:
+ self._waiters.discard(current)
+ if self._exc is not None:
+ current.throw(*self._exc)
+ return self._result
+
+ def send(self, result=None, exc=None):
+ """Makes arrangements for the waiters to be woken with the
+ result and then returns immediately to the parent.
+
+ >>> from eventlet import event
+ >>> import eventlet
+ >>> evt = event.Event()
+ >>> def waiter():
+ ... print('about to wait')
+ ... result = evt.wait()
+ ... print('waited for {0}'.format(result))
+ >>> _ = eventlet.spawn(waiter)
+ >>> eventlet.sleep(0)
+ about to wait
+ >>> evt.send('a')
+ >>> eventlet.sleep(0)
+ waited for a
+
+ It is an error to call :meth:`send` multiple times on the same event.
+
+ >>> evt.send('whoops') # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ AssertionError: Trying to re-send() an already-triggered event.
+
+ Use :meth:`reset` between :meth:`send` s to reuse an event object.
+ """
+ assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
+ self._result = result
+ if exc is not None and not isinstance(exc, tuple):
+ exc = (exc, )
+ self._exc = exc
+ hub = hubs.get_hub()
+ for waiter in self._waiters:
+ hub.schedule_call_global(
+ 0, self._do_send, self._result, self._exc, waiter)
+
+ def _do_send(self, result, exc, waiter):
+ if waiter in self._waiters:
+ if exc is None:
+ waiter.switch(result)
+ else:
+ waiter.throw(*exc)
+
+ def send_exception(self, *args):
+ """Same as :meth:`send`, but sends an exception to waiters.
+
+ The arguments to send_exception are the same as the arguments
+ to ``raise``. If a single exception object is passed in, it
+ will be re-raised when :meth:`wait` is called, generating a
+ new stacktrace.
+
+ >>> from eventlet import event
+ >>> evt = event.Event()
+ >>> evt.send_exception(RuntimeError())
+ >>> evt.wait()
+ Traceback (most recent call last):
+ File "", line 1, in
+ File "eventlet/event.py", line 120, in wait
+ current.throw(*self._exc)
+ RuntimeError
+
+ If it's important to preserve the entire original stack trace,
+ you must pass in the entire :func:`sys.exc_info` tuple.
+
+ >>> import sys
+ >>> evt = event.Event()
+ >>> try:
+ ... raise RuntimeError()
+ ... except RuntimeError:
+ ... evt.send_exception(*sys.exc_info())
+ ...
+ >>> evt.wait()
+ Traceback (most recent call last):
+ File "", line 1, in
+ File "eventlet/event.py", line 120, in wait
+ current.throw(*self._exc)
+ File "", line 2, in
+ RuntimeError
+
+ Note that doing so stores a traceback object directly on the
+ Event object, which may cause reference cycles. See the
+ :func:`sys.exc_info` documentation.
+ """
+ # the arguments are the same as for greenlet.throw
+ return self.send(None, args)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/BaseHTTPServer.py b/tapdown/lib/python3.11/site-packages/eventlet/green/BaseHTTPServer.py
new file mode 100644
index 0000000..9a73730
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/BaseHTTPServer.py
@@ -0,0 +1,15 @@
+from eventlet import patcher
+from eventlet.green import socket
+from eventlet.green import SocketServer
+
+patcher.inject(
+ 'http.server',
+ globals(),
+ ('socket', socket),
+ ('SocketServer', SocketServer),
+ ('socketserver', SocketServer))
+
+del patcher
+
+if __name__ == '__main__':
+ test()
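+
+# Illustrative sketch (not part of the original module): the injected names
+# mirror http.server, so the cooperative server is used the same way.
+#
+#     from eventlet.green.BaseHTTPServer import (
+#         HTTPServer, BaseHTTPRequestHandler)
+#
+#     class Hello(BaseHTTPRequestHandler):
+#         def do_GET(self):
+#             self.send_response(200)
+#             self.end_headers()
+#             self.wfile.write(b"hello\n")
+#
+#     HTTPServer(('127.0.0.1', 8080), Hello).serve_forever()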
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/CGIHTTPServer.py b/tapdown/lib/python3.11/site-packages/eventlet/green/CGIHTTPServer.py
new file mode 100644
index 0000000..285b50c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/CGIHTTPServer.py
@@ -0,0 +1,17 @@
+from eventlet import patcher
+from eventlet.green import BaseHTTPServer
+from eventlet.green import SimpleHTTPServer
+from eventlet.green import urllib
+from eventlet.green import select
+
+test = None # bind prior to patcher.inject to silence pyflakes warning below
+patcher.inject(
+ 'http.server',
+ globals(),
+ ('urllib', urllib),
+ ('select', select))
+
+del patcher
+
+if __name__ == '__main__':
+ test() # pyflakes false alarm here unless test = None above
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/MySQLdb.py b/tapdown/lib/python3.11/site-packages/eventlet/green/MySQLdb.py
new file mode 100644
index 0000000..16a7ec5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/MySQLdb.py
@@ -0,0 +1,40 @@
+__MySQLdb = __import__('MySQLdb')
+
+__all__ = __MySQLdb.__all__
+__patched__ = ["connect", "Connect", 'Connection', 'connections']
+
+from eventlet.patcher import slurp_properties
+slurp_properties(
+ __MySQLdb, globals(),
+ ignore=__patched__, srckeys=dir(__MySQLdb))
+
+from eventlet import tpool
+
+__orig_connections = __import__('MySQLdb.connections').connections
+
+
+def Connection(*args, **kw):
+ conn = tpool.execute(__orig_connections.Connection, *args, **kw)
+ return tpool.Proxy(conn, autowrap_names=('cursor',))
+
+
+connect = Connect = Connection
+
+
+# replicate the MySQLdb.connections module but with a tpooled Connection factory
+class MySQLdbConnectionsModule:
+ pass
+
+
+connections = MySQLdbConnectionsModule()
+for var in dir(__orig_connections):
+ if not var.startswith('__'):
+ setattr(connections, var, getattr(__orig_connections, var))
+connections.Connection = Connection
+
+cursors = __import__('MySQLdb.cursors').cursors
+converters = __import__('MySQLdb.converters').converters
+
+# TODO support instantiating cursors.FooCursor objects directly
+# TODO though this is a low priority, it would be nice if we supported
+# subclassing eventlet.green.MySQLdb.connections.Connection
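+
+# Illustrative sketch (not part of the original module): the green module is
+# used like MySQLdb itself, but connect() returns a tpool-proxied connection;
+# credentials below are hypothetical.
+#
+#     from eventlet.green import MySQLdb
+#
+#     conn = MySQLdb.connect(host='localhost', user='app',
+#                            passwd='s3cret', db='test')
+#     cur = conn.cursor()       # cursors are autowrapped in tpool proxies too
+#     cur.execute("SELECT 1")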
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/SSL.py b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/SSL.py
new file mode 100644
index 0000000..bb06c8b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/SSL.py
@@ -0,0 +1,125 @@
+from OpenSSL import SSL as orig_SSL
+from OpenSSL.SSL import *
+from eventlet.support import get_errno
+from eventlet import greenio
+from eventlet.hubs import trampoline
+import socket
+
+
+class GreenConnection(greenio.GreenSocket):
+ """ Nonblocking wrapper for SSL.Connection objects.
+ """
+
+ def __init__(self, ctx, sock=None):
+ if sock is not None:
+ fd = orig_SSL.Connection(ctx, sock)
+ else:
+ # if we're given a Connection object directly, use it;
+ # this is used in the inherited accept() method
+ fd = ctx
+ super(ConnectionType, self).__init__(fd)
+
+ def do_handshake(self):
+ """ Perform an SSL handshake (usually called after renegotiate or one of
+ set_accept_state or set_connect_state). This can raise the same exceptions as
+ send and recv. """
+ if self.act_non_blocking:
+ return self.fd.do_handshake()
+ while True:
+ try:
+ return self.fd.do_handshake()
+ except WantReadError:
+ trampoline(self.fd.fileno(),
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+ except WantWriteError:
+ trampoline(self.fd.fileno(),
+ write=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+
+ def dup(self):
+ raise NotImplementedError("Dup not supported on SSL sockets")
+
+ def makefile(self, mode='r', bufsize=-1):
+ raise NotImplementedError("Makefile not supported on SSL sockets")
+
+ def read(self, size):
+ """Works like a blocking call to SSL_read(), whose behavior is
+ described here: http://www.openssl.org/docs/ssl/SSL_read.html"""
+ if self.act_non_blocking:
+ return self.fd.read(size)
+ while True:
+ try:
+ return self.fd.read(size)
+ except WantReadError:
+ trampoline(self.fd.fileno(),
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+ except WantWriteError:
+ trampoline(self.fd.fileno(),
+ write=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+ except SysCallError as e:
+ if get_errno(e) == -1 or get_errno(e) > 0:
+ return ''
+
+ recv = read
+
+ def write(self, data):
+ """Works like a blocking call to SSL_write(), whose behavior is
+ described here: http://www.openssl.org/docs/ssl/SSL_write.html"""
+ if not data:
+ return 0 # calling SSL_write() with 0 bytes to be sent is undefined
+ if self.act_non_blocking:
+ return self.fd.write(data)
+ while True:
+ try:
+ return self.fd.write(data)
+ except WantReadError:
+ trampoline(self.fd.fileno(),
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+ except WantWriteError:
+ trampoline(self.fd.fileno(),
+ write=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+
+ send = write
+
+ def sendall(self, data):
+ """Send "all" data on the connection. This calls send() repeatedly until
+ all data is sent. If an error occurs, it's impossible to tell how much data
+ has been sent.
+
+ No return value."""
+ tail = self.send(data)
+ while tail < len(data):
+ tail += self.send(data[tail:])
+
+ def shutdown(self):
+ if self.act_non_blocking:
+ return self.fd.shutdown()
+ while True:
+ try:
+ return self.fd.shutdown()
+ except WantReadError:
+ trampoline(self.fd.fileno(),
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+ except WantWriteError:
+ trampoline(self.fd.fileno(),
+ write=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket.timeout)
+
+
+Connection = ConnectionType = GreenConnection
+
+del greenio
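+
+# Hypothetical usage sketch: wrap a green socket so the TLS handshake and
+# I/O trampoline to the hub instead of blocking the whole process
+# (the method constant below is an assumption about the pyOpenSSL version).
+#
+#   from eventlet.green import socket
+#   ctx = Context(TLSv1_2_METHOD)
+#   conn = Connection(ctx, socket.socket())
+#   conn.connect(('example.com', 443))
+#   conn.do_handshake()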
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/__init__.py
new file mode 100644
index 0000000..1b25009
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/__init__.py
@@ -0,0 +1,9 @@
+from . import crypto
+from . import SSL
+try:
+ # pyopenssl tsafe module was deprecated and removed in v20.0.0
+ # https://github.com/pyca/pyopenssl/pull/913
+ from . import tsafe
+except ImportError:
+ pass
+from .version import __version__
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/crypto.py b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/crypto.py
new file mode 100644
index 0000000..0a57f6f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/crypto.py
@@ -0,0 +1 @@
+from OpenSSL.crypto import *
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/tsafe.py b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/tsafe.py
new file mode 100644
index 0000000..dd0dd8c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/tsafe.py
@@ -0,0 +1 @@
+from OpenSSL.tsafe import *
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/version.py b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/version.py
new file mode 100644
index 0000000..c886ef0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/OpenSSL/version.py
@@ -0,0 +1 @@
+from OpenSSL.version import __version__, __doc__
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/Queue.py b/tapdown/lib/python3.11/site-packages/eventlet/green/Queue.py
new file mode 100644
index 0000000..947d43a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/Queue.py
@@ -0,0 +1,33 @@
+from eventlet import queue
+
+__all__ = ['Empty', 'Full', 'LifoQueue', 'PriorityQueue', 'Queue']
+
+__patched__ = ['LifoQueue', 'PriorityQueue', 'Queue']
+
+# these classes exist to paper over the main operational difference between
+# eventlet.queue.Queue and the stdlib equivalents: the stdlib treats
+# maxsize=0 as "unbounded", while eventlet.queue expects maxsize=None
+
+
+class Queue(queue.Queue):
+ def __init__(self, maxsize=0):
+ if maxsize == 0:
+ maxsize = None
+ super().__init__(maxsize)
+
+
+class PriorityQueue(queue.PriorityQueue):
+ def __init__(self, maxsize=0):
+ if maxsize == 0:
+ maxsize = None
+ super().__init__(maxsize)
+
+
+class LifoQueue(queue.LifoQueue):
+ def __init__(self, maxsize=0):
+ if maxsize == 0:
+ maxsize = None
+ super().__init__(maxsize)
+
+
+Empty = queue.Empty
+Full = queue.Full
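+
+# Hypothetical usage sketch of the shims above:
+#
+#   q = Queue(0)             # unbounded, matching stdlib semantics
+#   q.put('item')
+#   assert q.get() == 'item'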
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/SimpleHTTPServer.py b/tapdown/lib/python3.11/site-packages/eventlet/green/SimpleHTTPServer.py
new file mode 100644
index 0000000..df49fc9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/SimpleHTTPServer.py
@@ -0,0 +1,13 @@
+from eventlet import patcher
+from eventlet.green import BaseHTTPServer
+from eventlet.green import urllib
+
+patcher.inject(
+ 'http.server',
+ globals(),
+ ('urllib', urllib))
+
+del patcher
+
+if __name__ == '__main__':
+ test()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/SocketServer.py b/tapdown/lib/python3.11/site-packages/eventlet/green/SocketServer.py
new file mode 100644
index 0000000..b94ead3
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/SocketServer.py
@@ -0,0 +1,14 @@
+from eventlet import patcher
+
+from eventlet.green import socket
+from eventlet.green import select
+from eventlet.green import threading
+
+patcher.inject(
+ 'socketserver',
+ globals(),
+ ('socket', socket),
+ ('select', select),
+ ('threading', threading))
+
+# QQQ ForkingMixIn should be fixed to use green waitpid?
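+
+# For reference: patcher.inject() re-executes the named stdlib module in this
+# module's namespace, substituting the listed (name, module) pairs, so
+# socketserver binds to green socket/select/threading. A minimal sketch of
+# the same pattern for another module:
+#
+#   from eventlet import patcher
+#   from eventlet.green import socket
+#   patcher.inject('ftplib', globals(), ('socket', socket))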
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/green/__init__.py
new file mode 100644
index 0000000..d965325
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/__init__.py
@@ -0,0 +1 @@
+# this package contains modules from the standard library converted to use eventlet
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/_socket_nodns.py b/tapdown/lib/python3.11/site-packages/eventlet/green/_socket_nodns.py
new file mode 100644
index 0000000..7dca20a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/_socket_nodns.py
@@ -0,0 +1,33 @@
+__socket = __import__('socket')
+
+__all__ = __socket.__all__
+__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket', 'timeout']
+
+import eventlet.patcher
+eventlet.patcher.slurp_properties(__socket, globals(), ignore=__patched__, srckeys=dir(__socket))
+
+os = __import__('os')
+import sys
+from eventlet import greenio
+
+
+socket = greenio.GreenSocket
+_GLOBAL_DEFAULT_TIMEOUT = greenio._GLOBAL_DEFAULT_TIMEOUT
+timeout = greenio.socket_timeout
+
+try:
+ __original_fromfd__ = __socket.fromfd
+
+ def fromfd(*args):
+ return socket(__original_fromfd__(*args))
+except AttributeError:
+ pass
+
+try:
+ __original_socketpair__ = __socket.socketpair
+
+ def socketpair(*args):
+ one, two = __original_socketpair__(*args)
+ return socket(one), socket(two)
+except AttributeError:
+ pass
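+
+# Hypothetical usage sketch: the wrappers above return greenio.GreenSocket
+# objects, so descriptors handed back by the OS cooperate with the hub.
+#
+#   a, b = socketpair()      # both ends are GreenSocket instances
+#   a.sendall(b'ping')
+#   assert b.recv(4) == b'ping'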
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/asynchat.py b/tapdown/lib/python3.11/site-packages/eventlet/green/asynchat.py
new file mode 100644
index 0000000..da51396
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/asynchat.py
@@ -0,0 +1,14 @@
+import sys
+
+if sys.version_info < (3, 12):
+ from eventlet import patcher
+ from eventlet.green import asyncore
+ from eventlet.green import socket
+
+ patcher.inject(
+ 'asynchat',
+ globals(),
+ ('asyncore', asyncore),
+ ('socket', socket))
+
+ del patcher
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/asyncore.py b/tapdown/lib/python3.11/site-packages/eventlet/green/asyncore.py
new file mode 100644
index 0000000..e7a7959
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/asyncore.py
@@ -0,0 +1,16 @@
+import sys
+
+if sys.version_info < (3, 12):
+ from eventlet import patcher
+ from eventlet.green import select
+ from eventlet.green import socket
+ from eventlet.green import time
+
+ patcher.inject(
+ "asyncore",
+ globals(),
+ ('select', select),
+ ('socket', socket),
+ ('time', time))
+
+ del patcher
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/builtin.py b/tapdown/lib/python3.11/site-packages/eventlet/green/builtin.py
new file mode 100644
index 0000000..ce98290
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/builtin.py
@@ -0,0 +1,38 @@
+"""
+In order to detect a filehandle that's been closed, our only clue may be
+the operating system returning the same filehandle in response to some
+other operation.
+
+The builtin 'open' (and, on Python 2, 'file') is patched to collaborate
+with the notify_opened protocol.
+"""
+
+builtins_orig = __builtins__
+
+from eventlet import hubs
+from eventlet.hubs import hub
+from eventlet.patcher import slurp_properties
+import sys
+
+__all__ = dir(builtins_orig)
+__patched__ = ['open']
+slurp_properties(builtins_orig, globals(),
+ ignore=__patched__, srckeys=dir(builtins_orig))
+
+hubs.get_hub()
+
+__original_open = open
+__opening = False
+
+
+def open(*args, **kwargs):
+ global __opening
+ result = __original_open(*args, **kwargs)
+ if not __opening:
+ # This is incredibly ugly. 'open' is used under the hood by
+ # the import process. So, ensure we don't wind up in an
+ # infinite loop.
+ __opening = True
+ hubs.notify_opened(result.fileno())
+ __opening = False
+ return result
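+
+# Hypothetical usage sketch: files opened through the patched open() report
+# their descriptor to the hub, letting eventlet detect when the OS recycles
+# a previously closed file number (path below is a placeholder).
+#
+#   f = open('/tmp/example.txt', 'w')   # hubs.notify_opened(f.fileno()) ran
+#   f.close()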
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/ftplib.py b/tapdown/lib/python3.11/site-packages/eventlet/green/ftplib.py
new file mode 100644
index 0000000..b452e1d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/ftplib.py
@@ -0,0 +1,13 @@
+from eventlet import patcher
+
+# *NOTE: there might be some funny business with the "SOCKS" module
+# if it even still exists
+from eventlet.green import socket
+
+patcher.inject('ftplib', globals(), ('socket', socket))
+
+del patcher
+
+# Run test program when run as a script
+if __name__ == '__main__':
+ test()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/http/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/green/http/__init__.py
new file mode 100644
index 0000000..14e74fd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/http/__init__.py
@@ -0,0 +1,189 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+
+from enum import IntEnum
+
+__all__ = ['HTTPStatus']
+
+class HTTPStatus(IntEnum):
+ """HTTP status codes and reason phrases
+
+ Status codes from the following RFCs are all observed:
+
+ * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
+ * RFC 6585: Additional HTTP Status Codes
+ * RFC 3229: Delta encoding in HTTP
+ * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
+ * RFC 5842: Binding Extensions to WebDAV
+ * RFC 7238: Permanent Redirect
+ * RFC 2295: Transparent Content Negotiation in HTTP
+ * RFC 2774: An HTTP Extension Framework
+ """
+ def __new__(cls, value, phrase, description=''):
+ obj = int.__new__(cls, value)
+ obj._value_ = value
+
+ obj.phrase = phrase
+ obj.description = description
+ return obj
+
+ # informational
+ CONTINUE = 100, 'Continue', 'Request received, please continue'
+ SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
+ 'Switching to new protocol; obey Upgrade header')
+ PROCESSING = 102, 'Processing'
+
+ # success
+ OK = 200, 'OK', 'Request fulfilled, document follows'
+ CREATED = 201, 'Created', 'Document created, URL follows'
+ ACCEPTED = (202, 'Accepted',
+ 'Request accepted, processing continues off-line')
+ NON_AUTHORITATIVE_INFORMATION = (203,
+ 'Non-Authoritative Information', 'Request fulfilled from cache')
+ NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
+ RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
+ PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
+ MULTI_STATUS = 207, 'Multi-Status'
+ ALREADY_REPORTED = 208, 'Already Reported'
+ IM_USED = 226, 'IM Used'
+
+ # redirection
+ MULTIPLE_CHOICES = (300, 'Multiple Choices',
+ 'Object has several resources -- see URI list')
+ MOVED_PERMANENTLY = (301, 'Moved Permanently',
+ 'Object moved permanently -- see URI list')
+ FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
+ SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
+ NOT_MODIFIED = (304, 'Not Modified',
+ 'Document has not changed since given time')
+ USE_PROXY = (305, 'Use Proxy',
+ 'You must use proxy specified in Location to access this resource')
+ TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
+ 'Object moved temporarily -- see URI list')
+ PERMANENT_REDIRECT = (308, 'Permanent Redirect',
+ 'Object moved temporarily -- see URI list')
+
+ # client error
+ BAD_REQUEST = (400, 'Bad Request',
+ 'Bad request syntax or unsupported method')
+ UNAUTHORIZED = (401, 'Unauthorized',
+ 'No permission -- see authorization schemes')
+ PAYMENT_REQUIRED = (402, 'Payment Required',
+ 'No payment -- see charging schemes')
+ FORBIDDEN = (403, 'Forbidden',
+ 'Request forbidden -- authorization will not help')
+ NOT_FOUND = (404, 'Not Found',
+ 'Nothing matches the given URI')
+ METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
+ 'Specified method is invalid for this resource')
+ NOT_ACCEPTABLE = (406, 'Not Acceptable',
+ 'URI not available in preferred format')
+ PROXY_AUTHENTICATION_REQUIRED = (407,
+ 'Proxy Authentication Required',
+ 'You must authenticate with this proxy before proceeding')
+ REQUEST_TIMEOUT = (408, 'Request Timeout',
+ 'Request timed out; try again later')
+ CONFLICT = 409, 'Conflict', 'Request conflict'
+ GONE = (410, 'Gone',
+ 'URI no longer exists and has been permanently removed')
+ LENGTH_REQUIRED = (411, 'Length Required',
+ 'Client must specify Content-Length')
+ PRECONDITION_FAILED = (412, 'Precondition Failed',
+ 'Precondition in headers is false')
+ REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
+ 'Entity is too large')
+ REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
+ 'URI is too long')
+ UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
+ 'Entity body in unsupported format')
+ REQUESTED_RANGE_NOT_SATISFIABLE = (416,
+ 'Requested Range Not Satisfiable',
+ 'Cannot satisfy request range')
+ EXPECTATION_FAILED = (417, 'Expectation Failed',
+ 'Expect condition could not be satisfied')
+ UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
+ LOCKED = 423, 'Locked'
+ FAILED_DEPENDENCY = 424, 'Failed Dependency'
+ UPGRADE_REQUIRED = 426, 'Upgrade Required'
+ PRECONDITION_REQUIRED = (428, 'Precondition Required',
+ 'The origin server requires the request to be conditional')
+ TOO_MANY_REQUESTS = (429, 'Too Many Requests',
+ 'The user has sent too many requests in '
+ 'a given amount of time ("rate limiting")')
+ REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
+ 'Request Header Fields Too Large',
+ 'The server is unwilling to process the request because its header '
+ 'fields are too large')
+
+ # server errors
+ INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
+ 'Server got itself in trouble')
+ NOT_IMPLEMENTED = (501, 'Not Implemented',
+ 'Server does not support this operation')
+ BAD_GATEWAY = (502, 'Bad Gateway',
+ 'Invalid responses from another server/proxy')
+ SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
+ 'The server cannot process the request due to a high load')
+ GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
+ 'The gateway server did not receive a timely response')
+ HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
+ 'Cannot fulfill request')
+ VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
+ INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
+ LOOP_DETECTED = 508, 'Loop Detected'
+ NOT_EXTENDED = 510, 'Not Extended'
+ NETWORK_AUTHENTICATION_REQUIRED = (511,
+ 'Network Authentication Required',
+ 'The client needs to authenticate to gain network access')
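+
+# Illustrative usage: each member is an int that also carries the reason
+# phrase and optional description wired up in __new__ above.
+#
+#   assert HTTPStatus.NOT_FOUND == 404
+#   assert HTTPStatus.NOT_FOUND.phrase == 'Not Found'
+#   assert HTTPStatus.PROCESSING.description == ''   # third field is optional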
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/http/client.py b/tapdown/lib/python3.11/site-packages/eventlet/green/http/client.py
new file mode 100644
index 0000000..2051ca9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/http/client.py
@@ -0,0 +1,1578 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+"""HTTP/1.1 client library
+
+<intro stuff goes here>
+<other stuff, too>
+
+HTTPConnection goes through a number of "states", which define when a client
+may legally make another request or fetch the response for a particular
+request. This diagram details these state transitions:
+
+ (null)
+ |
+ | HTTPConnection()
+ v
+ Idle
+ |
+ | putrequest()
+ v
+ Request-started
+ |
+ | ( putheader() )* endheaders()
+ v
+ Request-sent
+ |\\_____________________________
+ | | getresponse() raises
+ | response = getresponse() | ConnectionError
+ v v
+ Unread-response Idle
+ [Response-headers-read]
+ |\\____________________
+ | |
+ | response.read() | putrequest()
+ v v
+ Idle Req-started-unread-response
+ ______/|
+ / |
+ response.read() | | ( putheader() )* endheaders()
+ v v
+ Request-started Req-sent-unread-response
+ |
+ | response.read()
+ v
+ Request-sent
+
+This diagram presents the following rules:
+ -- a second request may not be started until {response-headers-read}
+ -- a response [object] cannot be retrieved until {request-sent}
+ -- there is no differentiation between an unread response body and a
+ partially read response body
+
+Note: this enforcement is applied by the HTTPConnection class. The
+ HTTPResponse class does not enforce this state machine, which
+ implies sophisticated clients may accelerate the request/response
+ pipeline. Caution should be taken, though: accelerating the states
+ beyond the above pattern may imply knowledge of the server's
+ connection-close behavior for certain requests. For example, it
+ is impossible to tell whether the server will close the connection
+ UNTIL the response headers have been read; this means that further
+ requests cannot be placed into the pipeline until it is known that
+ the server will NOT be closing the connection.
+
+Logical State                  __state            __response
+-------------                  -------            ----------
+Idle                           _CS_IDLE           None
+Request-started                _CS_REQ_STARTED    None
+Request-sent                   _CS_REQ_SENT       None
+Unread-response                _CS_IDLE           <response_class>
+Req-started-unread-response    _CS_REQ_STARTED    <response_class>
+Req-sent-unread-response       _CS_REQ_SENT       <response_class>
+"""
+
+import email.parser
+import email.message
+import io
+import re
+from collections.abc import Iterable
+from urllib.parse import urlsplit
+
+from eventlet.green import http, os, socket
+
+# HTTPMessage, parse_headers(), and the HTTP status code constants are
+# intentionally omitted for simplicity
+__all__ = ["HTTPResponse", "HTTPConnection",
+ "HTTPException", "NotConnected", "UnknownProtocol",
+ "UnknownTransferEncoding", "UnimplementedFileMode",
+ "IncompleteRead", "InvalidURL", "ImproperConnectionState",
+ "CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
+ "BadStatusLine", "LineTooLong", "RemoteDisconnected", "error",
+ "responses"]
+
+HTTP_PORT = 80
+HTTPS_PORT = 443
+
+_UNKNOWN = 'UNKNOWN'
+
+# connection states
+_CS_IDLE = 'Idle'
+_CS_REQ_STARTED = 'Request-started'
+_CS_REQ_SENT = 'Request-sent'
+
+
+# hack to maintain backwards compatibility
+globals().update(http.HTTPStatus.__members__)
+
+# another hack to maintain backwards compatibility
+# Mapping status codes to official W3C names
+responses = {v: v.phrase for v in http.HTTPStatus.__members__.values()}
+
+# maximal amount of data to read at one time in _safe_read
+MAXAMOUNT = 1048576
+
+# maximal line length when calling readline().
+_MAXLINE = 65536
+_MAXHEADERS = 100
+
+# Header name/value ABNF (http://tools.ietf.org/html/rfc7230#section-3.2)
+#
+# VCHAR = %x21-7E
+# obs-text = %x80-FF
+# header-field = field-name ":" OWS field-value OWS
+# field-name = token
+# field-value = *( field-content / obs-fold )
+# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+# field-vchar = VCHAR / obs-text
+#
+# obs-fold = CRLF 1*( SP / HTAB )
+# ; obsolete line folding
+# ; see Section 3.2.4
+
+# token = 1*tchar
+#
+# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+# / DIGIT / ALPHA
+# ; any VCHAR, except delimiters
+#
+# VCHAR defined in http://tools.ietf.org/html/rfc5234#appendix-B.1
+
+# the patterns for both name and value are more lenient than RFC
+# definitions to allow for backwards compatibility
+# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
+_is_legal_header_name = re.compile(rb'[^:\s][^:\r\n]*\Z').match
+_is_illegal_header_value = re.compile(rb'\n(?![ \t])|\r(?![ \t\n])').search
+
+# We always set the Content-Length header for these methods because some
+# servers will otherwise respond with a 411
+_METHODS_EXPECTING_BODY = {'PATCH', 'POST', 'PUT'}
+
+
+def _encode(data, name='data'):
+ """Call data.encode("latin-1") but show a better error message."""
+ try:
+ return data.encode("latin-1")
+ except UnicodeEncodeError as err:
+ raise UnicodeEncodeError(
+ err.encoding,
+ err.object,
+ err.start,
+ err.end,
+ "%s (%.20r) is not valid Latin-1. Use %s.encode('utf-8') "
+ "if you want to send it encoded in UTF-8." %
+ (name.title(), data[err.start:err.end], name)) from None
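+
+# Illustrative behavior of the helper above:
+#   _encode('abc') == b'abc'
+#   _encode('\u2122') raises UnicodeEncodeError suggesting .encode('utf-8'),
+#   since TRADE MARK SIGN has no Latin-1 encoding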
+
+
+class HTTPMessage(email.message.Message):
+ # XXX The only usage of this method is in
+ # http.server.CGIHTTPRequestHandler. Maybe move the code there so
+ # that it doesn't need to be part of the public API. The API has
+ # never been defined so this could cause backwards compatibility
+ # issues.
+
+ def getallmatchingheaders(self, name):
+ """Find all header lines matching a given header name.
+
+ Look through the list of headers and find all lines matching a given
+ header name (and their continuation lines). A list of the lines is
+ returned, without interpretation. If the header does not occur, an
+ empty list is returned. If the header occurs multiple times, all
+ occurrences are returned. Case is not important in the header name.
+
+ """
+ name = name.lower() + ':'
+ n = len(name)
+ lst = []
+ hit = 0
+ for line in self.keys():
+ if line[:n].lower() == name:
+ hit = 1
+ elif not line[:1].isspace():
+ hit = 0
+ if hit:
+ lst.append(line)
+ return lst
+
+def parse_headers(fp, _class=HTTPMessage):
+ """Parses only RFC2822 headers from a file pointer.
+
+ email Parser wants to see strings rather than bytes.
+ But a TextIOWrapper around self.rfile would buffer too many bytes
+ from the stream, bytes which we later need to read as bytes.
+ So we read the correct bytes here, as bytes, for email Parser
+ to parse.
+
+ """
+ headers = []
+ while True:
+ line = fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
+ headers.append(line)
+ if len(headers) > _MAXHEADERS:
+ raise HTTPException("got more than %d headers" % _MAXHEADERS)
+ if line in (b'\r\n', b'\n', b''):
+ break
+ hstring = b''.join(headers).decode('iso-8859-1')
+ return email.parser.Parser(_class=_class).parsestr(hstring)
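+
+# Hypothetical usage sketch: parse_headers() consumes exactly the header
+# block (up to and including the blank line) from a binary file object.
+#
+#   import io
+#   fp = io.BytesIO(b"Host: example.com\r\nContent-Length: 0\r\n\r\n")
+#   msg = parse_headers(fp)
+#   assert msg['Host'] == 'example.com'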
+
+
+class HTTPResponse(io.BufferedIOBase):
+
+ # See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
+
+ # The bytes from the socket object are iso-8859-1 strings.
+ # See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
+ # text following RFC 2047. The basic status line parsing only
+ # accepts iso-8859-1.
+
+ def __init__(self, sock, debuglevel=0, method=None, url=None):
+ # If the response includes a content-length header, we need to
+ # make sure that the client doesn't read more than the
+ # specified number of bytes. If it does, it will block until
+ # the server times out and closes the connection. This will
+ # happen if a self.fp.read() is done (without a size) whether
+ # self.fp is buffered or not. So, no self.fp.read() by
+ # clients unless they know what they are doing.
+ self.fp = sock.makefile("rb")
+ self.debuglevel = debuglevel
+ self._method = method
+
+ # The HTTPResponse object is returned via urllib. The clients
+ # of http and urllib expect different attributes for the
+ # headers. headers is used here and supports urllib. msg is
+ # provided as a backwards compatibility layer for http
+ # clients.
+
+ self.headers = self.msg = None
+
+ # from the Status-Line of the response
+ self.version = _UNKNOWN # HTTP-Version
+ self.status = _UNKNOWN # Status-Code
+ self.reason = _UNKNOWN # Reason-Phrase
+
+ self.chunked = _UNKNOWN # is "chunked" being used?
+ self.chunk_left = _UNKNOWN # bytes left to read in current chunk
+ self.length = _UNKNOWN # number of bytes left in response
+ self.will_close = _UNKNOWN # conn will close at end of response
+
+ def _read_status(self):
+ line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
+ if len(line) > _MAXLINE:
+ raise LineTooLong("status line")
+ if self.debuglevel > 0:
+ print("reply:", repr(line))
+ if not line:
+ # Presumably, the server closed the connection before
+ # sending a valid response.
+ raise RemoteDisconnected("Remote end closed connection without"
+ " response")
+ try:
+ version, status, reason = line.split(None, 2)
+ except ValueError:
+ try:
+ version, status = line.split(None, 1)
+ reason = ""
+ except ValueError:
+ # empty version will cause next test to fail.
+ version = ""
+ if not version.startswith("HTTP/"):
+ self._close_conn()
+ raise BadStatusLine(line)
+
+ # The status code is a three-digit number
+ try:
+ status = int(status)
+ if status < 100 or status > 999:
+ raise BadStatusLine(line)
+ except ValueError:
+ raise BadStatusLine(line)
+ return version, status, reason
+
+ def begin(self):
+ if self.headers is not None:
+ # we've already started reading the response
+ return
+
+ # read until we get a non-100 response
+ while True:
+ version, status, reason = self._read_status()
+ if status != CONTINUE:
+ break
+ # skip the header from the 100 response
+ while True:
+ skip = self.fp.readline(_MAXLINE + 1)
+ if len(skip) > _MAXLINE:
+ raise LineTooLong("header line")
+ skip = skip.strip()
+ if not skip:
+ break
+ if self.debuglevel > 0:
+ print("header:", skip)
+
+ self.code = self.status = status
+ self.reason = reason.strip()
+ if version in ("HTTP/1.0", "HTTP/0.9"):
+ # Some servers might still return "0.9", treat it as 1.0 anyway
+ self.version = 10
+ elif version.startswith("HTTP/1."):
+ self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
+ else:
+ raise UnknownProtocol(version)
+
+ self.headers = self.msg = parse_headers(self.fp)
+
+ if self.debuglevel > 0:
+ for hdr in self.headers:
+ print("header:", hdr, end=" ")
+
+ # are we using the chunked-style of transfer encoding?
+ tr_enc = self.headers.get("transfer-encoding")
+ if tr_enc and tr_enc.lower() == "chunked":
+ self.chunked = True
+ self.chunk_left = None
+ else:
+ self.chunked = False
+
+ # will the connection close at the end of the response?
+ self.will_close = self._check_close()
+
+ # do we have a Content-Length?
+ # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
+ self.length = None
+ length = self.headers.get("content-length")
+
+ # are we using the chunked-style of transfer encoding?
+ tr_enc = self.headers.get("transfer-encoding")
+ if length and not self.chunked:
+ try:
+ self.length = int(length)
+ except ValueError:
+ self.length = None
+ else:
+ if self.length < 0: # ignore nonsensical negative lengths
+ self.length = None
+ else:
+ self.length = None
+
+ # does the body have a fixed length? (of zero)
+ if (status == NO_CONTENT or status == NOT_MODIFIED or
+ 100 <= status < 200 or # 1xx codes
+ self._method == "HEAD"):
+ self.length = 0
+
+ # if the connection remains open, and we aren't using chunked, and
+ # a content-length was not provided, then assume that the connection
+ # WILL close.
+ if (not self.will_close and
+ not self.chunked and
+ self.length is None):
+ self.will_close = True
+
+ def _check_close(self):
+ conn = self.headers.get("connection")
+ if self.version == 11:
+ # An HTTP/1.1 proxy is assumed to stay open unless
+ # explicitly closed.
+ if conn and "close" in conn.lower():
+ return True
+ return False
+
+ # Some HTTP/1.0 implementations have support for persistent
+ # connections, using rules different than HTTP/1.1.
+
+ # For older HTTP, Keep-Alive indicates persistent connection.
+ if self.headers.get("keep-alive"):
+ return False
+
+ # At least Akamai returns a "Connection: Keep-Alive" header,
+ # which was supposed to be sent by the client.
+ if conn and "keep-alive" in conn.lower():
+ return False
+
+ # Proxy-Connection is a netscape hack.
+ pconn = self.headers.get("proxy-connection")
+ if pconn and "keep-alive" in pconn.lower():
+ return False
+
+ # otherwise, assume it will close
+ return True
+
+ def _close_conn(self):
+ fp = self.fp
+ self.fp = None
+ fp.close()
+
+ def close(self):
+ try:
+ super().close() # set "closed" flag
+ finally:
+ if self.fp:
+ self._close_conn()
+
+ # These implementations are for the benefit of io.BufferedReader.
+
+ # XXX This class should probably be revised to act more like
+ # the "raw stream" that BufferedReader expects.
+
+ def flush(self):
+ super().flush()
+ if self.fp:
+ self.fp.flush()
+
+ def readable(self):
+ """Always returns True"""
+ return True
+
+ # End of "raw stream" methods
+
+ def isclosed(self):
+ """True if the connection is closed."""
+ # NOTE: it is possible that we will not ever call self.close(). This
+ # case occurs when will_close is TRUE, length is None, and we
+ # read up to the last byte, but NOT past it.
+ #
+ # IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
+ # called, meaning self.isclosed() is meaningful.
+ return self.fp is None
+
+ def read(self, amt=None):
+ if self.fp is None:
+ return b""
+
+ if self._method == "HEAD":
+ self._close_conn()
+ return b""
+
+ if amt is not None:
+ # Amount is given, implement using readinto
+ b = bytearray(amt)
+ n = self.readinto(b)
+ return memoryview(b)[:n].tobytes()
+ else:
+ # Amount is not given (unbounded read) so we must check self.length
+ # and self.chunked
+
+ if self.chunked:
+ return self._readall_chunked()
+
+ if self.length is None:
+ s = self.fp.read()
+ else:
+ try:
+ s = self._safe_read(self.length)
+ except IncompleteRead:
+ self._close_conn()
+ raise
+ self.length = 0
+ self._close_conn() # we read everything
+ return s
+
+ def readinto(self, b):
+ """Read up to len(b) bytes into bytearray b and return the number
+ of bytes read.
+ """
+
+ if self.fp is None:
+ return 0
+
+ if self._method == "HEAD":
+ self._close_conn()
+ return 0
+
+ if self.chunked:
+ return self._readinto_chunked(b)
+
+ if self.length is not None:
+ if len(b) > self.length:
+ # clip the read to the "end of response"
+ b = memoryview(b)[0:self.length]
+
+ # we do not use _safe_read() here because this may be a .will_close
+ # connection, and the user is reading more bytes than will be provided
+ # (for example, reading in 1k chunks)
+ n = self.fp.readinto(b)
+ if not n and b:
+ # Ideally, we would raise IncompleteRead if the content-length
+ # wasn't satisfied, but it might break compatibility.
+ self._close_conn()
+ elif self.length is not None:
+ self.length -= n
+ if not self.length:
+ self._close_conn()
+ return n
+
+ def _read_next_chunk_size(self):
+ # Read the next chunk size from the file
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("chunk size")
+ i = line.find(b";")
+ if i >= 0:
+ line = line[:i] # strip chunk-extensions
+ try:
+ return int(line, 16)
+ except ValueError:
+ # close the connection as protocol synchronisation is
+ # probably lost
+ self._close_conn()
+ raise
+
+ def _read_and_discard_trailer(self):
+ # read and discard trailer up to the CRLF terminator
+ ### note: we shouldn't have any trailers!
+ while True:
+ line = self.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("trailer line")
+ if not line:
+ # a vanishingly small number of sites EOF without
+ # sending the trailer
+ break
+ if line in (b'\r\n', b'\n', b''):
+ break
+
+ def _get_chunk_left(self):
+ # return self.chunk_left, reading a new chunk if necessary.
+ # chunk_left == 0: at the end of the current chunk, need to close it
+ # chunk_left == None: No current chunk, should read next.
+        # This function returns a non-zero chunk size, or None once the last
+        # chunk has been read.
+ chunk_left = self.chunk_left
+ if not chunk_left: # Can be 0 or None
+ if chunk_left is not None:
+                # We are at the end of chunk, discard chunk end
+ self._safe_read(2) # toss the CRLF at the end of the chunk
+ try:
+ chunk_left = self._read_next_chunk_size()
+ except ValueError:
+ raise IncompleteRead(b'')
+ if chunk_left == 0:
+ # last chunk: 1*("0") [ chunk-extension ] CRLF
+ self._read_and_discard_trailer()
+ # we read everything; close the "file"
+ self._close_conn()
+ chunk_left = None
+ self.chunk_left = chunk_left
+ return chunk_left
+
+ def _readall_chunked(self):
+ assert self.chunked != _UNKNOWN
+ value = []
+ try:
+ while True:
+ chunk_left = self._get_chunk_left()
+ if chunk_left is None:
+ break
+ value.append(self._safe_read(chunk_left))
+ self.chunk_left = 0
+ return b''.join(value)
+ except IncompleteRead:
+ raise IncompleteRead(b''.join(value))
+
+ def _readinto_chunked(self, b):
+ assert self.chunked != _UNKNOWN
+ total_bytes = 0
+ mvb = memoryview(b)
+ try:
+ while True:
+ chunk_left = self._get_chunk_left()
+ if chunk_left is None:
+ return total_bytes
+
+ if len(mvb) <= chunk_left:
+ n = self._safe_readinto(mvb)
+ self.chunk_left = chunk_left - n
+ return total_bytes + n
+
+ temp_mvb = mvb[:chunk_left]
+ n = self._safe_readinto(temp_mvb)
+ mvb = mvb[n:]
+ total_bytes += n
+ self.chunk_left = 0
+
+ except IncompleteRead:
+ raise IncompleteRead(bytes(b[0:total_bytes]))
+
+ def _safe_read(self, amt):
+ """Read the number of bytes requested, compensating for partial reads.
+
+ Normally, we have a blocking socket, but a read() can be interrupted
+ by a signal (resulting in a partial read).
+
+ Note that we cannot distinguish between EOF and an interrupt when zero
+ bytes have been read. IncompleteRead() will be raised in this
+ situation.
+
+ This function should be used when bytes "should" be present for
+ reading. If the bytes are truly not available (due to EOF), then the
+ IncompleteRead exception can be used to detect the problem.
+ """
+ s = []
+ while amt > 0:
+ chunk = self.fp.read(min(amt, MAXAMOUNT))
+ if not chunk:
+ raise IncompleteRead(b''.join(s), amt)
+ s.append(chunk)
+ amt -= len(chunk)
+ return b"".join(s)
+
+ def _safe_readinto(self, b):
+ """Same as _safe_read, but for reading into a buffer."""
+ total_bytes = 0
+ mvb = memoryview(b)
+ while total_bytes < len(b):
+ if MAXAMOUNT < len(mvb):
+ temp_mvb = mvb[0:MAXAMOUNT]
+ n = self.fp.readinto(temp_mvb)
+ else:
+ n = self.fp.readinto(mvb)
+ if not n:
+ raise IncompleteRead(bytes(mvb[0:total_bytes]), len(b))
+ mvb = mvb[n:]
+ total_bytes += n
+ return total_bytes
+
+ def read1(self, n=-1):
+ """Read with at most one underlying system call. If at least one
+ byte is buffered, return that instead.
+ """
+ if self.fp is None or self._method == "HEAD":
+ return b""
+ if self.chunked:
+ return self._read1_chunked(n)
+ if self.length is not None and (n < 0 or n > self.length):
+ n = self.length
+ try:
+ result = self.fp.read1(n)
+ except ValueError:
+ if n >= 0:
+ raise
+ # some implementations, like BufferedReader, don't support -1
+ # Read an arbitrarily selected largeish chunk.
+ result = self.fp.read1(16*1024)
+ if not result and n:
+ self._close_conn()
+ elif self.length is not None:
+ self.length -= len(result)
+ return result
+
+ def peek(self, n=-1):
+ # Having this enables IOBase.readline() to read more than one
+ # byte at a time
+ if self.fp is None or self._method == "HEAD":
+ return b""
+ if self.chunked:
+ return self._peek_chunked(n)
+ return self.fp.peek(n)
+
+ def readline(self, limit=-1):
+ if self.fp is None or self._method == "HEAD":
+ return b""
+ if self.chunked:
+ # Fallback to IOBase readline which uses peek() and read()
+ return super().readline(limit)
+ if self.length is not None and (limit < 0 or limit > self.length):
+ limit = self.length
+ result = self.fp.readline(limit)
+ if not result and limit:
+ self._close_conn()
+ elif self.length is not None:
+ self.length -= len(result)
+ return result
+
+ def _read1_chunked(self, n):
+ # Strictly speaking, _get_chunk_left() may cause more than one read,
+ # but that is ok, since that is to satisfy the chunked protocol.
+ chunk_left = self._get_chunk_left()
+ if chunk_left is None or n == 0:
+ return b''
+ if not (0 <= n <= chunk_left):
+ n = chunk_left # if n is negative or larger than chunk_left
+ read = self.fp.read1(n)
+ self.chunk_left -= len(read)
+ if not read:
+ raise IncompleteRead(b"")
+ return read
+
+ def _peek_chunked(self, n):
+ # Strictly speaking, _get_chunk_left() may cause more than one read,
+ # but that is ok, since that is to satisfy the chunked protocol.
+ try:
+ chunk_left = self._get_chunk_left()
+ except IncompleteRead:
+ return b'' # peek doesn't worry about protocol
+ if chunk_left is None:
+ return b'' # eof
+ # peek is allowed to return more than requested. Just request the
+ # entire chunk, and truncate what we get.
+ return self.fp.peek(chunk_left)[:chunk_left]
+
+ def fileno(self):
+ return self.fp.fileno()
+
+ def getheader(self, name, default=None):
+ '''Returns the value of the header matching *name*.
+
+ If there are multiple matching headers, the values are
+ combined into a single string separated by commas and spaces.
+
+ If no matching header is found, returns *default* or None if
+ the *default* is not specified.
+
+ If the headers are unknown, raises http.client.ResponseNotReady.
+
+ '''
+ if self.headers is None:
+ raise ResponseNotReady()
+ headers = self.headers.get_all(name) or default
+ if isinstance(headers, str) or not hasattr(headers, '__iter__'):
+ return headers
+ else:
+ return ', '.join(headers)
+
+ def getheaders(self):
+ """Return list of (header, value) tuples."""
+ if self.headers is None:
+ raise ResponseNotReady()
+ return list(self.headers.items())
+
+ # We override IOBase.__iter__ so that it doesn't check for closed-ness
+
+ def __iter__(self):
+ return self
+
+ # For compatibility with old-style urllib responses.
+
+ def info(self):
+ '''Returns an instance of the class mimetools.Message containing
+ meta-information associated with the URL.
+
+ When the method is HTTP, these headers are those returned by
+ the server at the head of the retrieved HTML page (including
+ Content-Length and Content-Type).
+
+ When the method is FTP, a Content-Length header will be
+ present if (as is now usual) the server passed back a file
+ length in response to the FTP retrieval request. A
+ Content-Type header will be present if the MIME type can be
+ guessed.
+
+ When the method is local-file, returned headers will include
+ a Date representing the file's last-modified time, a
+ Content-Length giving file size, and a Content-Type
+ containing a guess at the file's type. See also the
+ description of the mimetools module.
+
+ '''
+ return self.headers
+
+ def geturl(self):
+ '''Return the real URL of the page.
+
+ In some cases, the HTTP server redirects a client to another
+ URL. The urlopen() function handles this transparently, but in
+ some cases the caller needs to know which URL the client was
+ redirected to. The geturl() method can be used to get at this
+ redirected URL.
+
+ '''
+ return self.url
+
+ def getcode(self):
+ '''Return the HTTP status code that was sent with the response,
+ or None if the URL is not an HTTP URL.
+
+ '''
+ return self.status
+
+class HTTPConnection:
+
+ _http_vsn = 11
+ _http_vsn_str = 'HTTP/1.1'
+
+ response_class = HTTPResponse
+ default_port = HTTP_PORT
+ auto_open = 1
+ debuglevel = 0
+
+ @staticmethod
+ def _is_textIO(stream):
+ """Test whether a file-like object is a text or a binary stream.
+ """
+ return isinstance(stream, io.TextIOBase)
+
+ @staticmethod
+ def _get_content_length(body, method):
+ """Get the content-length based on the body.
+
+ If the body is None, we set Content-Length: 0 for methods that expect
+ a body (RFC 7230, Section 3.3.2). We also set the Content-Length for
+ any method if the body is a str or bytes-like object and not a file.
+ """
+ if body is None:
+ # do an explicit check for not None here to distinguish
+ # between unset and set but empty
+ if method.upper() in _METHODS_EXPECTING_BODY:
+ return 0
+ else:
+ return None
+
+ if hasattr(body, 'read'):
+ # file-like object.
+ return None
+
+ try:
+ # does it implement the buffer protocol (bytes, bytearray, array)?
+ mv = memoryview(body)
+ return mv.nbytes
+ except TypeError:
+ pass
+
+ if isinstance(body, str):
+ return len(body)
+
+ return None
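+
+    # Illustrative results of the rules above:
+    #   _get_content_length(None, 'POST')   -> 0    (method expects a body)
+    #   _get_content_length(None, 'GET')    -> None
+    #   _get_content_length(b'abc', 'GET')  -> 3    (buffer protocol)
+    #   _get_content_length(open('f'), 'POST') -> None  (file-like, unknown size)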
+
+ def __init__(self, host, port=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None):
+ self.timeout = timeout
+ self.source_address = source_address
+ self.sock = None
+ self._buffer = []
+ self.__response = None
+ self.__state = _CS_IDLE
+ self._method = None
+ self._tunnel_host = None
+ self._tunnel_port = None
+ self._tunnel_headers = {}
+
+ (self.host, self.port) = self._get_hostport(host, port)
+
+ # This is stored as an instance variable to allow unit
+ # tests to replace it with a suitable mockup
+ self._create_connection = socket.create_connection
+
+ def set_tunnel(self, host, port=None, headers=None):
+ """Set up host and port for HTTP CONNECT tunnelling.
+
+ In a connection that uses HTTP CONNECT tunneling, the host passed to the
+ constructor is used as a proxy server that relays all communication to
+        the endpoint passed to `set_tunnel`. This is done by sending an HTTP
+        CONNECT request to the proxy server when the connection is established.
+
+        This method must be called before the HTTP connection has been
+        established.
+
+ The headers argument should be a mapping of extra HTTP headers to send
+ with the CONNECT request.
+ """
+
+ if self.sock:
+ raise RuntimeError("Can't set up tunnel for established connection")
+
+ self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)
+ if headers:
+ self._tunnel_headers = headers
+ else:
+ self._tunnel_headers.clear()
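+
+    # Hypothetical usage sketch: connect to a proxy and tunnel CONNECT
+    # traffic through it (host names below are placeholders).
+    #
+    #   conn = HTTPConnection('proxy.example.com', 8080)
+    #   conn.set_tunnel('target.example.com', 80)
+    #   conn.request('GET', '/')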
+
+ def _get_hostport(self, host, port):
+ if port is None:
+ i = host.rfind(':')
+ j = host.rfind(']') # ipv6 addresses have [...]
+ if i > j:
+ try:
+ port = int(host[i+1:])
+ except ValueError:
+ if host[i+1:] == "": # http://foo.com:/ == http://foo.com/
+ port = self.default_port
+ else:
+ raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
+ host = host[:i]
+ else:
+ port = self.default_port
+ if host and host[0] == '[' and host[-1] == ']':
+ host = host[1:-1]
+
+ return (host, port)
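+
+    # Illustrative parses of the logic above, assuming default_port == 80:
+    #   _get_hostport('example.com', None)       -> ('example.com', 80)
+    #   _get_hostport('example.com:8080', None)  -> ('example.com', 8080)
+    #   _get_hostport('[::1]:8080', None)        -> ('::1', 8080)
+    #   _get_hostport('example.com:', None)      -> ('example.com', 80)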
+
+ def set_debuglevel(self, level):
+ self.debuglevel = level
+
+ def _tunnel(self):
+ connect_str = "CONNECT %s:%d HTTP/1.0\r\n" % (self._tunnel_host,
+ self._tunnel_port)
+ connect_bytes = connect_str.encode("ascii")
+ self.send(connect_bytes)
+ for header, value in self._tunnel_headers.items():
+ header_str = "%s: %s\r\n" % (header, value)
+ header_bytes = header_str.encode("latin-1")
+ self.send(header_bytes)
+ self.send(b'\r\n')
+
+ response = self.response_class(self.sock, method=self._method)
+ (version, code, message) = response._read_status()
+
+ if code != http.HTTPStatus.OK:
+ self.close()
+ raise OSError("Tunnel connection failed: %d %s" % (code,
+ message.strip()))
+ while True:
+ line = response.fp.readline(_MAXLINE + 1)
+ if len(line) > _MAXLINE:
+ raise LineTooLong("header line")
+ if not line:
+ # for sites which EOF without sending a trailer
+ break
+ if line in (b'\r\n', b'\n', b''):
+ break
+
+ if self.debuglevel > 0:
+ print('header:', line.decode())
+
+ def connect(self):
+ """Connect to the host and port specified in __init__."""
+ self.sock = self._create_connection(
+ (self.host,self.port), self.timeout, self.source_address)
+ self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+ if self._tunnel_host:
+ self._tunnel()
+
+ def close(self):
+ """Close the connection to the HTTP server."""
+ self.__state = _CS_IDLE
+ try:
+ sock = self.sock
+ if sock:
+ self.sock = None
+ sock.close() # close it manually... there may be other refs
+ finally:
+ response = self.__response
+ if response:
+ self.__response = None
+ response.close()
+
+ def send(self, data):
+ """Send `data' to the server.
+ ``data`` can be a string object, a bytes object, an array object, a
+ file-like object that supports a .read() method, or an iterable object.
+ """
+
+ if self.sock is None:
+ if self.auto_open:
+ self.connect()
+ else:
+ raise NotConnected()
+
+ if self.debuglevel > 0:
+ print("send:", repr(data))
+ blocksize = 8192
+ if hasattr(data, "read") :
+ if self.debuglevel > 0:
+ print("sendIng a read()able")
+ encode = False
+ try:
+ mode = data.mode
+ except AttributeError:
+ # io.BytesIO and other file-like objects don't have a `mode`
+ # attribute.
+ pass
+ else:
+ if "b" not in mode:
+ encode = True
+ if self.debuglevel > 0:
+ print("encoding file using iso-8859-1")
+ while 1:
+ datablock = data.read(blocksize)
+ if not datablock:
+ break
+ if encode:
+ datablock = datablock.encode("iso-8859-1")
+ self.sock.sendall(datablock)
+ return
+ try:
+ self.sock.sendall(data)
+ except TypeError:
+ if isinstance(data, Iterable):
+ for d in data:
+ self.sock.sendall(d)
+ else:
+ raise TypeError("data should be a bytes-like object "
+ "or an iterable, got %r" % type(data))
+
+ def _output(self, s):
+ """Add a line of output to the current request buffer.
+
+ Assumes that the line does *not* end with \\r\\n.
+ """
+ self._buffer.append(s)
+
+ def _read_readable(self, readable):
+ blocksize = 8192
+ if self.debuglevel > 0:
+ print("sendIng a read()able")
+ encode = self._is_textIO(readable)
+ if encode and self.debuglevel > 0:
+ print("encoding file using iso-8859-1")
+ while True:
+ datablock = readable.read(blocksize)
+ if not datablock:
+ break
+ if encode:
+ datablock = datablock.encode("iso-8859-1")
+ yield datablock
+
+ def _send_output(self, message_body=None, encode_chunked=False):
+ """Send the currently buffered request and clear the buffer.
+
+ Appends an extra \\r\\n to the buffer.
+ A message_body may be specified, to be appended to the request.
+ """
+ self._buffer.extend((b"", b""))
+ msg = b"\r\n".join(self._buffer)
+ del self._buffer[:]
+ self.send(msg)
+
+ if message_body is not None:
+
+ # create a consistent interface to message_body
+ if hasattr(message_body, 'read'):
+ # Let file-like take precedence over byte-like. This
+ # is needed to allow the current position of mmap'ed
+ # files to be taken into account.
+ chunks = self._read_readable(message_body)
+ else:
+ try:
+ # this is solely to check to see if message_body
+ # implements the buffer API. it /would/ be easier
+ # to capture if PyObject_CheckBuffer was exposed
+ # to Python.
+ memoryview(message_body)
+ except TypeError:
+ try:
+ chunks = iter(message_body)
+ except TypeError:
+ raise TypeError("message_body should be a bytes-like "
+ "object or an iterable, got %r"
+ % type(message_body))
+ else:
+ # the object implements the buffer interface and
+ # can be passed directly into socket methods
+ chunks = (message_body,)
+
+ for chunk in chunks:
+ if not chunk:
+ if self.debuglevel > 0:
+ print('Zero length chunk ignored')
+ continue
+
+ if encode_chunked and self._http_vsn == 11:
+ # chunked encoding
+ chunk = '{:X}\r\n'.format(len(chunk)).encode('ascii') + chunk + b'\r\n'
+ self.send(chunk)
+
+ if encode_chunked and self._http_vsn == 11:
+ # end chunked transfer
+ self.send(b'0\r\n\r\n')
+
+ def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
+ """Send a request to the server.
+
+ `method' specifies an HTTP request method, e.g. 'GET'.
+ `url' specifies the object being requested, e.g. '/index.html'.
+        `skip_host' if True does not automatically add a 'Host:' header
+        `skip_accept_encoding' if True does not automatically add an
+          'Accept-Encoding:' header
+ """
+
+ # if a prior response has been completed, then forget about it.
+ if self.__response and self.__response.isclosed():
+ self.__response = None
+
+
+ # in certain cases, we cannot issue another request on this connection.
+ # this occurs when:
+ # 1) we are in the process of sending a request. (_CS_REQ_STARTED)
+ # 2) a response to a previous request has signalled that it is going
+ # to close the connection upon completion.
+ # 3) the headers for the previous response have not been read, thus
+ # we cannot determine whether point (2) is true. (_CS_REQ_SENT)
+ #
+ # if there is no prior response, then we can request at will.
+ #
+ # if point (2) is true, then we will have passed the socket to the
+ # response (effectively meaning, "there is no prior response"), and
+ # will open a new one when a new request is made.
+ #
+ # Note: if a prior response exists, then we *can* start a new request.
+ # We are not allowed to begin fetching the response to this new
+ # request, however, until that prior response is complete.
+ #
+ if self.__state == _CS_IDLE:
+ self.__state = _CS_REQ_STARTED
+ else:
+ raise CannotSendRequest(self.__state)
+
+ # Save the method we use, we need it later in the response phase
+ self._method = method
+ if not url:
+ url = '/'
+ request = '%s %s %s' % (method, url, self._http_vsn_str)
+
+ # Non-ASCII characters should have been eliminated earlier
+ self._output(request.encode('ascii'))
+
+ if self._http_vsn == 11:
+ # Issue some standard headers for better HTTP/1.1 compliance
+
+ if not skip_host:
+ # this header is issued *only* for HTTP/1.1
+ # connections. more specifically, this means it is
+ # only issued when the client uses the new
+ # HTTPConnection() class. backwards-compat clients
+ # will be using HTTP/1.0 and those clients may be
+ # issuing this header themselves. we should NOT issue
+ # it twice; some web servers (such as Apache) barf
+ # when they see two Host: headers
+
+            # If we need a non-standard port, include it in the
+            # header.  If the request is going through a proxy,
+            # use the host of the actual URL, not the host of the
+            # proxy.
+
+ netloc = ''
+ if url.startswith('http'):
+ nil, netloc, nil, nil, nil = urlsplit(url)
+
+ if netloc:
+ try:
+ netloc_enc = netloc.encode("ascii")
+ except UnicodeEncodeError:
+ netloc_enc = netloc.encode("idna")
+ self.putheader('Host', netloc_enc)
+ else:
+ if self._tunnel_host:
+ host = self._tunnel_host
+ port = self._tunnel_port
+ else:
+ host = self.host
+ port = self.port
+
+ try:
+ host_enc = host.encode("ascii")
+ except UnicodeEncodeError:
+ host_enc = host.encode("idna")
+
+                    # As per RFC 2732, IPv6 address should be wrapped with []
+ # when used as Host header
+
+ if host.find(':') >= 0:
+ host_enc = b'[' + host_enc + b']'
+
+ if port == self.default_port:
+ self.putheader('Host', host_enc)
+ else:
+ host_enc = host_enc.decode("ascii")
+ self.putheader('Host', "%s:%s" % (host_enc, port))
+
+ # note: we are assuming that clients will not attempt to set these
+ # headers since *this* library must deal with the
+ # consequences. this also means that when the supporting
+ # libraries are updated to recognize other forms, then this
+ # code should be changed (removed or updated).
+
+ # we only want a Content-Encoding of "identity" since we don't
+ # support encodings such as x-gzip or x-deflate.
+ if not skip_accept_encoding:
+ self.putheader('Accept-Encoding', 'identity')
+
+ # we can accept "chunked" Transfer-Encodings, but no others
+ # NOTE: no TE header implies *only* "chunked"
+ #self.putheader('TE', 'chunked')
+
+ # if TE is supplied in the header, then it must appear in a
+ # Connection header.
+ #self.putheader('Connection', 'TE')
+
+ else:
+ # For HTTP/1.0, the server will assume "not chunked"
+ pass
+
+ def putheader(self, header, *values):
+ """Send a request header line to the server.
+
+ For example: h.putheader('Accept', 'text/html')
+ """
+ if self.__state != _CS_REQ_STARTED:
+ raise CannotSendHeader()
+
+ if hasattr(header, 'encode'):
+ header = header.encode('ascii')
+
+ if not _is_legal_header_name(header):
+ raise ValueError('Invalid header name %r' % (header,))
+
+ values = list(values)
+ for i, one_value in enumerate(values):
+ if hasattr(one_value, 'encode'):
+ values[i] = one_value.encode('latin-1')
+ elif isinstance(one_value, int):
+ values[i] = str(one_value).encode('ascii')
+
+ if _is_illegal_header_value(values[i]):
+ raise ValueError('Invalid header value %r' % (values[i],))
+
+ value = b'\r\n\t'.join(values)
+ header = header + b': ' + value
+ self._output(header)
+
+ def endheaders(self, message_body=None, **kwds):
+ """Indicate that the last header line has been sent to the server.
+
+ This method sends the request to the server. The optional message_body
+ argument can be used to pass a message body associated with the
+ request.
+ """
+ encode_chunked = kwds.pop('encode_chunked', False)
+ if kwds:
+ # mimic interpreter error for unrecognized keyword
+ raise TypeError("endheaders() got an unexpected keyword argument '{}'"
+ .format(kwds.popitem()[0]))
+
+ if self.__state == _CS_REQ_STARTED:
+ self.__state = _CS_REQ_SENT
+ else:
+ raise CannotSendHeader()
+ self._send_output(message_body, encode_chunked=encode_chunked)
+
+ def request(self, method, url, body=None, headers={}, **kwds):
+ """Send a complete request to the server."""
+ encode_chunked = kwds.pop('encode_chunked', False)
+ if kwds:
+ # mimic interpreter error for unrecognized keyword
+ raise TypeError("request() got an unexpected keyword argument '{}'"
+ .format(kwds.popitem()[0]))
+ self._send_request(method, url, body, headers, encode_chunked)
+
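+    # A minimal usage sketch (hypothetical host; request() and
+    # getresponse() are the methods defined in this class):
+    #
+    #   conn = HTTPConnection("example.com")
+    #   conn.request("GET", "/", headers={"Accept": "text/html"})
+    #   resp = conn.getresponse()
+    #   data = resp.read()
+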
+ def _set_content_length(self, body, method):
+ # Set the content-length based on the body. If the body is "empty", we
+ # set Content-Length: 0 for methods that expect a body (RFC 7230,
+ # Section 3.3.2). If the body is set for other methods, we set the
+ # header provided we can figure out what the length is.
+ thelen = None
+ method_expects_body = method.upper() in _METHODS_EXPECTING_BODY
+ if body is None and method_expects_body:
+ thelen = '0'
+ elif body is not None:
+ try:
+ thelen = str(len(body))
+ except TypeError:
+ # If this is a file-like object, try to
+ # fstat its file descriptor
+ try:
+ thelen = str(os.fstat(body.fileno()).st_size)
+ except (AttributeError, OSError):
+ # Don't send a length if this failed
+ if self.debuglevel > 0: print("Cannot stat!!")
+
+ if thelen is not None:
+ self.putheader('Content-Length', thelen)
+
+ def _send_request(self, method, url, body, headers, encode_chunked):
+ # Honor explicitly requested Host: and Accept-Encoding: headers.
+ header_names = frozenset(k.lower() for k in headers)
+ skips = {}
+ if 'host' in header_names:
+ skips['skip_host'] = 1
+ if 'accept-encoding' in header_names:
+ skips['skip_accept_encoding'] = 1
+
+ self.putrequest(method, url, **skips)
+
+ # chunked encoding will happen if HTTP/1.1 is used and either
+ # the caller passes encode_chunked=True or the following
+ # conditions hold:
+ # 1. content-length has not been explicitly set
+ # 2. the body is a file or iterable, but not a str or bytes-like
+ # 3. Transfer-Encoding has NOT been explicitly set by the caller
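+        #
+        # For instance (a sketch matching the logic below): a generator
+        # body with no caller-supplied Content-Length or Transfer-Encoding
+        # header has no determinable length, so on HTTP/1.1 it is sent
+        # with Transfer-Encoding: chunked:
+        #
+        #   conn.request("POST", "/upload", body=(c for c in [b"a", b"b"]))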
+
+ if 'content-length' not in header_names:
+ # only chunk body if not explicitly set for backwards
+ # compatibility, assuming the client code is already handling the
+ # chunking
+ if 'transfer-encoding' not in header_names:
+ # if content-length cannot be automatically determined, fall
+ # back to chunked encoding
+ encode_chunked = False
+ content_length = self._get_content_length(body, method)
+ if content_length is None:
+ if body is not None:
+ if self.debuglevel > 0:
+ print('Unable to determine size of %r' % body)
+ encode_chunked = True
+ self.putheader('Transfer-Encoding', 'chunked')
+ else:
+ self.putheader('Content-Length', str(content_length))
+ else:
+ encode_chunked = False
+
+ for hdr, value in headers.items():
+ self.putheader(hdr, value)
+ if isinstance(body, str):
+            # RFC 2616 Section 3.7.1 says that text types have a default
+            # charset of iso-8859-1.
+ body = _encode(body, 'body')
+ self.endheaders(body, encode_chunked=encode_chunked)
+
+ def getresponse(self):
+ """Get the response from the server.
+
+ If the HTTPConnection is in the correct state, returns an
+ instance of HTTPResponse or of whatever object is returned by
+ the response_class variable.
+
+        If a request has not been sent or if a previous response has
+        not been handled, ResponseNotReady is raised. If the HTTP
+ response indicates that the connection should be closed, then
+ it will be closed before the response is returned. When the
+ connection is closed, the underlying socket is closed.
+ """
+
+ # if a prior response has been completed, then forget about it.
+ if self.__response and self.__response.isclosed():
+ self.__response = None
+
+ # if a prior response exists, then it must be completed (otherwise, we
+ # cannot read this response's header to determine the connection-close
+ # behavior)
+ #
+ # note: if a prior response existed, but was connection-close, then the
+ # socket and response were made independent of this HTTPConnection
+ # object since a new request requires that we open a whole new
+ # connection
+ #
+ # this means the prior response had one of two states:
+ # 1) will_close: this connection was reset and the prior socket and
+ # response operate independently
+ # 2) persistent: the response was retained and we await its
+ # isclosed() status to become true.
+ #
+ if self.__state != _CS_REQ_SENT or self.__response:
+ raise ResponseNotReady(self.__state)
+
+ if self.debuglevel > 0:
+ response = self.response_class(self.sock, self.debuglevel,
+ method=self._method)
+ else:
+ response = self.response_class(self.sock, method=self._method)
+
+ try:
+ try:
+ response.begin()
+ except ConnectionError:
+ self.close()
+ raise
+ assert response.will_close != _UNKNOWN
+ self.__state = _CS_IDLE
+
+ if response.will_close:
+ # this effectively passes the connection to the response
+ self.close()
+ else:
+ # remember this, so we can tell when it is complete
+ self.__response = response
+
+ return response
+ except:
+ response.close()
+ raise
+
+try:
+ from eventlet.green import ssl
+except ImportError:
+ pass
+else:
+ def _create_https_context(http_version):
+ # Function also used by urllib.request to be able to set the check_hostname
+ # attribute on a context object.
+ context = ssl._create_default_https_context()
+ # send ALPN extension to indicate HTTP/1.1 protocol
+ if http_version == 11:
+ context.set_alpn_protocols(['http/1.1'])
+ # enable PHA for TLS 1.3 connections if available
+ if context.post_handshake_auth is not None:
+ context.post_handshake_auth = True
+ return context
+
+ def _populate_https_context(context, check_hostname):
+ if check_hostname is not None:
+ context.check_hostname = check_hostname
+
+ class HTTPSConnection(HTTPConnection):
+ "This class allows communication via SSL."
+
+ default_port = HTTPS_PORT
+
+ # XXX Should key_file and cert_file be deprecated in favour of context?
+
+ def __init__(self, host, port=None, key_file=None, cert_file=None,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None, *, context=None,
+ check_hostname=None):
+ super().__init__(host, port, timeout,
+ source_address)
+ self.key_file = key_file
+ self.cert_file = cert_file
+ if context is None:
+ context = _create_https_context(self._http_vsn)
+ _populate_https_context(context, check_hostname)
+ if key_file or cert_file:
+ context.load_cert_chain(cert_file, key_file)
+ self._context = context
+ self._check_hostname = check_hostname
+
+ def connect(self):
+ "Connect to a host on a given (SSL) port."
+
+ super().connect()
+
+ if self._tunnel_host:
+ server_hostname = self._tunnel_host
+ else:
+ server_hostname = self.host
+
+ self.sock = self._context.wrap_socket(self.sock,
+ server_hostname=server_hostname)
+ if not self._context.check_hostname and self._check_hostname:
+ try:
+ ssl.match_hostname(self.sock.getpeercert(), server_hostname)
+ except Exception:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ raise
+
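+    # A minimal usage sketch (hypothetical host); the default context is
+    # built by _create_https_context() above:
+    #
+    #   conn = HTTPSConnection("example.com", 443)
+    #   conn.request("GET", "/")
+    #   print(conn.getresponse().status)
+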
+ __all__.append("HTTPSConnection")
+
+class HTTPException(Exception):
+ # Subclasses that define an __init__ must call Exception.__init__
+ # or define self.args. Otherwise, str() will fail.
+ pass
+
+class NotConnected(HTTPException):
+ pass
+
+class InvalidURL(HTTPException):
+ pass
+
+class UnknownProtocol(HTTPException):
+ def __init__(self, version):
+ self.args = version,
+ self.version = version
+
+class UnknownTransferEncoding(HTTPException):
+ pass
+
+class UnimplementedFileMode(HTTPException):
+ pass
+
+class IncompleteRead(HTTPException):
+ def __init__(self, partial, expected=None):
+ self.args = partial,
+ self.partial = partial
+ self.expected = expected
+ def __repr__(self):
+ if self.expected is not None:
+ e = ', %i more expected' % self.expected
+ else:
+ e = ''
+ return '%s(%i bytes read%s)' % (self.__class__.__name__,
+ len(self.partial), e)
+ def __str__(self):
+ return repr(self)
+
+class ImproperConnectionState(HTTPException):
+ pass
+
+class CannotSendRequest(ImproperConnectionState):
+ pass
+
+class CannotSendHeader(ImproperConnectionState):
+ pass
+
+class ResponseNotReady(ImproperConnectionState):
+ pass
+
+class BadStatusLine(HTTPException):
+ def __init__(self, line):
+ if not line:
+ line = repr(line)
+ self.args = line,
+ self.line = line
+
+class LineTooLong(HTTPException):
+ def __init__(self, line_type):
+ HTTPException.__init__(self, "got more than %d bytes when reading %s"
+ % (_MAXLINE, line_type))
+
+class RemoteDisconnected(ConnectionResetError, BadStatusLine):
+ def __init__(self, *pos, **kw):
+ BadStatusLine.__init__(self, "")
+ ConnectionResetError.__init__(self, *pos, **kw)
+
+# for backwards compatibility
+error = HTTPException
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/http/cookiejar.py b/tapdown/lib/python3.11/site-packages/eventlet/green/http/cookiejar.py
new file mode 100644
index 0000000..0394ca5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/http/cookiejar.py
@@ -0,0 +1,2154 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+r"""HTTP cookie handling for web clients.
+
+This module has (now fairly distant) origins in Gisle Aas' Perl module
+HTTP::Cookies, from the libwww-perl library.
+
+Docstrings, comments and debug strings in this code refer to the
+attributes of the HTTP cookie system as cookie-attributes, to distinguish
+them clearly from Python attributes.
+
+Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
+distributed with the Python standard library, but are available from
+http://wwwsearch.sf.net/):
+
+ CookieJar____
+ / \ \
+ FileCookieJar \ \
+ / | \ \ \
+ MozillaCookieJar | LWPCookieJar \ \
+ | | \
+ | ---MSIEBase | \
+ | / | | \
+ | / MSIEDBCookieJar BSDDBCookieJar
+ |/
+ MSIECookieJar
+
+"""
+
+__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
+ 'FileCookieJar', 'LWPCookieJar', 'LoadError', 'MozillaCookieJar']
+
+import copy
+import datetime
+import re
+import time
+# Eventlet change: urllib.request used to be imported here but it's not used,
+# removed for clarity
+import urllib.parse
+from calendar import timegm
+
+from eventlet.green import threading as _threading, time
+from eventlet.green.http import client as http_client # only for the default HTTP port
+
+debug = False # set to True to enable debugging via the logging module
+logger = None
+
+def _debug(*args):
+ if not debug:
+ return
+ global logger
+ if not logger:
+ import logging
+ logger = logging.getLogger("http.cookiejar")
+ return logger.debug(*args)
+
+
+DEFAULT_HTTP_PORT = str(http_client.HTTP_PORT)
+MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
+ "instance initialised with one)")
+
+def _warn_unhandled_exception():
+ # There are a few catch-all except: statements in this module, for
+ # catching input that's bad in unexpected ways. Warn if any
+ # exceptions are caught there.
+ import io, warnings, traceback
+ f = io.StringIO()
+ traceback.print_exc(None, f)
+ msg = f.getvalue()
+ warnings.warn("http.cookiejar bug!\n%s" % msg, stacklevel=2)
+
+
+# Date/time conversion
+# -----------------------------------------------------------------------------
+
+EPOCH_YEAR = 1970
+def _timegm(tt):
+ year, month, mday, hour, min, sec = tt[:6]
+ if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
+ (0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
+ return timegm(tt)
+ else:
+ return None
+
+DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+MONTHS_LOWER = [month.lower() for month in MONTHS]
+
+def time2isoz(t=None):
+    """Return a string representing the time t (in seconds since the epoch).
+
+ If the function is called without an argument, it will use the current
+ time.
+
+ The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
+ representing Universal Time (UTC, aka GMT). An example of this format is:
+
+ 1994-11-24 08:49:37Z
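+
+    An illustrative doctest (value worked out by hand):
+
+    >>> time2isoz(500000)
+    '1970-01-06 18:53:20Z'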
+
+ """
+ if t is None:
+ dt = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
+ else:
+ dt = datetime.datetime.fromtimestamp(t, tz=datetime.timezone.utc
+ ).replace(tzinfo=None)
+ return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
+ dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
+
+def time2netscape(t=None):
+    """Return a string representing the time t (in seconds since the epoch).
+
+ If the function is called without an argument, it will use the current
+ time.
+
+ The format of the returned string is like this:
+
+ Wed, DD-Mon-YYYY HH:MM:SS GMT
+
+ """
+ if t is None:
+ dt = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
+ else:
+ dt = datetime.datetime.fromtimestamp(t, tz=datetime.timezone.utc
+ ).replace(tzinfo=None)
+    return "%s, %02d-%s-%04d %02d:%02d:%02d GMT" % (
+ DAYS[dt.weekday()], dt.day, MONTHS[dt.month-1],
+ dt.year, dt.hour, dt.minute, dt.second)
+
+
+UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
+
+TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$", re.ASCII)
+def offset_from_tz_string(tz):
+ offset = None
+ if tz in UTC_ZONES:
+ offset = 0
+ else:
+ m = TIMEZONE_RE.search(tz)
+ if m:
+ offset = 3600 * int(m.group(2))
+ if m.group(3):
+ offset = offset + 60 * int(m.group(3))
+ if m.group(1) == '-':
+ offset = -offset
+ return offset
+
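+# Illustrative values, worked out from the regex above:
+#   offset_from_tz_string("GMT")   == 0
+#   offset_from_tz_string("+0130") == 5400    # 1h30m east of UTC
+#   offset_from_tz_string("-0800") == -28800  # 8h west of UTC
+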
+def _str2time(day, mon, yr, hr, min, sec, tz):
+ yr = int(yr)
+ if yr > datetime.MAXYEAR:
+ return None
+
+ # translate month name to number
+ # month numbers start with 1 (January)
+ try:
+ mon = MONTHS_LOWER.index(mon.lower())+1
+ except ValueError:
+ # maybe it's already a number
+ try:
+ imon = int(mon)
+ except ValueError:
+ return None
+ if 1 <= imon <= 12:
+ mon = imon
+ else:
+ return None
+
+ # make sure clock elements are defined
+ if hr is None: hr = 0
+ if min is None: min = 0
+ if sec is None: sec = 0
+
+ day = int(day)
+ hr = int(hr)
+ min = int(min)
+ sec = int(sec)
+
+ if yr < 1000:
+ # find "obvious" year
+ cur_yr = time.localtime(time.time())[0]
+ m = cur_yr % 100
+ tmp = yr
+ yr = yr + cur_yr - m
+ m = m - tmp
+ if abs(m) > 50:
+ if m > 0: yr = yr + 100
+ else: yr = yr - 100
+
+ # convert UTC time tuple to seconds since epoch (not timezone-adjusted)
+ t = _timegm((yr, mon, day, hr, min, sec, tz))
+
+ if t is not None:
+ # adjust time using timezone string, to get absolute time since epoch
+ if tz is None:
+ tz = "UTC"
+ tz = tz.upper()
+ offset = offset_from_tz_string(tz)
+ if offset is None:
+ return None
+ t = t - offset
+
+ return t
+
+STRICT_DATE_RE = re.compile(
+ r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
+ r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$", re.ASCII)
+WEEKDAY_RE = re.compile(
+ r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I | re.ASCII)
+LOOSE_HTTP_DATE_RE = re.compile(
+ r"""^
+ (\d\d?) # day
+ (?:\s+|[-\/])
+ (\w+) # month
+ (?:\s+|[-\/])
+ (\d+) # year
+ (?:
+ (?:\s+|:) # separator before clock
+ (\d\d?):(\d\d) # hour:min
+ (?::(\d\d))? # optional seconds
+ )? # optional clock
+ \s*
+ ([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
+ \s*
+ (?:\(\w+\))? # ASCII representation of timezone in parens.
+ \s*$""", re.X | re.ASCII)
+def http2time(text):
+ """Returns time in seconds since epoch of time represented by a string.
+
+ Return value is an integer.
+
+ None is returned if the format of str is unrecognized, the time is outside
+ the representable range, or the timezone string is not recognized. If the
+ string contains no timezone, UTC is assumed.
+
+ The timezone in the string may be numerical (like "-0800" or "+0100") or a
+ string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
+ timezone strings equivalent to UTC (zero offset) are known to the function.
+
+ The function loosely parses the following formats:
+
+ Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
+ Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
+ Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
+ 09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
+ 08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
+ 08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
+
+ The parser ignores leading and trailing whitespace. The time may be
+ absent.
+
+ If the year is given with only 2 digits, the function will select the
+ century that makes the year closest to the current date.
+
+ """
+ # fast exit for strictly conforming string
+ m = STRICT_DATE_RE.search(text)
+ if m:
+ g = m.groups()
+ mon = MONTHS_LOWER.index(g[1].lower()) + 1
+ tt = (int(g[2]), mon, int(g[0]),
+ int(g[3]), int(g[4]), float(g[5]))
+ return _timegm(tt)
+
+ # No, we need some messy parsing...
+
+ # clean up
+ text = text.lstrip()
+ text = WEEKDAY_RE.sub("", text, 1) # Useless weekday
+
+ # tz is time zone specifier string
+ day, mon, yr, hr, min, sec, tz = [None]*7
+
+ # loose regexp parse
+ m = LOOSE_HTTP_DATE_RE.search(text)
+ if m is not None:
+ day, mon, yr, hr, min, sec, tz = m.groups()
+ else:
+ return None # bad format
+
+ return _str2time(day, mon, yr, hr, min, sec, tz)
+
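+# For example, the strict fast path and the loose parser agree on the same
+# instant written with and without the weekday:
+#   http2time("Wed, 09 Feb 1994 22:23:32 GMT") == \
+#       http2time("09 Feb 1994 22:23:32 GMT")
+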
+ISO_DATE_RE = re.compile(
+ r"""^
+ (\d{4}) # year
+ [-\/]?
+ (\d\d?) # numerical month
+ [-\/]?
+ (\d\d?) # day
+ (?:
+ (?:\s+|[-:Tt]) # separator before clock
+ (\d\d?):?(\d\d) # hour:min
+ (?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
+ )? # optional clock
+ \s*
+ ([-+]?\d\d?:?(:?\d\d)?
+ |Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
+    \s*$""", re.X | re.ASCII)
+def iso2time(text):
+ """
+ As for http2time, but parses the ISO 8601 formats:
+
+ 1994-02-03 14:15:29 -0100 -- ISO 8601 format
+ 1994-02-03 14:15:29 -- zone is optional
+ 1994-02-03 -- only date
+ 1994-02-03T14:15:29 -- Use T as separator
+ 19940203T141529Z -- ISO 8601 compact format
+ 19940203 -- only date
+
+ """
+ # clean up
+ text = text.lstrip()
+
+ # tz is time zone specifier string
+ day, mon, yr, hr, min, sec, tz = [None]*7
+
+ # loose regexp parse
+ m = ISO_DATE_RE.search(text)
+ if m is not None:
+ # XXX there's an extra bit of the timezone I'm ignoring here: is
+ # this the right thing to do?
+ yr, mon, day, hr, min, sec, tz, _ = m.groups()
+ else:
+ return None # bad format
+
+ return _str2time(day, mon, yr, hr, min, sec, tz)
+
+
+# Header parsing
+# -----------------------------------------------------------------------------
+
+def unmatched(match):
+ """Return unmatched part of re.Match object."""
+ start, end = match.span(0)
+ return match.string[:start]+match.string[end:]
+
+HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
+HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
+HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
+HEADER_ESCAPE_RE = re.compile(r"\\(.)")
+def split_header_words(header_values):
+ r"""Parse header values into a list of lists containing key,value pairs.
+
+ The function knows how to deal with ",", ";" and "=" as well as quoted
+    values after "=". A list of space-separated tokens is parsed as if the
+    tokens were separated by ";".
+
+ If the header_values passed as argument contains multiple values, then they
+ are treated as if they were a single value separated by comma ",".
+
+ This means that this function is useful for parsing header fields that
+ follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
+ the requirement for tokens).
+
+ headers = #header
+ header = (token | parameter) *( [";"] (token | parameter))
+
+      token = 1*<any CHAR except CTLs or separators>
+      separators = "(" | ")" | "<" | ">" | "@"
+                 | "," | ";" | ":" | "\" | <">
+                 | "/" | "[" | "]" | "?" | "="
+                 | "{" | "}" | SP | HT
+
+      quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
+      qdtext = <any TEXT except <">>
+      quoted-pair = "\" CHAR
+
+ parameter = attribute "=" value
+ attribute = token
+ value = token | quoted-string
+
+ Each header is represented by a list of key/value pairs. The value for a
+ simple token (not part of a parameter) is None. Syntactically incorrect
+ headers will not necessarily be parsed as you would want.
+
+ This is easier to describe with some examples:
+
+ >>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
+ [[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
+ >>> split_header_words(['text/html; charset="iso-8859-1"'])
+ [[('text/html', None), ('charset', 'iso-8859-1')]]
+ >>> split_header_words([r'Basic realm="\"foo\bar\""'])
+ [[('Basic', None), ('realm', '"foobar"')]]
+
+ """
+ assert not isinstance(header_values, str)
+ result = []
+ for text in header_values:
+ orig_text = text
+ pairs = []
+ while text:
+ m = HEADER_TOKEN_RE.search(text)
+ if m:
+ text = unmatched(m)
+ name = m.group(1)
+ m = HEADER_QUOTED_VALUE_RE.search(text)
+ if m: # quoted value
+ text = unmatched(m)
+ value = m.group(1)
+ value = HEADER_ESCAPE_RE.sub(r"\1", value)
+ else:
+ m = HEADER_VALUE_RE.search(text)
+ if m: # unquoted value
+ text = unmatched(m)
+ value = m.group(1)
+ value = value.rstrip()
+ else:
+ # no value, a lone token
+ value = None
+ pairs.append((name, value))
+ elif text.lstrip().startswith(","):
+ # concatenated headers, as per RFC 2616 section 4.2
+ text = text.lstrip()[1:]
+ if pairs: result.append(pairs)
+ pairs = []
+ else:
+ # skip junk
+ non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
+ assert nr_junk_chars > 0, (
+ "split_header_words bug: '%s', '%s', %s" %
+ (orig_text, text, pairs))
+ text = non_junk
+ if pairs: result.append(pairs)
+ return result
+
+HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
+def join_header_words(lists):
+ """Do the inverse (almost) of the conversion done by split_header_words.
+
+ Takes a list of lists of (key, value) pairs and produces a single header
+ value. Attribute values are quoted if needed.
+
+ >>> join_header_words([[("text/plain", None), ("charset", "iso-8859-1")]])
+ 'text/plain; charset="iso-8859-1"'
+ >>> join_header_words([[("text/plain", None)], [("charset", "iso-8859-1")]])
+ 'text/plain, charset="iso-8859-1"'
+
+ """
+ headers = []
+ for pairs in lists:
+ attr = []
+ for k, v in pairs:
+ if v is not None:
+ if not re.search(r"^\w+$", v):
+ v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \
+ v = '"%s"' % v
+ k = "%s=%s" % (k, v)
+ attr.append(k)
+ if attr: headers.append("; ".join(attr))
+ return ", ".join(headers)
+
+def strip_quotes(text):
+ if text.startswith('"'):
+ text = text[1:]
+ if text.endswith('"'):
+ text = text[:-1]
+ return text
+
+def parse_ns_headers(ns_headers):
+ """Ad-hoc parser for Netscape protocol cookie-attributes.
+
+ The old Netscape cookie format for Set-Cookie can for instance contain
+ an unquoted "," in the expires field, so we have to use this ad-hoc
+ parser instead of split_header_words.
+
+ XXX This may not make the best possible effort to parse all the crap
+ that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
+    parser is probably better, so we could do worse than following it if
+ this ever gives any trouble.
+
+ Currently, this is also used for parsing RFC 2109 cookies.
+
+ """
+ known_attrs = ("expires", "domain", "path", "secure",
+ # RFC 2109 attrs (may turn up in Netscape cookies, too)
+ "version", "port", "max-age")
+
+ result = []
+ for ns_header in ns_headers:
+ pairs = []
+ version_set = False
+
+ # XXX: The following does not strictly adhere to RFCs in that empty
+ # names and values are legal (the former will only appear once and will
+ # be overwritten if multiple occurrences are present). This is
+ # mostly to deal with backwards compatibility.
+ for ii, param in enumerate(ns_header.split(';')):
+ param = param.strip()
+
+ key, sep, val = param.partition('=')
+ key = key.strip()
+
+ if not key:
+ if ii == 0:
+ break
+ else:
+ continue
+
+ # allow for a distinction between present and empty and missing
+ # altogether
+ val = val.strip() if sep else None
+
+ if ii != 0:
+ lc = key.lower()
+ if lc in known_attrs:
+ key = lc
+
+ if key == "version":
+ # This is an RFC 2109 cookie.
+ if val is not None:
+ val = strip_quotes(val)
+ version_set = True
+ elif key == "expires":
+ # convert expires date to seconds since epoch
+ if val is not None:
+ val = http2time(strip_quotes(val)) # None if invalid
+ pairs.append((key, val))
+
+ if pairs:
+ if not version_set:
+ pairs.append(("version", "0"))
+ result.append(pairs)
+
+ return result
+
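+# For example (traced through the loop above; "secure" has no value, so it
+# maps to None, and a default version is appended):
+#   parse_ns_headers(['foo=bar; path=/; secure'])
+#   == [[('foo', 'bar'), ('path', '/'), ('secure', None), ('version', '0')]]
+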
+
+IPV4_RE = re.compile(r"\.\d+$", re.ASCII)
+def is_HDN(text):
+ """Return True if text is a host domain name."""
+ # XXX
+ # This may well be wrong. Which RFC is HDN defined in, if any (for
+ # the purposes of RFC 2965)?
+ # For the current implementation, what about IPv6? Remember to look
+ # at other uses of IPV4_RE also, if change this.
+ if IPV4_RE.search(text):
+ return False
+ if text == "":
+ return False
+ if text[0] == "." or text[-1] == ".":
+ return False
+ return True
+
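+# Illustrative values:
+#   is_HDN("example.com")  -> True
+#   is_HDN("192.168.1.10") -> False  # trailing ".digits" looks like an IP
+#   is_HDN("")             -> False
+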
+def domain_match(A, B):
+ """Return True if domain A domain-matches domain B, according to RFC 2965.
+
+ A and B may be host domain names or IP addresses.
+
+ RFC 2965, section 1:
+
+ Host names can be specified either as an IP address or a HDN string.
+ Sometimes we compare one host name with another. (Such comparisons SHALL
+ be case-insensitive.) Host A's name domain-matches host B's if
+
+ * their host name strings string-compare equal; or
+
+ * A is a HDN string and has the form NB, where N is a non-empty
+ name string, B has the form .B', and B' is a HDN string. (So,
+ x.y.com domain-matches .Y.com but not Y.com.)
+
+ Note that domain-match is not a commutative operation: a.b.c.com
+ domain-matches .c.com, but not the reverse.
+
+ """
+ # Note that, if A or B are IP addresses, the only relevant part of the
+ # definition of the domain-match algorithm is the direct string-compare.
+ A = A.lower()
+ B = B.lower()
+ if A == B:
+ return True
+ if not is_HDN(A):
+ return False
+ i = A.rfind(B)
+ if i == -1 or i == 0:
+ # A does not have form NB, or N is the empty string
+ return False
+ if not B.startswith("."):
+ return False
+ if not is_HDN(B[1:]):
+ return False
+ return True
+
+def liberal_is_HDN(text):
+    """Return True if text is sort-of like a host domain name.
+
+ For accepting/blocking domains.
+
+ """
+ if IPV4_RE.search(text):
+ return False
+ return True
+
+def user_domain_match(A, B):
+ """For blocking/accepting domains.
+
+ A and B may be host domain names or IP addresses.
+
+ """
+ A = A.lower()
+ B = B.lower()
+ if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
+ if A == B:
+ # equal IP addresses
+ return True
+ return False
+ initial_dot = B.startswith(".")
+ if initial_dot and A.endswith(B):
+ return True
+ if not initial_dot and A == B:
+ return True
+ return False
+
+cut_port_re = re.compile(r":\d+$", re.ASCII)
+def request_host(request):
+ """Return request-host, as defined by RFC 2965.
+
+ Variation from RFC: returned value is lowercased, for convenient
+ comparison.
+
+ """
+ url = request.get_full_url()
+ host = urllib.parse.urlparse(url)[1]
+ if host == "":
+ host = request.get_header("Host", "")
+
+ # remove port, if present
+ host = cut_port_re.sub("", host, 1)
+ return host.lower()
+
+def eff_request_host(request):
+ """Return a tuple (request-host, effective request-host name).
+
+ As defined by RFC 2965, except both are lowercased.
+
+ """
+ erhn = req_host = request_host(request)
+ if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
+ erhn = req_host + ".local"
+ return req_host, erhn
+
+def request_path(request):
+ """Path component of request-URI, as defined by RFC 2965."""
+ url = request.get_full_url()
+ parts = urllib.parse.urlsplit(url)
+ path = escape_path(parts.path)
+ if not path.startswith("/"):
+ # fix bad RFC 2396 absoluteURI
+ path = "/" + path
+ return path
+
+def request_port(request):
+ host = request.host
+ i = host.find(':')
+ if i >= 0:
+ port = host[i+1:]
+ try:
+ int(port)
+ except ValueError:
+ _debug("nonnumeric port: '%s'", port)
+ return None
+ else:
+ port = DEFAULT_HTTP_PORT
+ return port
+
+# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
+# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
+HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
+ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
+def uppercase_escaped_char(match):
+ return "%%%s" % match.group(1).upper()
+def escape_path(path):
+ """Escape any invalid characters in HTTP URL, and uppercase all escapes."""
+ # There's no knowing what character encoding was used to create URLs
+ # containing %-escapes, but since we have to pick one to escape invalid
+ # path characters, we pick UTF-8, as recommended in the HTML 4.0
+ # specification:
+ # http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
+ # And here, kind of: draft-fielding-uri-rfc2396bis-03
+ # (And in draft IRI specification: draft-duerst-iri-05)
+ # (And here, for new URI schemes: RFC 2718)
+ path = urllib.parse.quote(path, HTTP_PATH_SAFE)
+ path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
+ return path
+
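+# For example (the space is escaped, the existing %-escape is uppercased):
+#   escape_path("/foo bar/%7euser") -> "/foo%20bar/%7Euser"
+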
+def reach(h):
+ """Return reach of host h, as defined by RFC 2965, section 1.
+
+ The reach R of a host name H is defined as follows:
+
+ * If
+
+ - H is the host domain name of a host; and,
+
+ - H has the form A.B; and
+
+ - A has no embedded (that is, interior) dots; and
+
+ - B has at least one embedded dot, or B is the string "local".
+ then the reach of H is .B.
+
+ * Otherwise, the reach of H is H.
+
+ >>> reach("www.acme.com")
+ '.acme.com'
+ >>> reach("acme.com")
+ 'acme.com'
+ >>> reach("acme.local")
+ '.local'
+
+ """
+ i = h.find(".")
+ if i >= 0:
+ #a = h[:i] # this line is only here to show what a is
+ b = h[i+1:]
+ i = b.find(".")
+ if is_HDN(h) and (i >= 0 or b == "local"):
+ return "."+b
+ return h
+
+def is_third_party(request):
+    """Return True if request is to a third-party host.
+
+ RFC 2965, section 3.3.6:
+
+ An unverifiable transaction is to a third-party host if its request-
+ host U does not domain-match the reach R of the request-host O in the
+ origin transaction.
+
+ """
+ req_host = request_host(request)
+ if not domain_match(req_host, reach(request.origin_req_host)):
+ return True
+ else:
+ return False
+
+
+class Cookie:
+ """HTTP Cookie.
+
+ This class represents both Netscape and RFC 2965 cookies.
+
+ This is deliberately a very simple class. It just holds attributes. It's
+ possible to construct Cookie instances that don't comply with the cookie
+ standards. CookieJar.make_cookies is the factory function for Cookie
+ objects -- it deals with cookie parsing, supplying defaults, and
+ normalising to the representation used in this class. CookiePolicy is
+ responsible for checking them to see whether they should be accepted from
+ and returned to the server.
+
+ Note that the port may be present in the headers, but unspecified ("Port"
+    rather than "Port=80", for example); if this is the case, port is None.
+
+ """
+
+ def __init__(self, version, name, value,
+ port, port_specified,
+ domain, domain_specified, domain_initial_dot,
+ path, path_specified,
+ secure,
+ expires,
+ discard,
+ comment,
+ comment_url,
+ rest,
+ rfc2109=False,
+ ):
+
+ if version is not None: version = int(version)
+ if expires is not None: expires = int(float(expires))
+ if port is None and port_specified is True:
+ raise ValueError("if port is None, port_specified must be false")
+
+ self.version = version
+ self.name = name
+ self.value = value
+ self.port = port
+ self.port_specified = port_specified
+ # normalise case, as per RFC 2965 section 3.3.3
+ self.domain = domain.lower()
+ self.domain_specified = domain_specified
+ # Sigh. We need to know whether the domain given in the
+ # cookie-attribute had an initial dot, in order to follow RFC 2965
+ # (as clarified in draft errata). Needed for the returned $Domain
+ # value.
+ self.domain_initial_dot = domain_initial_dot
+ self.path = path
+ self.path_specified = path_specified
+ self.secure = secure
+ self.expires = expires
+ self.discard = discard
+ self.comment = comment
+ self.comment_url = comment_url
+ self.rfc2109 = rfc2109
+
+ self._rest = copy.copy(rest)
+
+ def has_nonstandard_attr(self, name):
+ return name in self._rest
+ def get_nonstandard_attr(self, name, default=None):
+ return self._rest.get(name, default)
+ def set_nonstandard_attr(self, name, value):
+ self._rest[name] = value
+
+ def is_expired(self, now=None):
+ if now is None: now = time.time()
+ if (self.expires is not None) and (self.expires <= now):
+ return True
+ return False
+
+ def __str__(self):
+ if self.port is None: p = ""
+ else: p = ":"+self.port
+ limit = self.domain + p + self.path
+ if self.value is not None:
+ namevalue = "%s=%s" % (self.name, self.value)
+ else:
+ namevalue = self.name
+        return "<Cookie %s for %s>" % (namevalue, limit)
+
+ def __repr__(self):
+ args = []
+ for name in ("version", "name", "value",
+ "port", "port_specified",
+ "domain", "domain_specified", "domain_initial_dot",
+ "path", "path_specified",
+ "secure", "expires", "discard", "comment", "comment_url",
+ ):
+ attr = getattr(self, name)
+ args.append("%s=%s" % (name, repr(attr)))
+ args.append("rest=%s" % repr(self._rest))
+ args.append("rfc2109=%s" % repr(self.rfc2109))
+ return "%s(%s)" % (self.__class__.__name__, ", ".join(args))
+
+
+class CookiePolicy:
+ """Defines which cookies get accepted from and returned to server.
+
+ May also modify cookies, though this is probably a bad idea.
+
+ The subclass DefaultCookiePolicy defines the standard rules for Netscape
+ and RFC 2965 cookies -- override that if you want a customised policy.
+
+ """
+ def set_ok(self, cookie, request):
+ """Return true if (and only if) cookie should be accepted from server.
+
+ Currently, pre-expired cookies never get this far -- the CookieJar
+ class deletes such cookies itself.
+
+ """
+ raise NotImplementedError()
+
+ def return_ok(self, cookie, request):
+ """Return true if (and only if) cookie should be returned to server."""
+ raise NotImplementedError()
+
+ def domain_return_ok(self, domain, request):
+ """Return false if cookies should not be returned, given cookie domain.
+ """
+ return True
+
+ def path_return_ok(self, path, request):
+ """Return false if cookies should not be returned, given cookie path.
+ """
+ return True
+
+
+class DefaultCookiePolicy(CookiePolicy):
+ """Implements the standard rules for accepting and returning cookies."""
+
+ DomainStrictNoDots = 1
+ DomainStrictNonDomain = 2
+ DomainRFC2965Match = 4
+
+ DomainLiberal = 0
+ DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
+
+ def __init__(self,
+ blocked_domains=None, allowed_domains=None,
+ netscape=True, rfc2965=False,
+ rfc2109_as_netscape=None,
+ hide_cookie2=False,
+ strict_domain=False,
+ strict_rfc2965_unverifiable=True,
+ strict_ns_unverifiable=False,
+ strict_ns_domain=DomainLiberal,
+ strict_ns_set_initial_dollar=False,
+ strict_ns_set_path=False,
+ ):
+ """Constructor arguments should be passed as keyword arguments only."""
+ self.netscape = netscape
+ self.rfc2965 = rfc2965
+ self.rfc2109_as_netscape = rfc2109_as_netscape
+ self.hide_cookie2 = hide_cookie2
+ self.strict_domain = strict_domain
+ self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
+ self.strict_ns_unverifiable = strict_ns_unverifiable
+ self.strict_ns_domain = strict_ns_domain
+ self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
+ self.strict_ns_set_path = strict_ns_set_path
+
+ if blocked_domains is not None:
+ self._blocked_domains = tuple(blocked_domains)
+ else:
+ self._blocked_domains = ()
+
+ if allowed_domains is not None:
+ allowed_domains = tuple(allowed_domains)
+ self._allowed_domains = allowed_domains
+
+ def blocked_domains(self):
+ """Return the sequence of blocked domains (as a tuple)."""
+ return self._blocked_domains
+ def set_blocked_domains(self, blocked_domains):
+ """Set the sequence of blocked domains."""
+ self._blocked_domains = tuple(blocked_domains)
+
+ def is_blocked(self, domain):
+ for blocked_domain in self._blocked_domains:
+ if user_domain_match(domain, blocked_domain):
+ return True
+ return False
+
+ def allowed_domains(self):
+ """Return None, or the sequence of allowed domains (as a tuple)."""
+ return self._allowed_domains
+ def set_allowed_domains(self, allowed_domains):
+ """Set the sequence of allowed domains, or None."""
+ if allowed_domains is not None:
+ allowed_domains = tuple(allowed_domains)
+ self._allowed_domains = allowed_domains
+
+ def is_not_allowed(self, domain):
+ if self._allowed_domains is None:
+ return False
+ for allowed_domain in self._allowed_domains:
+ if user_domain_match(domain, allowed_domain):
+ return False
+ return True
+
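+    # A minimal sketch of the block-list behaviour (hypothetical domain):
+    #
+    #   policy = DefaultCookiePolicy(blocked_domains=[".ads.example"])
+    #   policy.is_blocked("tracker.ads.example")  # True, via user_domain_match
+    #   policy.is_blocked("example.com")          # False
+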
+ def set_ok(self, cookie, request):
+ """
+ If you override .set_ok(), be sure to call this method. If it returns
+ false, so should your subclass (assuming your subclass wants to be more
+ strict about which cookies to accept).
+
+ """
+ _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
+
+ assert cookie.name is not None
+
+ for n in "version", "verifiability", "name", "path", "domain", "port":
+ fn_name = "set_ok_"+n
+ fn = getattr(self, fn_name)
+ if not fn(cookie, request):
+ return False
+
+ return True
+
+ def set_ok_version(self, cookie, request):
+ if cookie.version is None:
+ # Version is always set to 0 by parse_ns_headers if it's a Netscape
+ # cookie, so this must be an invalid RFC 2965 cookie.
+ _debug(" Set-Cookie2 without version attribute (%s=%s)",
+ cookie.name, cookie.value)
+ return False
+ if cookie.version > 0 and not self.rfc2965:
+ _debug(" RFC 2965 cookies are switched off")
+ return False
+ elif cookie.version == 0 and not self.netscape:
+ _debug(" Netscape cookies are switched off")
+ return False
+ return True
+
+ def set_ok_verifiability(self, cookie, request):
+ if request.unverifiable and is_third_party(request):
+ if cookie.version > 0 and self.strict_rfc2965_unverifiable:
+ _debug(" third-party RFC 2965 cookie during "
+ "unverifiable transaction")
+ return False
+ elif cookie.version == 0 and self.strict_ns_unverifiable:
+ _debug(" third-party Netscape cookie during "
+ "unverifiable transaction")
+ return False
+ return True
+
+ def set_ok_name(self, cookie, request):
+ # Try and stop servers setting V0 cookies designed to hack other
+ # servers that know both V0 and V1 protocols.
+ if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
+ cookie.name.startswith("$")):
+ _debug(" illegal name (starts with '$'): '%s'", cookie.name)
+ return False
+ return True
+
+ def set_ok_path(self, cookie, request):
+ if cookie.path_specified:
+ req_path = request_path(request)
+ if ((cookie.version > 0 or
+ (cookie.version == 0 and self.strict_ns_set_path)) and
+ not req_path.startswith(cookie.path)):
+ _debug(" path attribute %s is not a prefix of request "
+ "path %s", cookie.path, req_path)
+ return False
+ return True
+
+ def set_ok_domain(self, cookie, request):
+ if self.is_blocked(cookie.domain):
+ _debug(" domain %s is in user block-list", cookie.domain)
+ return False
+ if self.is_not_allowed(cookie.domain):
+ _debug(" domain %s is not in user allow-list", cookie.domain)
+ return False
+ if cookie.domain_specified:
+ req_host, erhn = eff_request_host(request)
+ domain = cookie.domain
+ if self.strict_domain and (domain.count(".") >= 2):
+ # XXX This should probably be compared with the Konqueror
+ # (kcookiejar.cpp) and Mozilla implementations, but it's a
+ # losing battle.
+ i = domain.rfind(".")
+ j = domain.rfind(".", 0, i)
+ if j == 0: # domain like .foo.bar
+ tld = domain[i+1:]
+ sld = domain[j+1:i]
+ if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
+ "gov", "mil", "int", "aero", "biz", "cat", "coop",
+ "info", "jobs", "mobi", "museum", "name", "pro",
+ "travel", "eu") and len(tld) == 2:
+ # domain like .co.uk
+ _debug(" country-code second level domain %s", domain)
+ return False
+ if domain.startswith("."):
+ undotted_domain = domain[1:]
+ else:
+ undotted_domain = domain
+ embedded_dots = (undotted_domain.find(".") >= 0)
+ if not embedded_dots and domain != ".local":
+ _debug(" non-local domain %s contains no embedded dot",
+ domain)
+ return False
+ if cookie.version == 0:
+ if (not erhn.endswith(domain) and
+ (not erhn.startswith(".") and
+ not ("."+erhn).endswith(domain))):
+ _debug(" effective request-host %s (even with added "
+ "initial dot) does not end with %s",
+ erhn, domain)
+ return False
+ if (cookie.version > 0 or
+ (self.strict_ns_domain & self.DomainRFC2965Match)):
+ if not domain_match(erhn, domain):
+ _debug(" effective request-host %s does not domain-match "
+ "%s", erhn, domain)
+ return False
+ if (cookie.version > 0 or
+ (self.strict_ns_domain & self.DomainStrictNoDots)):
+ host_prefix = req_host[:-len(domain)]
+ if (host_prefix.find(".") >= 0 and
+ not IPV4_RE.search(req_host)):
+ _debug(" host prefix %s for domain %s contains a dot",
+ host_prefix, domain)
+ return False
+ return True
+
+ def set_ok_port(self, cookie, request):
+ if cookie.port_specified:
+ req_port = request_port(request)
+ if req_port is None:
+ req_port = "80"
+ else:
+ req_port = str(req_port)
+ for p in cookie.port.split(","):
+ try:
+ int(p)
+ except ValueError:
+ _debug(" bad port %s (not numeric)", p)
+ return False
+ if p == req_port:
+ break
+ else:
+ _debug(" request port (%s) not found in %s",
+ req_port, cookie.port)
+ return False
+ return True
+
+ def return_ok(self, cookie, request):
+ """
+ If you override .return_ok(), be sure to call this method. If it
+ returns false, so should your subclass (assuming your subclass wants to
+ be more strict about which cookies to return).
+
+ """
+ # Path has already been checked by .path_return_ok(), and domain
+ # blocking done by .domain_return_ok().
+ _debug(" - checking cookie %s=%s", cookie.name, cookie.value)
+
+ for n in "version", "verifiability", "secure", "expires", "port", "domain":
+ fn_name = "return_ok_"+n
+ fn = getattr(self, fn_name)
+ if not fn(cookie, request):
+ return False
+ return True
+
+ def return_ok_version(self, cookie, request):
+ if cookie.version > 0 and not self.rfc2965:
+ _debug(" RFC 2965 cookies are switched off")
+ return False
+ elif cookie.version == 0 and not self.netscape:
+ _debug(" Netscape cookies are switched off")
+ return False
+ return True
+
+ def return_ok_verifiability(self, cookie, request):
+ if request.unverifiable and is_third_party(request):
+ if cookie.version > 0 and self.strict_rfc2965_unverifiable:
+ _debug(" third-party RFC 2965 cookie during unverifiable "
+ "transaction")
+ return False
+ elif cookie.version == 0 and self.strict_ns_unverifiable:
+ _debug(" third-party Netscape cookie during unverifiable "
+ "transaction")
+ return False
+ return True
+
+ def return_ok_secure(self, cookie, request):
+ if cookie.secure and request.type != "https":
+ _debug(" secure cookie with non-secure request")
+ return False
+ return True
+
+ def return_ok_expires(self, cookie, request):
+ if cookie.is_expired(self._now):
+ _debug(" cookie expired")
+ return False
+ return True
+
+ def return_ok_port(self, cookie, request):
+ if cookie.port:
+ req_port = request_port(request)
+ if req_port is None:
+ req_port = "80"
+ for p in cookie.port.split(","):
+ if p == req_port:
+ break
+ else:
+ _debug(" request port %s does not match cookie port %s",
+ req_port, cookie.port)
+ return False
+ return True
+
+ def return_ok_domain(self, cookie, request):
+ req_host, erhn = eff_request_host(request)
+ domain = cookie.domain
+
+ # strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
+ if (cookie.version == 0 and
+ (self.strict_ns_domain & self.DomainStrictNonDomain) and
+ not cookie.domain_specified and domain != erhn):
+ _debug(" cookie with unspecified domain does not string-compare "
+ "equal to request domain")
+ return False
+
+ if cookie.version > 0 and not domain_match(erhn, domain):
+ _debug(" effective request-host name %s does not domain-match "
+ "RFC 2965 cookie domain %s", erhn, domain)
+ return False
+ if cookie.version == 0 and not ("."+erhn).endswith(domain):
+ _debug(" request-host %s does not match Netscape cookie domain "
+ "%s", req_host, domain)
+ return False
+ return True
+
+ def domain_return_ok(self, domain, request):
+        # Liberal check of domain. This is here as an optimization to avoid
+ # having to load lots of MSIE cookie files unless necessary.
+ req_host, erhn = eff_request_host(request)
+ if not req_host.startswith("."):
+ req_host = "."+req_host
+ if not erhn.startswith("."):
+ erhn = "."+erhn
+ if not (req_host.endswith(domain) or erhn.endswith(domain)):
+ #_debug(" request domain %s does not match cookie domain %s",
+ # req_host, domain)
+ return False
+
+ if self.is_blocked(domain):
+ _debug(" domain %s is in user block-list", domain)
+ return False
+ if self.is_not_allowed(domain):
+ _debug(" domain %s is not in user allow-list", domain)
+ return False
+
+ return True
+
+ def path_return_ok(self, path, request):
+ _debug("- checking cookie path=%s", path)
+ req_path = request_path(request)
+ if not req_path.startswith(path):
+ _debug(" %s does not path-match %s", req_path, path)
+ return False
+ return True
+
+
+def vals_sorted_by_key(adict):
+ keys = sorted(adict.keys())
+ return map(adict.get, keys)
+
+def deepvalues(mapping):
+ """Iterates over nested mapping, depth-first, in sorted order by key."""
+ values = vals_sorted_by_key(mapping)
+ for obj in values:
+ mapping = False
+ try:
+ obj.items
+ except AttributeError:
+ pass
+ else:
+ mapping = True
+ yield from deepvalues(obj)
+ if not mapping:
+ yield obj
+
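+# For example, nested mappings are flattened depth-first in key order:
+#   list(deepvalues({"b": {"y": 2, "x": 1}, "a": 0})) == [0, 1, 2]
+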
+
+# Used as second parameter to dict.get() method, to distinguish absent
+# dict key from one with a None value.
+class Absent: pass
+
+class CookieJar:
+ """Collection of HTTP cookies.
+
+ You may not need to know about this class: try
+ urllib.request.build_opener(HTTPCookieProcessor).open(url).
+ """
+
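+    # A minimal sketch of direct use (hypothetical URL; urllib.request is
+    # imported by the caller):
+    #
+    #   jar = CookieJar()
+    #   req = urllib.request.Request("http://example.com/")
+    #   jar.add_cookie_header(req)  # adds a Cookie: header if any match
+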
+ non_word_re = re.compile(r"\W")
+ quote_re = re.compile(r"([\"\\])")
+ strict_domain_re = re.compile(r"\.?[^.]*")
+ domain_re = re.compile(r"[^.]*")
+ dots_re = re.compile(r"^\.+")
+
+ magic_re = re.compile(r"^\#LWP-Cookies-(\d+\.\d+)", re.ASCII)
+
+ def __init__(self, policy=None):
+ if policy is None:
+ policy = DefaultCookiePolicy()
+ self._policy = policy
+
+ self._cookies_lock = _threading.RLock()
+ self._cookies = {}
+
+ def set_policy(self, policy):
+ self._policy = policy
+
+ def _cookies_for_domain(self, domain, request):
+ cookies = []
+ if not self._policy.domain_return_ok(domain, request):
+ return []
+ _debug("Checking %s for cookies to return", domain)
+ cookies_by_path = self._cookies[domain]
+ for path in cookies_by_path.keys():
+ if not self._policy.path_return_ok(path, request):
+ continue
+ cookies_by_name = cookies_by_path[path]
+ for cookie in cookies_by_name.values():
+ if not self._policy.return_ok(cookie, request):
+ _debug(" not returning cookie")
+ continue
+ _debug(" it's a match")
+ cookies.append(cookie)
+ return cookies
+
+ def _cookies_for_request(self, request):
+ """Return a list of cookies to be returned to server."""
+ cookies = []
+ for domain in self._cookies.keys():
+ cookies.extend(self._cookies_for_domain(domain, request))
+ return cookies
+
+ def _cookie_attrs(self, cookies):
+ """Return a list of cookie-attributes to be returned to server.
+
+ like ['foo="bar"; $Path="/"', ...]
+
+ The $Version attribute is also added when appropriate (currently only
+ once per request).
+
+ """
+ # add cookies in order of most specific (ie. longest) path first
+ cookies.sort(key=lambda a: len(a.path), reverse=True)
+
+ version_set = False
+
+ attrs = []
+ for cookie in cookies:
+ # set version of Cookie header
+ # XXX
+ # What should it be if multiple matching Set-Cookie headers have
+ # different versions themselves?
+ # Answer: there is no answer; was supposed to be settled by
+ # RFC 2965 errata, but that may never appear...
+ version = cookie.version
+ if not version_set:
+ version_set = True
+ if version > 0:
+ attrs.append("$Version=%s" % version)
+
+ # quote cookie value if necessary
+ # (not for Netscape protocol, which already has any quotes
+ # intact, due to the poorly-specified Netscape Cookie: syntax)
+ if ((cookie.value is not None) and
+ self.non_word_re.search(cookie.value) and version > 0):
+ value = self.quote_re.sub(r"\\\1", cookie.value)
+ else:
+ value = cookie.value
+
+ # add cookie-attributes to be returned in Cookie header
+ if cookie.value is None:
+ attrs.append(cookie.name)
+ else:
+ attrs.append("%s=%s" % (cookie.name, value))
+ if version > 0:
+ if cookie.path_specified:
+ attrs.append('$Path="%s"' % cookie.path)
+ if cookie.domain.startswith("."):
+ domain = cookie.domain
+ if (not cookie.domain_initial_dot and
+ domain.startswith(".")):
+ domain = domain[1:]
+ attrs.append('$Domain="%s"' % domain)
+ if cookie.port is not None:
+ p = "$Port"
+ if cookie.port_specified:
+ p = p + ('="%s"' % cookie.port)
+ attrs.append(p)
+
+ return attrs
+
+ def add_cookie_header(self, request):
+ """Add correct Cookie: header to request (urllib.request.Request object).
+
+ The Cookie2 header is also added unless policy.hide_cookie2 is true.
+
+ """
+ _debug("add_cookie_header")
+ self._cookies_lock.acquire()
+ try:
+
+ self._policy._now = self._now = int(time.time())
+
+ cookies = self._cookies_for_request(request)
+
+ attrs = self._cookie_attrs(cookies)
+ if attrs:
+ if not request.has_header("Cookie"):
+ request.add_unredirected_header(
+ "Cookie", "; ".join(attrs))
+
+ # if necessary, advertise that we know RFC 2965
+ if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
+ not request.has_header("Cookie2")):
+ for cookie in cookies:
+ if cookie.version != 1:
+ request.add_unredirected_header("Cookie2", '$Version="1"')
+ break
+
+ finally:
+ self._cookies_lock.release()
+
+ self.clear_expired_cookies()
+
+ def _normalized_cookie_tuples(self, attrs_set):
+ """Return list of tuples containing normalised cookie information.
+
+ attrs_set is the list of lists of key,value pairs extracted from
+ the Set-Cookie or Set-Cookie2 headers.
+
+ Tuples are name, value, standard, rest, where name and value are the
+ cookie name and value, standard is a dictionary containing the standard
+ cookie-attributes (discard, secure, version, expires or max-age,
+ domain, path and port) and rest is a dictionary containing the rest of
+ the cookie-attributes.
+
+ """
+ cookie_tuples = []
+
+ boolean_attrs = "discard", "secure"
+ value_attrs = ("version",
+ "expires", "max-age",
+ "domain", "path", "port",
+ "comment", "commenturl")
+
+ for cookie_attrs in attrs_set:
+ name, value = cookie_attrs[0]
+
+ # Build dictionary of standard cookie-attributes (standard) and
+ # dictionary of other cookie-attributes (rest).
+
+ # Note: expiry time is normalised to seconds since epoch. V0
+ # cookies should have the Expires cookie-attribute, and V1 cookies
+ # should have Max-Age, but since V1 includes RFC 2109 cookies (and
+ # since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
+ # accept either (but prefer Max-Age).
+ max_age_set = False
+
+ bad_cookie = False
+
+ standard = {}
+ rest = {}
+ for k, v in cookie_attrs[1:]:
+ lc = k.lower()
+ # don't lose case distinction for unknown fields
+ if lc in value_attrs or lc in boolean_attrs:
+ k = lc
+ if k in boolean_attrs and v is None:
+ # boolean cookie-attribute is present, but has no value
+ # (like "discard", rather than "port=80")
+ v = True
+ if k in standard:
+ # only first value is significant
+ continue
+ if k == "domain":
+ if v is None:
+ _debug(" missing value for domain attribute")
+ bad_cookie = True
+ break
+ # RFC 2965 section 3.3.3
+ v = v.lower()
+ if k == "expires":
+ if max_age_set:
+ # Prefer max-age to expires (like Mozilla)
+ continue
+ if v is None:
+ _debug(" missing or invalid value for expires "
+ "attribute: treating as session cookie")
+ continue
+ if k == "max-age":
+ max_age_set = True
+ try:
+ v = int(v)
+ except ValueError:
+ _debug(" missing or invalid (non-numeric) value for "
+ "max-age attribute")
+ bad_cookie = True
+ break
+ # convert RFC 2965 Max-Age to seconds since epoch
+ # XXX Strictly you're supposed to follow RFC 2616
+ # age-calculation rules. Remember that zero Max-Age
+ # is a request to discard (old and new) cookie, though.
+ k = "expires"
+ v = self._now + v
+ if (k in value_attrs) or (k in boolean_attrs):
+ if (v is None and
+ k not in ("port", "comment", "commenturl")):
+ _debug(" missing value for %s attribute" % k)
+ bad_cookie = True
+ break
+ standard[k] = v
+ else:
+ rest[k] = v
+
+ if bad_cookie:
+ continue
+
+ cookie_tuples.append((name, value, standard, rest))
+
+ return cookie_tuples
+
+ def _cookie_from_cookie_tuple(self, tup, request):
+ # standard is dict of standard cookie-attributes, rest is dict of the
+ # rest of them
+ name, value, standard, rest = tup
+
+ domain = standard.get("domain", Absent)
+ path = standard.get("path", Absent)
+ port = standard.get("port", Absent)
+ expires = standard.get("expires", Absent)
+
+ # set the easy defaults
+ version = standard.get("version", None)
+ if version is not None:
+ try:
+ version = int(version)
+ except ValueError:
+ return None # invalid version, ignore cookie
+ secure = standard.get("secure", False)
+ # (discard is also set if expires is Absent)
+ discard = standard.get("discard", False)
+ comment = standard.get("comment", None)
+ comment_url = standard.get("commenturl", None)
+
+ # set default path
+ if path is not Absent and path != "":
+ path_specified = True
+ path = escape_path(path)
+ else:
+ path_specified = False
+ path = request_path(request)
+ i = path.rfind("/")
+ if i != -1:
+ if version == 0:
+ # Netscape spec parts company from reality here
+ path = path[:i]
+ else:
+ path = path[:i+1]
+ if len(path) == 0: path = "/"
+
+ # set default domain
+ domain_specified = domain is not Absent
+ # but first we have to remember whether it starts with a dot
+ domain_initial_dot = False
+ if domain_specified:
+ domain_initial_dot = bool(domain.startswith("."))
+ if domain is Absent:
+ req_host, erhn = eff_request_host(request)
+ domain = erhn
+ elif not domain.startswith("."):
+ domain = "."+domain
+
+ # set default port
+ port_specified = False
+ if port is not Absent:
+ if port is None:
+ # Port attr present, but has no value: default to request port.
+ # Cookie should then only be sent back on that port.
+ port = request_port(request)
+ else:
+ port_specified = True
+ port = re.sub(r"\s+", "", port)
+ else:
+ # No port attr present. Cookie can be sent back on any port.
+ port = None
+
+ # set default expires and discard
+ if expires is Absent:
+ expires = None
+ discard = True
+ elif expires <= self._now:
+ # Expiry date in past is request to delete cookie. This can't be
+ # in DefaultCookiePolicy, because can't delete cookies there.
+ try:
+ self.clear(domain, path, name)
+ except KeyError:
+ pass
+ _debug("Expiring cookie, domain='%s', path='%s', name='%s'",
+ domain, path, name)
+ return None
+
+ return Cookie(version,
+ name, value,
+ port, port_specified,
+ domain, domain_specified, domain_initial_dot,
+ path, path_specified,
+ secure,
+ expires,
+ discard,
+ comment,
+ comment_url,
+ rest)
+
+ def _cookies_from_attrs_set(self, attrs_set, request):
+ cookie_tuples = self._normalized_cookie_tuples(attrs_set)
+
+ cookies = []
+ for tup in cookie_tuples:
+ cookie = self._cookie_from_cookie_tuple(tup, request)
+ if cookie: cookies.append(cookie)
+ return cookies
+
+ def _process_rfc2109_cookies(self, cookies):
+ rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
+ if rfc2109_as_ns is None:
+ rfc2109_as_ns = not self._policy.rfc2965
+ for cookie in cookies:
+ if cookie.version == 1:
+ cookie.rfc2109 = True
+ if rfc2109_as_ns:
+ # treat 2109 cookies as Netscape cookies rather than
+ # as RFC2965 cookies
+ cookie.version = 0
+
+ def make_cookies(self, response, request):
+ """Return sequence of Cookie objects extracted from response object."""
+ # get cookie-attributes for RFC 2965 and Netscape protocols
+ headers = response.info()
+ rfc2965_hdrs = headers.get_all("Set-Cookie2", [])
+ ns_hdrs = headers.get_all("Set-Cookie", [])
+
+ rfc2965 = self._policy.rfc2965
+ netscape = self._policy.netscape
+
+ if ((not rfc2965_hdrs and not ns_hdrs) or
+ (not ns_hdrs and not rfc2965) or
+ (not rfc2965_hdrs and not netscape) or
+ (not netscape and not rfc2965)):
+ return [] # no relevant cookie headers: quick exit
+
+ try:
+ cookies = self._cookies_from_attrs_set(
+ split_header_words(rfc2965_hdrs), request)
+ except Exception:
+ _warn_unhandled_exception()
+ cookies = []
+
+ if ns_hdrs and netscape:
+ try:
+ # RFC 2109 and Netscape cookies
+ ns_cookies = self._cookies_from_attrs_set(
+ parse_ns_headers(ns_hdrs), request)
+ except Exception:
+ _warn_unhandled_exception()
+ ns_cookies = []
+ self._process_rfc2109_cookies(ns_cookies)
+
+ # Look for Netscape cookies (from Set-Cookie headers) that match
+ # corresponding RFC 2965 cookies (from Set-Cookie2 headers).
+ # For each match, keep the RFC 2965 cookie and ignore the Netscape
+ # cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
+ # bundled in with the Netscape cookies for this purpose, which is
+ # reasonable behaviour.
+ if rfc2965:
+ lookup = {}
+ for cookie in cookies:
+ lookup[(cookie.domain, cookie.path, cookie.name)] = None
+
+ def no_matching_rfc2965(ns_cookie, lookup=lookup):
+ key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
+ return key not in lookup
+ ns_cookies = filter(no_matching_rfc2965, ns_cookies)
+
+ if ns_cookies:
+ cookies.extend(ns_cookies)
+
+ return cookies
+
+ def set_cookie_if_ok(self, cookie, request):
+ """Set a cookie if policy says it's OK to do so."""
+ self._cookies_lock.acquire()
+ try:
+ self._policy._now = self._now = int(time.time())
+
+ if self._policy.set_ok(cookie, request):
+ self.set_cookie(cookie)
+
+
+ finally:
+ self._cookies_lock.release()
+
+ def set_cookie(self, cookie):
+ """Set a cookie, without checking whether or not it should be set."""
+ c = self._cookies
+ self._cookies_lock.acquire()
+ try:
+ if cookie.domain not in c: c[cookie.domain] = {}
+ c2 = c[cookie.domain]
+ if cookie.path not in c2: c2[cookie.path] = {}
+ c3 = c2[cookie.path]
+ c3[cookie.name] = cookie
+ finally:
+ self._cookies_lock.release()
+
+ def extract_cookies(self, response, request):
+ """Extract cookies from response, where allowable given the request."""
+ _debug("extract_cookies: %s", response.info())
+ self._cookies_lock.acquire()
+ try:
+ self._policy._now = self._now = int(time.time())
+
+ for cookie in self.make_cookies(response, request):
+ if self._policy.set_ok(cookie, request):
+ _debug(" setting cookie: %s", cookie)
+ self.set_cookie(cookie)
+ finally:
+ self._cookies_lock.release()
+
+ def clear(self, domain=None, path=None, name=None):
+ """Clear some cookies.
+
+ Invoking this method without arguments will clear all cookies. If
+ given a single argument, only cookies belonging to that domain will be
+ removed. If given two arguments, cookies belonging to the specified
+ path within that domain are removed. If given three arguments, then
+ the cookie with the specified name, path and domain is removed.
+
+ Raises KeyError if no matching cookie exists.
+
+ """
+ if name is not None:
+ if (domain is None) or (path is None):
+ raise ValueError(
+ "domain and path must be given to remove a cookie by name")
+ del self._cookies[domain][path][name]
+ elif path is not None:
+ if domain is None:
+ raise ValueError(
+ "domain must be given to remove cookies by path")
+ del self._cookies[domain][path]
+ elif domain is not None:
+ del self._cookies[domain]
+ else:
+ self._cookies = {}
+
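+    # clear() usage sketch (illustrative; assumes a populated jar):
+    #     jar.clear("example.com", "/", "sid")   # one named cookie
+    #     jar.clear("example.com", "/")          # all cookies for a path
+    #     jar.clear("example.com")               # all cookies for a domain
+    #     jar.clear()                            # every cookie in the jar
+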
+ def clear_session_cookies(self):
+ """Discard all session cookies.
+
+ Note that the .save() method won't save session cookies anyway, unless
+ you ask otherwise by passing a true ignore_discard argument.
+
+ """
+ self._cookies_lock.acquire()
+ try:
+ for cookie in self:
+ if cookie.discard:
+ self.clear(cookie.domain, cookie.path, cookie.name)
+ finally:
+ self._cookies_lock.release()
+
+ def clear_expired_cookies(self):
+ """Discard all expired cookies.
+
+ You probably don't need to call this method: expired cookies are never
+ sent back to the server (provided you're using DefaultCookiePolicy),
+ this method is called by CookieJar itself every so often, and the
+ .save() method won't save expired cookies anyway (unless you ask
+ otherwise by passing a true ignore_expires argument).
+
+ """
+ self._cookies_lock.acquire()
+ try:
+ now = time.time()
+ for cookie in self:
+ if cookie.is_expired(now):
+ self.clear(cookie.domain, cookie.path, cookie.name)
+ finally:
+ self._cookies_lock.release()
+
+ def __iter__(self):
+ return deepvalues(self._cookies)
+
+ def __len__(self):
+ """Return number of contained cookies."""
+ i = 0
+ for cookie in self: i = i + 1
+ return i
+
+ def __repr__(self):
+ r = []
+ for cookie in self: r.append(repr(cookie))
+ return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
+
+ def __str__(self):
+ r = []
+ for cookie in self: r.append(str(cookie))
+ return "<%s[%s]>" % (self.__class__.__name__, ", ".join(r))
+
+
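+# CookieJar usage sketch (illustrative, not part of the module): wire a jar
+# into an urllib opener so cookies are captured from responses and replayed
+# on later requests (assumes the standard urllib.request API):
+#
+#     import urllib.request
+#     jar = CookieJar()
+#     opener = urllib.request.build_opener(
+#         urllib.request.HTTPCookieProcessor(jar))
+#     opener.open("http://example.com/")   # Set-Cookie headers land in jar
+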
+# derives from OSError for backwards-compatibility with Python 2.4.0
+class LoadError(OSError): pass
+
+class FileCookieJar(CookieJar):
+ """CookieJar that can be loaded from and saved to a file."""
+
+ def __init__(self, filename=None, delayload=False, policy=None):
+ """
+ Cookies are NOT loaded from the named file until either the .load() or
+ .revert() method is called.
+
+ """
+ CookieJar.__init__(self, policy)
+ if filename is not None:
+ try:
+ filename+""
+ except:
+ raise ValueError("filename must be string-like")
+ self.filename = filename
+ self.delayload = bool(delayload)
+
+ def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+ """Save cookies to a file."""
+ raise NotImplementedError()
+
+ def load(self, filename=None, ignore_discard=False, ignore_expires=False):
+ """Load cookies from a file."""
+ if filename is None:
+ if self.filename is not None: filename = self.filename
+ else: raise ValueError(MISSING_FILENAME_TEXT)
+
+ with open(filename) as f:
+ self._really_load(f, filename, ignore_discard, ignore_expires)
+
+ def revert(self, filename=None,
+ ignore_discard=False, ignore_expires=False):
+ """Clear all cookies and reload cookies from a saved file.
+
+ Raises LoadError (or OSError) if reversion is not successful; the
+ object's state will not be altered if this happens.
+
+ """
+ if filename is None:
+ if self.filename is not None: filename = self.filename
+ else: raise ValueError(MISSING_FILENAME_TEXT)
+
+ self._cookies_lock.acquire()
+ try:
+
+ old_state = copy.deepcopy(self._cookies)
+ self._cookies = {}
+ try:
+ self.load(filename, ignore_discard, ignore_expires)
+ except OSError:
+ self._cookies = old_state
+ raise
+
+ finally:
+ self._cookies_lock.release()
+
+
+def lwp_cookie_str(cookie):
+ """Return string representation of Cookie in the LWP cookie file format.
+
+ Actually, the format is extended a bit -- see module docstring.
+
+ """
+ h = [(cookie.name, cookie.value),
+ ("path", cookie.path),
+ ("domain", cookie.domain)]
+ if cookie.port is not None: h.append(("port", cookie.port))
+ if cookie.path_specified: h.append(("path_spec", None))
+ if cookie.port_specified: h.append(("port_spec", None))
+ if cookie.domain_initial_dot: h.append(("domain_dot", None))
+ if cookie.secure: h.append(("secure", None))
+ if cookie.expires: h.append(("expires",
+ time2isoz(float(cookie.expires))))
+ if cookie.discard: h.append(("discard", None))
+ if cookie.comment: h.append(("comment", cookie.comment))
+ if cookie.comment_url: h.append(("commenturl", cookie.comment_url))
+
+ keys = sorted(cookie._rest.keys())
+ for k in keys:
+ h.append((k, str(cookie._rest[k])))
+
+ h.append(("version", str(cookie.version)))
+
+ return join_header_words([h])
+
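+# A typical lwp_cookie_str() result looks like this (illustrative):
+#     sid=abc; path="/"; domain="example.com"; path_spec; discard; version=0
+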
+class LWPCookieJar(FileCookieJar):
+ """
+ The LWPCookieJar saves a sequence of "Set-Cookie3" lines.
+ "Set-Cookie3" is the format used by the libwww-perl library, not known
+ to be compatible with any browser, but which is easy to read and
+ doesn't lose information about RFC 2965 cookies.
+
+ Additional methods
+
+    as_lwp_str(ignore_discard=True, ignore_expires=True)
+
+ """
+
+ def as_lwp_str(self, ignore_discard=True, ignore_expires=True):
+ """Return cookies as a string of "\\n"-separated "Set-Cookie3" headers.
+
+ ignore_discard and ignore_expires: see docstring for FileCookieJar.save
+
+ """
+ now = time.time()
+ r = []
+ for cookie in self:
+ if not ignore_discard and cookie.discard:
+ continue
+ if not ignore_expires and cookie.is_expired(now):
+ continue
+ r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie))
+ return "\n".join(r+[""])
+
+ def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+ if filename is None:
+ if self.filename is not None: filename = self.filename
+ else: raise ValueError(MISSING_FILENAME_TEXT)
+
+ with open(filename, "w") as f:
+ # There really isn't an LWP Cookies 2.0 format, but this indicates
+ # that there is extra information in here (domain_dot and
+ # port_spec) while still being compatible with libwww-perl, I hope.
+ f.write("#LWP-Cookies-2.0\n")
+ f.write(self.as_lwp_str(ignore_discard, ignore_expires))
+
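+    # Round-trip sketch (illustrative): persist and restore an LWPCookieJar.
+    #     jar = LWPCookieJar("cookies.lwp")
+    #     jar.save(ignore_discard=True)   # writes the #LWP-Cookies-2.0 header
+    #     jar.load(ignore_discard=True)   # parses the Set-Cookie3 lines back
+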
+ def _really_load(self, f, filename, ignore_discard, ignore_expires):
+ magic = f.readline()
+ if not self.magic_re.search(magic):
+ msg = ("%r does not look like a Set-Cookie3 (LWP) format "
+ "file" % filename)
+ raise LoadError(msg)
+
+ now = time.time()
+
+ header = "Set-Cookie3:"
+ boolean_attrs = ("port_spec", "path_spec", "domain_dot",
+ "secure", "discard")
+ value_attrs = ("version",
+ "port", "path", "domain",
+ "expires",
+ "comment", "commenturl")
+
+ try:
+ while 1:
+ line = f.readline()
+ if line == "": break
+ if not line.startswith(header):
+ continue
+ line = line[len(header):].strip()
+
+ for data in split_header_words([line]):
+ name, value = data[0]
+ standard = {}
+ rest = {}
+ for k in boolean_attrs:
+ standard[k] = False
+ for k, v in data[1:]:
+ if k is not None:
+ lc = k.lower()
+ else:
+ lc = None
+ # don't lose case distinction for unknown fields
+ if (lc in value_attrs) or (lc in boolean_attrs):
+ k = lc
+ if k in boolean_attrs:
+ if v is None: v = True
+ standard[k] = v
+ elif k in value_attrs:
+ standard[k] = v
+ else:
+ rest[k] = v
+
+ h = standard.get
+ expires = h("expires")
+ discard = h("discard")
+ if expires is not None:
+ expires = iso2time(expires)
+ if expires is None:
+ discard = True
+ domain = h("domain")
+ domain_specified = domain.startswith(".")
+ c = Cookie(h("version"), name, value,
+ h("port"), h("port_spec"),
+ domain, domain_specified, h("domain_dot"),
+ h("path"), h("path_spec"),
+ h("secure"),
+ expires,
+ discard,
+ h("comment"),
+ h("commenturl"),
+ rest)
+ if not ignore_discard and c.discard:
+ continue
+ if not ignore_expires and c.is_expired(now):
+ continue
+ self.set_cookie(c)
+ except OSError:
+ raise
+ except Exception:
+ _warn_unhandled_exception()
+ raise LoadError("invalid Set-Cookie3 format file %r: %r" %
+ (filename, line))
+
+
+class MozillaCookieJar(FileCookieJar):
+ """
+
+ WARNING: you may want to backup your browser's cookies file if you use
+ this class to save cookies. I *think* it works, but there have been
+ bugs in the past!
+
+ This class differs from CookieJar only in the format it uses to save and
+ load cookies to and from a file. This class uses the Mozilla/Netscape
+ `cookies.txt' format. lynx uses this file format, too.
+
+ Don't expect cookies saved while the browser is running to be noticed by
+ the browser (in fact, Mozilla on unix will overwrite your saved cookies if
+ you change them on disk while it's running; on Windows, you probably can't
+ save at all while the browser is running).
+
+ Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
+ Netscape cookies on saving.
+
+ In particular, the cookie version and port number information is lost,
+ together with information about whether or not Path, Port and Discard were
+ specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
+ domain as set in the HTTP header started with a dot (yes, I'm aware some
+ domains in Netscape files start with a dot and some don't -- trust me, you
+ really don't want to know any more about this).
+
+ Note that though Mozilla and Netscape use the same format, they use
+ slightly different headers. The class saves cookies using the Netscape
+ header by default (Mozilla can cope with that).
+
+ """
+ magic_re = re.compile("#( Netscape)? HTTP Cookie File")
+ header = """\
+# Netscape HTTP Cookie File
+# http://curl.haxx.se/rfc/cookie_spec.html
+# This is a generated file! Do not edit.
+
+"""
+
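+    # Each cookies.txt entry parsed below is seven tab-separated fields,
+    # e.g. (illustrative):
+    #     .example.com<TAB>TRUE<TAB>/<TAB>FALSE<TAB>2147483647<TAB>sid<TAB>abc
+    # where the second field (domain_specified) must agree with whether the
+    # domain starts with a dot.
+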
+ def _really_load(self, f, filename, ignore_discard, ignore_expires):
+ now = time.time()
+
+ magic = f.readline()
+ if not self.magic_re.search(magic):
+ raise LoadError(
+ "%r does not look like a Netscape format cookies file" %
+ filename)
+
+ try:
+ while 1:
+ line = f.readline()
+ if line == "": break
+
+ # last field may be absent, so keep any trailing tab
+ if line.endswith("\n"): line = line[:-1]
+
+ # skip comments and blank lines XXX what is $ for?
+ if (line.strip().startswith(("#", "$")) or
+ line.strip() == ""):
+ continue
+
+ domain, domain_specified, path, secure, expires, name, value = \
+ line.split("\t")
+ secure = (secure == "TRUE")
+ domain_specified = (domain_specified == "TRUE")
+ if name == "":
+ # cookies.txt regards 'Set-Cookie: foo' as a cookie
+ # with no name, whereas http.cookiejar regards it as a
+ # cookie with no value.
+ name = value
+ value = None
+
+ initial_dot = domain.startswith(".")
+ assert domain_specified == initial_dot
+
+ discard = False
+ if expires == "":
+ expires = None
+ discard = True
+
+ # assume path_specified is false
+ c = Cookie(0, name, value,
+ None, False,
+ domain, domain_specified, initial_dot,
+ path, False,
+ secure,
+ expires,
+ discard,
+ None,
+ None,
+ {})
+ if not ignore_discard and c.discard:
+ continue
+ if not ignore_expires and c.is_expired(now):
+ continue
+ self.set_cookie(c)
+
+ except OSError:
+ raise
+ except Exception:
+ _warn_unhandled_exception()
+ raise LoadError("invalid Netscape format cookies file %r: %r" %
+ (filename, line))
+
+ def save(self, filename=None, ignore_discard=False, ignore_expires=False):
+ if filename is None:
+ if self.filename is not None: filename = self.filename
+ else: raise ValueError(MISSING_FILENAME_TEXT)
+
+ with open(filename, "w") as f:
+ f.write(self.header)
+ now = time.time()
+ for cookie in self:
+ if not ignore_discard and cookie.discard:
+ continue
+ if not ignore_expires and cookie.is_expired(now):
+ continue
+ if cookie.secure: secure = "TRUE"
+ else: secure = "FALSE"
+ if cookie.domain.startswith("."): initial_dot = "TRUE"
+ else: initial_dot = "FALSE"
+ if cookie.expires is not None:
+ expires = str(cookie.expires)
+ else:
+ expires = ""
+ if cookie.value is None:
+ # cookies.txt regards 'Set-Cookie: foo' as a cookie
+ # with no name, whereas http.cookiejar regards it as a
+ # cookie with no value.
+ name = ""
+ value = cookie.name
+ else:
+ name = cookie.name
+ value = cookie.value
+ f.write(
+ "\t".join([cookie.domain, initial_dot, cookie.path,
+ secure, expires, name, value])+
+ "\n")
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/http/cookies.py b/tapdown/lib/python3.11/site-packages/eventlet/green/http/cookies.py
new file mode 100644
index 0000000..d93cd71
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/http/cookies.py
@@ -0,0 +1,691 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+####
+# Copyright 2000 by Timothy O'Malley
+#
+# All Rights Reserved
+#
+# Permission to use, copy, modify, and distribute this software
+# and its documentation for any purpose and without fee is hereby
+# granted, provided that the above copyright notice appear in all
+# copies and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Timothy O'Malley not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
+# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
+# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+#
+####
+#
+# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
+# by Timothy O'Malley
+#
+# Cookie.py is a Python module for the handling of HTTP
+# cookies as a Python dictionary. See RFC 2109 for more
+# information on cookies.
+#
+# The original idea to treat Cookies as a dictionary came from
+# Dave Mitchell (davem@magnet.com) in 1995, when he released the
+# first version of nscookie.py.
+#
+####
+
+r"""
+Here's a sample session to show how to use this module.
+At the moment, this is the only documentation.
+
+The Basics
+----------
+
+Importing is easy...
+
+ >>> from http import cookies
+
+Most of the time you start by creating a cookie.
+
+ >>> C = cookies.SimpleCookie()
+
+Once you've created your Cookie, you can add values just as if it were
+a dictionary.
+
+ >>> C = cookies.SimpleCookie()
+ >>> C["fig"] = "newton"
+ >>> C["sugar"] = "wafer"
+ >>> C.output()
+ 'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'
+
+Notice that the printable representation of a Cookie is the
+appropriate format for a Set-Cookie: header. This is the
+default behavior. You can change the header and printed
+attributes by using the .output() function
+
+ >>> C = cookies.SimpleCookie()
+ >>> C["rocky"] = "road"
+ >>> C["rocky"]["path"] = "/cookie"
+ >>> print(C.output(header="Cookie:"))
+ Cookie: rocky=road; Path=/cookie
+ >>> print(C.output(attrs=[], header="Cookie:"))
+ Cookie: rocky=road
+
+The load() method of a Cookie extracts cookies from a string. In a
+CGI script, you would use this method to extract the cookies from the
+HTTP_COOKIE environment variable.
+
+ >>> C = cookies.SimpleCookie()
+ >>> C.load("chips=ahoy; vienna=finger")
+ >>> C.output()
+ 'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'
+
+The load() method is darn-tootin smart about identifying cookies
+within a string. Escaped quotation marks, nested semicolons, and other
+such trickeries do not confuse it.
+
+ >>> C = cookies.SimpleCookie()
+ >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
+ >>> print(C)
+ Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"
+
+Each element of the Cookie also supports all of the RFC 2109
+Cookie attributes. Here's an example which sets the Path
+attribute.
+
+ >>> C = cookies.SimpleCookie()
+ >>> C["oreo"] = "doublestuff"
+ >>> C["oreo"]["path"] = "/"
+ >>> print(C)
+ Set-Cookie: oreo=doublestuff; Path=/
+
+Each dictionary element has a 'value' attribute, which gives you
+back the value associated with the key.
+
+ >>> C = cookies.SimpleCookie()
+ >>> C["twix"] = "none for you"
+ >>> C["twix"].value
+ 'none for you'
+
+The SimpleCookie expects that all values should be standard strings.
+Just to be sure, SimpleCookie invokes the str() builtin to convert
+the value to a string, when the values are set dictionary-style.
+
+ >>> C = cookies.SimpleCookie()
+ >>> C["number"] = 7
+ >>> C["string"] = "seven"
+ >>> C["number"].value
+ '7'
+ >>> C["string"].value
+ 'seven'
+ >>> C.output()
+ 'Set-Cookie: number=7\r\nSet-Cookie: string=seven'
+
+Finis.
+"""
+
+#
+# Import our required modules
+#
+import re
+import string
+
+__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]
+
+_nulljoin = ''.join
+_semispacejoin = '; '.join
+_spacejoin = ' '.join
+
+def _warn_deprecated_setter(setter):
+ import warnings
+ msg = ('The .%s setter is deprecated. The attribute will be read-only in '
+ 'future releases. Please use the set() method instead.' % setter)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
+
+#
+# Define an exception visible to External modules
+#
+class CookieError(Exception):
+ pass
+
+
+# These quoting routines conform to the RFC2109 specification, which in
+# turn references the character definitions from RFC2068. They provide
+# a two-way quoting algorithm. Any non-text character is translated
+# into a 4 character sequence: a forward-slash followed by the
+# three-digit octal equivalent of the character. Any '\' or '"' is
+# quoted with a preceding '\' slash.
+# Because of the way browsers really handle cookies (as opposed to what
+# the RFC says) we also encode "," and ";".
+#
+# These are taken from RFC2068 and RFC2109.
+# _LegalChars is the list of chars which don't require "'s
+# _Translator hash-table for fast quoting
+#
+_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
+_UnescapedChars = _LegalChars + ' ()/<=>?@[]{}'
+
+_Translator = {n: '\\%03o' % n
+ for n in set(range(256)) - set(map(ord, _UnescapedChars))}
+_Translator.update({
+ ord('"'): '\\"',
+ ord('\\'): '\\\\',
+})
+
+# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
+_is_legal_key = re.compile(r'[%s]+\Z' % re.escape(_LegalChars)).match
+
+def _quote(str):
+ r"""Quote a string for use in a cookie header.
+
+ If the string does not need to be double-quoted, then just return the
+ string. Otherwise, surround the string in doublequotes and quote
+ (with a \) special characters.
+ """
+ if str is None or _is_legal_key(str):
+ return str
+ else:
+ return '"' + str.translate(_Translator) + '"'
+
+
+_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
+_QuotePatt = re.compile(r"[\\].")
+
+def _unquote(str):
+ # If there aren't any doublequotes,
+ # then there can't be any special characters. See RFC 2109.
+ if str is None or len(str) < 2:
+ return str
+ if str[0] != '"' or str[-1] != '"':
+ return str
+
+ # We have to assume that we must decode this string.
+ # Down to work.
+
+ # Remove the "s
+ str = str[1:-1]
+
+ # Check for special sequences. Examples:
+ # \012 --> \n
+ # \" --> "
+ #
+ i = 0
+ n = len(str)
+ res = []
+ while 0 <= i < n:
+ o_match = _OctalPatt.search(str, i)
+ q_match = _QuotePatt.search(str, i)
+ if not o_match and not q_match: # Neither matched
+ res.append(str[i:])
+ break
+ # else:
+ j = k = -1
+ if o_match:
+ j = o_match.start(0)
+ if q_match:
+ k = q_match.start(0)
+ if q_match and (not o_match or k < j): # QuotePatt matched
+ res.append(str[i:k])
+ res.append(str[k+1])
+ i = k + 2
+ else: # OctalPatt matched
+ res.append(str[i:j])
+ res.append(chr(int(str[j+1:j+4], 8)))
+ i = j + 4
+ return _nulljoin(res)
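+
+# Quote/unquote round-trip sketch (illustrative, doctest-style):
+#     >>> _quote("fudge\n")          # newline becomes a 3-digit octal escape
+#     '"fudge\\012"'
+#     >>> _unquote('"fudge\\012"')
+#     'fudge\n'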
+
+# The _getdate() routine is used to set the expiration time in the cookie's HTTP
+# header. By default, _getdate() returns the current time in the appropriate
+# "expires" format for a Set-Cookie header. The one optional argument is an
+# offset from now, in seconds. For example, an offset of -3600 means "one hour
+# ago". The offset may be a floating point number.
+#
+
+_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+_monthname = [None,
+ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
+ from eventlet.green.time import gmtime, time
+ now = time()
+ year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
+ return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
+ (weekdayname[wd], day, monthname[month], year, hh, mm, ss)
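+
+# For example (illustrative): calling _getdate(3600) at 00:00:00 GMT on
+# 1 Jan 2024 would return "Mon, 01 Jan 2024 01:00:00 GMT".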
+
+
+class Morsel(dict):
+ """A class to hold ONE (key, value) pair.
+
+ In a cookie, each such pair may have several attributes, so this class is
+ used to keep the attributes associated with the appropriate key,value pair.
+ This class also includes a coded_value attribute, which is used to hold
+ the network representation of the value. This is most useful when Python
+ objects are pickled for network transit.
+ """
+ # RFC 2109 lists these attributes as reserved:
+ # path comment domain
+ # max-age secure version
+ #
+ # For historical reasons, these attributes are also reserved:
+ # expires
+ #
+ # This is an extension from Microsoft:
+ # httponly
+ #
+ # This dictionary provides a mapping from the lowercase
+ # variant on the left to the appropriate traditional
+ # formatting on the right.
+ _reserved = {
+ "expires" : "expires",
+ "path" : "Path",
+ "comment" : "Comment",
+ "domain" : "Domain",
+ "max-age" : "Max-Age",
+ "secure" : "Secure",
+ "httponly" : "HttpOnly",
+ "version" : "Version",
+ }
+
+ _flags = {'secure', 'httponly'}
+
+ def __init__(self):
+ # Set defaults
+ self._key = self._value = self._coded_value = None
+
+ # Set default attributes
+ for key in self._reserved:
+ dict.__setitem__(self, key, "")
+
+ @property
+ def key(self):
+ return self._key
+
+ @key.setter
+ def key(self, key):
+ _warn_deprecated_setter('key')
+ self._key = key
+
+ @property
+ def value(self):
+ return self._value
+
+ @value.setter
+ def value(self, value):
+ _warn_deprecated_setter('value')
+ self._value = value
+
+ @property
+ def coded_value(self):
+ return self._coded_value
+
+ @coded_value.setter
+ def coded_value(self, coded_value):
+ _warn_deprecated_setter('coded_value')
+ self._coded_value = coded_value
+
+ def __setitem__(self, K, V):
+ K = K.lower()
+ if not K in self._reserved:
+ raise CookieError("Invalid attribute %r" % (K,))
+ dict.__setitem__(self, K, V)
+
+ def setdefault(self, key, val=None):
+ key = key.lower()
+ if key not in self._reserved:
+ raise CookieError("Invalid attribute %r" % (key,))
+ return dict.setdefault(self, key, val)
+
+ def __eq__(self, morsel):
+ if not isinstance(morsel, Morsel):
+ return NotImplemented
+ return (dict.__eq__(self, morsel) and
+ self._value == morsel._value and
+ self._key == morsel._key and
+ self._coded_value == morsel._coded_value)
+
+ __ne__ = object.__ne__
+
+ def copy(self):
+ morsel = Morsel()
+ dict.update(morsel, self)
+ morsel.__dict__.update(self.__dict__)
+ return morsel
+
+ def update(self, values):
+ data = {}
+ for key, val in dict(values).items():
+ key = key.lower()
+ if key not in self._reserved:
+ raise CookieError("Invalid attribute %r" % (key,))
+ data[key] = val
+ dict.update(self, data)
+
+ def isReservedKey(self, K):
+ return K.lower() in self._reserved
+
+ def set(self, key, val, coded_val, LegalChars=_LegalChars):
+ if LegalChars != _LegalChars:
+ import warnings
+ warnings.warn(
+ 'LegalChars parameter is deprecated, ignored and will '
+ 'be removed in future versions.', DeprecationWarning,
+ stacklevel=2)
+
+ if key.lower() in self._reserved:
+ raise CookieError('Attempt to set a reserved key %r' % (key,))
+ if not _is_legal_key(key):
+ raise CookieError('Illegal key %r' % (key,))
+
+ # It's a good key, so save it.
+ self._key = key
+ self._value = val
+ self._coded_value = coded_val
+
+ def __getstate__(self):
+ return {
+ 'key': self._key,
+ 'value': self._value,
+ 'coded_value': self._coded_value,
+ }
+
+ def __setstate__(self, state):
+ self._key = state['key']
+ self._value = state['value']
+ self._coded_value = state['coded_value']
+
+ def output(self, attrs=None, header="Set-Cookie:"):
+ return "%s %s" % (header, self.OutputString(attrs))
+
+ __str__ = output
+
+ def __repr__(self):
+ return '<%s: %s>' % (self.__class__.__name__, self.OutputString())
+
+ def js_output(self, attrs=None):
+ # Print javascript
+        return """
+        <script type="text/javascript">
+        <!-- begin hidden
+        document.cookie = \"%s\";
+        // end hidden -->
+        </script>
+        """ % (self.OutputString(attrs).replace('"', r'\"'))
+
+ def OutputString(self, attrs=None):
+ # Build up our result
+ #
+ result = []
+ append = result.append
+
+ # First, the key=value pair
+ append("%s=%s" % (self.key, self.coded_value))
+
+ # Now add any defined attributes
+ if attrs is None:
+ attrs = self._reserved
+ items = sorted(self.items())
+ for key, value in items:
+ if value == "":
+ continue
+ if key not in attrs:
+ continue
+ if key == "expires" and isinstance(value, int):
+ append("%s=%s" % (self._reserved[key], _getdate(value)))
+ elif key == "max-age" and isinstance(value, int):
+ append("%s=%d" % (self._reserved[key], value))
+ elif key in self._flags:
+ if value:
+ append(str(self._reserved[key]))
+ else:
+ append("%s=%s" % (self._reserved[key], value))
+
+ # Return the result
+ return _semispacejoin(result)
+
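+# Morsel rendering sketch (illustrative, doctest-style):
+#     >>> C = SimpleCookie()
+#     >>> C["name"] = "value"
+#     >>> C["name"]["max-age"] = 3600
+#     >>> C["name"].output()
+#     'Set-Cookie: name=value; Max-Age=3600'
+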
+
+#
+# Pattern for finding cookie
+#
+# This used to be strict parsing based on the RFC2109 and RFC2068
+# specifications. I have since discovered that MSIE 3.0x doesn't
+# follow the character rules outlined in those specs. As a
+# result, the parsing rules here are less strict.
+#
+
+_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
+_LegalValueChars = _LegalKeyChars + r'\[\]'
+_CookiePattern = re.compile(r"""
+ (?x) # This is a verbose pattern
+ \s* # Optional whitespace at start of cookie
+    (?P<key>                       # Start of group 'key'
+ [""" + _LegalKeyChars + r"""]+? # Any word of at least one letter
+ ) # End of group 'key'
+ ( # Optional group: there may not be a value.
+ \s*=\s* # Equal Sign
+    (?P<val>                         # Start of group 'val'
+ "(?:[^\\"]|\\.)*" # Any doublequoted string
+ | # or
+ \w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
+ | # or
+ [""" + _LegalValueChars + r"""]* # Any word or empty string
+ ) # End of group 'val'
+ )? # End of optional value group
+ \s* # Any number of spaces.
+ (\s+|;|$) # Ending either at space, semicolon, or EOS.
+ """, re.ASCII) # May be removed if safe.
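+
+# Illustrative behaviour: matched against 'key=value; Path=/', the pattern
+# first yields group("key") == "key" with group("val") == "value", then
+# group("key") == "Path" with group("val") == "/".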
+
+
+# At long last, here is the cookie class. Using this class is almost just like
+# using a dictionary. See this module's docstring for example usage.
+#
+class BaseCookie(dict):
+ """A container class for a set of Morsels."""
+
+ def value_decode(self, val):
+ """real_value, coded_value = value_decode(STRING)
+ Called prior to setting a cookie's value from the network
+ representation. The VALUE is the value read from HTTP
+ header.
+ Override this function to modify the behavior of cookies.
+ """
+ return val, val
+
+ def value_encode(self, val):
+ """real_value, coded_value = value_encode(VALUE)
+ Called prior to setting a cookie's value from the dictionary
+ representation. The VALUE is the value being assigned.
+ Override this function to modify the behavior of cookies.
+ """
+ strval = str(val)
+ return strval, strval
+
+ def __init__(self, input=None):
+ if input:
+ self.load(input)
+
+ def __set(self, key, real_value, coded_value):
+ """Private method for setting a cookie's value"""
+ M = self.get(key, Morsel())
+ M.set(key, real_value, coded_value)
+ dict.__setitem__(self, key, M)
+
+ def __setitem__(self, key, value):
+ """Dictionary style assignment."""
+ if isinstance(value, Morsel):
+ # allow assignment of constructed Morsels (e.g. for pickling)
+ dict.__setitem__(self, key, value)
+ else:
+ rval, cval = self.value_encode(value)
+ self.__set(key, rval, cval)
+
+ def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
+ """Return a string suitable for HTTP."""
+ result = []
+ items = sorted(self.items())
+ for key, value in items:
+ result.append(value.output(attrs, header))
+ return sep.join(result)
+
+ __str__ = output
+
+ def __repr__(self):
+ l = []
+ items = sorted(self.items())
+ for key, value in items:
+ l.append('%s=%s' % (key, repr(value.value)))
+ return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
+
+ def js_output(self, attrs=None):
+ """Return a string suitable for JavaScript."""
+ result = []
+ items = sorted(self.items())
+ for key, value in items:
+ result.append(value.js_output(attrs))
+ return _nulljoin(result)
+
+ def load(self, rawdata):
+ """Load cookies from a string (presumably HTTP_COOKIE) or
+ from a dictionary. Loading cookies from a dictionary 'd'
+ is equivalent to calling:
+ map(Cookie.__setitem__, d.keys(), d.values())
+ """
+ if isinstance(rawdata, str):
+ self.__parse_string(rawdata)
+ else:
+ # self.update() wouldn't call our custom __setitem__
+ for key, value in rawdata.items():
+ self[key] = value
+ return
+
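+    # load() sketch (illustrative, doctest-style): a dict works like a
+    # sequence of dictionary-style assignments:
+    #     >>> C = SimpleCookie()
+    #     >>> C.load({"fig": "newton"})
+    #     >>> C.output()
+    #     'Set-Cookie: fig=newton'
+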
+ def __parse_string(self, str, patt=_CookiePattern):
+ i = 0 # Our starting point
+ n = len(str) # Length of string
+ parsed_items = [] # Parsed (type, key, value) triples
+ morsel_seen = False # A key=value pair was previously encountered
+
+ TYPE_ATTRIBUTE = 1
+ TYPE_KEYVALUE = 2
+
+ # We first parse the whole cookie string and reject it if it's
+ # syntactically invalid (this helps avoid some classes of injection
+ # attacks).
+ while 0 <= i < n:
+ # Start looking for a cookie
+ match = patt.match(str, i)
+ if not match:
+ # No more cookies
+ break
+
+ key, value = match.group("key"), match.group("val")
+ i = match.end(0)
+
+ if key[0] == "$":
+ if not morsel_seen:
+ # We ignore attributes which pertain to the cookie
+ # mechanism as a whole, such as "$Version".
+ # See RFC 2965. (Does anyone care?)
+ continue
+ parsed_items.append((TYPE_ATTRIBUTE, key[1:], value))
+ elif key.lower() in Morsel._reserved:
+ if not morsel_seen:
+ # Invalid cookie string
+ return
+ if value is None:
+ if key.lower() in Morsel._flags:
+ parsed_items.append((TYPE_ATTRIBUTE, key, True))
+ else:
+ # Invalid cookie string
+ return
+ else:
+ parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value)))
+ elif value is not None:
+ parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value)))
+ morsel_seen = True
+ else:
+ # Invalid cookie string
+ return
+
+ # The cookie string is valid, apply it.
+ M = None # current morsel
+ for tp, key, value in parsed_items:
+ if tp == TYPE_ATTRIBUTE:
+ assert M is not None
+ M[key] = value
+ else:
+ assert tp == TYPE_KEYVALUE
+ rval, cval = value
+ self.__set(key, rval, cval)
+ M = self[key]
+
+
+class SimpleCookie(BaseCookie):
+ """
+ SimpleCookie supports strings as cookie values. When setting
+ the value using the dictionary assignment notation, SimpleCookie
+ calls the builtin str() to convert the value to a string. Values
+ received from HTTP are kept as strings.
+ """
+ def value_decode(self, val):
+ return _unquote(val), val
+
+ def value_encode(self, val):
+ strval = str(val)
+ return strval, _quote(strval)
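+
+# SimpleCookie round-trip sketch (illustrative, doctest-style): values that
+# need quoting are quoted on output and unquoted on decode:
+#     >>> C = SimpleCookie()
+#     >>> C["key"] = "has spaces"
+#     >>> C.output()
+#     'Set-Cookie: key="has spaces"'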
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/http/server.py b/tapdown/lib/python3.11/site-packages/eventlet/green/http/server.py
new file mode 100644
index 0000000..190bdb9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/http/server.py
@@ -0,0 +1,1266 @@
+# This is part of Python source code with Eventlet-specific modifications.
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved
+#
+# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+# --------------------------------------------
+#
+# 1. This LICENSE AGREEMENT is between the Python Software Foundation
+# ("PSF"), and the Individual or Organization ("Licensee") accessing and
+# otherwise using this software ("Python") in source or binary form and
+# its associated documentation.
+#
+# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+# analyze, test, perform and/or display publicly, prepare derivative works,
+# distribute, and otherwise use Python alone or in any derivative version,
+# provided, however, that PSF's License Agreement and PSF's notice of copyright,
+# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
+# Reserved" are retained in Python alone or in any derivative version prepared by
+# Licensee.
+#
+# 3. In the event Licensee prepares a derivative work that is based on
+# or incorporates Python or any part thereof, and wants to make
+# the derivative work available to others as provided herein, then
+# Licensee hereby agrees to include in any such work a brief summary of
+# the changes made to Python.
+#
+# 4. PSF is making Python available to Licensee on an "AS IS"
+# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+# INFRINGE ANY THIRD PARTY RIGHTS.
+#
+# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+#
+# 6. This License Agreement will automatically terminate upon a material
+# breach of its terms and conditions.
+#
+# 7. Nothing in this License Agreement shall be deemed to create any
+# relationship of agency, partnership, or joint venture between PSF and
+# Licensee. This License Agreement does not grant permission to use PSF
+# trademarks or trade name in a trademark sense to endorse or promote
+# products or services of Licensee, or any third party.
+#
+# 8. By copying, installing or otherwise using Python, Licensee
+# agrees to be bound by the terms and conditions of this License
+# Agreement.
+"""HTTP server classes.
+
+Note: BaseHTTPRequestHandler doesn't implement any HTTP request; see
+SimpleHTTPRequestHandler for simple implementations of GET, HEAD and POST,
+and CGIHTTPRequestHandler for CGI scripts.
+
+It does, however, optionally implement HTTP/1.1 persistent connections,
+as of version 0.3.
+
+Notes on CGIHTTPRequestHandler
+------------------------------
+
+This class implements GET and POST requests to cgi-bin scripts.
+
+If the os.fork() function is not present (e.g. on Windows),
+subprocess.Popen() is used as a fallback, with slightly altered semantics.
+
+In all cases, the implementation is intentionally naive -- all
+requests are executed synchronously.
+
+SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
+-- it may execute arbitrary Python code or external programs.
+
+Note that status code 200 is sent prior to execution of a CGI script, so
+scripts cannot send other status codes such as 302 (redirect).
+
+XXX To do:
+
+- log requests even later (to capture byte count)
+- log user-agent header and other interesting goodies
+- send error log to separate file
+"""
+
+
+# See also:
+#
+# HTTP Working Group T. Berners-Lee
+# INTERNET-DRAFT R. T. Fielding
+# H. Frystyk Nielsen
+# Expires September 8, 1995 March 8, 1995
+#
+# URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt
+#
+# and
+#
+# Network Working Group R. Fielding
+# Request for Comments: 2616 et al
+# Obsoletes: 2068 June 1999
+# Category: Standards Track
+#
+# URL: http://www.faqs.org/rfcs/rfc2616.html
+
+# Log files
+# ---------
+#
+# Here's a quote from the NCSA httpd docs about log file format.
+#
+# | The logfile format is as follows. Each line consists of:
+# |
+# | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb
+# |
+# | host: Either the DNS name or the IP number of the remote client
+# | rfc931: Any information returned by identd for this person,
+# | - otherwise.
+# | authuser: If user sent a userid for authentication, the user name,
+# | - otherwise.
+# | DD: Day
+# | Mon: Month (calendar name)
+# | YYYY: Year
+# | hh: hour (24-hour format, the machine's timezone)
+# | mm: minutes
+# | ss: seconds
+# | request: The first line of the HTTP request as sent by the client.
+# | ddd: the status code returned by the server, - if not available.
+# | bbbb: the total number of bytes sent,
+# | *not including the HTTP/1.0 header*, - if not available
+# |
+# | You can determine the name of the file accessed through request.
+#
+# (Actually, the latter is only true if you know the server configuration
+# at the time the request was made!)
+
+__version__ = "0.6"
+
+__all__ = [
+ "HTTPServer", "BaseHTTPRequestHandler",
+ "SimpleHTTPRequestHandler", "CGIHTTPRequestHandler",
+]
+
+import email.utils
+import html
+import io
+import mimetypes
+import posixpath
+import shutil
+import sys
+import urllib.parse
+import copy
+import argparse
+
+from eventlet.green import (
+ os,
+ time,
+ select,
+ socket,
+ SocketServer as socketserver,
+ subprocess,
+)
+from eventlet.green.http import client as http_client, HTTPStatus
+
+
+# Default error message template
+DEFAULT_ERROR_MESSAGE = """\
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
+        "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+    <head>
+        <meta http-equiv="Content-Type"
+            content="text/html;charset=utf-8">
+        <title>Error response</title>
+    </head>
+    <body>
+        <h1>Error response</h1>
+        <p>Error code: %(code)d</p>
+        <p>Message: %(message)s.</p>
+        <p>Error code explanation: %(code)s - %(explain)s.</p>
+    </body>
+</html>
+"""
+
+DEFAULT_ERROR_CONTENT_TYPE = "text/html;charset=utf-8"
+
+class HTTPServer(socketserver.TCPServer):
+
+ allow_reuse_address = 1 # Seems to make sense in testing environment
+
+ def server_bind(self):
+ """Override server_bind to store the server name."""
+ socketserver.TCPServer.server_bind(self)
+ host, port = self.server_address[:2]
+ self.server_name = socket.getfqdn(host)
+ self.server_port = port
+
+
+class BaseHTTPRequestHandler(socketserver.StreamRequestHandler):
+
+ """HTTP request handler base class.
+
+ The following explanation of HTTP serves to guide you through the
+ code as well as to expose any misunderstandings I may have about
+ HTTP (so you don't need to read the code to figure out I'm wrong
+ :-).
+
+ HTTP (HyperText Transfer Protocol) is an extensible protocol on
+ top of a reliable stream transport (e.g. TCP/IP). The protocol
+ recognizes three parts to a request:
+
+ 1. One line identifying the request type and path
+ 2. An optional set of RFC-822-style headers
+ 3. An optional data part
+
+ The headers and data are separated by a blank line.
+
+ The first line of the request has the form
+
+    <command> <path> <version>
+
+    where <command> is a (case-sensitive) keyword such as GET or POST,
+    <path> is a string containing path information for the request,
+    and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
+    <path> is encoded using the URL encoding scheme (using %xx to signify
+    the ASCII character with hex code xx).
+
+ The specification specifies that lines are separated by CRLF but
+ for compatibility with the widest range of clients recommends
+ servers also handle LF. Similarly, whitespace in the request line
+ is treated sensibly (allowing multiple spaces between components
+ and allowing trailing whitespace).
+
+ Similarly, for output, lines ought to be separated by CRLF pairs
+ but most clients grok LF characters just fine.
+
+ If the first line of the request has the form
+
+    <command> <path>
+
+    (i.e. <version> is left out) then this is assumed to be an HTTP
+ 0.9 request; this form has no optional headers and data part and
+ the reply consists of just the data.
+
+ The reply form of the HTTP 1.x protocol again has three parts:
+
+ 1. One line giving the response code
+ 2. An optional set of RFC-822-style headers
+ 3. The data
+
+ Again, the headers and data are separated by a blank line.
+
+ The response code line has the form
+
+    <version> <responsecode> <responsestring>
+
+    where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
+    <responsecode> is a 3-digit response code indicating success or
+    failure of the request, and <responsestring> is an optional
+ human-readable string explaining what the response code means.
+
+ This server parses the request and the headers, and then calls a
+    function specific to the request type (<command>). Specifically,
+ a request SPAM will be handled by a method do_SPAM(). If no
+ such method exists the server sends an error response to the
+ client. If it exists, it is called with no arguments:
+
+ do_SPAM()
+
+ Note that the request name is case sensitive (i.e. SPAM and spam
+ are different requests).
+
+ The various request details are stored in instance variables:
+
+ - client_address is the client IP address in the form (host,
+ port);
+
+ - command, path and version are the broken-down request line;
+
+ - headers is an instance of email.message.Message (or a derived
+ class) containing the header information;
+
+ - rfile is a file object open for reading positioned at the
+ start of the optional input data part;
+
+ - wfile is a file object open for writing.
+
+ IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
+
+ The first thing to be written must be the response line. Then
+ follow 0 or more header lines, then a blank line, and then the
+ actual data (if any). The meaning of the header lines depends on
+ the command executed by the server; in most cases, when data is
+ returned, there should be at least one header line of the form
+
+    Content-type: <type>/<subtype>
+
+    where <type> and <subtype> should be registered MIME types,
+ e.g. "text/html" or "text/plain".
+
+ """
+
+ # The Python system version, truncated to its first component.
+ sys_version = "Python/" + sys.version.split()[0]
+
+ # The server software version. You may want to override this.
+ # The format is multiple whitespace-separated strings,
+ # where each string is of the form name[/version].
+ server_version = "BaseHTTP/" + __version__
+
+ error_message_format = DEFAULT_ERROR_MESSAGE
+ error_content_type = DEFAULT_ERROR_CONTENT_TYPE
+
+ # The default request version. This only affects responses up until
+ # the point where the request line is parsed, so it mainly decides what
+ # the client gets back when sending a malformed request line.
+ # Most web servers default to HTTP 0.9, i.e. don't send a status line.
+ default_request_version = "HTTP/0.9"
+
+ def parse_request(self):
+ """Parse a request (internal).
+
+ The request should be stored in self.raw_requestline; the results
+ are in self.command, self.path, self.request_version and
+ self.headers.
+
+ Return True for success, False for failure; on failure, an
+ error is sent back.
+
+ """
+ self.command = None # set in case of error on the first line
+ self.request_version = version = self.default_request_version
+ self.close_connection = True
+ requestline = str(self.raw_requestline, 'iso-8859-1')
+ requestline = requestline.rstrip('\r\n')
+ self.requestline = requestline
+ words = requestline.split()
+ if len(words) == 3:
+ command, path, version = words
+ try:
+ if version[:5] != 'HTTP/':
+ raise ValueError
+ base_version_number = version.split('/', 1)[1]
+ version_number = base_version_number.split(".")
+ # RFC 2145 section 3.1 says there can be only one "." and
+ # - major and minor numbers MUST be treated as
+ # separate integers;
+ # - HTTP/2.4 is a lower version than HTTP/2.13, which in
+ # turn is lower than HTTP/12.3;
+ # - Leading zeros MUST be ignored by recipients.
+ if len(version_number) != 2:
+ raise ValueError
+ version_number = int(version_number[0]), int(version_number[1])
+ except (ValueError, IndexError):
+ self.send_error(
+ HTTPStatus.BAD_REQUEST,
+ "Bad request version (%r)" % version)
+ return False
+ if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
+ self.close_connection = False
+ if version_number >= (2, 0):
+ self.send_error(
+ HTTPStatus.HTTP_VERSION_NOT_SUPPORTED,
+ "Invalid HTTP version (%s)" % base_version_number)
+ return False
+ elif len(words) == 2:
+ command, path = words
+ self.close_connection = True
+ if command != 'GET':
+ self.send_error(
+ HTTPStatus.BAD_REQUEST,
+ "Bad HTTP/0.9 request type (%r)" % command)
+ return False
+ elif not words:
+ return False
+ else:
+ self.send_error(
+ HTTPStatus.BAD_REQUEST,
+ "Bad request syntax (%r)" % requestline)
+ return False
+ self.command, self.path, self.request_version = command, path, version
+
+ # Examine the headers and look for a Connection directive.
+ try:
+ self.headers = http_client.parse_headers(self.rfile,
+ _class=self.MessageClass)
+ except http_client.LineTooLong as err:
+ self.send_error(
+ HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
+ "Line too long",
+ str(err))
+ return False
+ except http_client.HTTPException as err:
+ self.send_error(
+ HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
+ "Too many headers",
+ str(err)
+ )
+ return False
+
+ conntype = self.headers.get('Connection', "")
+ if conntype.lower() == 'close':
+ self.close_connection = True
+ elif (conntype.lower() == 'keep-alive' and
+ self.protocol_version >= "HTTP/1.1"):
+ self.close_connection = False
+ # Examine the headers and look for an Expect directive
+ expect = self.headers.get('Expect', "")
+ if (expect.lower() == "100-continue" and
+ self.protocol_version >= "HTTP/1.1" and
+ self.request_version >= "HTTP/1.1"):
+ if not self.handle_expect_100():
+ return False
+ return True
+
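+    # parse_request() example (illustrative): the request line
+    #     b"GET /index.html HTTP/1.1\r\n"
+    # parses into command="GET", path="/index.html" and
+    # request_version="HTTP/1.1".
+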
+ def handle_expect_100(self):
+ """Decide what to do with an "Expect: 100-continue" header.
+
+ If the client is expecting a 100 Continue response, we must
+ respond with either a 100 Continue or a final response before
+ waiting for the request body. The default is to always respond
+ with a 100 Continue. You can behave differently (for example,
+ reject unauthorized requests) by overriding this method.
+
+ This method should either return True (possibly after sending
+ a 100 Continue response) or send an error response and return
+ False.
+
+ """
+ self.send_response_only(HTTPStatus.CONTINUE)
+ self.end_headers()
+ return True
+
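+    # handle_expect_100() override sketch (illustrative): reject requests
+    # that lack credentials instead of always continuing:
+    #
+    #     def handle_expect_100(self):
+    #         if not self.headers.get("Authorization"):
+    #             self.send_error(HTTPStatus.UNAUTHORIZED)
+    #             return False
+    #         return super().handle_expect_100()
+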
+ def handle_one_request(self):
+ """Handle a single HTTP request.
+
+ You normally don't need to override this method; see the class
+ __doc__ string for information on how to handle specific HTTP
+ commands such as GET and POST.
+
+ """
+ try:
+ self.raw_requestline = self.rfile.readline(65537)
+ if len(self.raw_requestline) > 65536:
+ self.requestline = ''
+ self.request_version = ''
+ self.command = ''
+ self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)
+ return
+ if not self.raw_requestline:
+ self.close_connection = True
+ return
+ if not self.parse_request():
+ # An error code has been sent, just exit
+ return
+ mname = 'do_' + self.command
+ if not hasattr(self, mname):
+ self.send_error(
+ HTTPStatus.NOT_IMPLEMENTED,
+ "Unsupported method (%r)" % self.command)
+ return
+ method = getattr(self, mname)
+ method()
+ self.wfile.flush() #actually send the response if not already done.
+ except socket.timeout as e:
+ #a read or a write timed out. Discard this connection
+ self.log_error("Request timed out: %r", e)
+ self.close_connection = True
+ return
+
+ def handle(self):
+ """Handle multiple requests if necessary."""
+ self.close_connection = True
+
+ self.handle_one_request()
+ while not self.close_connection:
+ self.handle_one_request()
+
+ def send_error(self, code, message=None, explain=None):
+ """Send and log an error reply.
+
+ Arguments are
+ * code: an HTTP error code
+ 3 digits
+ * message: a simple optional 1 line reason phrase.
+ *( HTAB / SP / VCHAR / %x80-FF )
+ defaults to short entry matching the response code
+ * explain: a detailed message defaults to the long entry
+ matching the response code.
+
+ This sends an error response (so it must be called before any
+ output has been generated), logs the error, and finally sends
+ a piece of HTML explaining the error to the user.
+
+ """
+
+ try:
+ shortmsg, longmsg = self.responses[code]
+ except KeyError:
+ shortmsg, longmsg = '???', '???'
+ if message is None:
+ message = shortmsg
+ if explain is None:
+ explain = longmsg
+ self.log_error("code %d, message %s", code, message)
+ self.send_response(code, message)
+ self.send_header('Connection', 'close')
+
+ # Message body is omitted for cases described in:
+ # - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified)
+ # - RFC7231: 6.3.6. 205(Reset Content)
+ body = None
+ if (code >= 200 and
+ code not in (HTTPStatus.NO_CONTENT,
+ HTTPStatus.RESET_CONTENT,
+ HTTPStatus.NOT_MODIFIED)):
+ # HTML encode to prevent Cross Site Scripting attacks
+ # (see bug #1100201)
+ content = (self.error_message_format % {
+ 'code': code,
+ 'message': html.escape(message, quote=False),
+ 'explain': html.escape(explain, quote=False)
+ })
+ body = content.encode('UTF-8', 'replace')
+ self.send_header("Content-Type", self.error_content_type)
+ self.send_header('Content-Length', str(len(body)))
+ self.end_headers()
+
+ if self.command != 'HEAD' and body:
+ self.wfile.write(body)
+
+ def send_response(self, code, message=None):
+ """Add the response header to the headers buffer and log the
+ response code.
+
+ Also send two standard headers with the server software
+ version and the current date.
+
+ """
+ self.log_request(code)
+ self.send_response_only(code, message)
+ self.send_header('Server', self.version_string())
+ self.send_header('Date', self.date_time_string())
+
+ def send_response_only(self, code, message=None):
+ """Send the response header only."""
+ if self.request_version != 'HTTP/0.9':
+ if message is None:
+ if code in self.responses:
+ message = self.responses[code][0]
+ else:
+ message = ''
+ if not hasattr(self, '_headers_buffer'):
+ self._headers_buffer = []
+ self._headers_buffer.append(("%s %d %s\r\n" %
+ (self.protocol_version, code, message)).encode(
+ 'latin-1', 'strict'))
+
+ def send_header(self, keyword, value):
+ """Send a MIME header to the headers buffer."""
+ if self.request_version != 'HTTP/0.9':
+ if not hasattr(self, '_headers_buffer'):
+ self._headers_buffer = []
+ self._headers_buffer.append(
+ ("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict'))
+
+ if keyword.lower() == 'connection':
+ if value.lower() == 'close':
+ self.close_connection = True
+ elif value.lower() == 'keep-alive':
+ self.close_connection = False
+
+ def end_headers(self):
+ """Send the blank line ending the MIME headers."""
+ if self.request_version != 'HTTP/0.9':
+ self._headers_buffer.append(b"\r\n")
+ self.flush_headers()
+
+ def flush_headers(self):
+ if hasattr(self, '_headers_buffer'):
+ self.wfile.write(b"".join(self._headers_buffer))
+ self._headers_buffer = []
+
+ def log_request(self, code='-', size='-'):
+ """Log an accepted request.
+
+ This is called by send_response().
+
+ """
+ if isinstance(code, HTTPStatus):
+ code = code.value
+ self.log_message('"%s" %s %s',
+ self.requestline, str(code), str(size))
+
+ def log_error(self, format, *args):
+ """Log an error.
+
+ This is called when a request cannot be fulfilled. By
+ default it passes the message on to log_message().
+
+ Arguments are the same as for log_message().
+
+ XXX This should go to the separate error log.
+
+ """
+
+ self.log_message(format, *args)
+
+ def log_message(self, format, *args):
+ """Log an arbitrary message.
+
+ This is used by all other logging functions. Override
+ it if you have specific logging wishes.
+
+ The first argument, FORMAT, is a format string for the
+ message to be logged. If the format string contains
+ any % escapes requiring parameters, they should be
+ specified as subsequent arguments (it's just like
+ printf!).
+
+ The client ip and current date/time are prefixed to
+ every message.
+
+ """
+
+ sys.stderr.write("%s - - [%s] %s\n" %
+ (self.address_string(),
+ self.log_date_time_string(),
+ format%args))
+
+ def version_string(self):
+ """Return the server software version string."""
+ return self.server_version + ' ' + self.sys_version
+
+ def date_time_string(self, timestamp=None):
+ """Return the current date and time formatted for a message header."""
+ if timestamp is None:
+ timestamp = time.time()
+ return email.utils.formatdate(timestamp, usegmt=True)
+
+ def log_date_time_string(self):
+ """Return the current time formatted for logging."""
+ now = time.time()
+ year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
+ s = "%02d/%3s/%04d %02d:%02d:%02d" % (
+ day, self.monthname[month], year, hh, mm, ss)
+ return s
+
+ weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
+
+ monthname = [None,
+ 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+ 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
+
+ def address_string(self):
+ """Return the client address."""
+
+ return self.client_address[0]
+
+ # Essentially static class variables
+
+ # The version of the HTTP protocol we support.
+ # Set this to HTTP/1.1 to enable automatic keepalive
+ protocol_version = "HTTP/1.0"
+
+ # MessageClass used to parse headers
+ MessageClass = http_client.HTTPMessage
+
+ # hack to maintain backwards compatibility
+ responses = {
+ v: (v.phrase, v.description)
+ for v in HTTPStatus.__members__.values()
+ }
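+
+# Illustrative sketch, not part of the original source: handle_one_request()
+# dispatches to a do_<COMMAND> method, so a minimal GET-only handler is a
+# subclass such as this hypothetical one:
+#
+#     class HelloHandler(BaseHTTPRequestHandler):
+#         def do_GET(self):
+#             body = b'hello\n'
+#             self.send_response(HTTPStatus.OK)
+#             self.send_header('Content-Type', 'text/plain')
+#             self.send_header('Content-Length', str(len(body)))
+#             self.end_headers()
+#             self.wfile.write(body)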
+
+
+class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
+
+ """Simple HTTP request handler with GET and HEAD commands.
+
+ This serves files from the current directory and any of its
+ subdirectories. The MIME type for files is determined by
+ calling the .guess_type() method.
+
+ The GET and HEAD requests are identical except that the HEAD
+ request omits the actual contents of the file.
+
+ """
+
+ server_version = "SimpleHTTP/" + __version__
+
+ def do_GET(self):
+ """Serve a GET request."""
+ f = self.send_head()
+ if f:
+ try:
+ self.copyfile(f, self.wfile)
+ finally:
+ f.close()
+
+ def do_HEAD(self):
+ """Serve a HEAD request."""
+ f = self.send_head()
+ if f:
+ f.close()
+
+ def send_head(self):
+ """Common code for GET and HEAD commands.
+
+ This sends the response code and MIME headers.
+
+ Return value is either a file object (which has to be copied
+ to the outputfile by the caller unless the command was HEAD,
+ and must be closed by the caller under all circumstances), or
+ None, in which case the caller has nothing further to do.
+
+ """
+ path = self.translate_path(self.path)
+ f = None
+ if os.path.isdir(path):
+ parts = urllib.parse.urlsplit(self.path)
+ if not parts.path.endswith('/'):
+ # redirect browser - doing basically what apache does
+ self.send_response(HTTPStatus.MOVED_PERMANENTLY)
+ new_parts = (parts[0], parts[1], parts[2] + '/',
+ parts[3], parts[4])
+ new_url = urllib.parse.urlunsplit(new_parts)
+ self.send_header("Location", new_url)
+ self.end_headers()
+ return None
+ for index in "index.html", "index.htm":
+ index = os.path.join(path, index)
+ if os.path.exists(index):
+ path = index
+ break
+ else:
+ return self.list_directory(path)
+ ctype = self.guess_type(path)
+ try:
+ f = open(path, 'rb')
+ except OSError:
+ self.send_error(HTTPStatus.NOT_FOUND, "File not found")
+ return None
+ try:
+ self.send_response(HTTPStatus.OK)
+ self.send_header("Content-type", ctype)
+ fs = os.fstat(f.fileno())
+ self.send_header("Content-Length", str(fs[6]))
+ self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
+ self.end_headers()
+ return f
+ except:
+ f.close()
+ raise
+
+ def list_directory(self, path):
+ """Helper to produce a directory listing (absent index.html).
+
+ Return value is either a file object, or None (indicating an
+ error). In either case, the headers are sent, making the
+ interface the same as for send_head().
+
+ """
+ try:
+ list = os.listdir(path)
+ except OSError:
+ self.send_error(
+ HTTPStatus.NOT_FOUND,
+ "No permission to list directory")
+ return None
+ list.sort(key=lambda a: a.lower())
+ r = []
+ try:
+ displaypath = urllib.parse.unquote(self.path,
+ errors='surrogatepass')
+ except UnicodeDecodeError:
+ displaypath = urllib.parse.unquote(path)
+ displaypath = html.escape(displaypath, quote=False)
+ enc = sys.getfilesystemencoding()
+ title = 'Directory listing for %s' % displaypath
+ r.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" '
+ '"http://www.w3.org/TR/html4/strict.dtd">')
+ r.append('<html>\n<head>')
+ r.append('<meta http-equiv="Content-Type" '
+ 'content="text/html; charset=%s">' % enc)
+ r.append('<title>%s</title>\n</head>' % title)
+ r.append('<body>\n<h1>%s</h1>' % title)
+ r.append('<hr>\n<ul>')
+ for name in list:
+ fullname = os.path.join(path, name)
+ displayname = linkname = name
+ # Append / for directories or @ for symbolic links
+ if os.path.isdir(fullname):
+ displayname = name + "/"
+ linkname = name + "/"
+ if os.path.islink(fullname):
+ displayname = name + "@"
+ # Note: a link to a directory displays with @ and links with /
+ r.append('<li><a href="%s">%s</a></li>'
+ % (urllib.parse.quote(linkname, errors='surrogatepass'),
+ html.escape(displayname, quote=False)))
+ r.append('</ul>\n<hr>\n</body>\n</html>\n')
+ encoded = '\n'.join(r).encode(enc, 'surrogateescape')
+ f = io.BytesIO()
+ f.write(encoded)
+ f.seek(0)
+ self.send_response(HTTPStatus.OK)
+ self.send_header("Content-type", "text/html; charset=%s" % enc)
+ self.send_header("Content-Length", str(len(encoded)))
+ self.end_headers()
+ return f
+
+ def translate_path(self, path):
+ """Translate a /-separated PATH to the local filename syntax.
+
+ Components that mean special things to the local file system
+ (e.g. drive or directory names) are ignored. (XXX They should
+ probably be diagnosed.)
+
+ """
+ # abandon query parameters
+ path = path.split('?',1)[0]
+ path = path.split('#',1)[0]
+ # Don't forget explicit trailing slash when normalizing. Issue17324
+ trailing_slash = path.rstrip().endswith('/')
+ try:
+ path = urllib.parse.unquote(path, errors='surrogatepass')
+ except UnicodeDecodeError:
+ path = urllib.parse.unquote(path)
+ path = posixpath.normpath(path)
+ words = path.split('/')
+ words = filter(None, words)
+ path = os.getcwd()
+ for word in words:
+ if os.path.dirname(word) or word in (os.curdir, os.pardir):
+ # Ignore components that are not a simple file/directory name
+ continue
+ path = os.path.join(path, word)
+ if trailing_slash:
+ path += '/'
+ return path
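+
+ # Sketch of the mapping performed above (hypothetical paths, assuming the
+ # process cwd is /srv/www):
+ #
+ #     translate_path('/docs/a.txt?x=1')   -> '/srv/www/docs/a.txt'
+ #     translate_path('/../../etc/passwd') -> '/srv/www/etc/passwd'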
+
+ def copyfile(self, source, outputfile):
+ """Copy all data between two file objects.
+
+ The SOURCE argument is a file object open for reading
+ (or anything with a read() method) and the DESTINATION
+ argument is a file object open for writing (or
+ anything with a write() method).
+
+ The only reason for overriding this would be to change
+ the block size or perhaps to replace newlines by CRLF
+ -- note however that the default server uses this
+ to copy binary data as well.
+
+ """
+ shutil.copyfileobj(source, outputfile)
+
+ def guess_type(self, path):
+ """Guess the type of a file.
+
+ Argument is a PATH (a filename).
+
+ Return value is a string of the form type/subtype,
+ usable for a MIME Content-type header.
+
+ The default implementation looks the file's extension
+ up in the table self.extensions_map, using application/octet-stream
+ as a default; however it would be permissible (if
+ slow) to look inside the data to make a better guess.
+
+ """
+
+ base, ext = posixpath.splitext(path)
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ ext = ext.lower()
+ if ext in self.extensions_map:
+ return self.extensions_map[ext]
+ else:
+ return self.extensions_map['']
+
+ if not mimetypes.inited:
+ mimetypes.init() # try to read system mime.types
+ extensions_map = mimetypes.types_map.copy()
+ extensions_map.update({
+ '': 'application/octet-stream', # Default
+ '.py': 'text/plain',
+ '.c': 'text/plain',
+ '.h': 'text/plain',
+ })
+
+
+# Utilities for CGIHTTPRequestHandler
+
+def _url_collapse_path(path):
+ """
+ Given a URL path, remove extra '/'s and '.' path elements and collapse
+ any '..' references, returning the collapsed path.
+
+ Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
+ The utility of this function is limited to the is_cgi method and helps
+ prevent some security attacks.
+
+ Returns: The reconstituted URL, which will always start with a '/'.
+
+ Raises: IndexError if too many '..' occur within the path.
+
+ """
+ # Query component should not be involved.
+ path, _, query = path.partition('?')
+ path = urllib.parse.unquote(path)
+
+ # Similar to os.path.split(os.path.normpath(path)) but specific to URL
+ # path semantics rather than local operating system semantics.
+ path_parts = path.split('/')
+ head_parts = []
+ for part in path_parts[:-1]:
+ if part == '..':
+ head_parts.pop() # IndexError if more '..' than prior parts
+ elif part and part != '.':
+ head_parts.append( part )
+ if path_parts:
+ tail_part = path_parts.pop()
+ if tail_part:
+ if tail_part == '..':
+ head_parts.pop()
+ tail_part = ''
+ elif tail_part == '.':
+ tail_part = ''
+ else:
+ tail_part = ''
+
+ if query:
+ tail_part = '?'.join((tail_part, query))
+
+ splitpath = ('/' + '/'.join(head_parts), tail_part)
+ collapsed_path = "/".join(splitpath)
+
+ return collapsed_path
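+
+# Behavior sketch for the helper above (checked against its logic):
+#
+#     _url_collapse_path('/a/b/../c')         -> '/a/c'
+#     _url_collapse_path('//cgi-bin/./x?q=1') -> '/cgi-bin/x?q=1'
+#     _url_collapse_path('/..')               -> raises IndexError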
+
+
+
+nobody = None
+
+def nobody_uid():
+ """Internal routine to get nobody's uid"""
+ global nobody
+ if nobody:
+ return nobody
+ try:
+ import pwd
+ except ImportError:
+ return -1
+ try:
+ nobody = pwd.getpwnam('nobody')[2]
+ except KeyError:
+ nobody = 1 + max(x[2] for x in pwd.getpwall())
+ return nobody
+
+
+def executable(path):
+ """Test for executable file."""
+ return os.access(path, os.X_OK)
+
+
+class CGIHTTPRequestHandler(SimpleHTTPRequestHandler):
+
+ """Complete HTTP server with GET, HEAD and POST commands.
+
+ GET and HEAD also support running CGI scripts.
+
+ The POST command is *only* implemented for CGI scripts.
+
+ """
+
+ # Determine platform specifics
+ have_fork = hasattr(os, 'fork')
+
+ # Make rfile unbuffered -- we need to read one line and then pass
+ # the rest to a subprocess, so we can't use buffered input.
+ rbufsize = 0
+
+ def do_POST(self):
+ """Serve a POST request.
+
+ This is only implemented for CGI scripts.
+
+ """
+
+ if self.is_cgi():
+ self.run_cgi()
+ else:
+ self.send_error(
+ HTTPStatus.NOT_IMPLEMENTED,
+ "Can only POST to CGI scripts")
+
+ def send_head(self):
+ """Version of send_head that support CGI scripts"""
+ if self.is_cgi():
+ return self.run_cgi()
+ else:
+ return SimpleHTTPRequestHandler.send_head(self)
+
+ def is_cgi(self):
+ """Test whether self.path corresponds to a CGI script.
+
+ Returns True and updates the cgi_info attribute to the tuple
+ (dir, rest) if self.path requires running a CGI script.
+ Returns False otherwise.
+
+ If any exception is raised, the caller should assume that
+ self.path was rejected as invalid and act accordingly.
+
+ The default implementation tests whether the normalized url
+ path begins with one of the strings in self.cgi_directories
+ (and the next character is a '/' or the end of the string).
+
+ """
+ collapsed_path = _url_collapse_path(self.path)
+ dir_sep = collapsed_path.find('/', 1)
+ head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
+ if head in self.cgi_directories:
+ self.cgi_info = head, tail
+ return True
+ return False
+
+
+ cgi_directories = ['/cgi-bin', '/htbin']
+
+ def is_executable(self, path):
+ """Test whether argument path is an executable file."""
+ return executable(path)
+
+ def is_python(self, path):
+ """Test whether argument path is a Python script."""
+ head, tail = os.path.splitext(path)
+ return tail.lower() in (".py", ".pyw")
+
+ def run_cgi(self):
+ """Execute a CGI script."""
+ dir, rest = self.cgi_info
+ path = dir + '/' + rest
+ i = path.find('/', len(dir)+1)
+ while i >= 0:
+ nextdir = path[:i]
+ nextrest = path[i+1:]
+
+ scriptdir = self.translate_path(nextdir)
+ if os.path.isdir(scriptdir):
+ dir, rest = nextdir, nextrest
+ i = path.find('/', len(dir)+1)
+ else:
+ break
+
+ # find an explicit query string, if present.
+ rest, _, query = rest.partition('?')
+
+ # dissect the part after the directory name into a script name &
+ # a possible additional path, to be stored in PATH_INFO.
+ i = rest.find('/')
+ if i >= 0:
+ script, rest = rest[:i], rest[i:]
+ else:
+ script, rest = rest, ''
+
+ scriptname = dir + '/' + script
+ scriptfile = self.translate_path(scriptname)
+ if not os.path.exists(scriptfile):
+ self.send_error(
+ HTTPStatus.NOT_FOUND,
+ "No such CGI script (%r)" % scriptname)
+ return
+ if not os.path.isfile(scriptfile):
+ self.send_error(
+ HTTPStatus.FORBIDDEN,
+ "CGI script is not a plain file (%r)" % scriptname)
+ return
+ ispy = self.is_python(scriptname)
+ if self.have_fork or not ispy:
+ if not self.is_executable(scriptfile):
+ self.send_error(
+ HTTPStatus.FORBIDDEN,
+ "CGI script is not executable (%r)" % scriptname)
+ return
+
+ # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
+ # XXX Much of the following could be prepared ahead of time!
+ env = copy.deepcopy(os.environ)
+ env['SERVER_SOFTWARE'] = self.version_string()
+ env['SERVER_NAME'] = self.server.server_name
+ env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+ env['SERVER_PROTOCOL'] = self.protocol_version
+ env['SERVER_PORT'] = str(self.server.server_port)
+ env['REQUEST_METHOD'] = self.command
+ uqrest = urllib.parse.unquote(rest)
+ env['PATH_INFO'] = uqrest
+ env['PATH_TRANSLATED'] = self.translate_path(uqrest)
+ env['SCRIPT_NAME'] = scriptname
+ if query:
+ env['QUERY_STRING'] = query
+ env['REMOTE_ADDR'] = self.client_address[0]
+ authorization = self.headers.get("authorization")
+ if authorization:
+ authorization = authorization.split()
+ if len(authorization) == 2:
+ import base64, binascii
+ env['AUTH_TYPE'] = authorization[0]
+ if authorization[0].lower() == "basic":
+ try:
+ authorization = authorization[1].encode('ascii')
+ authorization = base64.decodebytes(authorization).\
+ decode('ascii')
+ except (binascii.Error, UnicodeError):
+ pass
+ else:
+ authorization = authorization.split(':')
+ if len(authorization) == 2:
+ env['REMOTE_USER'] = authorization[0]
+ # XXX REMOTE_IDENT
+ if self.headers.get('content-type') is None:
+ env['CONTENT_TYPE'] = self.headers.get_content_type()
+ else:
+ env['CONTENT_TYPE'] = self.headers['content-type']
+ length = self.headers.get('content-length')
+ if length:
+ env['CONTENT_LENGTH'] = length
+ referer = self.headers.get('referer')
+ if referer:
+ env['HTTP_REFERER'] = referer
+ accept = []
+ for line in self.headers.getallmatchingheaders('accept'):
+ if line[:1] in "\t\n\r ":
+ accept.append(line.strip())
+ else:
+ accept = accept + line[7:].split(',')
+ env['HTTP_ACCEPT'] = ','.join(accept)
+ ua = self.headers.get('user-agent')
+ if ua:
+ env['HTTP_USER_AGENT'] = ua
+ co = filter(None, self.headers.get_all('cookie', []))
+ cookie_str = ', '.join(co)
+ if cookie_str:
+ env['HTTP_COOKIE'] = cookie_str
+ # XXX Other HTTP_* headers
+ # Since we're setting the env in the parent, provide empty
+ # values to override previously set values
+ for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
+ 'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
+ env.setdefault(k, "")
+
+ self.send_response(HTTPStatus.OK, "Script output follows")
+ self.flush_headers()
+
+ decoded_query = query.replace('+', ' ')
+
+ if self.have_fork:
+ # Unix -- fork as we should
+ args = [script]
+ if '=' not in decoded_query:
+ args.append(decoded_query)
+ nobody = nobody_uid()
+ self.wfile.flush() # Always flush before forking
+ pid = os.fork()
+ if pid != 0:
+ # Parent
+ pid, sts = os.waitpid(pid, 0)
+ # throw away additional data [see bug #427345]
+ while select.select([self.rfile], [], [], 0)[0]:
+ if not self.rfile.read(1):
+ break
+ if sts:
+ self.log_error("CGI script exit status %#x", sts)
+ return
+ # Child
+ try:
+ try:
+ os.setuid(nobody)
+ except OSError:
+ pass
+ os.dup2(self.rfile.fileno(), 0)
+ os.dup2(self.wfile.fileno(), 1)
+ os.execve(scriptfile, args, env)
+ except:
+ self.server.handle_error(self.request, self.client_address)
+ os._exit(127)
+
+ else:
+ # Non-Unix -- use subprocess
+ cmdline = [scriptfile]
+ if self.is_python(scriptfile):
+ interp = sys.executable
+ if interp.lower().endswith("w.exe"):
+ # On Windows, use python.exe, not pythonw.exe
+ interp = interp[:-5] + interp[-4:]
+ cmdline = [interp, '-u'] + cmdline
+ if '=' not in query:
+ cmdline.append(query)
+ self.log_message("command: %s", subprocess.list2cmdline(cmdline))
+ try:
+ nbytes = int(length)
+ except (TypeError, ValueError):
+ nbytes = 0
+ p = subprocess.Popen(cmdline,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env = env
+ )
+ if self.command.lower() == "post" and nbytes > 0:
+ data = self.rfile.read(nbytes)
+ else:
+ data = None
+ # throw away additional data [see bug #427345]
+ while select.select([self.rfile._sock], [], [], 0)[0]:
+ if not self.rfile._sock.recv(1):
+ break
+ stdout, stderr = p.communicate(data)
+ self.wfile.write(stdout)
+ if stderr:
+ self.log_error('%s', stderr)
+ p.stderr.close()
+ p.stdout.close()
+ status = p.returncode
+ if status:
+ self.log_error("CGI script exit status %#x", status)
+ else:
+ self.log_message("CGI script exited OK")
+
+
+def test(HandlerClass=BaseHTTPRequestHandler,
+ ServerClass=HTTPServer, protocol="HTTP/1.0", port=8000, bind=""):
+ """Test the HTTP request handler class.
+
+ This runs an HTTP server on port 8000 (or the port argument).
+
+ """
+ server_address = (bind, port)
+
+ HandlerClass.protocol_version = protocol
+ with ServerClass(server_address, HandlerClass) as httpd:
+ sa = httpd.socket.getsockname()
+ serve_message = "Serving HTTP on {host} port {port} (http://{host}:{port}/) ..."
+ print(serve_message.format(host=sa[0], port=sa[1]))
+ try:
+ httpd.serve_forever()
+ except KeyboardInterrupt:
+ print("\nKeyboard interrupt received, exiting.")
+ sys.exit(0)
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cgi', action='store_true',
+ help='Run as CGI Server')
+ parser.add_argument('--bind', '-b', default='', metavar='ADDRESS',
+ help='Specify alternate bind address '
+ '[default: all interfaces]')
+ parser.add_argument('port', action='store',
+ default=8000, type=int,
+ nargs='?',
+ help='Specify alternate port [default: 8000]')
+ args = parser.parse_args()
+ if args.cgi:
+ handler_class = CGIHTTPRequestHandler
+ else:
+ handler_class = SimpleHTTPRequestHandler
+ test(HandlerClass=handler_class, port=args.port, bind=args.bind)
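+
+# Usage sketch (assuming this module is saved as server.py):
+#
+#     $ python server.py 8080              # serve the cwd on port 8080
+#     $ python server.py --bind 127.0.0.1  # bind one interface only
+#     $ python server.py --cgi             # use CGIHTTPRequestHandler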
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/httplib.py b/tapdown/lib/python3.11/site-packages/eventlet/green/httplib.py
new file mode 100644
index 0000000..f67dbfe
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/httplib.py
@@ -0,0 +1,18 @@
+from eventlet import patcher
+from eventlet.green import socket
+
+to_patch = [('socket', socket)]
+
+try:
+ from eventlet.green import ssl
+ to_patch.append(('ssl', ssl))
+except ImportError:
+ pass
+
+from eventlet.green.http import client
+for name in dir(client):
+ if name not in patcher.__exclude:
+ globals()[name] = getattr(client, name)
+
+if __name__ == '__main__':
+ test()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/os.py b/tapdown/lib/python3.11/site-packages/eventlet/green/os.py
new file mode 100644
index 0000000..5942f36
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/os.py
@@ -0,0 +1,133 @@
+os_orig = __import__("os")
+import errno
+socket = __import__("socket")
+from stat import S_ISREG
+
+from eventlet import greenio
+from eventlet.support import get_errno
+from eventlet import greenthread
+from eventlet import hubs
+from eventlet.patcher import slurp_properties
+
+__all__ = os_orig.__all__
+__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']
+
+slurp_properties(
+ os_orig,
+ globals(),
+ ignore=__patched__,
+ srckeys=dir(os_orig))
+
+
+def fdopen(fd, *args, **kw):
+ """fdopen(fd [, mode='r' [, bufsize]]) -> file_object
+
+ Return an open file object connected to a file descriptor."""
+ if not isinstance(fd, int):
+ raise TypeError('fd should be int, not %r' % fd)
+ try:
+ return greenio.GreenPipe(fd, *args, **kw)
+ except OSError as e:
+ raise OSError(*e.args)
+
+
+__original_read__ = os_orig.read
+
+
+def read(fd, n):
+ """read(fd, buffersize) -> string
+
+ Read a file descriptor."""
+ while True:
+ # don't wait to read for regular files
+ # select/poll will always return True while epoll will simply crash
+ st_mode = os_orig.stat(fd).st_mode
+ if not S_ISREG(st_mode):
+ try:
+ hubs.trampoline(fd, read=True)
+ except hubs.IOClosed:
+ return ''
+
+ try:
+ return __original_read__(fd, n)
+ except OSError as e:
+ if get_errno(e) == errno.EPIPE:
+ return ''
+ if get_errno(e) != errno.EAGAIN:
+ raise
+
+
+__original_write__ = os_orig.write
+
+
+def write(fd, st):
+ """write(fd, string) -> byteswritten
+
+ Write a string to a file descriptor.
+ """
+ while True:
+ # don't wait to write for regular files
+ # select/poll will always return True while epoll will simply crash
+ st_mode = os_orig.stat(fd).st_mode
+ if not S_ISREG(st_mode):
+ try:
+ hubs.trampoline(fd, write=True)
+ except hubs.IOClosed:
+ return 0
+
+ try:
+ return __original_write__(fd, st)
+ except OSError as e:
+ if get_errno(e) not in [errno.EAGAIN, errno.EPIPE]:
+ raise
+
+
+def wait():
+ """wait() -> (pid, status)
+
+ Wait for completion of a child process."""
+ return waitpid(0, 0)
+
+
+__original_waitpid__ = os_orig.waitpid
+
+
+def waitpid(pid, options):
+ """waitpid(...)
+ waitpid(pid, options) -> (pid, status)
+
+ Wait for completion of a given child process."""
+ if options & os_orig.WNOHANG != 0:
+ return __original_waitpid__(pid, options)
+ else:
+ new_options = options | os_orig.WNOHANG
+ while True:
+ rpid, status = __original_waitpid__(pid, new_options)
+ if rpid and status >= 0:
+ return rpid, status
+ greenthread.sleep(0.01)
+
+
+__original_open__ = os_orig.open
+
+
+def open(file, flags, mode=0o777, dir_fd=None):
+ """ Wrap os.open
+ This behaves identically, but collaborates with
+ the hub's notify_opened protocol.
+ """
+ # pathlib workaround #534 pathlib._NormalAccessor wraps `open` in
+ # `staticmethod` for py < 3.7 but not 3.7. That means we get here with
+ # `file` being a pathlib._NormalAccessor object, and the other arguments
+ # shifted. Fortunately pathlib doesn't use the `dir_fd` argument, so we
+ # have space in the parameter list. We use some heuristics to detect this
+ # and adjust the parameters (without importing pathlib)
+ if type(file).__name__ == '_NormalAccessor':
+ file, flags, mode, dir_fd = flags, mode, dir_fd, None
+
+ if dir_fd is not None:
+ fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
+ else:
+ fd = __original_open__(file, flags, mode)
+ hubs.notify_opened(fd)
+ return fd
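+
+
+# Usage sketch, not part of the module: with the patched functions above,
+# reading a pipe yields to other greenthreads instead of blocking the hub.
+#
+#     from eventlet.green import os as green_os
+#     r, w = green_os.pipe()
+#     green_os.write(w, b'ping')
+#     data = green_os.read(r, 4)  # trampolines until the fd is readable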
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/profile.py b/tapdown/lib/python3.11/site-packages/eventlet/green/profile.py
new file mode 100644
index 0000000..a03b507
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/profile.py
@@ -0,0 +1,257 @@
+# Copyright (c) 2010, CCP Games
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+# * Neither the name of CCP Games nor the
+# names of its contributors may be used to endorse or promote products
+# derived from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""This module is API-equivalent to the standard library :mod:`profile` module
+but it is greenthread-aware as well as thread-aware. Use this module
+to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
+FIXME: No testcases for this module.
+"""
+
+profile_orig = __import__('profile')
+__all__ = profile_orig.__all__
+
+from eventlet.patcher import slurp_properties
+slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))
+
+import sys
+import functools
+
+from eventlet import greenthread
+from eventlet import patcher
+import _thread
+
+thread = patcher.original(_thread.__name__) # non-monkeypatched module needed
+
+
+# This class provides the start() and stop() functions
+class Profile(profile_orig.Profile):
+ base = profile_orig.Profile
+
+ def __init__(self, timer=None, bias=None):
+ self.current_tasklet = greenthread.getcurrent()
+ self.thread_id = thread.get_ident()
+ self.base.__init__(self, timer, bias)
+ self.sleeping = {}
+
+ def __call__(self, *args):
+ """make callable, allowing an instance to be the profiler"""
+ self.dispatcher(*args)
+
+ def _setup(self):
+ self._has_setup = True
+ self.cur = None
+ self.timings = {}
+ self.current_tasklet = greenthread.getcurrent()
+ self.thread_id = thread.get_ident()
+ self.simulate_call("profiler")
+
+ def start(self, name="start"):
+ if getattr(self, "running", False):
+ return
+ self._setup()
+ self.simulate_call("start")
+ self.running = True
+ sys.setprofile(self.dispatcher)
+
+ def stop(self):
+ sys.setprofile(None)
+ self.running = False
+ self.TallyTimings()
+
+ # special cases for the original run commands, making sure to
+ # clear the timer context.
+ def runctx(self, cmd, globals, locals):
+ if not getattr(self, "_has_setup", False):
+ self._setup()
+ try:
+ return profile_orig.Profile.runctx(self, cmd, globals, locals)
+ finally:
+ self.TallyTimings()
+
+ def runcall(self, func, *args, **kw):
+ if not getattr(self, "_has_setup", False):
+ self._setup()
+ try:
+ return profile_orig.Profile.runcall(self, func, *args, **kw)
+ finally:
+ self.TallyTimings()
+
+ def trace_dispatch_return_extend_back(self, frame, t):
+ """A hack function to override error checking in parent class. It
+ allows invalid returns (where frames weren't previously entered into
+ the profiler), which can happen for all the tasklets that suddenly start
+ to get monitored. This means that the time will eventually be attributed
+ to a call high in the chain, when there is a tasklet switch.
+ """
+ if isinstance(self.cur[-2], Profile.fake_frame):
+ return False
+ self.trace_dispatch_call(frame, 0)
+ return self.trace_dispatch_return(frame, t)
+
+ def trace_dispatch_c_return_extend_back(self, frame, t):
+ # same for c return
+ if isinstance(self.cur[-2], Profile.fake_frame):
+ return False # ignore bogus returns
+ self.trace_dispatch_c_call(frame, 0)
+ return self.trace_dispatch_return(frame, t)
+
+ def SwitchTasklet(self, t0, t1, t):
+ # tally the time spent in the old tasklet
+ pt, it, et, fn, frame, rcur = self.cur
+ cur = (pt, it + t, et, fn, frame, rcur)
+
+ # we are switching to a new tasklet, store the old
+ self.sleeping[t0] = cur, self.timings
+ self.current_tasklet = t1
+
+ # find the new one
+ try:
+ self.cur, self.timings = self.sleeping.pop(t1)
+ except KeyError:
+ self.cur, self.timings = None, {}
+ self.simulate_call("profiler")
+ self.simulate_call("new_tasklet")
+
+ def TallyTimings(self):
+ oldtimings = self.sleeping
+ self.sleeping = {}
+
+ # first, unwind the main "cur"
+ self.cur = self.Unwind(self.cur, self.timings)
+
+ # we must keep the timings dicts separate for each tasklet, since each
+ # contains the 'ns' item, the recursion count of each function in that
+ # tasklet. This is used in Unwind() below.
+ for tasklet, (cur, timings) in oldtimings.items():
+ self.Unwind(cur, timings)
+
+ for k, v in timings.items():
+ if k not in self.timings:
+ self.timings[k] = v
+ else:
+ # accumulate all to the self.timings
+ cc, ns, tt, ct, callers = self.timings[k]
+ # ns should be 0 after unwinding
+ cc += v[0]
+ tt += v[2]
+ ct += v[3]
+ for k1, v1 in v[4].items():
+ callers[k1] = callers.get(k1, 0) + v1
+ self.timings[k] = cc, ns, tt, ct, callers
+
+ def Unwind(self, cur, timings):
+ "A function to unwind a 'cur' frame and tally the results"
+ "see profile.trace_dispatch_return() for details"
+ # also see simulate_cmd_complete()
+ while cur[-1]:
+ rpt, rit, ret, rfn, frame, rcur = cur
+ frame_total = rit + ret
+
+ if rfn in timings:
+ cc, ns, tt, ct, callers = timings[rfn]
+ else:
+ cc, ns, tt, ct, callers = 0, 0, 0, 0, {}
+
+ if not ns:
+ ct = ct + frame_total
+ cc = cc + 1
+
+ if rcur:
+ ppt, pit, pet, pfn, pframe, pcur = rcur
+ else:
+ pfn = None
+
+ if pfn in callers:
+ callers[pfn] = callers[pfn] + 1 # hack: gather more
+ elif pfn:
+ callers[pfn] = 1
+
+ timings[rfn] = cc, ns - 1, tt + rit, ct, callers
+
+ ppt, pit, pet, pfn, pframe, pcur = rcur
+ rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
+ cur = rcur
+ return cur
+
+
+def ContextWrap(f):
+ @functools.wraps(f)
+ def ContextWrapper(self, arg, t):
+ current = greenthread.getcurrent()
+ if current != self.current_tasklet:
+ self.SwitchTasklet(self.current_tasklet, current, t)
+ t = 0.0 # the time was billed to the previous tasklet
+ return f(self, arg, t)
+ return ContextWrapper
+
+
+# Add "return safety" to the dispatchers
+Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
+ 'return': Profile.trace_dispatch_return_extend_back,
+ 'c_return': Profile.trace_dispatch_c_return_extend_back,
+})
+# Add automatic tasklet detection to the callbacks.
+Profile.dispatch = {k: ContextWrap(v) for k, v in Profile.dispatch.items()}
+
+
+# run statements shamelessly stolen from profile.py
+def run(statement, filename=None, sort=-1):
+ """Run statement under profiler optionally saving results in filename
+
+ This function takes a single argument that can be passed to the
+ "exec" statement, and an optional file name. In all cases this
+ routine attempts to "exec" its first argument and gather profiling
+ statistics from the execution. If no file name is present, then this
+ function automatically prints a simple profiling report, sorted by the
+ standard name string (file/line/function-name) that is presented in
+ each line.
+ """
+ prof = Profile()
+ try:
+ prof = prof.run(statement)
+ except SystemExit:
+ pass
+ if filename is not None:
+ prof.dump_stats(filename)
+ else:
+ return prof.print_stats(sort)
+
+
+def runctx(statement, globals, locals, filename=None):
+ """Run statement under profiler, supplying your own globals and locals,
+ optionally saving results in filename.
+
+ statement and filename have the same semantics as profile.run
+ """
+ prof = Profile()
+ try:
+ prof = prof.runctx(statement, globals, locals)
+ except SystemExit:
+ pass
+
+ if filename is not None:
+ prof.dump_stats(filename)
+ else:
+ return prof.print_stats()
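+
+
+# Usage sketch (my_task is a hypothetical function in the profiled namespace):
+#
+#     from eventlet.green import profile
+#     profile.run('my_task()')               # print stats to stdout
+#     profile.run('my_task()', 'out.prof')   # or dump stats to a file
+#
+# Alternatively, Profile().start() / .stop() can bracket arbitrary code.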
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/select.py b/tapdown/lib/python3.11/site-packages/eventlet/green/select.py
new file mode 100644
index 0000000..a87d10d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/select.py
@@ -0,0 +1,86 @@
+import eventlet
+from eventlet.hubs import get_hub
+__select = eventlet.patcher.original('select')
+error = __select.error
+
+
+__patched__ = ['select']
+__deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']
+
+
+def get_fileno(obj):
+ # The purpose of this function is to exactly replicate
+ # the behavior of the select module when confronted with
+ # abnormal filenos; the details are extensively tested in
+ # the stdlib test/test_select.py.
+ try:
+ f = obj.fileno
+ except AttributeError:
+ if not isinstance(obj, int):
+ raise TypeError("Expected int or long, got %s" % type(obj))
+ return obj
+ else:
+ rv = f()
+ if not isinstance(rv, int):
+ raise TypeError("Expected int or long, got %s" % type(rv))
+ return rv
+
+
+def select(read_list, write_list, error_list, timeout=None):
+ # error checking like this is required by the stdlib unit tests
+ if timeout is not None:
+ try:
+ timeout = float(timeout)
+ except ValueError:
+ raise TypeError("Expected number for timeout")
+ hub = get_hub()
+ timers = []
+ current = eventlet.getcurrent()
+ if hub.greenlet is current:
+ raise RuntimeError('do not call blocking functions from the mainloop')
+ ds = {}
+ for r in read_list:
+ ds[get_fileno(r)] = {'read': r}
+ for w in write_list:
+ ds.setdefault(get_fileno(w), {})['write'] = w
+ for e in error_list:
+ ds.setdefault(get_fileno(e), {})['error'] = e
+
+ listeners = []
+
+ def on_read(d):
+ original = ds[get_fileno(d)]['read']
+ current.switch(([original], [], []))
+
+ def on_write(d):
+ original = ds[get_fileno(d)]['write']
+ current.switch(([], [original], []))
+
+ def on_timeout2():
+ current.switch(([], [], []))
+
+ def on_timeout():
+ # ensure that BaseHub.run() has a chance to call self.wait()
+ # at least once before timed out. otherwise the following code
+ # can time out erroneously.
+ #
+ # s1, s2 = socket.socketpair()
+ # print(select.select([], [s1], [], 0))
+ timers.append(hub.schedule_call_global(0, on_timeout2))
+
+ if timeout is not None:
+ timers.append(hub.schedule_call_global(timeout, on_timeout))
+ try:
+ for k, v in ds.items():
+ if v.get('read'):
+ listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None))
+ if v.get('write'):
+ listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None))
+ try:
+ return hub.switch()
+ finally:
+ for l in listeners:
+ hub.remove(l)
+ finally:
+ for t in timers:
+ t.cancel()
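+
+
+# Usage sketch, not part of the module: this green select() parks only the
+# calling greenthread on the hub rather than blocking the whole process.
+#
+#     from eventlet.green import select, socket
+#     s1, s2 = socket.socketpair()
+#     s2.send(b'x')
+#     readable, writable, _ = select.select([s1], [], [], 1.0)
+#     assert readable == [s1]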
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/selectors.py b/tapdown/lib/python3.11/site-packages/eventlet/green/selectors.py
new file mode 100644
index 0000000..81fc862
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/selectors.py
@@ -0,0 +1,34 @@
+import sys
+
+from eventlet import patcher
+from eventlet.green import select
+
+__patched__ = [
+ 'DefaultSelector',
+ 'SelectSelector',
+]
+
+# We only have green select so the options are:
+# * leave it be and have selectors that block
+# * try to pretend the "bad" selectors don't exist
+# * replace all with SelectSelector at the price of possibly different
+# performance characteristics and a missing fileno() method (if someone
+# uses it, it'll result in a crash; we may want to implement it in the future)
+#
+# This module used to follow the third approach, but just removing the offending
+# selectors is a less error-prone and less confusing approach.
+__deleted__ = [
+ 'PollSelector',
+ 'EpollSelector',
+ 'DevpollSelector',
+ 'KqueueSelector',
+]
+
+patcher.inject('selectors', globals(), ('select', select))
+
+del patcher
+
+if sys.platform != 'win32':
+ SelectSelector._select = staticmethod(select.select)
+
+DefaultSelector = SelectSelector
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/socket.py b/tapdown/lib/python3.11/site-packages/eventlet/green/socket.py
new file mode 100644
index 0000000..6a39caf
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/socket.py
@@ -0,0 +1,63 @@
+import os
+import sys
+
+__import__('eventlet.green._socket_nodns')
+__socket = sys.modules['eventlet.green._socket_nodns']
+
+__all__ = __socket.__all__
+__patched__ = __socket.__patched__ + [
+ 'create_connection',
+ 'getaddrinfo',
+ 'gethostbyname',
+ 'gethostbyname_ex',
+ 'getnameinfo',
+]
+
+from eventlet.patcher import slurp_properties
+slurp_properties(__socket, globals(), srckeys=dir(__socket))
+
+
+if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes':
+ from eventlet.support import greendns
+ gethostbyname = greendns.gethostbyname
+ getaddrinfo = greendns.getaddrinfo
+ gethostbyname_ex = greendns.gethostbyname_ex
+ getnameinfo = greendns.getnameinfo
+ del greendns
+
+
+def create_connection(address,
+ timeout=_GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`getdefaulttimeout`
+ is used.
+ """
+
+ err = "getaddrinfo returns an empty list"
+ host, port = address
+ for res in getaddrinfo(host, port, 0, SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket(af, socktype, proto)
+ if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+
+ if not isinstance(err, error):
+ err = error(err)
+ raise err
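+
+
+# Usage sketch (hypothetical host):
+#
+#     sock = create_connection(('example.com', 80), timeout=5)
+#     sock.sendall(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')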
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/ssl.py b/tapdown/lib/python3.11/site-packages/eventlet/green/ssl.py
new file mode 100644
index 0000000..7ceb3c7
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/ssl.py
@@ -0,0 +1,487 @@
+__ssl = __import__('ssl')
+
+from eventlet.patcher import slurp_properties
+slurp_properties(__ssl, globals(), srckeys=dir(__ssl))
+
+import sys
+from eventlet import greenio, hubs
+from eventlet.greenio import (
+ GreenSocket, CONNECT_ERR, CONNECT_SUCCESS,
+)
+from eventlet.hubs import trampoline, IOClosed
+from eventlet.support import get_errno, PY33
+from contextlib import contextmanager
+
+orig_socket = __import__('socket')
+socket = orig_socket.socket
+timeout_exc = orig_socket.timeout
+
+__patched__ = [
+ 'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
+ 'create_default_context', '_create_default_https_context']
+
+_original_sslsocket = __ssl.SSLSocket
+_original_sslcontext = __ssl.SSLContext
+_is_py_3_7 = sys.version_info[:2] == (3, 7)
+_original_wrap_socket = __ssl.SSLContext.wrap_socket
+
+
+@contextmanager
+def _original_ssl_context(*args, **kwargs):
+ tmp_sslcontext = _original_wrap_socket.__globals__.get('SSLContext', None)
+ tmp_sslsocket = _original_sslsocket._create.__globals__.get('SSLSocket', None)
+ _original_sslsocket._create.__globals__['SSLSocket'] = _original_sslsocket
+ _original_wrap_socket.__globals__['SSLContext'] = _original_sslcontext
+ try:
+ yield
+ finally:
+ _original_wrap_socket.__globals__['SSLContext'] = tmp_sslcontext
+ _original_sslsocket._create.__globals__['SSLSocket'] = tmp_sslsocket
+
+
+class GreenSSLSocket(_original_sslsocket):
+ """ This is a green version of the SSLSocket class from the ssl module added
+ in 2.6. For documentation on it, please see the Python standard
+ documentation.
+
+ Python nonblocking ssl objects don't give errors when the other end
+ of the socket is closed (they do notice when the other end is shutdown,
+ though). Any write/read operations will simply hang if the socket is
+ closed from the other end. There is no obvious fix for this problem;
+ it appears to be a limitation of Python's ssl object implementation.
+ A workaround is to set a reasonable timeout on the socket using
+ settimeout(), and to close/reopen the connection when a timeout
+ occurs at an unexpected juncture in the code.
+ """
+ def __new__(cls, sock=None, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=CERT_NONE,
+ ssl_version=PROTOCOL_TLS, ca_certs=None,
+ do_handshake_on_connect=True, *args, **kw):
+ if not isinstance(sock, GreenSocket):
+ sock = GreenSocket(sock)
+ with _original_ssl_context():
+ context = kw.get('_context')
+ if context:
+ ret = _original_sslsocket._create(
+ sock=sock.fd,
+ server_side=server_side,
+ do_handshake_on_connect=False,
+ suppress_ragged_eofs=kw.get('suppress_ragged_eofs', True),
+ server_hostname=kw.get('server_hostname'),
+ context=context,
+ session=kw.get('session'),
+ )
+ else:
+ ret = cls._wrap_socket(
+ sock=sock.fd,
+ keyfile=keyfile,
+ certfile=certfile,
+ server_side=server_side,
+ cert_reqs=cert_reqs,
+ ssl_version=ssl_version,
+ ca_certs=ca_certs,
+ do_handshake_on_connect=False,
+ ciphers=kw.get('ciphers'),
+ )
+ ret.keyfile = keyfile
+ ret.certfile = certfile
+ ret.cert_reqs = cert_reqs
+ ret.ssl_version = ssl_version
+ ret.ca_certs = ca_certs
+ ret.__class__ = GreenSSLSocket
+ return ret
+
+ @staticmethod
+ def _wrap_socket(sock, keyfile, certfile, server_side, cert_reqs,
+ ssl_version, ca_certs, do_handshake_on_connect, ciphers):
+ context = _original_sslcontext(protocol=ssl_version)
+ context.options |= cert_reqs
+ if certfile or keyfile:
+ context.load_cert_chain(
+ certfile=certfile,
+ keyfile=keyfile,
+ )
+ if ca_certs:
+ context.load_verify_locations(ca_certs)
+ if ciphers:
+ context.set_ciphers(ciphers)
+ return context.wrap_socket(
+ sock=sock,
+ server_side=server_side,
+ do_handshake_on_connect=do_handshake_on_connect,
+ )
+
+ # we are inheriting from SSLSocket because its constructor calls
+ # do_handshake whose behavior we wish to override
+ def __init__(self, sock, keyfile=None, certfile=None,
+ server_side=False, cert_reqs=CERT_NONE,
+ ssl_version=PROTOCOL_TLS, ca_certs=None,
+ do_handshake_on_connect=True, *args, **kw):
+ if not isinstance(sock, GreenSocket):
+ sock = GreenSocket(sock)
+ self.act_non_blocking = sock.act_non_blocking
+
+ # the superclass initializer trashes the methods so we remove
+ # the local-object versions of them and let the actual class
+ # methods shine through
+ # Note: this is for Python 2
+ try:
+ for fn in orig_socket._delegate_methods:
+ delattr(self, fn)
+ except AttributeError:
+ pass
+
+ # Python 3 SSLSocket construction process overwrites the timeout so restore it
+ self._timeout = sock.gettimeout()
+
+ # it also sets timeout to None internally apparently (tested with 3.4.2)
+ _original_sslsocket.settimeout(self, 0.0)
+ assert _original_sslsocket.gettimeout(self) == 0.0
+
+ # see note above about handshaking
+ self.do_handshake_on_connect = do_handshake_on_connect
+ if do_handshake_on_connect and self._connected:
+ self.do_handshake()
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def setblocking(self, flag):
+ if flag:
+ self.act_non_blocking = False
+ self._timeout = None
+ else:
+ self.act_non_blocking = True
+ self._timeout = 0.0
+
+ def _call_trampolining(self, func, *a, **kw):
+ if self.act_non_blocking:
+ return func(*a, **kw)
+ else:
+ while True:
+ try:
+ return func(*a, **kw)
+ except SSLError as exc:
+ if get_errno(exc) == SSL_ERROR_WANT_READ:
+ trampoline(self,
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
+ trampoline(self,
+ write=True,
+ timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ elif _is_py_3_7 and "unexpected eof" in exc.args[1]:
+ # For reasons I don't understand on 3.7 we get [ssl:
+ # KRB5_S_TKT_NYV] unexpected eof while reading]
+ # errors...
+ raise IOClosed
+ else:
+ raise
+
+ def write(self, data):
+ """Write DATA to the underlying SSL channel. Returns
+ number of bytes of DATA actually transmitted."""
+ return self._call_trampolining(
+ super().write, data)
+
+ def read(self, len=1024, buffer=None):
+ """Read up to LEN bytes and return them.
+ Return zero-length string on EOF."""
+ try:
+ return self._call_trampolining(
+ super().read, len, buffer)
+ except IOClosed:
+ if buffer is None:
+ return b''
+ else:
+ return 0
+
+ def send(self, data, flags=0):
+ if self._sslobj:
+ return self._call_trampolining(
+ super().send, data, flags)
+ else:
+ trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+ return socket.send(self, data, flags)
+
+ def sendto(self, data, addr, flags=0):
+ # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+ if self._sslobj:
+ raise ValueError("sendto not allowed on instances of %s" %
+ self.__class__)
+ else:
+ trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+ return socket.sendto(self, data, addr, flags)
+
+ def sendall(self, data, flags=0):
+ # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+ if self._sslobj:
+ if flags != 0:
+ raise ValueError(
+ "non-zero flags not allowed in calls to sendall() on %s" %
+ self.__class__)
+ amount = len(data)
+ count = 0
+ data_to_send = data
+ while (count < amount):
+ v = self.send(data_to_send)
+ count += v
+ if v == 0:
+ trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
+ else:
+ data_to_send = data[count:]
+ return amount
+ else:
+ while True:
+ try:
+ return socket.sendall(self, data, flags)
+ except orig_socket.error as e:
+ if self.act_non_blocking:
+ raise
+ erno = get_errno(e)
+ if erno in greenio.SOCKET_BLOCKING:
+ trampoline(self, write=True,
+ timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+ elif erno in greenio.SOCKET_CLOSED:
+ return ''
+ raise
+
+ def recv(self, buflen=1024, flags=0):
+ return self._base_recv(buflen, flags, into=False)
+
+ def recv_into(self, buffer, nbytes=None, flags=0):
+ # Copied verbatim from CPython
+ if buffer and nbytes is None:
+ nbytes = len(buffer)
+ elif nbytes is None:
+ nbytes = 1024
+ # end of CPython code
+
+ return self._base_recv(nbytes, flags, into=True, buffer_=buffer)
+
+ def _base_recv(self, nbytes, flags, into, buffer_=None):
+ if into:
+ plain_socket_function = socket.recv_into
+ else:
+ plain_socket_function = socket.recv
+
+ # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
+ if self._sslobj:
+ if flags != 0:
+ raise ValueError(
+ "non-zero flags not allowed in calls to %s() on %s" %
+ (plain_socket_function.__name__, self.__class__))
+ if into:
+ read = self.read(nbytes, buffer_)
+ else:
+ read = self.read(nbytes)
+ return read
+ else:
+ while True:
+ try:
+ args = [self, nbytes, flags]
+ if into:
+ args.insert(1, buffer_)
+ return plain_socket_function(*args)
+ except orig_socket.error as e:
+ if self.act_non_blocking:
+ raise
+ erno = get_errno(e)
+ if erno in greenio.SOCKET_BLOCKING:
+ try:
+ trampoline(
+ self, read=True,
+ timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
+ except IOClosed:
+ return b''
+ elif erno in greenio.SOCKET_CLOSED:
+ return b''
+ raise
+
+ def recvfrom(self, addr, buflen=1024, flags=0):
+ if not self.act_non_blocking:
+ trampoline(self, read=True, timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ return super().recvfrom(addr, buflen, flags)
+
+ def recvfrom_into(self, buffer, nbytes=None, flags=0):
+ if not self.act_non_blocking:
+ trampoline(self, read=True, timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+ return super().recvfrom_into(buffer, nbytes, flags)
+
+ def unwrap(self):
+ return GreenSocket(self._call_trampolining(
+ super().unwrap))
+
+ def do_handshake(self):
+ """Perform a TLS/SSL handshake."""
+ return self._call_trampolining(
+ super().do_handshake)
+
+ def _socket_connect(self, addr):
+ real_connect = socket.connect
+ if self.act_non_blocking:
+ return real_connect(self, addr)
+ else:
+ clock = hubs.get_hub().clock
+ # *NOTE: gross, copied code from greenio because it's not factored
+ # well enough to reuse
+ if self.gettimeout() is None:
+ while True:
+ try:
+ return real_connect(self, addr)
+ except orig_socket.error as exc:
+ if get_errno(exc) in CONNECT_ERR:
+ trampoline(self, write=True)
+ elif get_errno(exc) in CONNECT_SUCCESS:
+ return
+ else:
+ raise
+ else:
+ end = clock() + self.gettimeout()
+ while True:
+ try:
+ real_connect(self, addr)
+ except orig_socket.error as exc:
+ if get_errno(exc) in CONNECT_ERR:
+ trampoline(
+ self, write=True,
+ timeout=end - clock(), timeout_exc=timeout_exc('timed out'))
+ elif get_errno(exc) in CONNECT_SUCCESS:
+ return
+ else:
+ raise
+ if clock() >= end:
+ raise timeout_exc('timed out')
+
+ def connect(self, addr):
+ """Connects to remote ADDR, and then wraps the connection in
+ an SSL channel."""
+ # *NOTE: grrrrr copied this code from ssl.py because of the reference
+ # to socket.connect which we don't want to call directly
+ if self._sslobj:
+ raise ValueError("attempt to connect already-connected SSLSocket!")
+ self._socket_connect(addr)
+ server_side = False
+ try:
+ sslwrap = _ssl.sslwrap
+ except AttributeError:
+ # sslwrap was removed in 3.x and later in 2.7.9
+ context = self.context if PY33 else self._context
+ sslobj = context._wrap_socket(self, server_side, server_hostname=self.server_hostname)
+ else:
+ sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
+ self.cert_reqs, self.ssl_version,
+ self.ca_certs, *self.ciphers)
+
+ try:
+ # This is added in Python 3.5, http://bugs.python.org/issue21965
+ SSLObject
+ except NameError:
+ self._sslobj = sslobj
+ else:
+ self._sslobj = sslobj
+
+ if self.do_handshake_on_connect:
+ self.do_handshake()
+
+ def accept(self):
+ """Accepts a new connection from a remote client, and returns
+ a tuple containing that new connection wrapped with a server-side
+ SSL channel, and the address of the remote client."""
+ # RDW grr duplication of code from greenio
+ if self.act_non_blocking:
+ newsock, addr = socket.accept(self)
+ else:
+ while True:
+ try:
+ newsock, addr = socket.accept(self)
+ break
+ except orig_socket.error as e:
+ if get_errno(e) not in greenio.SOCKET_BLOCKING:
+ raise
+ trampoline(self, read=True, timeout=self.gettimeout(),
+ timeout_exc=timeout_exc('timed out'))
+
+ new_ssl = type(self)(
+ newsock,
+ server_side=True,
+ do_handshake_on_connect=False,
+ suppress_ragged_eofs=self.suppress_ragged_eofs,
+ _context=self._context,
+ )
+ return (new_ssl, addr)
+
+ def dup(self):
+ raise NotImplementedError("Can't dup an ssl object")
+
+
+SSLSocket = GreenSSLSocket
+
+
+def wrap_socket(sock, *a, **kw):
+ return GreenSSLSocket(sock, *a, **kw)
+
+
+class GreenSSLContext(_original_sslcontext):
+ __slots__ = ()
+
+ def wrap_socket(self, sock, *a, **kw):
+ return GreenSSLSocket(sock, *a, _context=self, **kw)
+
+ # https://github.com/eventlet/eventlet/issues/371
+ # Thanks to Gevent developers for sharing patch to this problem.
+ if hasattr(_original_sslcontext.options, 'setter'):
+ # In 3.6, these became properties. They want to access the
+ # property __set__ method in the superclass, and they do so by using
+ # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
+ # patch, which causes infinite recursion.
+ # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
+ @_original_sslcontext.options.setter
+ def options(self, value):
+ super(_original_sslcontext, _original_sslcontext).options.__set__(self, value)
+
+ @_original_sslcontext.verify_flags.setter
+ def verify_flags(self, value):
+ super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value)
+
+ @_original_sslcontext.verify_mode.setter
+ def verify_mode(self, value):
+ super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value)
+
+ if hasattr(_original_sslcontext, "maximum_version"):
+ @_original_sslcontext.maximum_version.setter
+ def maximum_version(self, value):
+ super(_original_sslcontext, _original_sslcontext).maximum_version.__set__(self, value)
+
+ if hasattr(_original_sslcontext, "minimum_version"):
+ @_original_sslcontext.minimum_version.setter
+ def minimum_version(self, value):
+ super(_original_sslcontext, _original_sslcontext).minimum_version.__set__(self, value)
+
+
+SSLContext = GreenSSLContext
+
+
+# TODO: ssl.create_default_context() was added in 2.7.9.
+# Not clear we're still trying to support Python versions even older than that.
+if hasattr(__ssl, 'create_default_context'):
+ _original_create_default_context = __ssl.create_default_context
+
+ def green_create_default_context(*a, **kw):
+ # We can't just monkey-patch on the green version of `wrap_socket`
+ # on to SSLContext instances, but SSLContext.create_default_context
+ # does a bunch of work. Rather than re-implementing it all, just
+ # switch out the __class__ to get our `wrap_socket` implementation
+ context = _original_create_default_context(*a, **kw)
+ context.__class__ = GreenSSLContext
+ return context
+
+ create_default_context = green_create_default_context
+ _create_default_https_context = green_create_default_context
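+
+# Usage sketch (hypothetical host): contexts produced here return green
+# sockets, so TLS reads and writes cooperate with the hub.
+#
+#     from eventlet.green import socket as green_socket
+#     ctx = create_default_context()
+#     raw = green_socket.create_connection(('example.com', 443))
+#     tls = ctx.wrap_socket(raw, server_hostname='example.com')
+#     assert isinstance(tls, GreenSSLSocket)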
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/subprocess.py b/tapdown/lib/python3.11/site-packages/eventlet/green/subprocess.py
new file mode 100644
index 0000000..4509208
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/subprocess.py
@@ -0,0 +1,137 @@
+import errno
+import sys
+from types import FunctionType
+
+import eventlet
+from eventlet import greenio
+from eventlet import patcher
+from eventlet.green import select, threading, time
+
+
+__patched__ = ['call', 'check_call', 'Popen']
+to_patch = [('select', select), ('threading', threading), ('time', time)]
+
+from eventlet.green import selectors
+to_patch.append(('selectors', selectors))
+
+patcher.inject('subprocess', globals(), *to_patch)
+subprocess_orig = patcher.original("subprocess")
+subprocess_imported = sys.modules.get('subprocess', subprocess_orig)
+mswindows = sys.platform == "win32"
+
+
+if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
+ # Backported from Python 3.3.
+ # https://bitbucket.org/eventlet/eventlet/issue/89
+ class TimeoutExpired(Exception):
+ """This exception is raised when the timeout expires while waiting for
+ a child process.
+ """
+
+ def __init__(self, cmd, timeout, output=None):
+ self.cmd = cmd
+ self.timeout = timeout
+ self.output = output
+
+ def __str__(self):
+ return ("Command '%s' timed out after %s seconds" %
+ (self.cmd, self.timeout))
+else:
+ TimeoutExpired = subprocess_imported.TimeoutExpired
+
+
+# This is the meat of this module, the green version of Popen.
+class Popen(subprocess_orig.Popen):
+ """eventlet-friendly version of subprocess.Popen"""
+ # We do not believe that Windows pipes support non-blocking I/O. At least,
+ # the Python file objects stored on our base-class object have no
+ # setblocking() method, and the Python fcntl module doesn't exist on
+    # Windows (see eventlet.greenio.set_nonblocking()). As the sole purpose of
+ # this __init__() override is to wrap the pipes for eventlet-friendly
+ # non-blocking I/O, don't even bother overriding it on Windows.
+ if not mswindows:
+ def __init__(self, args, bufsize=0, *argss, **kwds):
+ self.args = args
+ # Forward the call to base-class constructor
+ subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
+ # Now wrap the pipes, if any. This logic is loosely borrowed from
+ # eventlet.processes.Process.run() method.
+ for attr in "stdin", "stdout", "stderr":
+ pipe = getattr(self, attr)
+ if pipe is not None and type(pipe) != greenio.GreenPipe:
+ # https://github.com/eventlet/eventlet/issues/243
+ # AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
+ mode = getattr(pipe, 'mode', '')
+ if not mode:
+ if pipe.readable():
+ mode += 'r'
+ if pipe.writable():
+ mode += 'w'
+ # ValueError: can't have unbuffered text I/O
+ if bufsize == 0:
+ bufsize = -1
+ wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
+ setattr(self, attr, wrapped_pipe)
+ __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__
+
+ def wait(self, timeout=None, check_interval=0.01):
+ # Instead of a blocking OS call, this version of wait() uses logic
+ # borrowed from the eventlet 0.2 processes.Process.wait() method.
+ if timeout is not None:
+ endtime = time.time() + timeout
+ try:
+ while True:
+ status = self.poll()
+ if status is not None:
+ return status
+ if timeout is not None and time.time() > endtime:
+ raise TimeoutExpired(self.args, timeout)
+ eventlet.sleep(check_interval)
+ except OSError as e:
+ if e.errno == errno.ECHILD:
+ # no child process, this happens if the child process
+ # already died and has been cleaned up
+ return -1
+ else:
+ raise
+ wait.__doc__ = subprocess_orig.Popen.wait.__doc__
+
+ if not mswindows:
+        # We don't want to rewrite the original _communicate() method; we
+        # just want a version that uses eventlet.green.select.select()
+ # instead of select.select().
+ _communicate = FunctionType(
+ subprocess_orig.Popen._communicate.__code__,
+ globals())
+ try:
+ _communicate_with_select = FunctionType(
+ subprocess_orig.Popen._communicate_with_select.__code__,
+ globals())
+ _communicate_with_poll = FunctionType(
+ subprocess_orig.Popen._communicate_with_poll.__code__,
+ globals())
+ except AttributeError:
+ pass
+
+
+# Borrow subprocess.call() and check_call(), but patch them so they reference
+# OUR Popen class rather than subprocess.Popen.
+def patched_function(function):
+ new_function = FunctionType(function.__code__, globals())
+ new_function.__kwdefaults__ = function.__kwdefaults__
+ new_function.__defaults__ = function.__defaults__
+ return new_function
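+
+
+# Illustrative sketch (not part of the original module; names are
+# hypothetical): the same globals-rebinding trick in miniature.
+# FunctionType(code, globals) re-creates a function whose global
+# references resolve in the new mapping.
+def _demo_rebind_globals():
+    def shout():
+        return GREETING.upper()  # GREETING resolves via the function's globals
+
+    rebound = FunctionType(shout.__code__, {'GREETING': 'hi'})
+    return rebound()  # 'HI'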
+
+
+call = patched_function(subprocess_orig.call)
+check_call = patched_function(subprocess_orig.check_call)
+# check_output is Python 2.7+
+if hasattr(subprocess_orig, 'check_output'):
+ __patched__.append('check_output')
+ check_output = patched_function(subprocess_orig.check_output)
+del patched_function
+
+# Keep exceptions identity.
+# https://github.com/eventlet/eventlet/issues/413
+CalledProcessError = subprocess_imported.CalledProcessError
+del subprocess_imported
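+
+
+# Illustrative usage (not part of the original module; POSIX-only, and the
+# helper name is hypothetical): this Popen cooperates with other
+# greenthreads while waiting on the child process.
+def _demo_green_popen():
+    p = Popen(['echo', 'hello'], stdout=PIPE)
+    out = p.stdout.read()  # reads via a GreenPipe, yielding to the hub
+    p.wait()               # polls with eventlet.sleep() between checks
+    return out             # b'hello\n'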
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/thread.py b/tapdown/lib/python3.11/site-packages/eventlet/green/thread.py
new file mode 100644
index 0000000..224cd1c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/thread.py
@@ -0,0 +1,178 @@
+"""Implements the standard thread module, using greenthreads."""
+import _thread as __thread
+from eventlet.support import greenlets as greenlet
+from eventlet import greenthread
+from eventlet.timeout import with_timeout
+from eventlet.lock import Lock
+import sys
+
+
+__patched__ = ['Lock', 'LockType', '_ThreadHandle', '_count',
+ '_get_main_thread_ident', '_local', '_make_thread_handle',
+ 'allocate', 'allocate_lock', 'exit', 'get_ident',
+ 'interrupt_main', 'stack_size', 'start_joinable_thread',
+ 'start_new', 'start_new_thread']
+
+error = __thread.error
+LockType = Lock
+__threadcount = 0
+
+if hasattr(__thread, "_is_main_interpreter"):
+ _is_main_interpreter = __thread._is_main_interpreter
+
+
+def _set_sentinel():
+    # TODO: this is dummy code; reimplementing it may be needed:
+ # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
+ return allocate_lock()
+
+
+TIMEOUT_MAX = __thread.TIMEOUT_MAX
+
+
+def _count():
+ return __threadcount
+
+
+def get_ident(gr=None):
+ if gr is None:
+ return id(greenlet.getcurrent())
+ else:
+ return id(gr)
+
+
+def __thread_body(func, args, kwargs):
+ global __threadcount
+ __threadcount += 1
+ try:
+ func(*args, **kwargs)
+ finally:
+ __threadcount -= 1
+
+
+class _ThreadHandle:
+ def __init__(self, greenthread=None):
+ self._greenthread = greenthread
+ self._done = False
+
+ def _set_done(self):
+ self._done = True
+
+ def is_done(self):
+ if self._greenthread is not None:
+ return self._greenthread.dead
+ return self._done
+
+ @property
+ def ident(self):
+ return get_ident(self._greenthread)
+
+ def join(self, timeout=None):
+ if not hasattr(self._greenthread, "wait"):
+ return
+ if timeout is not None:
+ return with_timeout(timeout, self._greenthread.wait)
+ return self._greenthread.wait()
+
+
+def _make_thread_handle(ident):
+ greenthread = greenlet.getcurrent()
+ assert ident == get_ident(greenthread)
+ return _ThreadHandle(greenthread=greenthread)
+
+
+def __spawn_green(function, args=(), kwargs=None, joinable=False):
+ if ((3, 4) <= sys.version_info < (3, 13)
+ and getattr(function, '__module__', '') == 'threading'
+ and hasattr(function, '__self__')):
+ # In Python 3.4-3.12, threading.Thread uses an internal lock
+ # automatically released when the python thread state is deleted.
+ # With monkey patching, eventlet uses green threads without python
+ # thread state, so the lock is not automatically released.
+ #
+            # Wrap _bootstrap_inner() to explicitly release the thread state lock
+ # when the thread completes.
+ thread = function.__self__
+ bootstrap_inner = thread._bootstrap_inner
+
+ def wrap_bootstrap_inner():
+ try:
+ bootstrap_inner()
+ finally:
+ # The lock can be cleared (ex: by a fork())
+ if getattr(thread, "_tstate_lock", None) is not None:
+ thread._tstate_lock.release()
+
+ thread._bootstrap_inner = wrap_bootstrap_inner
+
+ kwargs = kwargs or {}
+ spawn_func = greenthread.spawn if joinable else greenthread.spawn_n
+ return spawn_func(__thread_body, function, args, kwargs)
+
+
+def start_joinable_thread(function, handle=None, daemon=True):
+ g = __spawn_green(function, joinable=True)
+ if handle is None:
+ handle = _ThreadHandle(greenthread=g)
+ else:
+ handle._greenthread = g
+ return handle
+
+
+def start_new_thread(function, args=(), kwargs=None):
+ g = __spawn_green(function, args=args, kwargs=kwargs)
+ return get_ident(g)
+
+
+start_new = start_new_thread
+
+
+def _get_main_thread_ident():
+ greenthread = greenlet.getcurrent()
+ while greenthread.parent is not None:
+ greenthread = greenthread.parent
+ return get_ident(greenthread)
+
+
+def allocate_lock(*a):
+ return LockType(1)
+
+
+allocate = allocate_lock
+
+
+def exit():
+ raise greenlet.GreenletExit
+
+
+exit_thread = __thread.exit_thread
+
+
+def interrupt_main():
+ curr = greenlet.getcurrent()
+ if curr.parent and not curr.parent.dead:
+ curr.parent.throw(KeyboardInterrupt())
+ else:
+ raise KeyboardInterrupt()
+
+
+if hasattr(__thread, 'stack_size'):
+ __original_stack_size__ = __thread.stack_size
+
+ def stack_size(size=None):
+ if size is None:
+ return __original_stack_size__()
+ if size > __original_stack_size__():
+ return __original_stack_size__(size)
+        else:
+            # not going to decrease stack_size, because otherwise other
+            # greenlets in this thread will suffer
+            pass
+
+from eventlet.corolocal import local as _local
+
+if hasattr(__thread, 'daemon_threads_allowed'):
+ daemon_threads_allowed = __thread.daemon_threads_allowed
+
+if hasattr(__thread, '_shutdown'):
+ _shutdown = __thread._shutdown
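+
+
+# Illustrative usage (not part of the original module; the helper name is
+# hypothetical): start_new_thread() actually spawns a greenthread, and
+# allocate_lock() returns a green Lock.
+def _demo_green_thread():
+    done = allocate_lock()
+    done.acquire()
+    start_new_thread(lambda: done.release(), ())
+    done.acquire()  # blocks this greenthread until the worker has run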
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/threading.py b/tapdown/lib/python3.11/site-packages/eventlet/green/threading.py
new file mode 100644
index 0000000..ae01a5b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/threading.py
@@ -0,0 +1,133 @@
+"""Implements the standard threading module, using greenthreads."""
+import eventlet
+from eventlet.green import thread
+from eventlet.green import time
+from eventlet.support import greenlets as greenlet
+
+__patched__ = ['Lock', '_allocate_lock', '_get_main_thread_ident',
+ '_make_thread_handle', '_shutdown', '_sleep',
+ '_start_joinable_thread', '_start_new_thread', '_ThreadHandle',
+ 'currentThread', 'current_thread', 'local', 'stack_size',
+ "_active", "_limbo"]
+
+__patched__ += ['get_ident', '_set_sentinel']
+
+__orig_threading = eventlet.patcher.original('threading')
+__threadlocal = __orig_threading.local()
+__patched_enumerate = None
+
+
+eventlet.patcher.inject(
+ 'threading',
+ globals(),
+ ('_thread', thread),
+ ('time', time))
+
+
+_count = 1
+
+
+class _GreenThread:
+ """Wrapper for GreenThread objects to provide Thread-like attributes
+ and methods"""
+
+ def __init__(self, g):
+ global _count
+ self._g = g
+ self._name = 'GreenThread-%d' % _count
+ _count += 1
+
+ def __repr__(self):
+ return '<_GreenThread(%s, %r)>' % (self._name, self._g)
+
+ def join(self, timeout=None):
+ return self._g.wait()
+
+ def getName(self):
+ return self._name
+ get_name = getName
+
+ def setName(self, name):
+ self._name = str(name)
+ set_name = setName
+
+ name = property(getName, setName)
+
+ ident = property(lambda self: id(self._g))
+
+ def isAlive(self):
+ return True
+ is_alive = isAlive
+
+ daemon = property(lambda self: True)
+
+ def isDaemon(self):
+ return self.daemon
+ is_daemon = isDaemon
+
+
+__threading = None
+
+
+def _fixup_thread(t):
+ # Some third-party packages (lockfile) will try to patch the
+ # threading.Thread class with a get_name attribute if it doesn't
+ # exist. Since we might return Thread objects from the original
+ # threading package that won't get patched, let's make sure each
+    # individual object gets patched too, once our patched threading.Thread
+    # class has been patched. This is why monkey patching can be bad...
+ global __threading
+ if not __threading:
+ __threading = __import__('threading')
+
+ if (hasattr(__threading.Thread, 'get_name') and
+ not hasattr(t, 'get_name')):
+ t.get_name = t.getName
+ return t
+
+
+def current_thread():
+ global __patched_enumerate
+ g = greenlet.getcurrent()
+ if not g:
+ # Not currently in a greenthread, fall back to standard function
+ return _fixup_thread(__orig_threading.current_thread())
+
+ try:
+ active = __threadlocal.active
+ except AttributeError:
+ active = __threadlocal.active = {}
+
+ g_id = id(g)
+ t = active.get(g_id)
+ if t is not None:
+ return t
+
+    # FIXME: move this import from the function body to the top of the module.
+    # (jaketesler@github) Furthermore, I was unable to get current_thread() to
+    # return correct results from threading.enumerate() unless the enumerate()
+    # function was (a) imported at runtime using the gross __import__() call and
+    # (b) hot-patched using patch_function().
+    # https://github.com/eventlet/eventlet/issues/172#issuecomment-379421165
+ if __patched_enumerate is None:
+ __patched_enumerate = eventlet.patcher.patch_function(__import__('threading').enumerate)
+ found = [th for th in __patched_enumerate() if th.ident == g_id]
+ if found:
+ return found[0]
+
+ # Add green thread to active if we can clean it up on exit
+ def cleanup(g):
+ del active[g_id]
+ try:
+ g.link(cleanup)
+ except AttributeError:
+ # Not a GreenThread type, so there's no way to hook into
+ # the green thread exiting. Fall back to the standard
+ # function then.
+ t = _fixup_thread(__orig_threading.current_thread())
+ else:
+ t = active[g_id] = _GreenThread(g)
+
+ return t
+
+
+currentThread = current_thread
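+
+
+# Illustrative usage (not part of the original module; the helper name is
+# hypothetical): inside a spawned greenthread, current_thread() returns a
+# _GreenThread wrapper with Thread-like attributes.
+def _demo_current_thread():
+    t = eventlet.spawn(current_thread).wait()
+    return t.name  # e.g. 'GreenThread-1'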
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/time.py b/tapdown/lib/python3.11/site-packages/eventlet/green/time.py
new file mode 100644
index 0000000..0fbe30e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/time.py
@@ -0,0 +1,6 @@
+__time = __import__('time')
+from eventlet.patcher import slurp_properties
+__patched__ = ['sleep']
+slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time))
+from eventlet.greenthread import sleep
+sleep # silence pyflakes
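+
+
+# Illustrative usage (not part of the original module; the helper name is
+# hypothetical): this sleep() parks the caller on the eventlet hub instead
+# of blocking the OS thread.
+def _demo_green_sleep():
+    sleep(0)  # cooperatively yield to any other runnable greenthreads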
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/__init__.py
new file mode 100644
index 0000000..44335dd
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/__init__.py
@@ -0,0 +1,5 @@
+from eventlet import patcher
+from eventlet.green import socket
+from eventlet.green import time
+from eventlet.green import httplib
+from eventlet.green import ftplib
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/error.py b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/error.py
new file mode 100644
index 0000000..6913813
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/error.py
@@ -0,0 +1,4 @@
+from eventlet import patcher
+from eventlet.green.urllib import response
+patcher.inject('urllib.error', globals(), ('urllib.response', response))
+del patcher
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/parse.py b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/parse.py
new file mode 100644
index 0000000..f3a8924
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/parse.py
@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.parse', globals())
+del patcher
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/request.py b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/request.py
new file mode 100644
index 0000000..43c198e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/request.py
@@ -0,0 +1,57 @@
+import sys
+
+from eventlet import patcher
+from eventlet.green import ftplib, http, os, socket, time
+from eventlet.green.http import client as http_client
+from eventlet.green.urllib import error, parse, response
+
+# TODO: should we also have a green email version?
+# import email
+
+
+to_patch = [
+ # This (http module) is needed here, otherwise test__greenness hangs
+ # forever on Python 3 because parts of non-green http (including
+ # http.client) leak into our patched urllib.request. There may be a nicer
+ # way to handle this (I didn't dig too deep) but this does the job. Jakub
+ ('http', http),
+
+ ('http.client', http_client),
+ ('os', os),
+ ('socket', socket),
+ ('time', time),
+ ('urllib.error', error),
+ ('urllib.parse', parse),
+ ('urllib.response', response),
+]
+
+try:
+ from eventlet.green import ssl
+except ImportError:
+ pass
+else:
+ to_patch.append(('ssl', ssl))
+
+patcher.inject('urllib.request', globals(), *to_patch)
+del to_patch
+
+to_patch_in_functions = [('ftplib', ftplib)]
+del ftplib
+
+FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
+
+if sys.version_info < (3, 14):
+ URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)
+else:
+ # Removed in python3.14+, nothing to do
+ pass
+
+ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)
+
+ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
+ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)
+
+del error
+del parse
+del response
+del to_patch_in_functions
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/response.py b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/response.py
new file mode 100644
index 0000000..f9aaba5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib/response.py
@@ -0,0 +1,3 @@
+from eventlet import patcher
+patcher.inject('urllib.response', globals())
+del patcher
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/urllib2.py b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib2.py
new file mode 100644
index 0000000..c53ecbb
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/urllib2.py
@@ -0,0 +1,20 @@
+from eventlet import patcher
+from eventlet.green import ftplib
+from eventlet.green import httplib
+from eventlet.green import socket
+from eventlet.green import ssl
+from eventlet.green import time
+from eventlet.green import urllib
+
+patcher.inject(
+ 'urllib2',
+ globals(),
+ ('httplib', httplib),
+ ('socket', socket),
+ ('ssl', ssl),
+ ('time', time),
+ ('urllib', urllib))
+
+FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib))
+
+del patcher
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/green/zmq.py b/tapdown/lib/python3.11/site-packages/eventlet/green/zmq.py
new file mode 100644
index 0000000..865ee13
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/green/zmq.py
@@ -0,0 +1,465 @@
+"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
+found in :mod:`pyzmq ` to be non blocking.
+"""
+__zmq__ = __import__('zmq')
+import eventlet.hubs
+from eventlet.patcher import slurp_properties
+from eventlet.support import greenlets as greenlet
+
+__patched__ = ['Context', 'Socket']
+slurp_properties(__zmq__, globals(), ignore=__patched__)
+
+from collections import deque
+
+try:
+ # alias XREQ/XREP to DEALER/ROUTER if available
+ if not hasattr(__zmq__, 'XREQ'):
+ XREQ = DEALER
+ if not hasattr(__zmq__, 'XREP'):
+ XREP = ROUTER
+except NameError:
+ pass
+
+
+class LockReleaseError(Exception):
+ pass
+
+
+class _QueueLock:
+ """A Lock that can be acquired by at most one thread. Any other
+ thread calling acquire will be blocked in a queue. When release
+ is called, the threads are awoken in the order they blocked,
+    one at a time. This lock can be acquired recursively by the same
+    thread."""
+
+ def __init__(self):
+ self._waiters = deque()
+ self._count = 0
+ self._holder = None
+ self._hub = eventlet.hubs.get_hub()
+
+ def __nonzero__(self):
+ return bool(self._count)
+
+ __bool__ = __nonzero__
+
+ def __enter__(self):
+ self.acquire()
+
+ def __exit__(self, type, value, traceback):
+ self.release()
+
+ def acquire(self):
+ current = greenlet.getcurrent()
+ if (self._waiters or self._count > 0) and self._holder is not current:
+ # block until lock is free
+ self._waiters.append(current)
+ self._hub.switch()
+ w = self._waiters.popleft()
+
+ assert w is current, 'Waiting threads woken out of order'
+ assert self._count == 0, 'After waking a thread, the lock must be unacquired'
+
+ self._holder = current
+ self._count += 1
+
+ def release(self):
+ if self._count <= 0:
+ raise LockReleaseError("Cannot release unacquired lock")
+
+ self._count -= 1
+ if self._count == 0:
+ self._holder = None
+ if self._waiters:
+ # wake next
+ self._hub.schedule_call_global(0, self._waiters[0].switch)
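+
+
+# Illustrative usage (not part of the original module; the helper name is
+# hypothetical): _QueueLock is re-entrant for the holding greenthread and
+# queues everyone else in FIFO order.
+def _demo_queue_lock():
+    lock = _QueueLock()
+    with lock:      # first acquisition
+        with lock:  # recursive acquisition by the same greenthread
+            assert bool(lock)
+    assert not bool(lock)  # fully released again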
+
+
+class _BlockedThread:
+ """Is either empty, or represents a single blocked thread that
+ blocked itself by calling the block() method. The thread can be
+ awoken by calling wake(). Wake() can be called multiple times and
+ all but the first call will have no effect."""
+
+ def __init__(self):
+ self._blocked_thread = None
+ self._wakeupper = None
+ self._hub = eventlet.hubs.get_hub()
+
+ def __nonzero__(self):
+ return self._blocked_thread is not None
+
+ __bool__ = __nonzero__
+
+ def block(self, deadline=None):
+ if self._blocked_thread is not None:
+ raise Exception("Cannot block more than one thread on one BlockedThread")
+ self._blocked_thread = greenlet.getcurrent()
+
+ if deadline is not None:
+ self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake)
+
+ try:
+ self._hub.switch()
+ finally:
+ self._blocked_thread = None
+ # cleanup the wakeup task
+ if self._wakeupper is not None:
+ # Important to cancel the wakeup task so it doesn't
+ # spuriously wake this greenthread later on.
+ self._wakeupper.cancel()
+ self._wakeupper = None
+
+ def wake(self):
+ """Schedules the blocked thread to be awoken and return
+ True. If wake has already been called or if there is no
+ blocked thread, then this call has no effect and returns
+ False."""
+ if self._blocked_thread is not None and self._wakeupper is None:
+ self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch)
+ return True
+ return False
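+
+
+# Illustrative sketch (not part of the original module; the helper name is
+# hypothetical): one greenthread parks itself with block(), another
+# schedules it to resume with wake().
+def _demo_blocked_thread():
+    bt = _BlockedThread()
+
+    def sleeper():
+        bt.block()  # switches to the hub until woken
+        return 'woken'
+
+    g = eventlet.spawn(sleeper)
+    eventlet.sleep(0)  # let sleeper run far enough to block
+    assert bt.wake()   # schedules the switch back; returns True
+    assert g.wait() == 'woken'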
+
+
+class Context(__zmq__.Context):
+ """Subclass of :class:`zmq.Context`
+ """
+
+ def socket(self, socket_type):
+ """Overridden method to ensure that the green version of socket is used
+
+ Behaves the same as :meth:`zmq.Context.socket`, but ensures
+ that a :class:`Socket` with all of its send and recv methods set to be
+ non-blocking is returned
+ """
+ if self.closed:
+ raise ZMQError(ENOTSUP)
+ return Socket(self, socket_type)
+
+
+def _wraps(source_fn):
+ """A decorator that copies the __name__ and __doc__ from the given
+ function
+ """
+ def wrapper(dest_fn):
+ dest_fn.__name__ = source_fn.__name__
+ dest_fn.__doc__ = source_fn.__doc__
+ return dest_fn
+ return wrapper
+
+
+# Implementation notes: Each socket in 0mq contains a pipe that the
+# background IO threads use to communicate with the socket. These
+# events are important because they tell the socket when it is able to
+# send and when it has messages waiting to be received. The read end
+# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
+#
+# Events are read from the socket's event pipe only on the thread that
+# the 0mq context is associated with, which is the native thread the
+# greenthreads are running on, and the only operations that cause the
+# events to be read and processed are send(), recv() and
+# getsockopt(zmq.EVENTS). This means that after doing any of these
+# three operations, the ability of the socket to send or receive a
+# message without blocking may have changed, but after the events are
+# read the FD is no longer readable so the hub may not signal our
+# listener.
+#
+# If we understand that after calling send() a message might be ready
+# to be received and that after calling recv() a message might be able
+# to be sent, what should we do next? There are two approaches:
+#
+# 1. Always wake the other thread if there is one waiting. This
+# wakeup may be spurious because the socket might not actually be
+# ready for a send() or recv(). However, if a thread is in a
+# tight-loop successfully calling send() or recv() then the wakeups
+# are naturally batched and there's very little cost added to each
+# send/recv call.
+#
+# or
+#
+# 2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
+# thread should be woken up. This avoids spurious wake-ups but may
+# add overhead because getsockopt will cause all events to be
+# processed, whereas send and recv throttle processing
+# events. Admittedly, all of the events will need to be processed
+# eventually, but it is likely faster to batch the processing.
+#
+# Which approach is better? I have no idea.
+#
+# TODO:
+# - Support MessageTrackers and make MessageTracker.wait green
+
+_Socket = __zmq__.Socket
+_Socket_recv = _Socket.recv
+_Socket_send = _Socket.send
+_Socket_send_multipart = _Socket.send_multipart
+_Socket_recv_multipart = _Socket.recv_multipart
+_Socket_send_string = _Socket.send_string
+_Socket_recv_string = _Socket.recv_string
+_Socket_send_pyobj = _Socket.send_pyobj
+_Socket_recv_pyobj = _Socket.recv_pyobj
+_Socket_send_json = _Socket.send_json
+_Socket_recv_json = _Socket.recv_json
+_Socket_getsockopt = _Socket.getsockopt
+
+
+class Socket(_Socket):
+ """Green version of :class:``zmq.core.socket.Socket``.
+
+ The following three methods are always overridden:
+ * send
+ * recv
+ * getsockopt
+ To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
+ is deferred to the hub (using :func:``eventlet.hubs.trampoline``) if a
+ ``zmq.EAGAIN`` (retry) error is raised.
+
+ For some socket types, the following methods are also overridden:
+ * send_multipart
+ * recv_multipart
+ """
+
+ def __init__(self, context, socket_type):
+ super().__init__(context, socket_type)
+
+ self.__dict__['_eventlet_send_event'] = _BlockedThread()
+ self.__dict__['_eventlet_recv_event'] = _BlockedThread()
+ self.__dict__['_eventlet_send_lock'] = _QueueLock()
+ self.__dict__['_eventlet_recv_lock'] = _QueueLock()
+
+ def event(fd):
+ # Some events arrived at the zmq socket. This may mean
+ # there's a message that can be read or there's space for
+ # a message to be written.
+ send_wake = self._eventlet_send_event.wake()
+ recv_wake = self._eventlet_recv_event.wake()
+ if not send_wake and not recv_wake:
+ # if no waiting send or recv thread was woken up, then
+ # force the zmq socket's events to be processed to
+ # avoid repeated wakeups
+ _Socket_getsockopt(self, EVENTS)
+
+ hub = eventlet.hubs.get_hub()
+ self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
+ self.getsockopt(FD),
+ event,
+ lambda _: None,
+ lambda: None)
+ self.__dict__['_eventlet_clock'] = hub.clock
+
+ @_wraps(_Socket.close)
+ def close(self, linger=None):
+ super().close(linger)
+ if self._eventlet_listener is not None:
+ eventlet.hubs.get_hub().remove(self._eventlet_listener)
+ self.__dict__['_eventlet_listener'] = None
+ # wake any blocked threads
+ self._eventlet_send_event.wake()
+ self._eventlet_recv_event.wake()
+
+ @_wraps(_Socket.getsockopt)
+ def getsockopt(self, option):
+ result = _Socket_getsockopt(self, option)
+ if option == EVENTS:
+ # Getting the events causes the zmq socket to process
+ # events which may mean a msg can be sent or received. If
+ # there is a greenthread blocked and waiting for events,
+ # it will miss the edge-triggered read event, so wake it
+ # up.
+ if (result & POLLOUT):
+ self._eventlet_send_event.wake()
+ if (result & POLLIN):
+ self._eventlet_recv_event.wake()
+ return result
+
+ @_wraps(_Socket.send)
+ def send(self, msg, flags=0, copy=True, track=False):
+ """A send method that's safe to use when multiple greenthreads
+ are calling send, send_multipart, recv and recv_multipart on
+ the same socket.
+ """
+ if flags & NOBLOCK:
+ result = _Socket_send(self, msg, flags, copy, track)
+ # Instead of calling both wake methods, could call
+ # self.getsockopt(EVENTS) which would trigger wakeups if
+ # needed.
+ self._eventlet_send_event.wake()
+ self._eventlet_recv_event.wake()
+ return result
+
+ # TODO: pyzmq will copy the message buffer and create Message
+ # objects under some circumstances. We could do that work here
+ # once to avoid doing it every time the send is retried.
+ flags |= NOBLOCK
+ with self._eventlet_send_lock:
+ while True:
+ try:
+ return _Socket_send(self, msg, flags, copy, track)
+ except ZMQError as e:
+ if e.errno == EAGAIN:
+ self._eventlet_send_event.block()
+ else:
+ raise
+ finally:
+ # The call to send processes 0mq events and may
+ # make the socket ready to recv. Wake the next
+ # receiver. (Could check EVENTS for POLLIN here)
+ self._eventlet_recv_event.wake()
+
+ @_wraps(_Socket.send_multipart)
+ def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
+ """A send_multipart method that's safe to use when multiple
+ greenthreads are calling send, send_multipart, recv and
+ recv_multipart on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_multipart(self, msg_parts, flags, copy, track)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_multipart(self, msg_parts, flags, copy, track)
+
+ @_wraps(_Socket.send_string)
+ def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
+ """A send_string method that's safe to use when multiple
+ greenthreads are calling send, send_string, recv and
+ recv_string on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_string(self, u, flags, copy, encoding)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_string(self, u, flags, copy, encoding)
+
+ @_wraps(_Socket.send_pyobj)
+ def send_pyobj(self, obj, flags=0, protocol=2):
+ """A send_pyobj method that's safe to use when multiple
+ greenthreads are calling send, send_pyobj, recv and
+ recv_pyobj on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_pyobj(self, obj, flags, protocol)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_pyobj(self, obj, flags, protocol)
+
+ @_wraps(_Socket.send_json)
+ def send_json(self, obj, flags=0, **kwargs):
+ """A send_json method that's safe to use when multiple
+ greenthreads are calling send, send_json, recv and
+ recv_json on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_send_json(self, obj, flags, **kwargs)
+
+ # acquire lock here so the subsequent calls to send for the
+ # message parts after the first don't block
+ with self._eventlet_send_lock:
+ return _Socket_send_json(self, obj, flags, **kwargs)
+
+ @_wraps(_Socket.recv)
+ def recv(self, flags=0, copy=True, track=False):
+ """A recv method that's safe to use when multiple greenthreads
+ are calling send, send_multipart, recv and recv_multipart on
+ the same socket.
+ """
+ if flags & NOBLOCK:
+ msg = _Socket_recv(self, flags, copy, track)
+ # Instead of calling both wake methods, could call
+ # self.getsockopt(EVENTS) which would trigger wakeups if
+ # needed.
+ self._eventlet_send_event.wake()
+ self._eventlet_recv_event.wake()
+ return msg
+
+ deadline = None
+ if hasattr(__zmq__, 'RCVTIMEO'):
+ sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
+ if sock_timeout == -1:
+ pass
+ elif sock_timeout > 0:
+ deadline = self._eventlet_clock() + sock_timeout / 1000.0
+ else:
+ raise ValueError(sock_timeout)
+
+ flags |= NOBLOCK
+ with self._eventlet_recv_lock:
+ while True:
+ try:
+ return _Socket_recv(self, flags, copy, track)
+ except ZMQError as e:
+ if e.errno == EAGAIN:
+ # zmq in its wisdom decided to reuse EAGAIN for timeouts
+ if deadline is not None and self._eventlet_clock() > deadline:
+ e.is_timeout = True
+ raise
+
+ self._eventlet_recv_event.block(deadline=deadline)
+ else:
+ raise
+ finally:
+ # The call to recv processes 0mq events and may
+ # make the socket ready to send. Wake the next
+ # receiver. (Could check EVENTS for POLLOUT here)
+ self._eventlet_send_event.wake()
+
+ @_wraps(_Socket.recv_multipart)
+ def recv_multipart(self, flags=0, copy=True, track=False):
+ """A recv_multipart method that's safe to use when multiple
+ greenthreads are calling send, send_multipart, recv and
+ recv_multipart on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_multipart(self, flags, copy, track)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_multipart(self, flags, copy, track)
+
+ @_wraps(_Socket.recv_string)
+ def recv_string(self, flags=0, encoding='utf-8'):
+ """A recv_string method that's safe to use when multiple
+ greenthreads are calling send, send_string, recv and
+ recv_string on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_string(self, flags, encoding)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_string(self, flags, encoding)
+
+ @_wraps(_Socket.recv_json)
+ def recv_json(self, flags=0, **kwargs):
+ """A recv_json method that's safe to use when multiple
+ greenthreads are calling send, send_json, recv and
+ recv_json on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_json(self, flags, **kwargs)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_json(self, flags, **kwargs)
+
+ @_wraps(_Socket.recv_pyobj)
+ def recv_pyobj(self, flags=0):
+ """A recv_pyobj method that's safe to use when multiple
+ greenthreads are calling send, send_pyobj, recv and
+ recv_pyobj on the same socket.
+ """
+ if flags & NOBLOCK:
+ return _Socket_recv_pyobj(self, flags)
+
+ # acquire lock here so the subsequent calls to recv for the
+ # message parts after the first don't block
+ with self._eventlet_recv_lock:
+ return _Socket_recv_pyobj(self, flags)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/greenio/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/greenio/__init__.py
new file mode 100644
index 0000000..513c4a5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/greenio/__init__.py
@@ -0,0 +1,3 @@
+from eventlet.greenio.base import * # noqa
+
+from eventlet.greenio.py3 import * # noqa
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/greenio/base.py b/tapdown/lib/python3.11/site-packages/eventlet/greenio/base.py
new file mode 100644
index 0000000..3bb7d02
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/greenio/base.py
@@ -0,0 +1,485 @@
+import errno
+import os
+import socket
+import sys
+import time
+import warnings
+
+import eventlet
+from eventlet.hubs import trampoline, notify_opened, IOClosed
+from eventlet.support import get_errno
+
+__all__ = [
+ 'GreenSocket', '_GLOBAL_DEFAULT_TIMEOUT', 'set_nonblocking',
+ 'SOCKET_BLOCKING', 'SOCKET_CLOSED', 'CONNECT_ERR', 'CONNECT_SUCCESS',
+ 'shutdown_safe', 'SSL',
+ 'socket_timeout',
+]
+
+BUFFER_SIZE = 4096
+CONNECT_ERR = {errno.EINPROGRESS, errno.EALREADY, errno.EWOULDBLOCK}
+CONNECT_SUCCESS = {0, errno.EISCONN}
+if sys.platform[:3] == "win":
+ CONNECT_ERR.add(errno.WSAEINVAL) # Bug 67
+
+_original_socket = eventlet.patcher.original('socket').socket
+
+
+if sys.version_info >= (3, 10):
+ socket_timeout = socket.timeout # Really, TimeoutError
+else:
+ socket_timeout = eventlet.timeout.wrap_is_timeout(socket.timeout)
+
+
+def socket_connect(descriptor, address):
+ """
+ Attempts to connect to the address, returns the descriptor if it succeeds,
+ returns None if it needs to trampoline, and raises any exceptions.
+ """
+ err = descriptor.connect_ex(address)
+ if err in CONNECT_ERR:
+ return None
+ if err not in CONNECT_SUCCESS:
+ raise OSError(err, errno.errorcode[err])
+ return descriptor
+
+
+def socket_checkerr(descriptor):
+ err = descriptor.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
+ if err not in CONNECT_SUCCESS:
+ raise OSError(err, errno.errorcode[err])
+
+
+def socket_accept(descriptor):
+ """
+ Attempts to accept() on the descriptor, returns a client,address tuple
+ if it succeeds; returns None if it needs to trampoline, and raises
+ any exceptions.
+ """
+ try:
+ return descriptor.accept()
+ except OSError as e:
+ if get_errno(e) == errno.EWOULDBLOCK:
+ return None
+ raise
+
+
+if sys.platform[:3] == "win":
+ # winsock sometimes throws ENOTCONN
+ SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK}
+ SOCKET_CLOSED = {errno.ECONNRESET, errno.ENOTCONN, errno.ESHUTDOWN}
+else:
+ # oddly, on linux/darwin, an unconnected socket is expected to block,
+ # so we treat ENOTCONN the same as EWOULDBLOCK
+ SOCKET_BLOCKING = {errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOTCONN}
+ SOCKET_CLOSED = {errno.ECONNRESET, errno.ESHUTDOWN, errno.EPIPE}
+
+
+def set_nonblocking(fd):
+ """
+ Sets the descriptor to be nonblocking. Works on many file-like
+ objects as well as sockets. Only sockets can be nonblocking on
+ Windows, however.
+ """
+ try:
+ setblocking = fd.setblocking
+ except AttributeError:
+ # fd has no setblocking() method. It could be that this version of
+ # Python predates socket.setblocking(). In that case, we can still set
+ # the flag "by hand" on the underlying OS fileno using the fcntl
+ # module.
+ try:
+ import fcntl
+ except ImportError:
+ # Whoops, Windows has no fcntl module. This might not be a socket
+ # at all, but rather a file-like object with no setblocking()
+ # method. In particular, on Windows, pipes don't support
+ # non-blocking I/O and therefore don't have that method. Which
+ # means fcntl wouldn't help even if we could load it.
+ raise NotImplementedError("set_nonblocking() on a file object "
+ "with no setblocking() method "
+ "(Windows pipes don't support non-blocking I/O)")
+ # We managed to import fcntl.
+ fileno = fd.fileno()
+ orig_flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
+ new_flags = orig_flags | os.O_NONBLOCK
+ if new_flags != orig_flags:
+ fcntl.fcntl(fileno, fcntl.F_SETFL, new_flags)
+ else:
+ # socket supports setblocking()
+ setblocking(0)
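+
+
+# Illustrative sketch (not part of the original module; POSIX-only, and the
+# helper name is hypothetical): file objects from os.fdopen() have no
+# setblocking() method, so set_nonblocking() takes the fcntl path above.
+def _demo_set_nonblocking():
+    r, w = os.pipe()
+    reader = os.fdopen(r, 'rb')
+    set_nonblocking(reader)  # sets O_NONBLOCK via fcntl
+    reader.close()
+    os.close(w)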
+
+
+try:
+ from socket import _GLOBAL_DEFAULT_TIMEOUT
+except ImportError:
+ _GLOBAL_DEFAULT_TIMEOUT = object()
+
+
+class GreenSocket:
+ """
+    Green version of the socket.socket class, intended to be 100%
+    API-compatible.
+
+    It also recognizes the keyword parameter 'set_nonblocking=True'.
+    Pass False to indicate that the socket is already in non-blocking
+    mode, to save syscalls.
+ """
+
+ # This placeholder is to prevent __getattr__ from creating an infinite call loop
+ fd = None
+
+ def __init__(self, family=socket.AF_INET, *args, **kwargs):
+ should_set_nonblocking = kwargs.pop('set_nonblocking', True)
+ if isinstance(family, int):
+ fd = _original_socket(family, *args, **kwargs)
+ # Notify the hub that this is a newly-opened socket.
+ notify_opened(fd.fileno())
+ else:
+ fd = family
+
+ # import timeout from other socket, if it was there
+ try:
+ self._timeout = fd.gettimeout() or socket.getdefaulttimeout()
+ except AttributeError:
+ self._timeout = socket.getdefaulttimeout()
+
+ # Filter fd.fileno() != -1 so that won't call set non-blocking on
+ # closed socket
+ if should_set_nonblocking and fd.fileno() != -1:
+ set_nonblocking(fd)
+ self.fd = fd
+ # when client calls setblocking(0) or settimeout(0) the socket must
+ # act non-blocking
+ self.act_non_blocking = False
+
+        # Copy some attributes from the underlying real socket.
+        # This is the easiest way that I found to fix
+        # https://bitbucket.org/eventlet/eventlet/issue/136
+        # Only `getsockopt` is required to fix that issue; the others
+        # are just premature optimization to save a __getattr__ call.
+ self.bind = fd.bind
+ self.close = fd.close
+ self.fileno = fd.fileno
+ self.getsockname = fd.getsockname
+ self.getsockopt = fd.getsockopt
+ self.listen = fd.listen
+ self.setsockopt = fd.setsockopt
+ self.shutdown = fd.shutdown
+ self._closed = False
+
+ @property
+ def _sock(self):
+ return self
+
+ def _get_io_refs(self):
+ return self.fd._io_refs
+
+ def _set_io_refs(self, value):
+ self.fd._io_refs = value
+
+ _io_refs = property(_get_io_refs, _set_io_refs)
+
+    # Forward unknown attributes to fd, caching the value for future use.
+    # I do not see any simple attribute which could be changed,
+    # so caching everything in self is fine.
+    # If we find such attributes, only attributes having __get__ should be cached.
+    # For now I do not want to complicate it.
+ def __getattr__(self, name):
+ if self.fd is None:
+ raise AttributeError(name)
+ attr = getattr(self.fd, name)
+ setattr(self, name, attr)
+ return attr
+
+ def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+ """ We need to trampoline via the event hub.
+ We catch any signal back from the hub indicating that the operation we
+ were waiting on was associated with a filehandle that's since been
+ invalidated.
+ """
+ if self._closed:
+ # If we did any logging, alerting to a second trampoline attempt on a closed
+ # socket here would be useful.
+ raise IOClosed()
+ try:
+ return trampoline(fd, read=read, write=write, timeout=timeout,
+ timeout_exc=timeout_exc,
+ mark_as_closed=self._mark_as_closed)
+ except IOClosed:
+ # This socket's been obsoleted. De-fang it.
+ self._mark_as_closed()
+ raise
+
+ def accept(self):
+ if self.act_non_blocking:
+ res = self.fd.accept()
+ notify_opened(res[0].fileno())
+ return res
+ fd = self.fd
+ _timeout_exc = socket_timeout('timed out')
+ while True:
+ res = socket_accept(fd)
+ if res is not None:
+ client, addr = res
+ notify_opened(client.fileno())
+ set_nonblocking(client)
+ return type(self)(client), addr
+ self._trampoline(fd, read=True, timeout=self.gettimeout(), timeout_exc=_timeout_exc)
+
+ def _mark_as_closed(self):
+ """ Mark this socket as being closed """
+ self._closed = True
+
+ def __del__(self):
+ # This is in case self.close is not assigned yet (currently the constructor does it)
+ close = getattr(self, 'close', None)
+ if close is not None:
+ close()
+
+ def connect(self, address):
+ if self.act_non_blocking:
+ return self.fd.connect(address)
+ fd = self.fd
+ _timeout_exc = socket_timeout('timed out')
+ if self.gettimeout() is None:
+ while not socket_connect(fd, address):
+ try:
+ self._trampoline(fd, write=True)
+ except IOClosed:
+ raise OSError(errno.EBADFD)
+ socket_checkerr(fd)
+ else:
+ end = time.time() + self.gettimeout()
+ while True:
+ if socket_connect(fd, address):
+ return
+ if time.time() >= end:
+ raise _timeout_exc
+ timeout = end - time.time()
+ try:
+ self._trampoline(fd, write=True, timeout=timeout, timeout_exc=_timeout_exc)
+ except IOClosed:
+ # ... we need some workable errno here.
+ raise OSError(errno.EBADFD)
+ socket_checkerr(fd)
+
+ def connect_ex(self, address):
+ if self.act_non_blocking:
+ return self.fd.connect_ex(address)
+ fd = self.fd
+ if self.gettimeout() is None:
+ while not socket_connect(fd, address):
+ try:
+ self._trampoline(fd, write=True)
+ socket_checkerr(fd)
+ except OSError as ex:
+ return get_errno(ex)
+ except IOClosed:
+ return errno.EBADFD
+ return 0
+ else:
+ end = time.time() + self.gettimeout()
+ timeout_exc = socket.timeout(errno.EAGAIN)
+ while True:
+ try:
+ if socket_connect(fd, address):
+ return 0
+ if time.time() >= end:
+ raise timeout_exc
+ self._trampoline(fd, write=True, timeout=end - time.time(),
+ timeout_exc=timeout_exc)
+ socket_checkerr(fd)
+ except OSError as ex:
+ return get_errno(ex)
+ except IOClosed:
+ return errno.EBADFD
+ return 0
+
+ def dup(self, *args, **kw):
+ sock = self.fd.dup(*args, **kw)
+ newsock = type(self)(sock, set_nonblocking=False)
+ newsock.settimeout(self.gettimeout())
+ return newsock
+
+ def makefile(self, *args, **kwargs):
+ return _original_socket.makefile(self, *args, **kwargs)
+
+ def makeGreenFile(self, *args, **kw):
+ warnings.warn("makeGreenFile has been deprecated, please use "
+ "makefile instead", DeprecationWarning, stacklevel=2)
+ return self.makefile(*args, **kw)
+
+ def _read_trampoline(self):
+ self._trampoline(
+ self.fd,
+ read=True,
+ timeout=self.gettimeout(),
+ timeout_exc=socket_timeout('timed out'))
+
+ def _recv_loop(self, recv_meth, empty_val, *args):
+ if self.act_non_blocking:
+ return recv_meth(*args)
+
+ while True:
+ try:
+ # recv: bufsize=0?
+ # recv_into: buffer is empty?
+ # This is needed because behind the scenes we use sockets in
+ # nonblocking mode and builtin recv* methods. Attempting to read
+ # 0 bytes from a nonblocking socket using a builtin recv* method
+ # does not raise a timeout exception. Since we're simulating
+ # a blocking socket here we need to produce a timeout exception
+ # if needed, hence the call to trampoline.
+ if not args[0]:
+ self._read_trampoline()
+ return recv_meth(*args)
+ except OSError as e:
+ if get_errno(e) in SOCKET_BLOCKING:
+ pass
+ elif get_errno(e) in SOCKET_CLOSED:
+ return empty_val
+ else:
+ raise
+
+ try:
+ self._read_trampoline()
+ except IOClosed as e:
+ # Perhaps we should return '' instead?
+ raise EOFError()
+
+ def recv(self, bufsize, flags=0):
+ return self._recv_loop(self.fd.recv, b'', bufsize, flags)
+
+ def recvfrom(self, bufsize, flags=0):
+ return self._recv_loop(self.fd.recvfrom, b'', bufsize, flags)
+
+ def recv_into(self, buffer, nbytes=0, flags=0):
+ return self._recv_loop(self.fd.recv_into, 0, buffer, nbytes, flags)
+
+ def recvfrom_into(self, buffer, nbytes=0, flags=0):
+ return self._recv_loop(self.fd.recvfrom_into, 0, buffer, nbytes, flags)
+
+ def _send_loop(self, send_method, data, *args):
+ if self.act_non_blocking:
+ return send_method(data, *args)
+
+ _timeout_exc = socket_timeout('timed out')
+ while True:
+ try:
+ return send_method(data, *args)
+ except OSError as e:
+ eno = get_errno(e)
+ if eno == errno.ENOTCONN or eno not in SOCKET_BLOCKING:
+ raise
+
+ try:
+ self._trampoline(self.fd, write=True, timeout=self.gettimeout(),
+ timeout_exc=_timeout_exc)
+ except IOClosed:
+ raise OSError(errno.ECONNRESET, 'Connection closed by another thread')
+
+ def send(self, data, flags=0):
+ return self._send_loop(self.fd.send, data, flags)
+
+ def sendto(self, data, *args):
+ return self._send_loop(self.fd.sendto, data, *args)
+
+ def sendall(self, data, flags=0):
+ tail = self.send(data, flags)
+ len_data = len(data)
+ while tail < len_data:
+ tail += self.send(data[tail:], flags)
+
+ def setblocking(self, flag):
+ if flag:
+ self.act_non_blocking = False
+ self._timeout = None
+ else:
+ self.act_non_blocking = True
+ self._timeout = 0.0
+
+ def settimeout(self, howlong):
+ if howlong is None or howlong == _GLOBAL_DEFAULT_TIMEOUT:
+ self.setblocking(True)
+ return
+ try:
+ f = howlong.__float__
+ except AttributeError:
+ raise TypeError('a float is required')
+ howlong = f()
+ if howlong < 0.0:
+ raise ValueError('Timeout value out of range')
+ if howlong == 0.0:
+ self.act_non_blocking = True
+ self._timeout = 0.0
+ else:
+ self.act_non_blocking = False
+ self._timeout = howlong
+
+ def gettimeout(self):
+ return self._timeout
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
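+# Illustrative usage (not part of the original module; the helper name is
+# hypothetical): a GreenSocket looks like socket.socket, but waits by
+# trampolining to the hub instead of blocking the OS thread.
+def _demo_green_socket():
+    listener = GreenSocket(socket.AF_INET, socket.SOCK_STREAM)
+    listener.bind(('127.0.0.1', 0))
+    listener.listen(1)
+    client = GreenSocket(socket.AF_INET, socket.SOCK_STREAM)
+    client.connect(listener.getsockname())
+    server, _addr = listener.accept()
+    client.sendall(b'ping')
+    data = server.recv(4)  # trampolines until the fd is readable
+    for s in (client, server, listener):
+        s.close()
+    return data  # b'ping'
+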
+
+def _operation_on_closed_file(*args, **kwargs):
+ raise ValueError("I/O operation on closed file")
+
+
+greenpipe_doc = """
+ GreenPipe is a cooperative replacement for file class.
+ It will cooperate on pipes. It will block on regular file.
+ Differences from file class:
+ - mode is r/w property. Should re r/o
+ - encoding property not implemented
+ - write/writelines will not raise TypeError exception when non-string data is written
+ it will write str(data) instead
+ - Universal new lines are not supported and newlines property not implementeded
+ - file argument can be descriptor, file name or file object.
+ """
+
+# import SSL module here so we can refer to greenio.SSL.exceptionclass
+try:
+ from OpenSSL import SSL
+except ImportError:
+ # pyOpenSSL not installed, define exceptions anyway for convenience
+ class SSL:
+ class WantWriteError(Exception):
+ pass
+
+ class WantReadError(Exception):
+ pass
+
+ class ZeroReturnError(Exception):
+ pass
+
+ class SysCallError(Exception):
+ pass
+
+
+def shutdown_safe(sock):
+ """Shuts down the socket. This is a convenience method for
+ code that wants to gracefully handle regular sockets, SSL.Connection
+ sockets from PyOpenSSL and ssl.SSLSocket objects from Python 2.7 interchangeably.
+ Both types of ssl socket require a shutdown() before close,
+ but they have different arity on their shutdown method.
+
+ Regular sockets don't need a shutdown before close, but it doesn't hurt.
+ """
+ try:
+ try:
+ # socket, ssl.SSLSocket
+ return sock.shutdown(socket.SHUT_RDWR)
+ except TypeError:
+ # SSL.Connection
+ return sock.shutdown()
+ except OSError as e:
+ # we don't care if the socket is already closed;
+ # this will often be the case in an http server context
+ if get_errno(e) not in (errno.ENOTCONN, errno.EBADF, errno.ENOTSOCK):
+ raise
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/greenio/py3.py b/tapdown/lib/python3.11/site-packages/eventlet/greenio/py3.py
new file mode 100644
index 0000000..d3811df
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/greenio/py3.py
@@ -0,0 +1,227 @@
+import _pyio as _original_pyio
+import errno
+import os as _original_os
+import socket as _original_socket
+from io import (
+ BufferedRandom as _OriginalBufferedRandom,
+ BufferedReader as _OriginalBufferedReader,
+ BufferedWriter as _OriginalBufferedWriter,
+ DEFAULT_BUFFER_SIZE,
+ TextIOWrapper as _OriginalTextIOWrapper,
+ IOBase as _OriginalIOBase,
+)
+from types import FunctionType
+
+from eventlet.greenio.base import (
+ _operation_on_closed_file,
+ greenpipe_doc,
+ set_nonblocking,
+ SOCKET_BLOCKING,
+)
+from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
+from eventlet.support import get_errno
+
+__all__ = ['_fileobject', 'GreenPipe']
+
+# TODO: get rid of this; it only stands in for the original _fileobject
+_fileobject = _original_socket.SocketIO
+
+# Large part of the following code is copied from the original
+# eventlet.greenio module
+
+
+class GreenFileIO(_OriginalIOBase):
+
+ _blksize = 128 * 1024
+
+ def __init__(self, name, mode='r', closefd=True, opener=None):
+ if isinstance(name, int):
+ fileno = name
+ self._name = "" % fileno
+ else:
+ assert isinstance(name, str)
+ with open(name, mode) as fd:
+ self._name = fd.name
+ fileno = _original_os.dup(fd.fileno())
+
+ notify_opened(fileno)
+ self._fileno = fileno
+ self._mode = mode
+ self._closed = False
+ set_nonblocking(self)
+ self._seekable = None
+
+ @property
+ def closed(self):
+ return self._closed
+
+ def seekable(self):
+ if self._seekable is None:
+ try:
+ _original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
+ except OSError as e:
+ if get_errno(e) == errno.ESPIPE:
+ self._seekable = False
+ else:
+ raise
+ else:
+ self._seekable = True
+
+ return self._seekable
+
+ def readable(self):
+ return 'r' in self._mode or '+' in self._mode
+
+ def writable(self):
+ return 'w' in self._mode or '+' in self._mode or 'a' in self._mode
+
+ def fileno(self):
+ return self._fileno
+
+ def read(self, size=-1):
+ if size == -1:
+ return self.readall()
+
+ while True:
+ try:
+ return _original_os.read(self._fileno, size)
+ except OSError as e:
+ if get_errno(e) not in SOCKET_BLOCKING:
+ raise OSError(*e.args)
+ self._trampoline(self, read=True)
+
+ def readall(self):
+ buf = []
+ while True:
+ try:
+ chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
+ if chunk == b'':
+ return b''.join(buf)
+ buf.append(chunk)
+ except OSError as e:
+ if get_errno(e) not in SOCKET_BLOCKING:
+ raise OSError(*e.args)
+ self._trampoline(self, read=True)
+
+ def readinto(self, b):
+ up_to = len(b)
+ data = self.read(up_to)
+ bytes_read = len(data)
+ b[:bytes_read] = data
+ return bytes_read
+
+ def isatty(self):
+ try:
+ return _original_os.isatty(self.fileno())
+ except OSError as e:
+ raise OSError(*e.args)
+
+ def _isatty_open_only(self):
+        # Python does an optimization here; not going to bother, just do
+        # the slow path.
+ return self.isatty()
+
+ def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
+ if self._closed:
+ # Don't trampoline if we're already closed.
+ raise IOClosed()
+ try:
+ return trampoline(fd, read=read, write=write, timeout=timeout,
+ timeout_exc=timeout_exc,
+ mark_as_closed=self._mark_as_closed)
+ except IOClosed:
+ # Our fileno has been obsoleted. Defang ourselves to
+ # prevent spurious closes.
+ self._mark_as_closed()
+ raise
+
+ def _mark_as_closed(self):
+ """ Mark this socket as being closed """
+ self._closed = True
+
+ def write(self, data):
+ view = memoryview(data)
+ datalen = len(data)
+ offset = 0
+ while offset < datalen:
+ try:
+ written = _original_os.write(self._fileno, view[offset:])
+ except OSError as e:
+ if get_errno(e) not in SOCKET_BLOCKING:
+ raise OSError(*e.args)
+ trampoline(self, write=True)
+ else:
+ offset += written
+ return offset
+
+ def close(self):
+ if not self._closed:
+ self._closed = True
+ _original_os.close(self._fileno)
+ notify_close(self._fileno)
+ for method in [
+ 'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
+ 'readline', 'readlines', 'seek', 'tell', 'truncate',
+ 'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
+ setattr(self, method, _operation_on_closed_file)
+
+ def truncate(self, size=-1):
+ if size is None:
+ size = -1
+ if size == -1:
+ size = self.tell()
+ try:
+ rv = _original_os.ftruncate(self._fileno, size)
+ except OSError as e:
+ raise OSError(*e.args)
+ else:
+ self.seek(size) # move position&clear buffer
+ return rv
+
+ def seek(self, offset, whence=_original_os.SEEK_SET):
+ try:
+ return _original_os.lseek(self._fileno, offset, whence)
+ except OSError as e:
+ raise OSError(*e.args)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+
+_open_environment = dict(globals())
+_open_environment.update(dict(
+ BufferedRandom=_OriginalBufferedRandom,
+ BufferedWriter=_OriginalBufferedWriter,
+ BufferedReader=_OriginalBufferedReader,
+ TextIOWrapper=_OriginalTextIOWrapper,
+ FileIO=GreenFileIO,
+ os=_original_os,
+))
+if hasattr(_original_pyio, 'text_encoding'):
+ _open_environment['text_encoding'] = _original_pyio.text_encoding
+
+_pyio_open = getattr(_original_pyio.open, '__wrapped__', _original_pyio.open)
+_open = FunctionType(
+ _pyio_open.__code__,
+ _open_environment,
+)
+
+
+def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
+ newline=None, closefd=True, opener=None):
+ try:
+ fileno = name.fileno()
+ except AttributeError:
+ pass
+ else:
+ fileno = _original_os.dup(fileno)
+ name.close()
+ name = fileno
+
+ return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)
+
+
+GreenPipe.__doc__ = greenpipe_doc
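+
+
+# Illustrative usage (not part of the original module; POSIX-only, and the
+# helper name is hypothetical): wrapping one end of an os.pipe() in a
+# GreenPipe for cooperative reads.
+def _demo_green_pipe():
+    r, w = _original_os.pipe()
+    _original_os.write(w, b'hi')
+    _original_os.close(w)
+    with GreenPipe(r, 'rb') as f:
+        return f.read()  # b'hi'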
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/greenpool.py b/tapdown/lib/python3.11/site-packages/eventlet/greenpool.py
new file mode 100644
index 0000000..f907e38
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/greenpool.py
@@ -0,0 +1,254 @@
+import traceback
+
+import eventlet
+from eventlet import queue
+from eventlet.support import greenlets as greenlet
+
+__all__ = ['GreenPool', 'GreenPile']
+
+DEBUG = True
+
+
+class GreenPool:
+ """The GreenPool class is a pool of green threads.
+ """
+
+ def __init__(self, size=1000):
+ try:
+ size = int(size)
+ except ValueError as e:
+ msg = 'GreenPool() expect size :: int, actual: {} {}'.format(type(size), str(e))
+ raise TypeError(msg)
+ if size < 0:
+ msg = 'GreenPool() expect size >= 0, actual: {}'.format(repr(size))
+ raise ValueError(msg)
+ self.size = size
+ self.coroutines_running = set()
+ self.sem = eventlet.Semaphore(size)
+ self.no_coros_running = eventlet.Event()
+
+ def resize(self, new_size):
+ """ Change the max number of greenthreads doing work at any given time.
+
+ If resize is called when there are more than *new_size* greenthreads
+ already working on tasks, they will be allowed to complete but no new
+ tasks will be allowed to get launched until enough greenthreads finish
+ their tasks to drop the overall quantity below *new_size*. Until
+ then, the return value of free() will be negative.
+ """
+ size_delta = new_size - self.size
+ self.sem.counter += size_delta
+ self.size = new_size
+
+ def running(self):
+ """ Returns the number of greenthreads that are currently executing
+ functions in the GreenPool."""
+ return len(self.coroutines_running)
+
+ def free(self):
+ """ Returns the number of greenthreads available for use.
+
+ If zero or less, the next call to :meth:`spawn` or :meth:`spawn_n` will
+ block the calling greenthread until a slot becomes available."""
+ return self.sem.counter
+
+ def spawn(self, function, *args, **kwargs):
+ """Run the *function* with its arguments in its own green thread.
+        Returns the :class:`GreenThread <eventlet.greenthread.GreenThread>`
+ object that is running the function, which can be used to retrieve the
+ results.
+
+ If the pool is currently at capacity, ``spawn`` will block until one of
+ the running greenthreads completes its task and frees up a slot.
+
+ This function is reentrant; *function* can call ``spawn`` on the same
+ pool without risk of deadlocking the whole thing.
+ """
+ # if reentering an empty pool, don't try to wait on a coroutine freeing
+ # itself -- instead, just execute in the current coroutine
+ current = eventlet.getcurrent()
+ if self.sem.locked() and current in self.coroutines_running:
+ # a bit hacky to use the GT without switching to it
+ gt = eventlet.greenthread.GreenThread(current)
+ gt.main(function, args, kwargs)
+ return gt
+ else:
+ self.sem.acquire()
+ gt = eventlet.spawn(function, *args, **kwargs)
+ if not self.coroutines_running:
+ self.no_coros_running = eventlet.Event()
+ self.coroutines_running.add(gt)
+ gt.link(self._spawn_done)
+ return gt
+
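+    # Editor's note -- an illustrative sketch, not part of eventlet: spawn()
+    # returns a GreenThread whose result can be collected with wait().
+    #
+    #     import eventlet
+    #
+    #     pool = eventlet.GreenPool(size=100)
+    #     gt = pool.spawn(pow, 2, 10)
+    #     print(gt.wait())    # 1024
+    #     pool.waitall()      # block until all greenthreads have finished
+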
+ def _spawn_n_impl(self, func, args, kwargs, coro):
+ try:
+ try:
+ func(*args, **kwargs)
+ except (KeyboardInterrupt, SystemExit, greenlet.GreenletExit):
+ raise
+ except:
+ if DEBUG:
+ traceback.print_exc()
+ finally:
+            # Only release a pool slot when spawn_n actually acquired one
+            # (coro is None on the reentrant fast path, which skips acquire).
+            if coro is not None:
+                coro = eventlet.getcurrent()
+                self._spawn_done(coro)
+
+ def spawn_n(self, function, *args, **kwargs):
+ """Create a greenthread to run the *function*, the same as
+ :meth:`spawn`. The difference is that :meth:`spawn_n` returns
+ None; the results of *function* are not retrievable.
+ """
+ # if reentering an empty pool, don't try to wait on a coroutine freeing
+ # itself -- instead, just execute in the current coroutine
+ current = eventlet.getcurrent()
+ if self.sem.locked() and current in self.coroutines_running:
+ self._spawn_n_impl(function, args, kwargs, None)
+ else:
+ self.sem.acquire()
+ g = eventlet.spawn_n(
+ self._spawn_n_impl,
+ function, args, kwargs, True)
+ if not self.coroutines_running:
+ self.no_coros_running = eventlet.Event()
+ self.coroutines_running.add(g)
+
+ def waitall(self):
+ """Waits until all greenthreads in the pool are finished working."""
+ assert eventlet.getcurrent() not in self.coroutines_running, \
+ "Calling waitall() from within one of the " \
+ "GreenPool's greenthreads will never terminate."
+ if self.running():
+ self.no_coros_running.wait()
+
+ def _spawn_done(self, coro):
+ self.sem.release()
+ if coro is not None:
+ self.coroutines_running.remove(coro)
+ # if done processing (no more work is waiting for processing),
+ # we can finish off any waitall() calls that might be pending
+ if self.sem.balance == self.size:
+ self.no_coros_running.send(None)
+
+ def waiting(self):
+ """Return the number of greenthreads waiting to spawn.
+ """
+ if self.sem.balance < 0:
+ return -self.sem.balance
+ else:
+ return 0
+
+ def _do_map(self, func, it, gi):
+ for args in it:
+ gi.spawn(func, *args)
+ gi.done_spawning()
+
+ def starmap(self, function, iterable):
+ """This is the same as :func:`itertools.starmap`, except that *func* is
+ executed in a separate green thread for each item, with the concurrency
+ limited by the pool's size. In operation, starmap consumes a constant
+ amount of memory, proportional to the size of the pool, and is thus
+ suited for iterating over extremely long input lists.
+ """
+ if function is None:
+ function = lambda *a: a
+ # We use a whole separate greenthread so its spawn() calls can block
+ # without blocking OUR caller. On the other hand, we must assume that
+ # our caller will immediately start trying to iterate over whatever we
+ # return. If that were a GreenPile, our caller would always see an
+ # empty sequence because the hub hasn't even entered _do_map() yet --
+ # _do_map() hasn't had a chance to spawn a single greenthread on this
+ # GreenPool! A GreenMap is safe to use with different producer and
+ # consumer greenthreads, because it doesn't raise StopIteration until
+ # the producer has explicitly called done_spawning().
+ gi = GreenMap(self.size)
+ eventlet.spawn_n(self._do_map, function, iterable, gi)
+ return gi
+
+ def imap(self, function, *iterables):
+ """This is the same as :func:`itertools.imap`, and has the same
+ concurrency and memory behavior as :meth:`starmap`.
+
+ It's quite convenient for, e.g., farming out jobs from a file::
+
+ def worker(line):
+ return do_something(line)
+ pool = GreenPool()
+ for result in pool.imap(worker, open("filename", 'r')):
+ print(result)
+ """
+ return self.starmap(function, zip(*iterables))
+
+
+class GreenPile:
+ """GreenPile is an abstraction representing a bunch of I/O-related tasks.
+
+ Construct a GreenPile with an existing GreenPool object. The GreenPile will
+ then use that pool's concurrency as it processes its jobs. There can be
+ many GreenPiles associated with a single GreenPool.
+
+ A GreenPile can also be constructed standalone, not associated with any
+ GreenPool. To do this, construct it with an integer size parameter instead
+ of a GreenPool.
+
+ It is not advisable to iterate over a GreenPile in a different greenthread
+ than the one which is calling spawn. The iterator will exit early in that
+ situation.
+ """
+
+ def __init__(self, size_or_pool=1000):
+ if isinstance(size_or_pool, GreenPool):
+ self.pool = size_or_pool
+ else:
+ self.pool = GreenPool(size_or_pool)
+ self.waiters = queue.LightQueue()
+ self.counter = 0
+
+ def spawn(self, func, *args, **kw):
+ """Runs *func* in its own green thread, with the result available by
+ iterating over the GreenPile object."""
+ self.counter += 1
+ try:
+ gt = self.pool.spawn(func, *args, **kw)
+ self.waiters.put(gt)
+ except:
+ self.counter -= 1
+ raise
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ """Wait for the next result, suspending the current greenthread until it
+ is available. Raises StopIteration when there are no more results."""
+ if self.counter == 0:
+ raise StopIteration()
+ return self._next()
+ __next__ = next
+
+ def _next(self):
+ try:
+ return self.waiters.get().wait()
+ finally:
+ self.counter -= 1
+
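+# Editor's note -- an illustrative sketch, not part of eventlet: a GreenPile
+# yields results in spawn order, which makes it handy for fan-out/fan-in.
+#
+#     import eventlet
+#     from eventlet.greenpool import GreenPile
+#
+#     pile = GreenPile(4)              # standalone pile with its own pool of 4
+#     for i in range(10):
+#         pile.spawn(lambda n=i: n * n)
+#     print(list(pile))                # [0, 1, 4, 9, ...] in spawn order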
+
+# this is identical to GreenPile but it blocks on spawn if the results
+# aren't consumed, and it doesn't generate its own StopIteration exception,
+# instead relying on the spawning process to send one in when it's done
+class GreenMap(GreenPile):
+ def __init__(self, size_or_pool):
+ super().__init__(size_or_pool)
+ self.waiters = queue.LightQueue(maxsize=self.pool.size)
+
+ def done_spawning(self):
+ self.spawn(lambda: StopIteration())
+
+ def next(self):
+ val = self._next()
+ if isinstance(val, StopIteration):
+ raise val
+ else:
+ return val
+ __next__ = next
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/greenthread.py b/tapdown/lib/python3.11/site-packages/eventlet/greenthread.py
new file mode 100644
index 0000000..d1be005
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/greenthread.py
@@ -0,0 +1,353 @@
+from collections import deque
+import sys
+
+from greenlet import GreenletExit
+
+from eventlet import event
+from eventlet import hubs
+from eventlet import support
+from eventlet import timeout
+from eventlet.hubs import timer
+from eventlet.support import greenlets as greenlet
+import warnings
+
+__all__ = ['getcurrent', 'sleep', 'spawn', 'spawn_n',
+ 'kill',
+ 'spawn_after', 'spawn_after_local', 'GreenThread']
+
+getcurrent = greenlet.getcurrent
+
+
+def sleep(seconds=0):
+ """Yield control to another eligible coroutine until at least *seconds* have
+ elapsed.
+
+ *seconds* may be specified as an integer, or a float if fractional seconds
+ are desired. Calling :func:`~greenthread.sleep` with *seconds* of 0 is the
+ canonical way of expressing a cooperative yield. For example, if one is
+ looping over a large list performing an expensive calculation without
+ calling any socket methods, it's a good idea to call ``sleep(0)``
+ occasionally; otherwise nothing else will run.
+ """
+ hub = hubs.get_hub()
+ current = getcurrent()
+ if hub.greenlet is current:
+ if seconds <= 0:
+ # In this case, sleep(0) got called in the event loop threadlet.
+ # This isn't blocking, so it's not harmful. And it will not be
+ # possible to switch in this situation. So not much we can do other
+ # than just keep running. This does get triggered in real code,
+ # unfortunately.
+ return
+ raise RuntimeError('do not call blocking functions from the mainloop')
+ timer = hub.schedule_call_global(seconds, current.switch)
+ try:
+ hub.switch()
+ finally:
+ timer.cancel()
+
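+# Editor's note -- an illustrative sketch, not part of eventlet: sleep(0) as a
+# cooperative yield inside a CPU-bound loop so other greenthreads can run.
+# (expensive() is a hypothetical CPU-heavy helper.)
+#
+#     import eventlet
+#
+#     def crunch(items):
+#         total = 0
+#         for i, item in enumerate(items):
+#             total += expensive(item)
+#             if i % 100 == 0:
+#                 eventlet.sleep(0)    # let other greenthreads run
+#         return total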
+
+def spawn(func, *args, **kwargs):
+ """Create a greenthread to run ``func(*args, **kwargs)``. Returns a
+ :class:`GreenThread` object which you can use to get the results of the
+ call.
+
+ Execution control returns immediately to the caller; the created greenthread
+ is merely scheduled to be run at the next available opportunity.
+ Use :func:`spawn_after` to arrange for greenthreads to be spawned
+ after a finite delay.
+ """
+ hub = hubs.get_hub()
+ g = GreenThread(hub.greenlet)
+ hub.schedule_call_global(0, g.switch, func, args, kwargs)
+ return g
+
+
+def spawn_n(func, *args, **kwargs):
+ """Same as :func:`spawn`, but returns a ``greenlet`` object from
+ which it is not possible to retrieve either a return value or
+ whether it raised any exceptions. This is faster than
+ :func:`spawn`; it is fastest if there are no keyword arguments.
+
+ If an exception is raised in the function, spawn_n prints a stack
+ trace; the print can be disabled by calling
+ :func:`eventlet.debug.hub_exceptions` with False.
+ """
+ return _spawn_n(0, func, args, kwargs)[1]
+
+
+def spawn_after(seconds, func, *args, **kwargs):
+ """Spawns *func* after *seconds* have elapsed. It runs as scheduled even if
+ the current greenthread has completed.
+
+ *seconds* may be specified as an integer, or a float if fractional seconds
+ are desired. The *func* will be called with the given *args* and
+ keyword arguments *kwargs*, and will be executed within its own greenthread.
+
+ The return value of :func:`spawn_after` is a :class:`GreenThread` object,
+ which can be used to retrieve the results of the call.
+
+ To cancel the spawn and prevent *func* from being called,
+ call :meth:`GreenThread.cancel` on the return value of :func:`spawn_after`.
+ This will not abort the function if it's already started running, which is
+ generally the desired behavior. If terminating *func* regardless of whether
+ it's started or not is the desired behavior, call :meth:`GreenThread.kill`.
+ """
+ hub = hubs.get_hub()
+ g = GreenThread(hub.greenlet)
+ hub.schedule_call_global(seconds, g.switch, func, args, kwargs)
+ return g
+
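+# Editor's note -- an illustrative sketch, not part of eventlet: scheduling a
+# delayed call and cancelling it before it starts.
+#
+#     import eventlet
+#
+#     gt = eventlet.spawn_after(5, print, "never printed")
+#     gt.cancel()    # a no-op if the function has already started running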
+
+def spawn_after_local(seconds, func, *args, **kwargs):
+ """Spawns *func* after *seconds* have elapsed. The function will NOT be
+ called if the current greenthread has exited.
+
+ *seconds* may be specified as an integer, or a float if fractional seconds
+ are desired. The *func* will be called with the given *args* and
+ keyword arguments *kwargs*, and will be executed within its own greenthread.
+
+ The return value of :func:`spawn_after` is a :class:`GreenThread` object,
+ which can be used to retrieve the results of the call.
+
+ To cancel the spawn and prevent *func* from being called,
+ call :meth:`GreenThread.cancel` on the return value. This will not abort the
+ function if it's already started running. If terminating *func* regardless
+ of whether it's started or not is the desired behavior, call
+ :meth:`GreenThread.kill`.
+ """
+ hub = hubs.get_hub()
+ g = GreenThread(hub.greenlet)
+ hub.schedule_call_local(seconds, g.switch, func, args, kwargs)
+ return g
+
+
+def call_after_global(seconds, func, *args, **kwargs):
+ warnings.warn(
+ "call_after_global is renamed to spawn_after, which"
+ "has the same signature and semantics (plus a bit extra). Please do a"
+ " quick search-and-replace on your codebase, thanks!",
+ DeprecationWarning, stacklevel=2)
+ return _spawn_n(seconds, func, args, kwargs)[0]
+
+
+def call_after_local(seconds, function, *args, **kwargs):
+ warnings.warn(
+ "call_after_local is renamed to spawn_after_local, which"
+ "has the same signature and semantics (plus a bit extra).",
+ DeprecationWarning, stacklevel=2)
+ hub = hubs.get_hub()
+ g = greenlet.greenlet(function, parent=hub.greenlet)
+ t = hub.schedule_call_local(seconds, g.switch, *args, **kwargs)
+ return t
+
+
+call_after = call_after_local
+
+
+def exc_after(seconds, *throw_args):
+ warnings.warn("Instead of exc_after, which is deprecated, use "
+ "Timeout(seconds, exception)",
+ DeprecationWarning, stacklevel=2)
+ if seconds is None: # dummy argument, do nothing
+ return timer.Timer(seconds, lambda: None)
+ hub = hubs.get_hub()
+ return hub.schedule_call_local(seconds, getcurrent().throw, *throw_args)
+
+
+# deprecate, remove
+TimeoutError, with_timeout = (
+ support.wrap_deprecated(old, new)(fun) for old, new, fun in (
+ ('greenthread.TimeoutError', 'Timeout', timeout.Timeout),
+ ('greenthread.with_timeout', 'with_timeout', timeout.with_timeout),
+ ))
+
+
+def _spawn_n(seconds, func, args, kwargs):
+ hub = hubs.get_hub()
+ g = greenlet.greenlet(func, parent=hub.greenlet)
+ t = hub.schedule_call_global(seconds, g.switch, *args, **kwargs)
+ return t, g
+
+
+class GreenThread(greenlet.greenlet):
+ """The GreenThread class is a type of Greenlet which has the additional
+ property of being able to retrieve the return value of the main function.
+ Do not construct GreenThread objects directly; call :func:`spawn` to get one.
+ """
+
+ def __init__(self, parent):
+ greenlet.greenlet.__init__(self, self.main, parent)
+ self._exit_event = event.Event()
+ self._resolving_links = False
+ self._exit_funcs = None
+
+ def __await__(self):
+ """
+ Enable ``GreenThread``s to be ``await``ed in ``async`` functions.
+ """
+ from eventlet.hubs.asyncio import Hub
+ hub = hubs.get_hub()
+ if not isinstance(hub, Hub):
+ raise RuntimeError(
+ "This API only works with eventlet's asyncio hub. "
+ + "To use it, set an EVENTLET_HUB=asyncio environment variable."
+ )
+
+ future = hub.loop.create_future()
+
+ # When the Future finishes, check if it was due to cancellation:
+ def got_future_result(future):
+ if future.cancelled() and not self.dead:
+ # GreenThread is still running, so kill it:
+ self.kill()
+
+ future.add_done_callback(got_future_result)
+
+ # When the GreenThread finishes, set its result on the Future:
+ def got_gthread_result(gthread):
+ if future.done():
+ # Can't set values any more.
+ return
+
+ try:
+ # Should return immediately:
+ result = gthread.wait()
+ future.set_result(result)
+ except GreenletExit:
+ future.cancel()
+ except BaseException as e:
+ future.set_exception(e)
+
+ self.link(got_gthread_result)
+
+ return future.__await__()
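+
+    # Editor's note -- an illustrative sketch, not part of eventlet: with the
+    # asyncio hub selected (e.g. EVENTLET_HUB=asyncio), a coroutine can await
+    # a GreenThread directly. some_green_function is a hypothetical callable.
+    #
+    #     async def caller():
+    #         gt = eventlet.spawn(some_green_function)
+    #         result = await gt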
+
+ def wait(self):
+ """ Returns the result of the main function of this GreenThread. If the
+ result is a normal return value, :meth:`wait` returns it. If it raised
+ an exception, :meth:`wait` will raise the same exception (though the
+ stack trace will unavoidably contain some frames from within the
+ greenthread module)."""
+ return self._exit_event.wait()
+
+ def link(self, func, *curried_args, **curried_kwargs):
+ """ Set up a function to be called with the results of the GreenThread.
+
+ The function must have the following signature::
+
+ def func(gt, [curried args/kwargs]):
+
+ When the GreenThread finishes its run, it calls *func* with itself
+        and with the curried arguments supplied
+ at link-time. If the function wants to retrieve the result of the GreenThread,
+ it should call wait() on its first argument.
+
+ Note that *func* is called within execution context of
+ the GreenThread, so it is possible to interfere with other linked
+ functions by doing things like switching explicitly to another
+ greenthread.
+ """
+ if self._exit_funcs is None:
+ self._exit_funcs = deque()
+ self._exit_funcs.append((func, curried_args, curried_kwargs))
+ if self._exit_event.ready():
+ self._resolve_links()
+
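+    # Editor's note -- an illustrative sketch, not part of eventlet: a link
+    # callback receives the finished GreenThread and calls wait() on it.
+    #
+    #     def on_done(gt, label):
+    #         try:
+    #             print(label, "->", gt.wait())
+    #         except Exception as e:
+    #             print(label, "failed:", e)
+    #
+    #     gt = eventlet.spawn(lambda: 42)
+    #     gt.link(on_done, "job-1")
+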
+ def unlink(self, func, *curried_args, **curried_kwargs):
+ """ remove linked function set by :meth:`link`
+
+ Remove successfully return True, otherwise False
+ """
+ if not self._exit_funcs:
+ return False
+ try:
+ self._exit_funcs.remove((func, curried_args, curried_kwargs))
+ return True
+ except ValueError:
+ return False
+
+ def main(self, function, args, kwargs):
+ try:
+ result = function(*args, **kwargs)
+ except:
+ self._exit_event.send_exception(*sys.exc_info())
+ self._resolve_links()
+ raise
+ else:
+ self._exit_event.send(result)
+ self._resolve_links()
+
+ def _resolve_links(self):
+ # ca and ckw are the curried function arguments
+ if self._resolving_links:
+ return
+ if not self._exit_funcs:
+ return
+ self._resolving_links = True
+ try:
+ while self._exit_funcs:
+ f, ca, ckw = self._exit_funcs.popleft()
+ f(self, *ca, **ckw)
+ finally:
+ self._resolving_links = False
+
+ def kill(self, *throw_args):
+ """Kills the greenthread using :func:`kill`. After being killed
+ all calls to :meth:`wait` will raise *throw_args* (which default
+ to :class:`greenlet.GreenletExit`)."""
+ return kill(self, *throw_args)
+
+ def cancel(self, *throw_args):
+ """Kills the greenthread using :func:`kill`, but only if it hasn't
+ already started running. After being canceled,
+ all calls to :meth:`wait` will raise *throw_args* (which default
+ to :class:`greenlet.GreenletExit`)."""
+ return cancel(self, *throw_args)
+
+
+def cancel(g, *throw_args):
+ """Like :func:`kill`, but only terminates the greenthread if it hasn't
+    already started execution. If the greenthread has already started
+ execution, :func:`cancel` has no effect."""
+ if not g:
+ kill(g, *throw_args)
+
+
+def kill(g, *throw_args):
+ """Terminates the target greenthread by raising an exception into it.
+    Whatever that greenthread might be doing, be it waiting for I/O or another
+ primitive, it sees an exception right away.
+
+ By default, this exception is GreenletExit, but a specific exception
+ may be specified. *throw_args* should be the same as the arguments to
+ raise; either an exception instance or an exc_info tuple.
+
+ Calling :func:`kill` causes the calling greenthread to cooperatively yield.
+ """
+ if g.dead:
+ return
+ hub = hubs.get_hub()
+ if not g:
+ # greenlet hasn't started yet and therefore throw won't work
+ # on its own; semantically we want it to be as though the main
+ # method never got called
+ def just_raise(*a, **kw):
+ if throw_args:
+ raise throw_args[1].with_traceback(throw_args[2])
+ else:
+ raise greenlet.GreenletExit()
+ g.run = just_raise
+ if isinstance(g, GreenThread):
+ # it's a GreenThread object, so we want to call its main
+ # method to take advantage of the notification
+ try:
+ g.main(just_raise, (), {})
+ except:
+ pass
+ current = getcurrent()
+ if current is not hub.greenlet:
+ # arrange to wake the caller back up immediately
+ hub.ensure_greenlet()
+ hub.schedule_call_global(0, current.switch)
+ g.throw(*throw_args)
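+
+
+# Editor's note -- an illustrative sketch, not part of eventlet: kill() raises
+# GreenletExit inside the target, and subsequent wait() calls re-raise it.
+#
+#     import eventlet
+#
+#     gt = eventlet.spawn(eventlet.sleep, 60)
+#     eventlet.sleep(0)    # give gt a chance to start
+#     gt.kill()            # gt sees GreenletExit immediately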
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/__init__.py
new file mode 100644
index 0000000..b1a3e80
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/__init__.py
@@ -0,0 +1,188 @@
+import importlib
+import inspect
+import os
+import warnings
+
+from eventlet import patcher
+from eventlet.support import greenlets as greenlet
+
+
+__all__ = ["use_hub", "get_hub", "get_default_hub", "trampoline"]
+
+threading = patcher.original('threading')
+_threadlocal = threading.local()
+
+
+# order is important, get_default_hub returns first available from here
+builtin_hub_names = ('epolls', 'kqueue', 'poll', 'selects')
+builtin_hub_modules = tuple(importlib.import_module('eventlet.hubs.' + name) for name in builtin_hub_names)
+
+
+class HubError(Exception):
+ pass
+
+
+def get_default_hub():
+ """Select the default hub implementation based on what multiplexing
+ libraries are installed. The order that the hubs are tried is:
+
+ * epoll
+ * kqueue
+ * poll
+ * select
+
+ .. include:: ../../doc/source/common.txt
+ .. note :: |internal|
+ """
+ for mod in builtin_hub_modules:
+ if mod.is_available():
+ return mod
+
+ raise HubError('no built-in hubs are available: {}'.format(builtin_hub_modules))
+
+
+def use_hub(mod=None):
+ """Use the module *mod*, containing a class called Hub, as the
+ event hub. Usually not required; the default hub is usually fine.
+
+ `mod` can be an actual hub class, a module, a string, or None.
+
+ If `mod` is a class, use it directly.
+    If `mod` is a module, use the `module.Hub` class.
+ If `mod` is a string and contains either '.' or ':'
+ then `use_hub` uses 'package.subpackage.module:Class' convention,
+ otherwise imports `eventlet.hubs.mod`.
+ If `mod` is None, `use_hub` uses the default hub.
+
+ Only call use_hub during application initialization,
+ because it resets the hub's state and any existing
+ timers or listeners will never be resumed.
+
+ These two threadlocal attributes are not part of Eventlet public API:
+ - `threadlocal.Hub` (capital H) is hub constructor, used when no hub is currently active
+ - `threadlocal.hub` (lowercase h) is active hub instance
+ """
+ if mod is None:
+ mod = os.environ.get('EVENTLET_HUB', None)
+ if mod is None:
+ mod = get_default_hub()
+ if hasattr(_threadlocal, 'hub'):
+ del _threadlocal.hub
+
+ classname = ''
+ if isinstance(mod, str):
+ if mod.strip() == "":
+ raise RuntimeError("Need to specify a hub")
+ if '.' in mod or ':' in mod:
+ modulename, _, classname = mod.strip().partition(':')
+ else:
+ modulename = 'eventlet.hubs.' + mod
+ mod = importlib.import_module(modulename)
+
+ if hasattr(mod, 'is_available'):
+ if not mod.is_available():
+ raise Exception('selected hub is not available on this system mod={}'.format(mod))
+ else:
+ msg = '''Please provide `is_available()` function in your custom Eventlet hub {mod}.
+It must return bool: whether hub supports current platform. See eventlet/hubs/{{epoll,kqueue}} for example.
+'''.format(mod=mod)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
+
+ hubclass = mod
+ if not inspect.isclass(mod):
+ hubclass = getattr(mod, classname or 'Hub')
+
+ _threadlocal.Hub = hubclass
+
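+# Editor's note -- an illustrative sketch, not part of eventlet: selecting a
+# hub explicitly during application start-up. "selects" is a built-in hub
+# name; "mypkg.myhubs:CustomHub" is a hypothetical module:Class path.
+#
+#     import eventlet.hubs
+#
+#     eventlet.hubs.use_hub("selects")
+#     eventlet.hubs.use_hub("mypkg.myhubs:CustomHub")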
+
+def get_hub():
+ """Get the current event hub singleton object.
+
+ .. note :: |internal|
+ """
+ try:
+ hub = _threadlocal.hub
+ except AttributeError:
+ try:
+ _threadlocal.Hub
+ except AttributeError:
+ use_hub()
+ hub = _threadlocal.hub = _threadlocal.Hub()
+ return hub
+
+
+# Lame mid-file import because of complex dependencies in the import graph
+from eventlet import timeout
+
+
+def trampoline(fd, read=None, write=None, timeout=None,
+ timeout_exc=timeout.Timeout,
+ mark_as_closed=None):
+ """Suspend the current coroutine until the given socket object or file
+ descriptor is ready to *read*, ready to *write*, or the specified
+ *timeout* elapses, depending on arguments specified.
+
+ To wait for *fd* to be ready to read, pass *read* ``=True``; ready to
+ write, pass *write* ``=True``. To specify a timeout, pass the *timeout*
+ argument in seconds.
+
+ If the specified *timeout* elapses before the socket is ready to read or
+ write, *timeout_exc* will be raised instead of ``trampoline()``
+ returning normally.
+
+ .. note :: |internal|
+ """
+ t = None
+ hub = get_hub()
+ current = greenlet.getcurrent()
+ if hub.greenlet is current:
+ raise RuntimeError('do not call blocking functions from the mainloop')
+ if (read and write):
+ raise RuntimeError('not allowed to trampoline for reading and writing')
+ try:
+ fileno = fd.fileno()
+ except AttributeError:
+ fileno = fd
+ if timeout is not None:
+ def _timeout(exc):
+ # This is only useful to insert debugging
+ current.throw(exc)
+ t = hub.schedule_call_global(timeout, _timeout, timeout_exc)
+ try:
+ if read:
+ listener = hub.add(hub.READ, fileno, current.switch, current.throw, mark_as_closed)
+ elif write:
+ listener = hub.add(hub.WRITE, fileno, current.switch, current.throw, mark_as_closed)
+ try:
+ return hub.switch()
+ finally:
+ hub.remove(listener)
+ finally:
+ if t is not None:
+ t.cancel()
+
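+# Editor's note -- an illustrative sketch, not part of eventlet: parking the
+# current greenthread until a socket is readable, with a five second timeout.
+# sock is assumed to be an already-connected socket; green sockets normally
+# call trampoline() for you.
+#
+#     from eventlet.hubs import trampoline
+#
+#     trampoline(sock, read=True, timeout=5)    # raises Timeout on no data
+#     data = sock.recv(4096)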
+
+def notify_close(fd):
+ """
+ A particular file descriptor has been explicitly closed. Register for any
+ waiting listeners to be notified on the next run loop.
+ """
+ hub = get_hub()
+ hub.notify_close(fd)
+
+
+def notify_opened(fd):
+ """
+ Some file descriptors may be closed 'silently' - that is, by the garbage
+ collector, by an external library, etc. When the OS returns a file descriptor
+ from an open call (or something similar), this may be the only indication we
+ have that the FD has been closed and then recycled.
+ We let the hub know that the old file descriptor is dead; any stuck listeners
+ will be disabled and notified in turn.
+ """
+ hub = get_hub()
+ hub.mark_as_reopened(fd)
+
+
+class IOClosed(IOError):
+ pass
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/asyncio.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/asyncio.py
new file mode 100644
index 0000000..2b9b7e5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/asyncio.py
@@ -0,0 +1,174 @@
+"""
+Asyncio-based hub, originally implemented by Miguel Grinberg.
+"""
+
+# The various modules involved in asyncio need to call the original, unpatched
+# standard library APIs to work: socket, select, threading, and so on. We
+# therefore don't import them on the module level, since that would involve
+# their imports getting patched, and instead delay importing them as much as
+# possible. Then, we do a little song and dance in Hub.__init__ below so that
+# when they're imported they import the original modules (select, socket, etc)
+# rather than the patched ones.
+
+import os
+import sys
+
+from eventlet.hubs import hub
+from eventlet.patcher import _unmonkey_patch_asyncio_all
+
+
+def is_available():
+ """
+ Indicate whether this hub is available, since some hubs are
+ platform-specific.
+
+ Python always has asyncio, so this is always ``True``.
+ """
+ return True
+
+
+class Hub(hub.BaseHub):
+ """An Eventlet hub implementation on top of an asyncio event loop."""
+
+ def __init__(self):
+ super().__init__()
+
+ # Pre-emptively make sure we're using the right modules:
+ _unmonkey_patch_asyncio_all()
+
+ # The presumption is that eventlet is driving the event loop, so we
+ # want a new one we control.
+ import asyncio
+
+ self.loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(self.loop)
+ self.sleep_event = asyncio.Event()
+
+ import asyncio.events
+ if hasattr(asyncio.events, "on_fork"):
+ # Allow post-fork() child to continue using the same event loop.
+ # This is a terrible idea.
+ asyncio.events.on_fork.__code__ = (lambda: None).__code__
+ else:
+ # On Python 3.9-3.11, there's a thread local we need to reset.
+ # Also a terrible idea.
+ def re_register_loop(loop=self.loop):
+ asyncio.events._set_running_loop(loop)
+
+ os.register_at_fork(after_in_child=re_register_loop)
+
+ def add_timer(self, timer):
+ """
+ Register a ``Timer``.
+
+ Typically not called directly by users.
+ """
+ super().add_timer(timer)
+ self.sleep_event.set()
+
+ def _file_cb(self, cb, fileno):
+ """
+ Callback called by ``asyncio`` when a file descriptor has an event.
+ """
+ try:
+ cb(fileno)
+ except self.SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
+ self.sleep_event.set()
+
+ def add(self, evtype, fileno, cb, tb, mark_as_closed):
+ """
+ Add a file descriptor of given event type to the ``Hub``. See the
+ superclass for details.
+
+ Typically not called directly by users.
+ """
+ try:
+ os.fstat(fileno)
+ except OSError:
+ raise ValueError("Invalid file descriptor")
+ already_listening = self.listeners[evtype].get(fileno) is not None
+ listener = super().add(evtype, fileno, cb, tb, mark_as_closed)
+ if not already_listening:
+ if evtype == hub.READ:
+ self.loop.add_reader(fileno, self._file_cb, cb, fileno)
+ else:
+ self.loop.add_writer(fileno, self._file_cb, cb, fileno)
+ return listener
+
+ def remove(self, listener):
+ """
+ Remove a listener from the ``Hub``. See the superclass for details.
+
+ Typically not called directly by users.
+ """
+ super().remove(listener)
+ evtype = listener.evtype
+ fileno = listener.fileno
+ if not self.listeners[evtype].get(fileno):
+ if evtype == hub.READ:
+ self.loop.remove_reader(fileno)
+ else:
+ self.loop.remove_writer(fileno)
+
+ def remove_descriptor(self, fileno):
+ """
+ Remove a file descriptor from the ``asyncio`` loop.
+
+ Typically not called directly by users.
+ """
+ have_read = self.listeners[hub.READ].get(fileno)
+ have_write = self.listeners[hub.WRITE].get(fileno)
+ super().remove_descriptor(fileno)
+ if have_read:
+ self.loop.remove_reader(fileno)
+ if have_write:
+ self.loop.remove_writer(fileno)
+
+ def run(self, *a, **kw):
+ """
+ Start the ``Hub`` running. See the superclass for details.
+ """
+ import asyncio
+
+ async def async_run():
+ if self.running:
+ raise RuntimeError("Already running!")
+ try:
+ self.running = True
+ self.stopping = False
+ while not self.stopping:
+ while self.closed:
+ # We ditch all of these first.
+ self.close_one()
+ self.prepare_timers()
+ if self.debug_blocking:
+ self.block_detect_pre()
+ self.fire_timers(self.clock())
+ if self.debug_blocking:
+ self.block_detect_post()
+ self.prepare_timers()
+ wakeup_when = self.sleep_until()
+ if wakeup_when is None:
+ sleep_time = self.default_sleep()
+ else:
+ sleep_time = wakeup_when - self.clock()
+ if sleep_time > 0:
+ try:
+ await asyncio.wait_for(self.sleep_event.wait(), sleep_time)
+ except asyncio.TimeoutError:
+ pass
+ self.sleep_event.clear()
+ else:
+ await asyncio.sleep(0)
+ else:
+ self.timers_canceled = 0
+ del self.timers[:]
+ del self.next_timers[:]
+ finally:
+ self.running = False
+ self.stopping = False
+
+ self.loop.run_until_complete(async_run())
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/epolls.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/epolls.py
new file mode 100644
index 0000000..770c18d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/epolls.py
@@ -0,0 +1,31 @@
+import errno
+from eventlet import patcher, support
+from eventlet.hubs import hub, poll
+select = patcher.original('select')
+
+
+def is_available():
+ return hasattr(select, 'epoll')
+
+
+# NOTE: we rely on the fact that the epoll flag constants
+# are identical in value to the poll constants
+class Hub(poll.Hub):
+ def __init__(self, clock=None):
+ super().__init__(clock=clock)
+ self.poll = select.epoll()
+
+ def add(self, evtype, fileno, cb, tb, mac):
+ oldlisteners = bool(self.listeners[self.READ].get(fileno) or
+ self.listeners[self.WRITE].get(fileno))
+ # not super() to avoid double register()
+ listener = hub.BaseHub.add(self, evtype, fileno, cb, tb, mac)
+ try:
+ self.register(fileno, new=not oldlisteners)
+ except OSError as ex: # ignore EEXIST, #80
+ if support.get_errno(ex) != errno.EEXIST:
+ raise
+ return listener
+
+ def do_poll(self, seconds):
+ return self.poll.poll(seconds)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/hub.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/hub.py
new file mode 100644
index 0000000..abeee6c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/hub.py
@@ -0,0 +1,495 @@
+import errno
+import heapq
+import math
+import signal
+import sys
+import traceback
+
+arm_alarm = None
+if hasattr(signal, 'setitimer'):
+ def alarm_itimer(seconds):
+ signal.setitimer(signal.ITIMER_REAL, seconds)
+ arm_alarm = alarm_itimer
+else:
+ try:
+ import itimer
+ arm_alarm = itimer.alarm
+ except ImportError:
+ def alarm_signal(seconds):
+ signal.alarm(math.ceil(seconds))
+ arm_alarm = alarm_signal
+
+import eventlet.hubs
+from eventlet.hubs import timer
+from eventlet.support import greenlets as greenlet
+try:
+ from monotonic import monotonic
+except ImportError:
+ from time import monotonic
+
+g_prevent_multiple_readers = True
+
+READ = "read"
+WRITE = "write"
+
+
+def closed_callback(fileno):
+ """ Used to de-fang a callback that may be triggered by a loop in BaseHub.wait
+ """
+ # No-op.
+ pass
+
+
+class FdListener:
+
+ def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
+ """ The following are required:
+ cb - the standard callback, which will switch into the
+ listening greenlet to indicate that the event waited upon
+ is ready
+ tb - a 'throwback'. This is typically greenlet.throw, used
+ to raise a signal into the target greenlet indicating that
+ an event was obsoleted by its underlying filehandle being
+ repurposed.
+ mark_as_closed - if any listener is obsoleted, this is called
+ (in the context of some other client greenlet) to alert
+ underlying filehandle-wrapping objects that they've been
+ closed.
+ """
+ assert (evtype is READ or evtype is WRITE)
+ self.evtype = evtype
+ self.fileno = fileno
+ self.cb = cb
+ self.tb = tb
+ self.mark_as_closed = mark_as_closed
+ self.spent = False
+ self.greenlet = greenlet.getcurrent()
+
+ def __repr__(self):
+ return "%s(%r, %r, %r, %r)" % (type(self).__name__, self.evtype, self.fileno,
+ self.cb, self.tb)
+ __str__ = __repr__
+
+ def defang(self):
+ self.cb = closed_callback
+ if self.mark_as_closed is not None:
+ self.mark_as_closed()
+ self.spent = True
+
+
+noop = FdListener(READ, 0, lambda x: None, lambda x: None, None)
+
+
+# in debug mode, track the call site that created the listener
+
+
+class DebugListener(FdListener):
+
+ def __init__(self, evtype, fileno, cb, tb, mark_as_closed):
+ self.where_called = traceback.format_stack()
+ self.greenlet = greenlet.getcurrent()
+ super().__init__(evtype, fileno, cb, tb, mark_as_closed)
+
+ def __repr__(self):
+ return "DebugListener(%r, %r, %r, %r, %r, %r)\n%sEndDebugFdListener" % (
+ self.evtype,
+ self.fileno,
+ self.cb,
+ self.tb,
+ self.mark_as_closed,
+ self.greenlet,
+ ''.join(self.where_called))
+ __str__ = __repr__
+
+
+def alarm_handler(signum, frame):
+ import inspect
+ raise RuntimeError("Blocking detector ALARMED at" + str(inspect.getframeinfo(frame)))
+
+
+class BaseHub:
+ """ Base hub class for easing the implementation of subclasses that are
+ specific to a particular underlying event architecture. """
+
+ SYSTEM_EXCEPTIONS = (KeyboardInterrupt, SystemExit)
+
+ READ = READ
+ WRITE = WRITE
+
+ def __init__(self, clock=None):
+ self.listeners = {READ: {}, WRITE: {}}
+ self.secondaries = {READ: {}, WRITE: {}}
+ self.closed = []
+
+ if clock is None:
+ clock = monotonic
+ self.clock = clock
+
+ self.greenlet = greenlet.greenlet(self.run)
+ self.stopping = False
+ self.running = False
+ self.timers = []
+ self.next_timers = []
+ self.lclass = FdListener
+ self.timers_canceled = 0
+ self.debug_exceptions = True
+ self.debug_blocking = False
+ self.debug_blocking_resolution = 1
+
+ def block_detect_pre(self):
+ # shortest alarm we can possibly raise is one second
+ tmp = signal.signal(signal.SIGALRM, alarm_handler)
+ if tmp != alarm_handler:
+ self._old_signal_handler = tmp
+
+ arm_alarm(self.debug_blocking_resolution)
+
+ def block_detect_post(self):
+ if (hasattr(self, "_old_signal_handler") and
+ self._old_signal_handler):
+ signal.signal(signal.SIGALRM, self._old_signal_handler)
+ signal.alarm(0)
+
+ def add(self, evtype, fileno, cb, tb, mark_as_closed):
+ """ Signals an intent to or write a particular file descriptor.
+
+ The *evtype* argument is either the constant READ or WRITE.
+
+ The *fileno* argument is the file number of the file of interest.
+
+ The *cb* argument is the callback which will be called when the file
+ is ready for reading/writing.
+
+ The *tb* argument is the throwback used to signal (into the greenlet)
+ that the file was closed.
+
+ The *mark_as_closed* is used in the context of the event hub to
+ prepare a Python object as being closed, pre-empting further
+ close operations from accidentally shutting down the wrong OS thread.
+ """
+ listener = self.lclass(evtype, fileno, cb, tb, mark_as_closed)
+ bucket = self.listeners[evtype]
+ if fileno in bucket:
+ if g_prevent_multiple_readers:
+ raise RuntimeError(
+ "Second simultaneous %s on fileno %s "
+ "detected. Unless you really know what you're doing, "
+ "make sure that only one greenthread can %s any "
+ "particular socket. Consider using a pools.Pool. "
+ "If you do know what you're doing and want to disable "
+ "this error, call "
+ "eventlet.debug.hub_prevent_multiple_readers(False) - MY THREAD=%s; "
+ "THAT THREAD=%s" % (
+ evtype, fileno, evtype, cb, bucket[fileno]))
+ # store off the second listener in another structure
+ self.secondaries[evtype].setdefault(fileno, []).append(listener)
+ else:
+ bucket[fileno] = listener
+ return listener
+
+ def _obsolete(self, fileno):
+ """ We've received an indication that 'fileno' has been obsoleted.
+ Any current listeners must be defanged, and notifications to
+ their greenlets queued up to send.
+ """
+ found = False
+ for evtype, bucket in self.secondaries.items():
+ if fileno in bucket:
+ for listener in bucket[fileno]:
+ found = True
+ self.closed.append(listener)
+ listener.defang()
+ del bucket[fileno]
+
+ # For the primary listeners, we actually need to call remove,
+ # which may modify the underlying OS polling objects.
+ for evtype, bucket in self.listeners.items():
+ if fileno in bucket:
+ listener = bucket[fileno]
+ found = True
+ self.closed.append(listener)
+ self.remove(listener)
+ listener.defang()
+
+ return found
+
+ def notify_close(self, fileno):
+ """ We might want to do something when a fileno is closed.
+ However, currently it suffices to obsolete listeners only
+ when we detect an old fileno being recycled, on open.
+ """
+ pass
+
+ def remove(self, listener):
+ if listener.spent:
+ # trampoline may trigger this in its finally section.
+ return
+
+ fileno = listener.fileno
+ evtype = listener.evtype
+ if listener is self.listeners[evtype][fileno]:
+ del self.listeners[evtype][fileno]
+ # migrate a secondary listener to be the primary listener
+ if fileno in self.secondaries[evtype]:
+ sec = self.secondaries[evtype][fileno]
+ if sec:
+ self.listeners[evtype][fileno] = sec.pop(0)
+ if not sec:
+ del self.secondaries[evtype][fileno]
+ else:
+ self.secondaries[evtype][fileno].remove(listener)
+ if not self.secondaries[evtype][fileno]:
+ del self.secondaries[evtype][fileno]
+
+ def mark_as_reopened(self, fileno):
+ """ If a file descriptor is returned by the OS as the result of some
+ open call (or equivalent), that signals that it might be being
+ recycled.
+
+ Catch the case where the fd was previously in use.
+ """
+ self._obsolete(fileno)
+
+ def remove_descriptor(self, fileno):
+ """ Completely remove all listeners for this fileno. For internal use
+ only."""
+ # gather any listeners we have
+ listeners = []
+ listeners.append(self.listeners[READ].get(fileno, noop))
+ listeners.append(self.listeners[WRITE].get(fileno, noop))
+ listeners.extend(self.secondaries[READ].get(fileno, ()))
+ listeners.extend(self.secondaries[WRITE].get(fileno, ()))
+ for listener in listeners:
+ try:
+ # listener.cb may want to remove(listener)
+ listener.cb(fileno)
+ except Exception:
+ self.squelch_generic_exception(sys.exc_info())
+        # Now this fileno is dead to all listeners
+ self.listeners[READ].pop(fileno, None)
+ self.listeners[WRITE].pop(fileno, None)
+ self.secondaries[READ].pop(fileno, None)
+ self.secondaries[WRITE].pop(fileno, None)
+
+ def close_one(self):
+ """ Triggered from the main run loop. If a listener's underlying FD was
+ closed somehow, throw an exception back to the trampoline, which should
+ be able to manage it appropriately.
+ """
+ listener = self.closed.pop()
+ if not listener.greenlet.dead:
+ # There's no point signalling a greenlet that's already dead.
+ listener.tb(eventlet.hubs.IOClosed(errno.ENOTCONN, "Operation on closed file"))
+
+ def ensure_greenlet(self):
+ if self.greenlet.dead:
+ # create new greenlet sharing same parent as original
+ new = greenlet.greenlet(self.run, self.greenlet.parent)
+ # need to assign as parent of old greenlet
+ # for those greenlets that are currently
+ # children of the dead hub and may subsequently
+ # exit without further switching to hub.
+ self.greenlet.parent = new
+ self.greenlet = new
+
+ def switch(self):
+ cur = greenlet.getcurrent()
+ assert cur is not self.greenlet, 'Cannot switch to MAINLOOP from MAINLOOP'
+ switch_out = getattr(cur, 'switch_out', None)
+ if switch_out is not None:
+ try:
+ switch_out()
+ except:
+ self.squelch_generic_exception(sys.exc_info())
+ self.ensure_greenlet()
+ try:
+ if self.greenlet.parent is not cur:
+ cur.parent = self.greenlet
+ except ValueError:
+ pass # gets raised if there is a greenlet parent cycle
+ return self.greenlet.switch()
+
+ def squelch_exception(self, fileno, exc_info):
+ traceback.print_exception(*exc_info)
+ sys.stderr.write("Removing descriptor: %r\n" % (fileno,))
+ sys.stderr.flush()
+ try:
+ self.remove_descriptor(fileno)
+ except Exception as e:
+ sys.stderr.write("Exception while removing descriptor! %r\n" % (e,))
+ sys.stderr.flush()
+
+ def wait(self, seconds=None):
+ raise NotImplementedError("Implement this in a subclass")
+
+ def default_sleep(self):
+ return 60.0
+
+ def sleep_until(self):
+ t = self.timers
+ if not t:
+ return None
+ return t[0][0]
+
+ def run(self, *a, **kw):
+ """Run the runloop until abort is called.
+ """
+ # accept and discard variable arguments because they will be
+ # supplied if other greenlets have run and exited before the
+ # hub's greenlet gets a chance to run
+ if self.running:
+ raise RuntimeError("Already running!")
+ try:
+ self.running = True
+ self.stopping = False
+ while not self.stopping:
+ while self.closed:
+ # We ditch all of these first.
+ self.close_one()
+ self.prepare_timers()
+ if self.debug_blocking:
+ self.block_detect_pre()
+ self.fire_timers(self.clock())
+ if self.debug_blocking:
+ self.block_detect_post()
+ self.prepare_timers()
+ wakeup_when = self.sleep_until()
+ if wakeup_when is None:
+ sleep_time = self.default_sleep()
+ else:
+ sleep_time = wakeup_when - self.clock()
+ if sleep_time > 0:
+ self.wait(sleep_time)
+ else:
+ self.wait(0)
+ else:
+ self.timers_canceled = 0
+ del self.timers[:]
+ del self.next_timers[:]
+ finally:
+ self.running = False
+ self.stopping = False
+
+ def abort(self, wait=False):
+ """Stop the runloop. If run is executing, it will exit after
+ completing the next runloop iteration.
+
+ Set *wait* to True to cause abort to switch to the hub immediately and
+ wait until it's finished processing. Waiting for the hub will only
+ work from the main greenthread; all other greenthreads will become
+ unreachable.
+ """
+ if self.running:
+ self.stopping = True
+ if wait:
+ assert self.greenlet is not greenlet.getcurrent(
+ ), "Can't abort with wait from inside the hub's greenlet."
+ # schedule an immediate timer just so the hub doesn't sleep
+ self.schedule_call_global(0, lambda: None)
+ # switch to it; when done the hub will switch back to its parent,
+ # the main greenlet
+ self.switch()
+
+ def squelch_generic_exception(self, exc_info):
+ if self.debug_exceptions:
+ traceback.print_exception(*exc_info)
+ sys.stderr.flush()
+
+ def squelch_timer_exception(self, timer, exc_info):
+ if self.debug_exceptions:
+ traceback.print_exception(*exc_info)
+ sys.stderr.flush()
+
+ def add_timer(self, timer):
+ scheduled_time = self.clock() + timer.seconds
+ self.next_timers.append((scheduled_time, timer))
+ return scheduled_time
+
+ def timer_canceled(self, timer):
+ self.timers_canceled += 1
+ len_timers = len(self.timers) + len(self.next_timers)
+ if len_timers > 1000 and len_timers / 2 <= self.timers_canceled:
+ self.timers_canceled = 0
+ self.timers = [t for t in self.timers if not t[1].called]
+ self.next_timers = [t for t in self.next_timers if not t[1].called]
+ heapq.heapify(self.timers)
+
+ def prepare_timers(self):
+ heappush = heapq.heappush
+ t = self.timers
+ for item in self.next_timers:
+ if item[1].called:
+ self.timers_canceled -= 1
+ else:
+ heappush(t, item)
+ del self.next_timers[:]
+
+ def schedule_call_local(self, seconds, cb, *args, **kw):
+ """Schedule a callable to be called after 'seconds' seconds have
+ elapsed. Cancel the timer if greenlet has exited.
+ seconds: The number of seconds to wait.
+ cb: The callable to call after the given time.
+ *args: Arguments to pass to the callable when called.
+ **kw: Keyword arguments to pass to the callable when called.
+ """
+ t = timer.LocalTimer(seconds, cb, *args, **kw)
+ self.add_timer(t)
+ return t
+
+ def schedule_call_global(self, seconds, cb, *args, **kw):
+ """Schedule a callable to be called after 'seconds' seconds have
+ elapsed. The timer will NOT be canceled if the current greenlet has
+ exited before the timer fires.
+ seconds: The number of seconds to wait.
+ cb: The callable to call after the given time.
+ *args: Arguments to pass to the callable when called.
+ **kw: Keyword arguments to pass to the callable when called.
+ """
+ t = timer.Timer(seconds, cb, *args, **kw)
+ self.add_timer(t)
+ return t
+
+ def fire_timers(self, when):
+ t = self.timers
+ heappop = heapq.heappop
+
+ while t:
+ next = t[0]
+
+ exp = next[0]
+ timer = next[1]
+
+ if when < exp:
+ break
+
+ heappop(t)
+
+ try:
+ if timer.called:
+ self.timers_canceled -= 1
+ else:
+ timer()
+ except self.SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_timer_exception(timer, sys.exc_info())
+
+ # for debugging:
+
+ def get_readers(self):
+ return self.listeners[READ].values()
+
+ def get_writers(self):
+ return self.listeners[WRITE].values()
+
+ def get_timers_count(hub):
+ return len(hub.timers) + len(hub.next_timers)
+
+ def set_debug_listeners(self, value):
+ if value:
+ self.lclass = DebugListener
+ else:
+ self.lclass = FdListener
+
+ def set_timer_exceptions(self, value):
+ self.debug_exceptions = value
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/kqueue.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/kqueue.py
new file mode 100644
index 0000000..9502576
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/kqueue.py
@@ -0,0 +1,110 @@
+import os
+import sys
+from eventlet import patcher, support
+from eventlet.hubs import hub
+select = patcher.original('select')
+time = patcher.original('time')
+
+
+def is_available():
+ return hasattr(select, 'kqueue')
+
+
+class Hub(hub.BaseHub):
+ MAX_EVENTS = 100
+
+ def __init__(self, clock=None):
+ self.FILTERS = {
+ hub.READ: select.KQ_FILTER_READ,
+ hub.WRITE: select.KQ_FILTER_WRITE,
+ }
+ super().__init__(clock)
+ self._events = {}
+ self._init_kqueue()
+
+ def _init_kqueue(self):
+ self.kqueue = select.kqueue()
+ self._pid = os.getpid()
+
+ def _reinit_kqueue(self):
+ self.kqueue.close()
+ self._init_kqueue()
+ events = [e for i in self._events.values()
+ for e in i.values()]
+ self.kqueue.control(events, 0, 0)
+
+ def _control(self, events, max_events, timeout):
+ try:
+ return self.kqueue.control(events, max_events, timeout)
+ except OSError:
+ # have we forked?
+ if os.getpid() != self._pid:
+ self._reinit_kqueue()
+ return self.kqueue.control(events, max_events, timeout)
+ raise
+
+ def add(self, evtype, fileno, cb, tb, mac):
+ listener = super().add(evtype, fileno, cb, tb, mac)
+ events = self._events.setdefault(fileno, {})
+ if evtype not in events:
+ try:
+ event = select.kevent(fileno, self.FILTERS.get(evtype), select.KQ_EV_ADD)
+ self._control([event], 0, 0)
+ events[evtype] = event
+ except ValueError:
+ super().remove(listener)
+ raise
+ return listener
+
+ def _delete_events(self, events):
+ del_events = [
+ select.kevent(e.ident, e.filter, select.KQ_EV_DELETE)
+ for e in events
+ ]
+ self._control(del_events, 0, 0)
+
+ def remove(self, listener):
+ super().remove(listener)
+ evtype = listener.evtype
+ fileno = listener.fileno
+ if not self.listeners[evtype].get(fileno):
+ event = self._events[fileno].pop(evtype, None)
+ if event is None:
+ return
+ try:
+ self._delete_events((event,))
+ except OSError:
+ pass
+
+ def remove_descriptor(self, fileno):
+ super().remove_descriptor(fileno)
+ try:
+ events = self._events.pop(fileno).values()
+ self._delete_events(events)
+ except KeyError:
+ pass
+ except OSError:
+ pass
+
+ def wait(self, seconds=None):
+ readers = self.listeners[self.READ]
+ writers = self.listeners[self.WRITE]
+
+ if not readers and not writers:
+ if seconds:
+ time.sleep(seconds)
+ return
+ result = self._control([], self.MAX_EVENTS, seconds)
+ SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
+ for event in result:
+ fileno = event.ident
+ evfilt = event.filter
+ try:
+ if evfilt == select.KQ_FILTER_READ:
+ readers.get(fileno, hub.noop).cb(fileno)
+ if evfilt == select.KQ_FILTER_WRITE:
+ writers.get(fileno, hub.noop).cb(fileno)
+ except SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/poll.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/poll.py
new file mode 100644
index 0000000..0984214
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/poll.py
@@ -0,0 +1,118 @@
+import errno
+import sys
+
+from eventlet import patcher, support
+from eventlet.hubs import hub
+select = patcher.original('select')
+time = patcher.original('time')
+
+
+def is_available():
+ return hasattr(select, 'poll')
+
+
+class Hub(hub.BaseHub):
+ def __init__(self, clock=None):
+ super().__init__(clock)
+ self.EXC_MASK = select.POLLERR | select.POLLHUP
+ self.READ_MASK = select.POLLIN | select.POLLPRI
+ self.WRITE_MASK = select.POLLOUT
+ self.poll = select.poll()
+
+ def add(self, evtype, fileno, cb, tb, mac):
+ listener = super().add(evtype, fileno, cb, tb, mac)
+ self.register(fileno, new=True)
+ return listener
+
+ def remove(self, listener):
+ super().remove(listener)
+ self.register(listener.fileno)
+
+ def register(self, fileno, new=False):
+ mask = 0
+ if self.listeners[self.READ].get(fileno):
+ mask |= self.READ_MASK | self.EXC_MASK
+ if self.listeners[self.WRITE].get(fileno):
+ mask |= self.WRITE_MASK | self.EXC_MASK
+ try:
+ if mask:
+ if new:
+ self.poll.register(fileno, mask)
+ else:
+ try:
+ self.poll.modify(fileno, mask)
+ except OSError:
+ self.poll.register(fileno, mask)
+ else:
+ try:
+ self.poll.unregister(fileno)
+ except (KeyError, OSError):
+ # raised if we try to remove a fileno that was
+ # already removed/invalid
+ pass
+ except ValueError:
+ # fileno is bad, issue 74
+ self.remove_descriptor(fileno)
+ raise
+
+ def remove_descriptor(self, fileno):
+ super().remove_descriptor(fileno)
+ try:
+ self.poll.unregister(fileno)
+ except (KeyError, ValueError, OSError):
+ # raised if we try to remove a fileno that was
+ # already removed/invalid
+ pass
+
+ def do_poll(self, seconds):
+ # poll.poll expects integral milliseconds
+ return self.poll.poll(int(seconds * 1000.0))
+
+ def wait(self, seconds=None):
+ readers = self.listeners[self.READ]
+ writers = self.listeners[self.WRITE]
+
+ if not readers and not writers:
+ if seconds:
+ time.sleep(seconds)
+ return
+ try:
+ presult = self.do_poll(seconds)
+ except OSError as e:
+ if support.get_errno(e) == errno.EINTR:
+ return
+ raise
+ SYSTEM_EXCEPTIONS = self.SYSTEM_EXCEPTIONS
+
+ if self.debug_blocking:
+ self.block_detect_pre()
+
+ # Accumulate the listeners to call back to prior to
+ # triggering any of them. This is to keep the set
+ # of callbacks in sync with the events we've just
+ # polled for. It prevents one handler from invalidating
+ # another.
+ callbacks = set()
+ noop = hub.noop # shave getattr
+ for fileno, event in presult:
+ if event & self.READ_MASK:
+ callbacks.add((readers.get(fileno, noop), fileno))
+ if event & self.WRITE_MASK:
+ callbacks.add((writers.get(fileno, noop), fileno))
+ if event & select.POLLNVAL:
+ self.remove_descriptor(fileno)
+ continue
+ if event & self.EXC_MASK:
+ callbacks.add((readers.get(fileno, noop), fileno))
+ callbacks.add((writers.get(fileno, noop), fileno))
+
+ for listener, fileno in callbacks:
+ try:
+ listener.cb(fileno)
+ except SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
+
+ if self.debug_blocking:
+ self.block_detect_post()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/pyevent.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/pyevent.py
new file mode 100644
index 0000000..0802243
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/pyevent.py
@@ -0,0 +1,4 @@
+raise ImportError(
+ "Eventlet pyevent hub was removed because it was not maintained."
+ " Try version 0.22.1 or older. Sorry for the inconvenience."
+)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/selects.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/selects.py
new file mode 100644
index 0000000..b6cf129
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/selects.py
@@ -0,0 +1,63 @@
+import errno
+import sys
+from eventlet import patcher, support
+from eventlet.hubs import hub
+select = patcher.original('select')
+time = patcher.original('time')
+
+try:
+ BAD_SOCK = {errno.EBADF, errno.WSAENOTSOCK}
+except AttributeError:
+ BAD_SOCK = {errno.EBADF}
+
+
+def is_available():
+ return hasattr(select, 'select')
+
+
+class Hub(hub.BaseHub):
+ def _remove_bad_fds(self):
+ """ Iterate through fds, removing the ones that are bad per the
+ operating system.
+ """
+ all_fds = list(self.listeners[self.READ]) + list(self.listeners[self.WRITE])
+ for fd in all_fds:
+ try:
+ select.select([fd], [], [], 0)
+ except OSError as e:
+ if support.get_errno(e) in BAD_SOCK:
+ self.remove_descriptor(fd)
+
+ def wait(self, seconds=None):
+ readers = self.listeners[self.READ]
+ writers = self.listeners[self.WRITE]
+ if not readers and not writers:
+ if seconds:
+ time.sleep(seconds)
+ return
+ reader_fds = list(readers)
+ writer_fds = list(writers)
+ all_fds = reader_fds + writer_fds
+ try:
+ r, w, er = select.select(reader_fds, writer_fds, all_fds, seconds)
+ except OSError as e:
+ if support.get_errno(e) == errno.EINTR:
+ return
+ elif support.get_errno(e) in BAD_SOCK:
+ self._remove_bad_fds()
+ return
+ else:
+ raise
+
+ for fileno in er:
+ readers.get(fileno, hub.noop).cb(fileno)
+ writers.get(fileno, hub.noop).cb(fileno)
+
+ for listeners, events in ((readers, r), (writers, w)):
+ for fileno in events:
+ try:
+ listeners.get(fileno, hub.noop).cb(fileno)
+ except self.SYSTEM_EXCEPTIONS:
+ raise
+ except:
+ self.squelch_exception(fileno, sys.exc_info())
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/hubs/timer.py b/tapdown/lib/python3.11/site-packages/eventlet/hubs/timer.py
new file mode 100644
index 0000000..2e3fd95
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/hubs/timer.py
@@ -0,0 +1,106 @@
+import traceback
+
+import eventlet.hubs
+from eventlet.support import greenlets as greenlet
+import io
+
+""" If true, captures a stack trace for each timer when constructed. This is
+useful for debugging leaking timers, to find out where the timer was set up. """
+_g_debug = False
+
+
+class Timer:
+ def __init__(self, seconds, cb, *args, **kw):
+ """Create a timer.
+        seconds: The minimum number of seconds to wait before calling cb
+ cb: The callback to call when the timer has expired
+ *args: The arguments to pass to cb
+ **kw: The keyword arguments to pass to cb
+
+ This timer will not be run unless it is scheduled in a runloop by
+ calling timer.schedule() or runloop.add_timer(timer).
+ """
+ self.seconds = seconds
+ self.tpl = cb, args, kw
+ self.called = False
+ if _g_debug:
+ self.traceback = io.StringIO()
+ traceback.print_stack(file=self.traceback)
+
+ @property
+ def pending(self):
+ return not self.called
+
+ def __repr__(self):
+ secs = getattr(self, 'seconds', None)
+ cb, args, kw = getattr(self, 'tpl', (None, None, None))
+ retval = "Timer(%s, %s, *%s, **%s)" % (
+ secs, cb, args, kw)
+ if _g_debug and hasattr(self, 'traceback'):
+ retval += '\n' + self.traceback.getvalue()
+ return retval
+
+ def copy(self):
+ cb, args, kw = self.tpl
+ return self.__class__(self.seconds, cb, *args, **kw)
+
+ def schedule(self):
+ """Schedule this timer to run in the current runloop.
+ """
+ self.called = False
+ self.scheduled_time = eventlet.hubs.get_hub().add_timer(self)
+ return self
+
+ def __call__(self, *args):
+ if not self.called:
+ self.called = True
+ cb, args, kw = self.tpl
+ try:
+ cb(*args, **kw)
+ finally:
+ try:
+ del self.tpl
+ except AttributeError:
+ pass
+
+ def cancel(self):
+ """Prevent this timer from being called. If the timer has already
+ been called or canceled, has no effect.
+ """
+ if not self.called:
+ self.called = True
+ eventlet.hubs.get_hub().timer_canceled(self)
+ try:
+ del self.tpl
+ except AttributeError:
+ pass
+
+ # No default ordering in 3.x. heapq uses <
+ # FIXME should full set be added?
+ def __lt__(self, other):
+ return id(self) < id(other)
+
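+# Editor's note -- an illustrative sketch, not part of eventlet: timers are
+# normally created through the hub's schedule_call_* helpers rather than
+# constructed directly.
+#
+#     import eventlet.hubs
+#
+#     t = eventlet.hubs.get_hub().schedule_call_global(2.0, print, "fired")
+#     t.cancel()    # prevents the callback from ever running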
+
+class LocalTimer(Timer):
+
+ def __init__(self, *args, **kwargs):
+ self.greenlet = greenlet.getcurrent()
+ Timer.__init__(self, *args, **kwargs)
+
+ @property
+ def pending(self):
+ if self.greenlet is None or self.greenlet.dead:
+ return False
+ return not self.called
+
+ def __call__(self, *args):
+ if not self.called:
+ self.called = True
+ if self.greenlet is not None and self.greenlet.dead:
+ return
+ cb, args, kw = self.tpl
+ cb(*args, **kw)
+
+ def cancel(self):
+ self.greenlet = None
+ Timer.cancel(self)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/lock.py b/tapdown/lib/python3.11/site-packages/eventlet/lock.py
new file mode 100644
index 0000000..4b21e0b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/lock.py
@@ -0,0 +1,37 @@
+from eventlet import hubs
+from eventlet.semaphore import Semaphore
+
+
+class Lock(Semaphore):
+
+ """A lock.
+ This is API-compatible with :class:`threading.Lock`.
+
+ It is a context manager, and thus can be used in a with block::
+
+ lock = Lock()
+ with lock:
+ do_some_stuff()
+ """
+
+ def release(self, blocking=True):
+ """Modify behaviour vs :class:`Semaphore` to raise a RuntimeError
+ exception if the value is greater than zero. This corrects behaviour
+ to realign with :class:`threading.Lock`.
+ """
+ if self.counter > 0:
+ raise RuntimeError("release unlocked lock")
+
+ # Consciously *do not* call super().release(), but instead inline
+ # Semaphore.release() here. We've seen issues with logging._lock
+ # deadlocking because garbage collection happened to run mid-release
+ # and eliminating the extra stack frame should help prevent that.
+ # See https://github.com/eventlet/eventlet/issues/742
+ self.counter += 1
+ if self._waiters:
+ hubs.get_hub().schedule_call_global(0, self._do_acquire)
+ return True
+
+ def _at_fork_reinit(self):
+ self.counter = 1
+ self._waiters.clear()
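+
+# Illustrative sketch (not part of eventlet): unlike Semaphore.release(),
+# releasing an unheld Lock raises, matching threading.Lock:
+#
+#     lock = Lock()
+#     with lock:          # acquire/release via the context manager
+#         pass
+#     lock.release()      # raises RuntimeError("release unlocked lock")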
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/patcher.py b/tapdown/lib/python3.11/site-packages/eventlet/patcher.py
new file mode 100644
index 0000000..12d8069
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/patcher.py
@@ -0,0 +1,773 @@
+from __future__ import annotations
+
+try:
+ import _imp as imp
+except ImportError:
+ import imp
+import importlib
+import sys
+
+try:
+ # Only for this purpose, it's irrelevant if `os` was already patched.
+ # https://github.com/eventlet/eventlet/pull/661
+ from os import register_at_fork
+except ImportError:
+ register_at_fork = None
+
+import eventlet
+
+
+__all__ = ["inject", "import_patched", "monkey_patch", "is_monkey_patched"]
+
+__exclude = {"__builtins__", "__file__", "__name__"}
+
+
+class SysModulesSaver:
+ """Class that captures some subset of the current state of
+ sys.modules. Pass in an iterator of module names to the
+ constructor."""
+
+ def __init__(self, module_names=()):
+ self._saved = {}
+ imp.acquire_lock()
+ self.save(*module_names)
+
+ def save(self, *module_names):
+ """Saves the named modules to the object."""
+ for modname in module_names:
+ self._saved[modname] = sys.modules.get(modname, None)
+
+ def restore(self):
+ """Restores the modules that the saver knows about into
+ sys.modules.
+ """
+ try:
+ for modname, mod in self._saved.items():
+ if mod is not None:
+ sys.modules[modname] = mod
+ else:
+ try:
+ del sys.modules[modname]
+ except KeyError:
+ pass
+ finally:
+ imp.release_lock()
+
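+# Illustrative save/shadow/restore pattern (sketch; green_socket is a
+# hypothetical stand-in module):
+#
+#     saver = SysModulesSaver(("socket",))
+#     sys.modules["socket"] = green_socket   # imports now see the shadow
+#     try:
+#         ...
+#     finally:
+#         saver.restore()                    # originals back in sys.modules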
+
+def inject(module_name, new_globals, *additional_modules):
+ """Base method for "injecting" greened modules into an imported module. It
+ imports the module specified in *module_name*, arranging things so
+ that the already-imported modules in *additional_modules* are used when
+ *module_name* makes its imports.
+
+ **Note:** This function does not create or change any sys.modules item, so
+ if your greened module uses code like 'sys.modules["your_module_name"]', you
+ need to update sys.modules yourself.
+
+ *new_globals* is either None or a globals dictionary that gets populated
+ with the contents of the *module_name* module. This is useful when creating
+ a "green" version of some other module.
+
+ *additional_modules* should be a collection of two-element tuples, of the
+ form (<name>, <module>). If it's not specified, a default selection of
+ name/module pairs is used, which should cover all use cases but may be
+ slower because there are inevitably redundant or unnecessary imports.
+ """
+ patched_name = "__patched_module_" + module_name
+ if patched_name in sys.modules:
+ # returning already-patched module so as not to destroy existing
+ # references to patched modules
+ return sys.modules[patched_name]
+
+ if not additional_modules:
+ # supply some defaults
+ additional_modules = (
+ _green_os_modules()
+ + _green_select_modules()
+ + _green_socket_modules()
+ + _green_thread_modules()
+ + _green_time_modules()
+ )
+ # _green_MySQLdb()) # enable this after a short baking-in period
+
+ # after this we are going to modify sys.modules, so capture the
+ # state of all the modules we're going to touch, and take the import lock
+ saver = SysModulesSaver([name for name, m in additional_modules])
+ saver.save(module_name)
+
+ # Cover the target modules so that when you import the module it
+ # sees only the patched versions
+ for name, mod in additional_modules:
+ sys.modules[name] = mod
+
+ # Remove the old module from sys.modules and reimport it while
+ # the specified modules are in place
+ sys.modules.pop(module_name, None)
+ # Also remove submodules and reimport them. Copy the keys to a list
+ # because the pop operations change the contents of sys.modules
+ # within the loop
+ for imported_module_name in list(sys.modules.keys()):
+ if imported_module_name.startswith(module_name + "."):
+ sys.modules.pop(imported_module_name, None)
+ try:
+ module = __import__(module_name, {}, {}, module_name.split(".")[:-1])
+
+ if new_globals is not None:
+ # Update the given globals dictionary with everything from this new module
+ for name in dir(module):
+ if name not in __exclude:
+ new_globals[name] = getattr(module, name)
+
+ # Keep a reference to the new module to prevent it from dying
+ sys.modules[patched_name] = module
+ finally:
+ saver.restore() # Put the original modules back
+
+ return module
+
+
+def import_patched(module_name, *additional_modules, **kw_additional_modules):
+ """Imports a module in a way that ensures that the module uses "green"
+ versions of the standard library modules, so that everything works
+ nonblockingly.
+
+ The only required argument is the name of the module to be imported.
+ """
+ return inject(
+ module_name, None, *additional_modules + tuple(kw_additional_modules.items())
+ )
+
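+# For example (sketch): green a third-party library that imports socket at
+# module level, without patching the rest of the process:
+#
+#     httplib2 = import_patched("httplib2")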
+
+def patch_function(func, *additional_modules):
+ """Decorator that returns a version of the function that patches
+ some modules for the duration of the function call. This is
+ deeply gross and should only be used for functions that import
+ network libraries within their function bodies that there is no
+ way of getting around."""
+ if not additional_modules:
+ # supply some defaults
+ additional_modules = (
+ _green_os_modules()
+ + _green_select_modules()
+ + _green_socket_modules()
+ + _green_thread_modules()
+ + _green_time_modules()
+ )
+
+ def patched(*args, **kw):
+ saver = SysModulesSaver()
+ for name, mod in additional_modules:
+ saver.save(name)
+ sys.modules[name] = mod
+ try:
+ return func(*args, **kw)
+ finally:
+ saver.restore()
+
+ return patched
+
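+# Sketch: wrap a function whose body imports a network library, so the import
+# resolves to green modules only for the duration of the call:
+#
+#     @patch_function
+#     def fetch(url):
+#         import urllib.request   # sees the green socket while patched
+#         return urllib.request.urlopen(url).read()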
+
+def _original_patch_function(func, *module_names):
+ """Kind of the contrapositive of patch_function: decorates a
+ function such that when it's called, sys.modules is populated only
+ with the unpatched versions of the specified modules. Unlike
+ patch_function, only the names of the modules need be supplied,
+ and there are no defaults. This is a gross hack; tell your kids not
+ to import inside function bodies!"""
+
+ def patched(*args, **kw):
+ saver = SysModulesSaver(module_names)
+ for name in module_names:
+ sys.modules[name] = original(name)
+ try:
+ return func(*args, **kw)
+ finally:
+ saver.restore()
+
+ return patched
+
+
+def original(modname):
+ """This returns an unpatched version of a module; this is useful for
+ Eventlet itself (e.g. tpool)."""
+ # note that it's not necessary to temporarily install unpatched
+ # versions of all patchable modules during the import of the
+ # module; this is because none of them import each other, except
+ # for threading which imports thread
+ original_name = "__original_module_" + modname
+ if original_name in sys.modules:
+ return sys.modules.get(original_name)
+
+ # re-import the "pure" module and store it in the global _originals
+ # dict; be sure to restore whatever module had that name already
+ saver = SysModulesSaver((modname,))
+ sys.modules.pop(modname, None)
+ # some rudimentary dependency checking -- fortunately the modules
+ # we're working on don't have many dependencies so we can just do
+ # some special-casing here
+ deps = {"threading": "_thread", "queue": "threading"}
+ if modname in deps:
+ dependency = deps[modname]
+ saver.save(dependency)
+ sys.modules[dependency] = original(dependency)
+ try:
+ real_mod = __import__(modname, {}, {}, modname.split(".")[:-1])
+ if modname in ("Queue", "queue") and not hasattr(real_mod, "_threading"):
+ # tricky hack: Queue's constructor in <2.7 imports
+ # threading on every instantiation; therefore we wrap
+ # it so that it always gets the original threading
+ real_mod.Queue.__init__ = _original_patch_function(
+ real_mod.Queue.__init__, "threading"
+ )
+ # save a reference to the unpatched module so it doesn't get lost
+ sys.modules[original_name] = real_mod
+ finally:
+ saver.restore()
+
+ return sys.modules[original_name]
+
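+# For example, original("time").sleep blocks the native thread even after
+# monkey_patch(); eventlet.tpool depends on such unpatched modules.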
+
+already_patched = {}
+
+
+def _unmonkey_patch_asyncio(unmonkeypatch_refs_to_this_module):
+ """
+ When using asyncio hub, we want the asyncio modules to use the original,
+ blocking APIs. So un-monkeypatch references to the given module name, e.g.
+ "select".
+ """
+ to_unpatch = unmonkeypatch_refs_to_this_module
+ original_module = original(to_unpatch)
+
+ # Lower down for asyncio modules, we will switch their imported modules to
+ # original ones instead of the green ones they probably have. This won't
+ # fix "from socket import whatev" but asyncio doesn't seem to do that in
+ # ways we care about for Python 3.8 to 3.13, with the one exception of
+ # get_ident() in some older versions.
+ if to_unpatch == "_thread":
+ import asyncio.base_futures
+
+ if hasattr(asyncio.base_futures, "get_ident"):
+ asyncio.base_futures.get_ident = original_module.get_ident
+
+ # Asyncio uses these for its blocking thread pool:
+ if to_unpatch in ("threading", "queue"):
+ try:
+ import concurrent.futures.thread
+ except RuntimeError:
+ # This happens in weird edge cases where asyncio hub is started at
+ # shutdown. Not much we can do if this happens.
+ pass
+ else:
+ if to_unpatch == "threading":
+ concurrent.futures.thread.threading = original_module
+ if to_unpatch == "queue":
+ concurrent.futures.thread.queue = original_module
+
+ # Patch asyncio modules:
+ for module_name in [
+ "asyncio.base_events",
+ "asyncio.base_futures",
+ "asyncio.base_subprocess",
+ "asyncio.base_tasks",
+ "asyncio.constants",
+ "asyncio.coroutines",
+ "asyncio.events",
+ "asyncio.exceptions",
+ "asyncio.format_helpers",
+ "asyncio.futures",
+ "asyncio",
+ "asyncio.locks",
+ "asyncio.log",
+ "asyncio.mixins",
+ "asyncio.protocols",
+ "asyncio.queues",
+ "asyncio.runners",
+ "asyncio.selector_events",
+ "asyncio.sslproto",
+ "asyncio.staggered",
+ "asyncio.streams",
+ "asyncio.subprocess",
+ "asyncio.taskgroups",
+ "asyncio.tasks",
+ "asyncio.threads",
+ "asyncio.timeouts",
+ "asyncio.transports",
+ "asyncio.trsock",
+ "asyncio.unix_events",
+ ]:
+ try:
+ module = importlib.import_module(module_name)
+ except ImportError:
+ # The list is from Python 3.13, so some modules may not be present
+ # in older versions of Python:
+ continue
+ if getattr(module, to_unpatch, None) is sys.modules[to_unpatch]:
+ setattr(module, to_unpatch, original_module)
+
+
+def _unmonkey_patch_asyncio_all():
+ """
+ Unmonkey-patch all referred-to modules in asyncio.
+ """
+ for module_name, _ in sum([
+ _green_os_modules(),
+ _green_select_modules(),
+ _green_socket_modules(),
+ _green_thread_modules(),
+ _green_time_modules(),
+ _green_builtins(),
+ _green_subprocess_modules(),
+ ], []):
+ _unmonkey_patch_asyncio(module_name)
+ original("selectors").select = original("select")
+
+
+def monkey_patch(**on):
+ """Globally patches certain system modules to be greenthread-friendly.
+
+ The keyword arguments afford some control over which modules are patched.
+ If no keyword arguments are supplied, all possible modules are patched.
+ If keywords are set to True, only the specified modules are patched. E.g.,
+ ``monkey_patch(socket=True, select=True)`` patches only the select and
+ socket modules. Most arguments patch the single module of the same name
+ (os, time, select). The exceptions are socket, which also patches the ssl
+ module if present; and thread, which patches _thread, threading, and queue.
+
+ It's safe to call monkey_patch multiple times.
+ """
+
+ # Workaround for import cycle observed as following in monotonic
+ # RuntimeError: no suitable implementation for this system
+ # see https://github.com/eventlet/eventlet/issues/401#issuecomment-325015989
+ #
+ # Make sure the hub is completely imported before any
+ # monkey-patching, or we risk recursion if the process of importing
+ # the hub calls into monkey-patched modules.
+ eventlet.hubs.get_hub()
+
+ accepted_args = {
+ "os",
+ "select",
+ "socket",
+ "thread",
+ "time",
+ "psycopg",
+ "MySQLdb",
+ "builtins",
+ "subprocess",
+ }
+ # To make sure only one of them is passed here
+ assert not ("__builtin__" in on and "builtins" in on)
+ try:
+ b = on.pop("__builtin__")
+ except KeyError:
+ pass
+ else:
+ on["builtins"] = b
+
+ default_on = on.pop("all", None)
+
+ for k in on.keys():
+ if k not in accepted_args:
+ raise TypeError(
+ "monkey_patch() got an unexpected " "keyword argument %r" % k
+ )
+ if default_on is None:
+ default_on = True not in on.values()
+ for modname in accepted_args:
+ if modname == "MySQLdb":
+ # MySQLdb is only on when explicitly patched for the moment
+ on.setdefault(modname, False)
+ if modname == "builtins":
+ on.setdefault(modname, False)
+ on.setdefault(modname, default_on)
+
+ import threading
+
+ original_rlock_type = type(threading.RLock())
+
+ modules_to_patch = []
+ for name, modules_function in [
+ ("os", _green_os_modules),
+ ("select", _green_select_modules),
+ ("socket", _green_socket_modules),
+ ("thread", _green_thread_modules),
+ ("time", _green_time_modules),
+ ("MySQLdb", _green_MySQLdb),
+ ("builtins", _green_builtins),
+ ("subprocess", _green_subprocess_modules),
+ ]:
+ if on[name] and not already_patched.get(name):
+ modules_to_patch += modules_function()
+ already_patched[name] = True
+
+ if on["psycopg"] and not already_patched.get("psycopg"):
+ try:
+ from eventlet.support import psycopg2_patcher
+
+ psycopg2_patcher.make_psycopg_green()
+ already_patched["psycopg"] = True
+ except ImportError:
+ # note that if we get an importerror from trying to
+ # monkeypatch psycopg, we will continually retry it
+ # whenever monkey_patch is called; this should not be a
+ # performance problem but it allows is_monkey_patched to
+ # tell us whether or not we succeeded
+ pass
+
+ _threading = original("threading")
+ imp.acquire_lock()
+ try:
+ for name, mod in modules_to_patch:
+ orig_mod = sys.modules.get(name)
+ if orig_mod is None:
+ orig_mod = __import__(name)
+ for attr_name in mod.__patched__:
+ patched_attr = getattr(mod, attr_name, None)
+ if patched_attr is not None:
+ setattr(orig_mod, attr_name, patched_attr)
+ deleted = getattr(mod, "__deleted__", [])
+ for attr_name in deleted:
+ if hasattr(orig_mod, attr_name):
+ delattr(orig_mod, attr_name)
+
+ if name == "threading" and register_at_fork:
+ # The whole post-fork processing in stdlib threading.py,
+ # implemented in threading._after_fork(), is based on the
+ # assumption that threads don't survive fork(). However, green
+ # threads do survive fork, and that's what threading.py is
+ # tracking when using eventlet, so there's no need to do any
+ # post-fork cleanup in this case.
+ #
+ # So, we wipe out _after_fork()'s code so it does nothing. We
+ # can't just override it because it has already been registered
+ # with os.register_after_fork().
+ def noop():
+ pass
+ orig_mod._after_fork.__code__ = noop.__code__
+ inject("threading", {})._after_fork.__code__ = noop.__code__
+ finally:
+ imp.release_lock()
+
+ import importlib._bootstrap
+
+ thread = original("_thread")
+ # importlib must use real thread locks, not eventlet.Semaphore
+ importlib._bootstrap._thread = thread
+
+ # Issue #185: Since Python 3.3, threading.RLock is implemented in C and
+ # so call a C function to get the thread identifier, instead of calling
+ # threading.get_ident(). Force the Python implementation of RLock which
+ # calls threading.get_ident() and so is compatible with eventlet.
+ import threading
+
+ threading.RLock = threading._PyRLock
+
+ # Issue #508: Since Python 3.7 queue.SimpleQueue is implemented in C,
+ # causing a deadlock. Replace the C implementation with the Python one.
+ import queue
+
+ queue.SimpleQueue = queue._PySimpleQueue
+
+ # Green existing locks _after_ patching modules, since patching modules
+ # might involve imports that create new locks:
+ for name, _ in modules_to_patch:
+ if name == "threading":
+ _green_existing_locks(original_rlock_type)
+
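+# Canonical call pattern (sketch): patch before importing anything else, so
+# no module captures references to the unpatched stdlib:
+#
+#     import eventlet
+#     eventlet.monkey_patch()        # or e.g. monkey_patch(socket=True)
+#     import urllib.request          # now uses green sockets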
+
+def is_monkey_patched(module):
+ """Returns True if the given module is monkeypatched currently, False if
+ not. *module* can be either the module itself or its name.
+
+ Based entirely off the name of the module, so if you import a
+ module some other way than with the import keyword (including
+ import_patched), this might not be correct about that particular
+ module."""
+ return (
+ module in already_patched
+ or getattr(module, "__name__", None) in already_patched
+ )
+
+
+def _green_existing_locks(rlock_type):
+ """Make locks created before monkey-patching safe.
+
+ RLocks rely on an underlying Lock, and if an unpatched Lock blocks, it
+ blocks the native thread. We need to replace these with green Locks.
+
+ This was originally noticed in the stdlib logging module."""
+ import gc
+ import os
+ import eventlet.green.thread
+
+ # We're monkey-patching so there can't be any greenlets yet, ergo our thread
+ # ID is the only valid owner possible.
+ tid = eventlet.green.thread.get_ident()
+
+ # Now, upgrade all instances:
+ def upgrade(old_lock):
+ return _convert_py3_rlock(old_lock, tid)
+
+ _upgrade_instances(sys.modules, rlock_type, upgrade)
+
+ # Report if there are RLocks we couldn't upgrade. For cases where we're
+ # using coverage.py in the parent process, and for tests more generally,
+ # this is difficult to ensure, so just don't complain in that case.
+ if "PYTEST_CURRENT_TEST" in os.environ:
+ return
+ # On older Pythons (< 3.10), gc.get_objects() won't return any RLock
+ # instances, so this warning won't get logged on older Pythons. However,
+ # it's a useful warning, so we try to do it anyway for the benefit of those
+ # users on 3.10 or later.
+ gc.collect()
+ remaining_rlocks = 0
+ for o in gc.get_objects():
+ try:
+ if isinstance(o, rlock_type):
+ remaining_rlocks += 1
+ except ReferenceError as exc:
+ import logging
+ import traceback
+
+ logger = logging.Logger("eventlet")
+ logger.error(
+ "Not increase rlock count, an exception of type "
+ + type(exc).__name__ + "occurred with the message '"
+ + str(exc) + "'. Traceback details: "
+ + traceback.format_exc()
+ )
+ if remaining_rlocks:
+ try:
+ import _frozen_importlib
+ except ImportError:
+ pass
+ else:
+ for o in gc.get_objects():
+ # This can happen in Python 3.12, at least, if monkey patch
+ # happened as side-effect of importing a module.
+ try:
+ if not isinstance(o, rlock_type):
+ continue
+ except ReferenceError as exc:
+ import logging
+ import traceback
+
+ logger = logging.Logger("eventlet")
+ logger.error(
+ "No decrease rlock count, an exception of type "
+ + type(exc).__name__ + "occurred with the message '"
+ + str(exc) + "'. Traceback details: "
+ + traceback.format_exc()
+ )
+ continue # if ReferenceError, skip this object and continue with the next one.
+ if _frozen_importlib._ModuleLock in map(type, gc.get_referrers(o)):
+ remaining_rlocks -= 1
+ del o
+
+ if remaining_rlocks:
+ import logging
+
+ logger = logging.Logger("eventlet")
+ logger.error(
+ "{} RLock(s) were not greened,".format(remaining_rlocks)
+ + " to fix this error make sure you run eventlet.monkey_patch() "
+ + "before importing any other modules."
+ )
+
+
+def _upgrade_instances(container, klass, upgrade, visited=None, old_to_new=None):
+ """
+ Starting with a Python object, find all instances of ``klass``, following
+ references in ``dict`` values, ``list`` items, and attributes.
+
+ Once an object is found, replace all instances with
+ ``upgrade(found_object)``, again limited to the criteria above.
+
+ In practice this is used only for ``threading.RLock``, so we can assume
+ instances are hashable.
+ """
+ if visited is None:
+ visited = {} # map id(obj) to obj
+ if old_to_new is None:
+ old_to_new = {} # map old klass instance to upgrade(old)
+
+ # Handle circular references:
+ visited[id(container)] = container
+
+ def upgrade_or_traverse(obj):
+ if id(obj) in visited:
+ return None
+ if isinstance(obj, klass):
+ if obj in old_to_new:
+ return old_to_new[obj]
+ else:
+ new = upgrade(obj)
+ old_to_new[obj] = new
+ return new
+ else:
+ _upgrade_instances(obj, klass, upgrade, visited, old_to_new)
+ return None
+
+ if isinstance(container, dict):
+ for k, v in list(container.items()):
+ new = upgrade_or_traverse(v)
+ if new is not None:
+ container[k] = new
+ if isinstance(container, list):
+ for i, v in enumerate(container):
+ new = upgrade_or_traverse(v)
+ if new is not None:
+ container[i] = new
+ try:
+ container_vars = vars(container)
+ except TypeError:
+ pass
+ else:
+ # If we get here, we're operating on an object that could
+ # be doing strange things. If anything bad happens, error and
+ # warn the eventlet user to monkey_patch earlier.
+ try:
+ for k, v in list(container_vars.items()):
+ new = upgrade_or_traverse(v)
+ if new is not None:
+ setattr(container, k, new)
+ except:
+ import logging
+
+ logger = logging.Logger("eventlet")
+ logger.exception(
+ "An exception was thrown while monkey_patching for eventlet. "
+ "to fix this error make sure you run eventlet.monkey_patch() "
+ "before importing any other modules.",
+ exc_info=True,
+ )
+
+
+def _convert_py3_rlock(old, tid):
+ """
+ Convert a normal RLock to one implemented in Python.
+
+ This is necessary to make RLocks work with eventlet, but also introduces
+ bugs, e.g. https://bugs.python.org/issue13697. So more of a downgrade,
+ really.
+ """
+ import threading
+ from eventlet.green.thread import allocate_lock
+
+ new = threading._PyRLock()
+ if not hasattr(new, "_block") or not hasattr(new, "_owner"):
+ # These will only fail if Python changes its internal implementation of
+ # _PyRLock:
+ raise RuntimeError(
+ "INTERNAL BUG. Perhaps you are using a major version "
+ + "of Python that is unsupported by eventlet? Please file a bug "
+ + "at https://github.com/eventlet/eventlet/issues/new"
+ )
+ new._block = allocate_lock()
+ acquired = False
+ while old._is_owned():
+ old.release()
+ new.acquire()
+ acquired = True
+ if old._is_owned():
+ new.acquire()
+ acquired = True
+ if acquired:
+ new._owner = tid
+ return new
+
+
+def _green_os_modules():
+ from eventlet.green import os
+
+ return [("os", os)]
+
+
+def _green_select_modules():
+ from eventlet.green import select
+
+ modules = [("select", select)]
+
+ from eventlet.green import selectors
+
+ modules.append(("selectors", selectors))
+
+ return modules
+
+
+def _green_socket_modules():
+ from eventlet.green import socket
+
+ try:
+ from eventlet.green import ssl
+
+ return [("socket", socket), ("ssl", ssl)]
+ except ImportError:
+ return [("socket", socket)]
+
+
+def _green_subprocess_modules():
+ from eventlet.green import subprocess
+
+ return [("subprocess", subprocess)]
+
+
+def _green_thread_modules():
+ from eventlet.green import Queue
+ from eventlet.green import thread
+ from eventlet.green import threading
+
+ return [("queue", Queue), ("_thread", thread), ("threading", threading)]
+
+
+def _green_time_modules():
+ from eventlet.green import time
+
+ return [("time", time)]
+
+
+def _green_MySQLdb():
+ try:
+ from eventlet.green import MySQLdb
+
+ return [("MySQLdb", MySQLdb)]
+ except ImportError:
+ return []
+
+
+def _green_builtins():
+ try:
+ from eventlet.green import builtin
+
+ return [("builtins", builtin)]
+ except ImportError:
+ return []
+
+
+def slurp_properties(source, destination, ignore=(), srckeys=None):
+ """Copy properties from *source* (assumed to be a module) to
+ *destination* (assumed to be a dict).
+
+ *ignore* lists properties that should not be thusly copied.
+ *srckeys* is a list of keys to copy, if the source's __all__ is
+ untrustworthy.
+ """
+ if srckeys is None:
+ srckeys = source.__all__
+ destination.update(
+ {
+ name: getattr(source, name)
+ for name in srckeys
+ if not (name.startswith("__") or name in ignore)
+ }
+ )
+
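+# Sketch (hypothetical usage): how a green module might build its namespace
+# from the original module's public attributes:
+#
+#     import socket as socket_orig
+#     slurp_properties(socket_orig, globals(),
+#                      ignore=["socket"], srckeys=dir(socket_orig))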
+
+if __name__ == "__main__":
+ sys.argv.pop(0)
+ monkey_patch()
+ with open(sys.argv[0]) as f:
+ code = compile(f.read(), sys.argv[0], "exec")
+ exec(code)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/pools.py b/tapdown/lib/python3.11/site-packages/eventlet/pools.py
new file mode 100644
index 0000000..a65f174
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/pools.py
@@ -0,0 +1,184 @@
+import collections
+from contextlib import contextmanager
+
+from eventlet import queue
+
+
+__all__ = ['Pool', 'TokenPool']
+
+
+class Pool:
+ """
+ Pool class implements resource limitation and construction.
+
+ There are two ways of using Pool: passing a `create` argument or
+ subclassing. In either case you must provide a way to create
+ the resource.
+
+ When using `create` argument, pass a function with no arguments::
+
+ http_pool = pools.Pool(create=httplib2.Http)
+
+ If you need to pass arguments, build a nullary function with either
+ `lambda` expression::
+
+ http_pool = pools.Pool(create=lambda: httplib2.Http(timeout=90))
+
+ or :func:`functools.partial`::
+
+ from functools import partial
+ http_pool = pools.Pool(create=partial(httplib2.Http, timeout=90))
+
+ When subclassing, define only the :meth:`create` method
+ to implement the desired resource::
+
+ class MyPool(pools.Pool):
+ def create(self):
+ return MyObject()
+
+ The :meth:`item` method acts as a context manager;
+ that's the best way to use it::
+
+ with mypool.item() as thing:
+ thing.dostuff()
+
+ The maximum size of the pool can be modified at runtime via
+ the :meth:`resize` method.
+
+ Specifying a non-zero *min_size* argument pre-populates the pool with
+ *min_size* items. *max_size* sets a hard limit to the size of the pool --
+ it cannot contain any more items than *max_size*, and if there are already
+ *max_size* items 'checked out' of the pool, the pool will cause any
+ greenthread calling :meth:`get` to cooperatively yield until an item
+ is :meth:`put` in.
+ """
+
+ def __init__(self, min_size=0, max_size=4, order_as_stack=False, create=None):
+ """*order_as_stack* governs the ordering of the items in the free pool.
+ If ``False`` (the default), the free items collection (of items that
+ were created and were put back in the pool) acts as a round-robin,
+ giving each item approximately equal utilization. If ``True``, the
+ free pool acts as a FILO stack, which preferentially re-uses items that
+ have most recently been used.
+ """
+ self.min_size = min_size
+ self.max_size = max_size
+ self.order_as_stack = order_as_stack
+ self.current_size = 0
+ self.channel = queue.LightQueue(0)
+ self.free_items = collections.deque()
+ if create is not None:
+ self.create = create
+
+ for x in range(min_size):
+ self.current_size += 1
+ self.free_items.append(self.create())
+
+ def get(self):
+ """Return an item from the pool, when one is available. This may
+ cause the calling greenthread to block.
+ """
+ if self.free_items:
+ return self.free_items.popleft()
+ self.current_size += 1
+ if self.current_size <= self.max_size:
+ try:
+ created = self.create()
+ except:
+ self.current_size -= 1
+ raise
+ return created
+ self.current_size -= 1 # did not create
+ return self.channel.get()
+
+ @contextmanager
+ def item(self):
+ """ Get an object out of the pool, for use with with statement.
+
+ >>> from eventlet import pools
+ >>> pool = pools.TokenPool(max_size=4)
+ >>> with pool.item() as obj:
+ ... print("got token")
+ ...
+ got token
+ >>> pool.free()
+ 4
+ """
+ obj = self.get()
+ try:
+ yield obj
+ finally:
+ self.put(obj)
+
+ def put(self, item):
+ """Put an item back into the pool, when done. This may
+ cause the putting greenthread to block.
+ """
+ if self.current_size > self.max_size:
+ self.current_size -= 1
+ return
+
+ if self.waiting():
+ try:
+ self.channel.put(item, block=False)
+ return
+ except queue.Full:
+ pass
+
+ if self.order_as_stack:
+ self.free_items.appendleft(item)
+ else:
+ self.free_items.append(item)
+
+ def resize(self, new_size):
+ """Resize the pool to *new_size*.
+
+ Adjusting this number does not affect existing items checked out of
+ the pool, nor any greenthreads that are waiting for an item to free
+ up. Some indeterminate number of :meth:`get`/:meth:`put`
+ cycles will be necessary before the new maximum size truly matches
+ the actual operation of the pool.
+ """
+ self.max_size = new_size
+
+ def free(self):
+ """Return the number of free items in the pool. This corresponds
+ to the number of :meth:`get` calls needed to empty the pool.
+ """
+ return len(self.free_items) + self.max_size - self.current_size
+
+ def waiting(self):
+ """Return the number of routines waiting for a pool item.
+ """
+ return max(0, self.channel.getting() - self.channel.putting())
+
+ def create(self):
+ """Generate a new pool item. In order for the pool to
+ function, either this method must be overridden in a subclass
+ or the pool must be constructed with the `create` argument.
+ It accepts no arguments and returns a single instance of
+ whatever thing the pool is supposed to contain.
+
+ In general, :meth:`create` is called whenever the pool exceeds its
+ previous high-water mark of concurrently-checked-out-items. In other
+ words, in a new pool with *min_size* of 0, the very first call
+ to :meth:`get` will result in a call to :meth:`create`. If the first
+ caller calls :meth:`put` before some other caller calls :meth:`get`,
+ then the first item will be returned, and :meth:`create` will not be
+ called a second time.
+ """
+ raise NotImplementedError("Implement in subclass")
+
+
+class Token:
+ pass
+
+
+class TokenPool(Pool):
+ """A pool which gives out tokens (opaque unique objects), which indicate
+ that the coroutine which holds the token has a right to consume some
+ limited resource.
+ """
+
+ def create(self):
+ return Token()
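+
+# Illustrative sketch (not part of eventlet): cap concurrent access at four
+# holders; a fifth greenthread blocks in item() until a token is put back:
+#
+#     pool = TokenPool(max_size=4)
+#     with pool.item():
+#         do_limited_work()   # hypothetical function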
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/queue.py b/tapdown/lib/python3.11/site-packages/eventlet/queue.py
new file mode 100644
index 0000000..d3bd4dc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/queue.py
@@ -0,0 +1,496 @@
+# Copyright (c) 2009 Denis Bilenko, denis.bilenko at gmail com
+# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
+# and licensed under the MIT license:
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+"""Synchronized queues.
+
+The :mod:`eventlet.queue` module implements multi-producer, multi-consumer
+queues that work across greenlets, with an API similar to the classes found in
+the standard :mod:`Queue` and :class:`multiprocessing <multiprocessing.Queue>`
+modules.
+
+A major difference is that queues in this module operate as channels when
+initialized with *maxsize* of zero. In such case, both :meth:`Queue.empty`
+and :meth:`Queue.full` return ``True`` and :meth:`Queue.put` always blocks until
+a call to :meth:`Queue.get` retrieves the item.
+
+An interesting difference, made possible because of greenthreads, is
+that :meth:`Queue.qsize`, :meth:`Queue.empty`, and :meth:`Queue.full` *can* be
+used as indicators of whether the subsequent :meth:`Queue.get`
+or :meth:`Queue.put` will not block. The new methods :meth:`Queue.getting`
+and :meth:`Queue.putting` report on the number of greenthreads blocking
+in :meth:`put <Queue.put>` or :meth:`get <Queue.get>` respectively.
+"""
+
+import collections
+import heapq
+import sys
+import traceback
+import types
+
+from eventlet.event import Event
+from eventlet.greenthread import getcurrent
+from eventlet.hubs import get_hub
+import queue as Stdlib_Queue
+from eventlet.timeout import Timeout
+
+
+__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'LightQueue', 'Full', 'Empty']
+
+_NONE = object()
+Full = Stdlib_Queue.Full
+Empty = Stdlib_Queue.Empty
+
+
+class Waiter:
+ """A low level synchronization class.
+
+ Wrapper around greenlet's ``switch()`` and ``throw()`` calls that makes them safe:
+
+ * switching will occur only if the waiting greenlet is executing :meth:`wait`
+ method currently. Otherwise, :meth:`switch` and :meth:`throw` are no-ops.
+ * any error raised in the greenlet is handled inside :meth:`switch` and :meth:`throw`
+
+ The :meth:`switch` and :meth:`throw` methods must only be called from the :class:`Hub` greenlet.
+ The :meth:`wait` method must be called from a greenlet other than :class:`Hub`.
+ """
+ __slots__ = ['greenlet']
+
+ def __init__(self):
+ self.greenlet = None
+
+ def __repr__(self):
+ if self.waiting:
+ waiting = ' waiting'
+ else:
+ waiting = ''
+ return '<%s at %s%s greenlet=%r>' % (
+ type(self).__name__, hex(id(self)), waiting, self.greenlet,
+ )
+
+ def __str__(self):
+ """
+ >>> print(Waiter())
+ <Waiter greenlet=None>
+ """
+ if self.waiting:
+ waiting = ' waiting'
+ else:
+ waiting = ''
+ return '<%s%s greenlet=%s>' % (type(self).__name__, waiting, self.greenlet)
+
+ def __nonzero__(self):
+ return self.greenlet is not None
+
+ __bool__ = __nonzero__
+
+ @property
+ def waiting(self):
+ return self.greenlet is not None
+
+ def switch(self, value=None):
+ """Wake up the greenlet that is calling wait() currently (if there is one).
+ Can only be called from Hub's greenlet.
+ """
+ assert getcurrent() is get_hub(
+ ).greenlet, "Can only use Waiter.switch method from the mainloop"
+ if self.greenlet is not None:
+ try:
+ self.greenlet.switch(value)
+ except Exception:
+ traceback.print_exc()
+
+ def throw(self, *throw_args):
+ """Make greenlet calling wait() wake up (if there is a wait()).
+ Can only be called from Hub's greenlet.
+ """
+ assert getcurrent() is get_hub(
+ ).greenlet, "Can only use Waiter.switch method from the mainloop"
+ if self.greenlet is not None:
+ try:
+ self.greenlet.throw(*throw_args)
+ except Exception:
+ traceback.print_exc()
+
+ # XXX should be renamed to get() ? and the whole class is called Receiver?
+ def wait(self):
+ """Wait until switch() or throw() is called.
+ """
+ assert self.greenlet is None, 'This Waiter is already used by %r' % (self.greenlet, )
+ self.greenlet = getcurrent()
+ try:
+ return get_hub().switch()
+ finally:
+ self.greenlet = None
+
+
+class LightQueue:
+ """
+ This is a variant of Queue that behaves mostly like the standard
+ :class:`Stdlib_Queue`. It differs by not supporting the
+ :meth:`task_done <Queue.task_done>` or
+ :meth:`join <Queue.join>` methods, and is a little faster for
+ not having that overhead.
+ """
+
+ def __init__(self, maxsize=None):
+ if maxsize is None or maxsize < 0: # None is not comparable in 3.x
+ self.maxsize = None
+ else:
+ self.maxsize = maxsize
+ self.getters = set()
+ self.putters = set()
+ self._event_unlock = None
+ self._init(maxsize)
+
+ # QQQ make maxsize into a property with setter that schedules unlock if necessary
+
+ def _init(self, maxsize):
+ self.queue = collections.deque()
+
+ def _get(self):
+ return self.queue.popleft()
+
+ def _put(self, item):
+ self.queue.append(item)
+
+ def __repr__(self):
+ return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._format())
+
+ def __str__(self):
+ return '<%s %s>' % (type(self).__name__, self._format())
+
+ def _format(self):
+ result = 'maxsize=%r' % (self.maxsize, )
+ if getattr(self, 'queue', None):
+ result += ' queue=%r' % self.queue
+ if self.getters:
+ result += ' getters[%s]' % len(self.getters)
+ if self.putters:
+ result += ' putters[%s]' % len(self.putters)
+ if self._event_unlock is not None:
+ result += ' unlocking'
+ return result
+
+ def qsize(self):
+ """Return the size of the queue."""
+ return len(self.queue)
+
+ def resize(self, size):
+ """Resizes the queue's maximum size.
+
+ If the size is increased, and there are putters waiting, they may be woken up."""
+ # None is not comparable in 3.x
+ if self.maxsize is not None and (size is None or size > self.maxsize):
+ # Maybe wake some stuff up
+ self._schedule_unlock()
+ self.maxsize = size
+
+ def putting(self):
+ """Returns the number of greenthreads that are blocked waiting to put
+ items into the queue."""
+ return len(self.putters)
+
+ def getting(self):
+ """Returns the number of greenthreads that are blocked waiting on an
+ empty queue."""
+ return len(self.getters)
+
+ def empty(self):
+ """Return ``True`` if the queue is empty, ``False`` otherwise."""
+ return not self.qsize()
+
+ def full(self):
+ """Return ``True`` if the queue is full, ``False`` otherwise.
+
+ ``Queue(None)`` is never full.
+ """
+ # None is not comparable in 3.x
+ return self.maxsize is not None and self.qsize() >= self.maxsize
+
+ def put(self, item, block=True, timeout=None):
+ """Put an item into the queue.
+
+ If optional arg *block* is true and *timeout* is ``None`` (the default),
+ block if necessary until a free slot is available. If *timeout* is
+ a positive number, it blocks at most *timeout* seconds and raises
+ the :class:`Full` exception if no free slot was available within that time.
+ Otherwise (*block* is false), put an item on the queue if a free slot
+ is immediately available, else raise the :class:`Full` exception (*timeout*
+ is ignored in that case).
+ """
+ if self.maxsize is None or self.qsize() < self.maxsize:
+ # there's a free slot, put an item right away
+ self._put(item)
+ if self.getters:
+ self._schedule_unlock()
+ elif not block and get_hub().greenlet is getcurrent():
+ # we're in the mainloop, so we cannot wait; we can switch() to other greenlets though
+ # find a getter and deliver an item to it
+ while self.getters:
+ getter = self.getters.pop()
+ if getter:
+ self._put(item)
+ item = self._get()
+ getter.switch(item)
+ return
+ raise Full
+ elif block:
+ waiter = ItemWaiter(item, block)
+ self.putters.add(waiter)
+ timeout = Timeout(timeout, Full)
+ try:
+ if self.getters:
+ self._schedule_unlock()
+ result = waiter.wait()
+ assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
+ if waiter.item is not _NONE:
+ self._put(item)
+ finally:
+ timeout.cancel()
+ self.putters.discard(waiter)
+ elif self.getters:
+ waiter = ItemWaiter(item, block)
+ self.putters.add(waiter)
+ self._schedule_unlock()
+ result = waiter.wait()
+ assert result is waiter, "Invalid switch into Queue.put: %r" % (result, )
+ if waiter.item is not _NONE:
+ raise Full
+ else:
+ raise Full
+
+ def put_nowait(self, item):
+ """Put an item into the queue without blocking.
+
+ Only enqueue the item if a free slot is immediately available.
+ Otherwise raise the :class:`Full` exception.
+ """
+ self.put(item, False)
+
+ def get(self, block=True, timeout=None):
+ """Remove and return an item from the queue.
+
+ If optional args *block* is true and *timeout* is ``None`` (the default),
+ block if necessary until an item is available. If *timeout* is a positive number,
+ it blocks at most *timeout* seconds and raises the :class:`Empty` exception
+ if no item was available within that time. Otherwise (*block* is false), return
+ an item if one is immediately available, else raise the :class:`Empty` exception
+ (*timeout* is ignored in that case).
+ """
+ if self.qsize():
+ if self.putters:
+ self._schedule_unlock()
+ return self._get()
+ elif not block and get_hub().greenlet is getcurrent():
+ # special case to make get_nowait() runnable in the mainloop greenlet
+ # there are no items in the queue; try to fix the situation by unlocking putters
+ while self.putters:
+ putter = self.putters.pop()
+ if putter:
+ putter.switch(putter)
+ if self.qsize():
+ return self._get()
+ raise Empty
+ elif block:
+ waiter = Waiter()
+ timeout = Timeout(timeout, Empty)
+ try:
+ self.getters.add(waiter)
+ if self.putters:
+ self._schedule_unlock()
+ try:
+ return waiter.wait()
+ except:
+ self._schedule_unlock()
+ raise
+ finally:
+ self.getters.discard(waiter)
+ timeout.cancel()
+ else:
+ raise Empty
+
+ def get_nowait(self):
+ """Remove and return an item from the queue without blocking.
+
+ Only get an item if one is immediately available. Otherwise
+ raise the :class:`Empty` exception.
+ """
+ return self.get(False)
+
+ def _unlock(self):
+ try:
+ while True:
+ if self.qsize() and self.getters:
+ getter = self.getters.pop()
+ if getter:
+ try:
+ item = self._get()
+ except:
+ getter.throw(*sys.exc_info())
+ else:
+ getter.switch(item)
+ elif self.putters and self.getters:
+ putter = self.putters.pop()
+ if putter:
+ getter = self.getters.pop()
+ if getter:
+ item = putter.item
+ # this prevents the greenlet calling put() from calling _put() again
+ putter.item = _NONE
+ self._put(item)
+ item = self._get()
+ getter.switch(item)
+ putter.switch(putter)
+ else:
+ self.putters.add(putter)
+ elif self.putters and (self.getters or
+ self.maxsize is None or
+ self.qsize() < self.maxsize):
+ putter = self.putters.pop()
+ putter.switch(putter)
+ elif self.putters and not self.getters:
+ full = [p for p in self.putters if not p.block]
+ if not full:
+ break
+ for putter in full:
+ self.putters.discard(putter)
+ get_hub().schedule_call_global(
+ 0, putter.greenlet.throw, Full)
+ else:
+ break
+ finally:
+ self._event_unlock = None # QQQ maybe it's possible to obtain this info from libevent?
+ # i.e. whether this event is pending _OR_ currently executing
+ # testcase: 2 greenlets: while True: q.put(q.get()) - nothing else has a chance to execute
+ # to avoid this, schedule unlock with timer(0, ...) once in a while
+
+ def _schedule_unlock(self):
+ if self._event_unlock is None:
+ self._event_unlock = get_hub().schedule_call_global(0, self._unlock)
+
+ # TODO(stephenfin): Remove conditional when we bump the minimum Python
+ # version
+ if sys.version_info >= (3, 9):
+ __class_getitem__ = classmethod(types.GenericAlias)
+
+
+class ItemWaiter(Waiter):
+ __slots__ = ['item', 'block']
+
+ def __init__(self, item, block):
+ Waiter.__init__(self)
+ self.item = item
+ self.block = block
+
+
+class Queue(LightQueue):
+ '''Create a queue object with a given maximum size.
+
+ If *maxsize* is less than zero or ``None``, the queue size is infinite.
+
+ ``Queue(0)`` is a channel, that is, its :meth:`put` method always blocks
+ until the item is delivered. (This is unlike the standard
+ :class:`Stdlib_Queue`, where 0 means infinite size).
+
+ In all other respects, this Queue class resembles the standard library,
+ :class:`Stdlib_Queue`.
+ '''
+
+ def __init__(self, maxsize=None):
+ LightQueue.__init__(self, maxsize)
+ self.unfinished_tasks = 0
+ self._cond = Event()
+
+ def _format(self):
+ result = LightQueue._format(self)
+ if self.unfinished_tasks:
+ result += ' tasks=%s _cond=%s' % (self.unfinished_tasks, self._cond)
+ return result
+
+ def _put(self, item):
+ LightQueue._put(self, item)
+ self._put_bookkeeping()
+
+ def _put_bookkeeping(self):
+ self.unfinished_tasks += 1
+ if self._cond.ready():
+ self._cond.reset()
+
+ def task_done(self):
+ '''Indicate that a formerly enqueued task is complete. Used by queue consumer threads.
+ For each :meth:`get <Queue.get>` used to fetch a task, a subsequent call to
+ :meth:`task_done` tells the queue that the processing on the task is complete.
+
+ If a :meth:`join` is currently blocking, it will resume when all items have been processed
+ (meaning that a :meth:`task_done` call was received for every item that had been
+ :meth:`put <Queue.put>` into the queue).
+
+ Raises a :exc:`ValueError` if called more times than there were items placed in the queue.
+ '''
+
+ if self.unfinished_tasks <= 0:
+ raise ValueError('task_done() called too many times')
+ self.unfinished_tasks -= 1
+ if self.unfinished_tasks == 0:
+ self._cond.send(None)
+
+ def join(self):
+ '''Block until all items in the queue have been gotten and processed.
+
+ The count of unfinished tasks goes up whenever an item is added to the queue.
+ The count goes down whenever a consumer thread calls :meth:`task_done` to indicate
+ that the item was retrieved and all work on it is complete. When the count of
+ unfinished tasks drops to zero, :meth:`join` unblocks.
+ '''
+ if self.unfinished_tasks > 0:
+ self._cond.wait()
+
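+# Producer/consumer sketch (illustrative): join() unblocks only once every
+# put() item has had a matching task_done():
+#
+#     q = Queue()
+#     q.put(job)              # job is a hypothetical work item
+#
+#     def worker():
+#         item = q.get()
+#         handle(item)        # hypothetical handler
+#         q.task_done()
+#
+#     eventlet.spawn(worker)
+#     q.join()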
+
+class PriorityQueue(Queue):
+ '''A subclass of :class:`Queue` that retrieves entries in priority order (lowest first).
+
+ Entries are typically tuples of the form: ``(priority number, data)``.
+ '''
+
+ def _init(self, maxsize):
+ self.queue = []
+
+ def _put(self, item, heappush=heapq.heappush):
+ heappush(self.queue, item)
+ self._put_bookkeeping()
+
+ def _get(self, heappop=heapq.heappop):
+ return heappop(self.queue)
+
+
+class LifoQueue(Queue):
+ '''A subclass of :class:`Queue` that retrieves most recently added entries first.'''
+
+ def _init(self, maxsize):
+ self.queue = []
+
+ def _put(self, item):
+ self.queue.append(item)
+ self._put_bookkeeping()
+
+ def _get(self):
+ return self.queue.pop()
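+
+# Channel sketch (illustrative): with maxsize=0, put() always blocks until a
+# getter arrives, making each transfer a rendezvous:
+#
+#     ch = Queue(0)
+#     eventlet.spawn(ch.put, "hello")   # the spawned put() blocks...
+#     print(ch.get())                   # ...until this get() runs -> hello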
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/semaphore.py b/tapdown/lib/python3.11/site-packages/eventlet/semaphore.py
new file mode 100644
index 0000000..218d01a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/semaphore.py
@@ -0,0 +1,315 @@
+import collections
+
+import eventlet
+from eventlet import hubs
+
+
+class Semaphore:
+
+ """An unbounded semaphore.
+ Optionally initialize with a resource *count*, then :meth:`acquire` and
+ :meth:`release` resources as needed. Attempting to :meth:`acquire` when
+ *count* is zero suspends the calling greenthread until *count* becomes
+ nonzero again.
+
+ This is API-compatible with :class:`threading.Semaphore`.
+
+ It is a context manager, and thus can be used in a with block::
+
+ sem = Semaphore(2)
+ with sem:
+ do_some_stuff()
+
+ If not specified, *value* defaults to 1.
+
+ It is possible to limit acquire time::
+
+ sem = Semaphore()
+ ok = sem.acquire(timeout=0.1)
+ # True if acquired, False if timed out.
+
+ """
+
+ def __init__(self, value=1):
+ try:
+ value = int(value)
+ except ValueError as e:
+ msg = 'Semaphore() expects an int value, actual: {} {}'.format(type(value), str(e))
+ raise TypeError(msg)
+ if value < 0:
+ msg = 'Semaphore() expects value >= 0, actual: {}'.format(repr(value))
+ raise ValueError(msg)
+ self.counter = value
+ self._waiters = collections.deque()
+
+ def __repr__(self):
+ params = (self.__class__.__name__, hex(id(self)),
+ self.counter, len(self._waiters))
+ return '<%s at %s c=%s _w[%s]>' % params
+
+ def __str__(self):
+ params = (self.__class__.__name__, self.counter, len(self._waiters))
+ return '<%s c=%s _w[%s]>' % params
+
+ def locked(self):
+ """Returns true if a call to acquire would block.
+ """
+ return self.counter <= 0
+
+ def bounded(self):
+ """Returns False; for consistency with
+ :class:`~eventlet.semaphore.CappedSemaphore`.
+ """
+ return False
+
+ def acquire(self, blocking=True, timeout=None):
+ """Acquire a semaphore.
+
+ When invoked without arguments: if the internal counter is larger than
+ zero on entry, decrement it by one and return immediately. If it is zero
+ on entry, block, waiting until some other thread has called release() to
+ make it larger than zero. This is done with proper interlocking so that
+ if multiple acquire() calls are blocked, release() will wake exactly one
+ of them up. The implementation may pick one at random, so the order in
+ which blocked threads are awakened should not be relied on. There is no
+ return value in this case.
+
+ When invoked with blocking set to true, do the same thing as when called
+ without arguments, and return true.
+
+ When invoked with blocking set to false, do not block. If a call without
+ an argument would block, return false immediately; otherwise, do the
+ same thing as when called without arguments, and return true.
+
+ Timeout value, if given, must not be negative.
+ """
+ if timeout == -1:
+ timeout = None
+ if timeout is not None and timeout < 0:
+ raise ValueError("timeout value must be strictly positive")
+ if not blocking:
+ if timeout is not None:
+ raise ValueError("can't specify timeout for non-blocking acquire")
+ timeout = 0
+ if not blocking and self.locked():
+ return False
+
+ current_thread = eventlet.getcurrent()
+
+ if self.counter <= 0 or self._waiters:
+ if current_thread not in self._waiters:
+ self._waiters.append(current_thread)
+ try:
+ if timeout is not None:
+ ok = False
+ with eventlet.Timeout(timeout, False):
+ while self.counter <= 0:
+ hubs.get_hub().switch()
+ ok = True
+ if not ok:
+ return False
+ else:
+ # If someone else is already in this wait loop, give them
+ # a chance to get out.
+ while True:
+ hubs.get_hub().switch()
+ if self.counter > 0:
+ break
+ finally:
+ try:
+ self._waiters.remove(current_thread)
+ except ValueError:
+ # Fine if it's already been dropped.
+ pass
+
+ self.counter -= 1
+ return True
+
+ def __enter__(self):
+ self.acquire()
+
+ def release(self, blocking=True):
+ """Release a semaphore, incrementing the internal counter by one. When
+ it was zero on entry and another thread is waiting for it to become
+ larger than zero again, wake up that thread.
+
+ The *blocking* argument is for consistency with CappedSemaphore and is
+ ignored.
+ """
+ self.counter += 1
+ if self._waiters:
+ hubs.get_hub().schedule_call_global(0, self._do_acquire)
+ return True
+
+ def _do_acquire(self):
+ if self._waiters and self.counter > 0:
+ waiter = self._waiters.popleft()
+ waiter.switch()
+
+ def __exit__(self, typ, val, tb):
+ self.release()
+
+ @property
+ def balance(self):
+ """An integer value that represents how many new calls to
+ :meth:`acquire` or :meth:`release` would be needed to get the counter to
+ 0. If it is positive, then its value is the number of acquires that can
+ happen before the next acquire would block. If it is negative, it is
+ the negative of the number of releases that would be required in order
+ to make the counter 0 again (one more release would push the counter to
+ 1 and unblock acquirers). It takes into account how many greenthreads
+ are currently blocking in :meth:`acquire`.
+ """
+ # positive means there are free items
+ # zero means there are no free items but nobody has requested one
+ # negative means there are requests for items, but no items
+ return self.counter - len(self._waiters)
+
+
+class BoundedSemaphore(Semaphore):
+
+ """A bounded semaphore checks to make sure its current value doesn't exceed
+ its initial value. If it does, ValueError is raised. In most situations
+ semaphores are used to guard resources with limited capacity. If the
+ semaphore is released too many times it's a sign of a bug. If not given,
+ *value* defaults to 1.
+ """
+
+ def __init__(self, value=1):
+ super().__init__(value)
+ self.original_counter = value
+
+ def release(self, blocking=True):
+ """Release a semaphore, incrementing the internal counter by one. If
+ the counter would exceed the initial value, raises ValueError. When
+ it was zero on entry and another thread is waiting for it to become
+ larger than zero again, wake up that thread.
+
+ The *blocking* argument is for consistency with :class:`CappedSemaphore`
+ and is ignored.
+ """
+ if self.counter >= self.original_counter:
+ raise ValueError("Semaphore released too many times")
+ return super().release(blocking)
+
+
+class CappedSemaphore:
+
+ """A blockingly bounded semaphore.
+
+ Optionally initialize with a resource *count*, then :meth:`acquire` and
+ :meth:`release` resources as needed. Attempting to :meth:`acquire` when
+ *count* is zero suspends the calling greenthread until count becomes nonzero
+ again. Attempting to :meth:`release` after *count* has reached *limit*
+ suspends the calling greenthread until *count* becomes less than *limit*
+ again.
+
+ This has the same API as :class:`threading.Semaphore`, though its
+ semantics and behavior differ subtly due to the upper limit on calls
+ to :meth:`release`. It is **not** compatible with
+ :class:`threading.BoundedSemaphore` because it blocks when reaching *limit*
+ instead of raising a ValueError.
+
+ It is a context manager, and thus can be used in a with block::
+
+ sem = CappedSemaphore(2)
+ with sem:
+ do_some_stuff()
+ """
+
+ def __init__(self, count, limit):
+ if count < 0:
+ raise ValueError("CappedSemaphore must be initialized with a "
+ "positive number, got %s" % count)
+ if count > limit:
+ # accidentally, this also catches the case when limit is None
+ raise ValueError("'count' cannot be more than 'limit'")
+ self.lower_bound = Semaphore(count)
+ self.upper_bound = Semaphore(limit - count)
+
+ def __repr__(self):
+ params = (self.__class__.__name__, hex(id(self)),
+ self.balance, self.lower_bound, self.upper_bound)
+ return '<%s at %s b=%s l=%s u=%s>' % params
+
+ def __str__(self):
+ params = (self.__class__.__name__, self.balance,
+ self.lower_bound, self.upper_bound)
+ return '<%s b=%s l=%s u=%s>' % params
+
+ def locked(self):
+ """Returns true if a call to acquire would block.
+ """
+ return self.lower_bound.locked()
+
+ def bounded(self):
+ """Returns true if a call to release would block.
+ """
+ return self.upper_bound.locked()
+
+ def acquire(self, blocking=True):
+ """Acquire a semaphore.
+
+ When invoked without arguments: if the internal counter is larger than
+ zero on entry, decrement it by one and return immediately. If it is zero
+ on entry, block, waiting until some other thread has called release() to
+ make it larger than zero. This is done with proper interlocking so that
+ if multiple acquire() calls are blocked, release() will wake exactly one
+ of them up. The implementation may pick one at random, so the order in
+ which blocked threads are awakened should not be relied on. There is no
+ return value in this case.
+
+ When invoked with blocking set to true, do the same thing as when called
+ without arguments, and return true.
+
+ When invoked with blocking set to false, do not block. If a call without
+ an argument would block, return false immediately; otherwise, do the
+ same thing as when called without arguments, and return true.
+ """
+ if not blocking and self.locked():
+ return False
+ self.upper_bound.release()
+ try:
+ return self.lower_bound.acquire()
+ except:
+ self.upper_bound.counter -= 1
+ # using counter directly means that it can be less than zero.
+ # however I certainly don't need to wait here and I don't seem to have
+ # a need to care about such inconsistency
+ raise
+
+ def __enter__(self):
+ self.acquire()
+
+ def release(self, blocking=True):
+ """Release a semaphore. In this class, this behaves very much like
+ an :meth:`acquire` but in the opposite direction.
+
+ Imagine the docs of :meth:`acquire` here, but with every direction
+ reversed. When calling this method, it will block if the internal
+ counter is greater than or equal to *limit*.
+ """
+ if not blocking and self.bounded():
+ return False
+ self.lower_bound.release()
+ try:
+ return self.upper_bound.acquire()
+ except:
+ self.lower_bound.counter -= 1
+ raise
+
+ def __exit__(self, typ, val, tb):
+ self.release()
+
+ @property
+ def balance(self):
+ """An integer value that represents how many new calls to
+ :meth:`acquire` or :meth:`release` would be needed to get the counter to
+ 0. If it is positive, then its value is the number of acquires that can
+ happen before the next acquire would block. If it is negative, it is
+ the negative of the number of releases that would be required in order
+ to make the counter 0 again (one more release would push the counter to
+ 1 and unblock acquirers). It takes into account how many greenthreads
+ are currently blocking in :meth:`acquire` and :meth:`release`.
+ """
+ return self.lower_bound.balance - self.upper_bound.balance
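+
+# Illustrative sketch (not part of eventlet): with limit=2, the third
+# release() blocks until some greenthread acquires:
+#
+#     sem = CappedSemaphore(0, 2)
+#     sem.release(); sem.release()   # counter now at the limit
+#     sem.release()                  # blocks until an acquire() happens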
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/support/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/support/__init__.py
new file mode 100644
index 0000000..b1c1607
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/support/__init__.py
@@ -0,0 +1,69 @@
+import inspect
+import functools
+import sys
+import warnings
+
+from eventlet.support import greenlets
+
+
+_MISSING = object()
+
+
+def get_errno(exc):
+ """ Get the error code out of socket.error objects.
+ socket.error in <2.5 does not have errno attribute
+ socket.error in 3.x does not allow indexing access
+ e.args[0] works for all.
+ There are cases when args[0] is not errno.
+ i.e. http://bugs.python.org/issue6471
+ Maybe there are cases when errno is set, but it is not the first argument?
+ """
+
+ try:
+ if exc.errno is not None:
+ return exc.errno
+ except AttributeError:
+ pass
+ try:
+ return exc.args[0]
+ except IndexError:
+ return None
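+
+# Illustrative example (editorial addition): both attribute-based and
+# args-based errors resolve the same way through the fallbacks above.
+#
+#     import socket
+#     assert get_errno(socket.gaierror(socket.EAI_AGAIN, 'again')) == socket.EAI_AGAIN
+#     assert get_errno(OSError()) is None    # no errno set and no args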
+
+
+if sys.version_info[0] < 3:
+ def bytes_to_str(b, encoding='ascii'):
+ return b
+else:
+ def bytes_to_str(b, encoding='ascii'):
+ return b.decode(encoding)
+
+PY33 = sys.version_info[:2] == (3, 3)
+
+
+def wrap_deprecated(old, new):
+ def _resolve(s):
+ return 'eventlet.'+s if '.' not in s else s
+ msg = '''\
+{old} is deprecated and will be removed in next version. Use {new} instead.
+Autoupgrade: fgrep -rl '{old}' . |xargs -t sed --in-place='' -e 's/{old}/{new}/'
+'''.format(old=_resolve(old), new=_resolve(new))
+
+ def wrapper(base):
+ klass = None
+ if inspect.isclass(base):
+ class klass(base):
+ pass
+ klass.__name__ = base.__name__
+ klass.__module__ = base.__module__
+
+ @functools.wraps(base)
+ def wrapped(*a, **kw):
+ warnings.warn(msg, DeprecationWarning, stacklevel=5)
+ return base(*a, **kw)
+
+ if klass is not None:
+ klass.__init__ = wrapped
+ return klass
+
+ return wrapped
+ return wrapper
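+
+
+# Editorial usage sketch (not part of eventlet; the names are hypothetical):
+# wrap_deprecated(old, new) builds a decorator that keeps an old entry point
+# working while warning users to migrate.
+#
+#     import eventlet
+#
+#     @wrap_deprecated('old_sleep', 'sleep')
+#     def old_sleep(seconds):
+#         return eventlet.sleep(seconds)
+#
+#     old_sleep(0)   # still works, but emits a DeprecationWarning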
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/support/greendns.py b/tapdown/lib/python3.11/site-packages/eventlet/support/greendns.py
new file mode 100644
index 0000000..365664f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/support/greendns.py
@@ -0,0 +1,959 @@
+'''greendns - non-blocking DNS support for Eventlet
+'''
+
+# Portions of this code taken from the gogreen project:
+# http://github.com/slideinc/gogreen
+#
+# Copyright (c) 2005-2010 Slide, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the author nor the names of other
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import re
+import struct
+import sys
+
+import eventlet
+from eventlet import patcher
+from eventlet.green import _socket_nodns
+from eventlet.green import os
+from eventlet.green import time
+from eventlet.green import select
+from eventlet.green import ssl
+
+
+def import_patched(module_name):
+    # Import cycle note: it's crucial to use _socket_nodns here because the
+    # regular eventlet.green.socket imports *this* module, and if we imported
+    # it back we'd end up with an import cycle (socket -> greendns -> socket).
+    # We break this import cycle by providing a restricted socket module.
+ modules = {
+ 'select': select,
+ 'time': time,
+ 'os': os,
+ 'socket': _socket_nodns,
+ 'ssl': ssl,
+ }
+ return patcher.import_patched(module_name, **modules)
+
+
+dns = import_patched('dns')
+
+# Handle rdtypes separately; we need it fully available as we patch the rest
+dns.rdtypes = import_patched('dns.rdtypes')
+dns.rdtypes.__all__.extend(['dnskeybase', 'dsbase', 'txtbase'])
+for pkg in dns.rdtypes.__all__:
+ setattr(dns.rdtypes, pkg, import_patched('dns.rdtypes.' + pkg))
+for pkg in dns.rdtypes.IN.__all__:
+ setattr(dns.rdtypes.IN, pkg, import_patched('dns.rdtypes.IN.' + pkg))
+for pkg in dns.rdtypes.ANY.__all__:
+ setattr(dns.rdtypes.ANY, pkg, import_patched('dns.rdtypes.ANY.' + pkg))
+
+for pkg in dns.__all__:
+ if pkg == 'rdtypes':
+ continue
+ setattr(dns, pkg, import_patched('dns.' + pkg))
+del import_patched
+
+
+socket = _socket_nodns
+
+DNS_QUERY_TIMEOUT = 10.0
+HOSTS_TTL = 10.0
+
+# NOTE(victor): do not raise these shared EAI_*_ERROR instances directly in
+# Python 3; doing so causes a memory leak.  Use _raise_new_error() to raise a
+# fresh copy instead.
+EAI_EAGAIN_ERROR = socket.gaierror(socket.EAI_AGAIN, 'Lookup timed out')
+EAI_NONAME_ERROR = socket.gaierror(socket.EAI_NONAME, 'Name or service not known')
+# EAI_NODATA was removed from RFC3493, it's now replaced with EAI_NONAME
+# socket.EAI_NODATA is not defined on FreeBSD, probably on some other platforms too.
+# https://lists.freebsd.org/pipermail/freebsd-ports/2003-October/005757.html
+EAI_NODATA_ERROR = EAI_NONAME_ERROR
+if (os.environ.get('EVENTLET_DEPRECATED_EAI_NODATA', '').lower() in ('1', 'y', 'yes')
+ and hasattr(socket, 'EAI_NODATA')):
+ EAI_NODATA_ERROR = socket.gaierror(socket.EAI_NODATA, 'No address associated with hostname')
+
+
+def _raise_new_error(error_instance):
+ raise error_instance.__class__(*error_instance.args)
+
+
+def is_ipv4_addr(host):
+ """Return True if host is a valid IPv4 address"""
+ if not isinstance(host, str):
+ return False
+ try:
+ dns.ipv4.inet_aton(host)
+ except dns.exception.SyntaxError:
+ return False
+ else:
+ return True
+
+
+def is_ipv6_addr(host):
+ """Return True if host is a valid IPv6 address"""
+ if not isinstance(host, str):
+ return False
+ host = host.split('%', 1)[0]
+ try:
+ dns.ipv6.inet_aton(host)
+ except dns.exception.SyntaxError:
+ return False
+ else:
+ return True
+
+
+def is_ip_addr(host):
+ """Return True if host is a valid IPv4 or IPv6 address"""
+ return is_ipv4_addr(host) or is_ipv6_addr(host)
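+
+# Quick illustration (editorial addition): the validators above accept
+# textual addresses only, including zoned IPv6 literals, and reject hostnames.
+#
+#     assert is_ipv4_addr('127.0.0.1')
+#     assert is_ipv6_addr('fe80::1%eth0')    # the zone id is stripped first
+#     assert not is_ip_addr('example.com')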
+
+
+# NOTE(ralonsoh): in dnspython v2.0.0, "_compute_expiration" was replaced
+# by "_compute_times".
+if hasattr(dns.query, '_compute_expiration'):
+ def compute_expiration(query, timeout):
+ return query._compute_expiration(timeout)
+else:
+ def compute_expiration(query, timeout):
+ return query._compute_times(timeout)[1]
+
+
+class HostsAnswer(dns.resolver.Answer):
+ """Answer class for HostsResolver object"""
+
+ def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
+ """Create a new answer
+
+ :qname: A dns.name.Name instance of the query name
+ :rdtype: The rdatatype of the query
+ :rdclass: The rdataclass of the query
+ :rrset: The dns.rrset.RRset with the response, must have ttl attribute
+ :raise_on_no_answer: Whether to raise dns.resolver.NoAnswer if no
+ answer.
+ """
+ self.response = None
+ self.qname = qname
+ self.rdtype = rdtype
+ self.rdclass = rdclass
+ self.canonical_name = qname
+ if not rrset and raise_on_no_answer:
+ raise dns.resolver.NoAnswer()
+ self.rrset = rrset
+ self.expiration = (time.time() +
+ rrset.ttl if hasattr(rrset, 'ttl') else 0)
+
+
+class HostsResolver:
+ """Class to parse the hosts file
+
+ Attributes
+ ----------
+
+ :fname: The filename of the hosts file in use.
+ :interval: The time between checking for hosts file modification
+ """
+
+ LINES_RE = re.compile(r"""
+ \s* # Leading space
+ ([^\r\n#]*?) # The actual match, non-greedy so as not to include trailing space
+ \s* # Trailing space
+ (?:[#][^\r\n]+)? # Comments
+ (?:$|[\r\n]+) # EOF or newline
+ """, re.VERBOSE)
+
+ def __init__(self, fname=None, interval=HOSTS_TTL):
+ self._v4 = {} # name -> ipv4
+ self._v6 = {} # name -> ipv6
+ self._aliases = {} # name -> canonical_name
+ self.interval = interval
+ self.fname = fname
+ if fname is None:
+ if os.name == 'posix':
+ self.fname = '/etc/hosts'
+ elif os.name == 'nt':
+ self.fname = os.path.expandvars(
+ r'%SystemRoot%\system32\drivers\etc\hosts')
+ self._last_load = 0
+ if self.fname:
+ self._load()
+
+ def _readlines(self):
+ """Read the contents of the hosts file
+
+        Return the lines of the file; comment lines and empty lines are
+        excluded.
+
+ Note that this performs disk I/O so can be blocking.
+ """
+ try:
+ with open(self.fname, 'rb') as fp:
+ fdata = fp.read()
+ except OSError:
+ return []
+
+ udata = fdata.decode(errors='ignore')
+
+ return filter(None, self.LINES_RE.findall(udata))
+
+ def _load(self):
+ """Load hosts file
+
+ This will unconditionally (re)load the data from the hosts
+ file.
+ """
+ lines = self._readlines()
+ self._v4.clear()
+ self._v6.clear()
+ self._aliases.clear()
+ for line in lines:
+ parts = line.split()
+ if len(parts) < 2:
+ continue
+ ip = parts.pop(0)
+ if is_ipv4_addr(ip):
+ ipmap = self._v4
+ elif is_ipv6_addr(ip):
+ if ip.startswith('fe80'):
+ # Do not use link-local addresses, OSX stores these here
+ continue
+ ipmap = self._v6
+ else:
+ continue
+ cname = parts.pop(0).lower()
+ ipmap[cname] = ip
+ for alias in parts:
+ alias = alias.lower()
+ ipmap[alias] = ip
+ self._aliases[alias] = cname
+ self._last_load = time.time()
+
+ def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+ tcp=False, source=None, raise_on_no_answer=True):
+ """Query the hosts file
+
+ The known rdtypes are dns.rdatatype.A, dns.rdatatype.AAAA and
+ dns.rdatatype.CNAME.
+
+ The ``rdclass`` parameter must be dns.rdataclass.IN while the
+ ``tcp`` and ``source`` parameters are ignored.
+
+        Return a HostsAnswer instance or raise a dns.resolver.NoAnswer
+        exception.
+ """
+ now = time.time()
+ if self._last_load + self.interval < now:
+ self._load()
+ rdclass = dns.rdataclass.IN
+ if isinstance(qname, str):
+ name = qname
+ qname = dns.name.from_text(qname)
+ elif isinstance(qname, bytes):
+ name = qname.decode("ascii")
+ qname = dns.name.from_text(qname)
+ else:
+ name = str(qname)
+ name = name.lower()
+ rrset = dns.rrset.RRset(qname, rdclass, rdtype)
+ rrset.ttl = self._last_load + self.interval - now
+ if rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.A:
+ addr = self._v4.get(name)
+ if not addr and qname.is_absolute():
+ addr = self._v4.get(name[:-1])
+ if addr:
+ rrset.add(dns.rdtypes.IN.A.A(rdclass, rdtype, addr))
+ elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.AAAA:
+ addr = self._v6.get(name)
+ if not addr and qname.is_absolute():
+ addr = self._v6.get(name[:-1])
+ if addr:
+ rrset.add(dns.rdtypes.IN.AAAA.AAAA(rdclass, rdtype, addr))
+ elif rdclass == dns.rdataclass.IN and rdtype == dns.rdatatype.CNAME:
+ cname = self._aliases.get(name)
+ if not cname and qname.is_absolute():
+ cname = self._aliases.get(name[:-1])
+ if cname:
+ rrset.add(dns.rdtypes.ANY.CNAME.CNAME(
+ rdclass, rdtype, dns.name.from_text(cname)))
+ return HostsAnswer(qname, rdtype, rdclass, rrset, raise_on_no_answer)
+
+ def getaliases(self, hostname):
+ """Return a list of all the aliases of a given cname"""
+        # Due to the way we store aliases this is a bit inefficient; it
+        # clearly was an afterthought.  But this is only used by
+        # gethostbyname_ex, so it's probably fine.
+ aliases = []
+ if hostname in self._aliases:
+ cannon = self._aliases[hostname]
+ else:
+ cannon = hostname
+ aliases.append(cannon)
+ for alias, cname in self._aliases.items():
+ if cannon == cname:
+ aliases.append(alias)
+ aliases.remove(hostname)
+ return aliases
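+
+# Editorial usage sketch (not part of eventlet): on a typical POSIX host the
+# default hosts file maps 'localhost' to 127.0.0.1, so HostsResolver can
+# answer without touching the network:
+#
+#     hr = HostsResolver()                     # parses /etc/hosts by default
+#     ans = hr.query('localhost')              # rdtype defaults to A
+#     print([rr.address for rr in ans.rrset])  # e.g. ['127.0.0.1']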
+
+
+class ResolverProxy:
+ """Resolver class which can also use /etc/hosts
+
+ Initialise with a HostsResolver instance in order for it to also
+ use the hosts file.
+ """
+
+ def __init__(self, hosts_resolver=None, filename='/etc/resolv.conf'):
+ """Initialise the resolver proxy
+
+ :param hosts_resolver: An instance of HostsResolver to use.
+
+ :param filename: The filename containing the resolver
+ configuration. The default value is correct for both UNIX
+ and Windows, on Windows it will result in the configuration
+ being read from the Windows registry.
+ """
+ self._hosts = hosts_resolver
+ self._filename = filename
+ # NOTE(dtantsur): we cannot create a resolver here since this code is
+ # executed on eventlet import. In an environment without DNS, creating
+ # a Resolver will fail making eventlet unusable at all. See
+ # https://github.com/eventlet/eventlet/issues/736 for details.
+ self._cached_resolver = None
+
+ @property
+ def _resolver(self):
+ if self._cached_resolver is None:
+ self.clear()
+ return self._cached_resolver
+
+ @_resolver.setter
+ def _resolver(self, value):
+ self._cached_resolver = value
+
+ def clear(self):
+ self._resolver = dns.resolver.Resolver(filename=self._filename)
+ self._resolver.cache = dns.resolver.LRUCache()
+
+ def query(self, qname, rdtype=dns.rdatatype.A, rdclass=dns.rdataclass.IN,
+ tcp=False, source=None, raise_on_no_answer=True,
+ _hosts_rdtypes=(dns.rdatatype.A, dns.rdatatype.AAAA),
+ use_network=True):
+ """Query the resolver, using /etc/hosts if enabled.
+
+ Behavior:
+        1. if hosts is enabled and contains an answer, return it now
+        2. query nameservers for qname if use_network is True
+        3. if qname did not contain dots, pretend it was a top-level domain,
+           query "foobar." and append to the previous result
+ """
+ result = [None, None, 0]
+
+ if qname is None:
+ qname = '0.0.0.0'
+ if isinstance(qname, str) or isinstance(qname, bytes):
+ qname = dns.name.from_text(qname, None)
+
+ def step(fun, *args, **kwargs):
+ try:
+ a = fun(*args, **kwargs)
+ except Exception as e:
+ result[1] = e
+ return False
+ if a.rrset is not None and len(a.rrset):
+ if result[0] is None:
+ result[0] = a
+ else:
+ result[0].rrset.union_update(a.rrset)
+ result[2] += len(a.rrset)
+ return True
+
+ def end():
+ if result[0] is not None:
+ if raise_on_no_answer and result[2] == 0:
+ raise dns.resolver.NoAnswer
+ return result[0]
+ if result[1] is not None:
+ if raise_on_no_answer or not isinstance(result[1], dns.resolver.NoAnswer):
+ raise result[1]
+ raise dns.resolver.NXDOMAIN(qnames=(qname,))
+
+ if (self._hosts and (rdclass == dns.rdataclass.IN) and (rdtype in _hosts_rdtypes)):
+ if step(self._hosts.query, qname, rdtype, raise_on_no_answer=False):
+ if (result[0] is not None) or (result[1] is not None) or (not use_network):
+ return end()
+
+ # Main query
+ step(self._resolver.query, qname, rdtype, rdclass, tcp, source, raise_on_no_answer=False)
+
+        # `resolv.conf` docs say unqualified names must resolve from the
+        # search (or local) domain.  However, common OS `getaddrinfo()`
+        # implementations append a trailing dot (e.g. `db -> db.`) and ask
+        # the nameservers, as if a top-level domain was queried.  This step
+        # follows established practice.
+ # https://github.com/nameko/nameko/issues/392
+ # https://github.com/eventlet/eventlet/issues/363
+ if len(qname) == 1:
+ step(self._resolver.query, qname.concatenate(dns.name.root),
+ rdtype, rdclass, tcp, source, raise_on_no_answer=False)
+
+ return end()
+
+ def getaliases(self, hostname):
+ """Return a list of all the aliases of a given hostname"""
+ if self._hosts:
+ aliases = self._hosts.getaliases(hostname)
+ else:
+ aliases = []
+ while True:
+ try:
+ ans = self._resolver.query(hostname, dns.rdatatype.CNAME)
+ except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
+ break
+ else:
+ aliases.extend(str(rr.target) for rr in ans.rrset)
+ hostname = ans[0].target
+ return aliases
+
+
+resolver = ResolverProxy(hosts_resolver=HostsResolver())
+
+
+def resolve(name, family=socket.AF_INET, raises=True, _proxy=None,
+ use_network=True):
+ """Resolve a name for a given family using the global resolver proxy.
+
+ This method is called by the global getaddrinfo() function. If use_network
+ is False, only resolution via hosts file will be performed.
+
+    Return a dns.resolver.Answer instance.  If there is no answer, its
+    rrset will be empty.
+ """
+ if family == socket.AF_INET:
+ rdtype = dns.rdatatype.A
+ elif family == socket.AF_INET6:
+ rdtype = dns.rdatatype.AAAA
+ else:
+ raise socket.gaierror(socket.EAI_FAMILY,
+ 'Address family not supported')
+
+ if _proxy is None:
+ _proxy = resolver
+ try:
+ try:
+ return _proxy.query(name, rdtype, raise_on_no_answer=raises,
+ use_network=use_network)
+ except dns.resolver.NXDOMAIN:
+ if not raises:
+ return HostsAnswer(dns.name.Name(name),
+ rdtype, dns.rdataclass.IN, None, False)
+ raise
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ _raise_new_error(EAI_NODATA_ERROR)
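+
+# Editorial example (not part of eventlet): resolve() is the low-level hook
+# used by getaddrinfo() below; use_network=False limits it to the hosts file.
+#
+#     ans = resolve('localhost', socket.AF_INET, raises=False, use_network=False)
+#     print([rr.address for rr in (ans.rrset or [])])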
+
+
+def resolve_cname(host):
+ """Return the canonical name of a hostname"""
+ try:
+ ans = resolver.query(host, dns.rdatatype.CNAME)
+ except dns.resolver.NoAnswer:
+ return host
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ _raise_new_error(EAI_NODATA_ERROR)
+ else:
+ return str(ans[0].target)
+
+
+def getaliases(host):
+ """Return a list of for aliases for the given hostname
+
+ This method does translate the dnspython exceptions into
+ socket.gaierror exceptions. If no aliases are available an empty
+ list will be returned.
+ """
+ try:
+ return resolver.getaliases(host)
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ _raise_new_error(EAI_NODATA_ERROR)
+
+
+def _getaddrinfo_lookup(host, family, flags):
+ """Resolve a hostname to a list of addresses
+
+ Helper function for getaddrinfo.
+ """
+ if flags & socket.AI_NUMERICHOST:
+ _raise_new_error(EAI_NONAME_ERROR)
+ addrs = []
+ if family == socket.AF_UNSPEC:
+ err = None
+ for use_network in [False, True]:
+ for qfamily in [socket.AF_INET6, socket.AF_INET]:
+ try:
+ answer = resolve(host, qfamily, False, use_network=use_network)
+ except socket.gaierror as e:
+ if e.errno not in (socket.EAI_AGAIN, EAI_NONAME_ERROR.errno, EAI_NODATA_ERROR.errno):
+ raise
+ err = e
+ else:
+ if answer.rrset:
+ addrs.extend(rr.address for rr in answer.rrset)
+ if addrs:
+ break
+ if err is not None and not addrs:
+ raise err
+ elif family == socket.AF_INET6 and flags & socket.AI_V4MAPPED:
+ answer = resolve(host, socket.AF_INET6, False)
+ if answer.rrset:
+ addrs = [rr.address for rr in answer.rrset]
+ if not addrs or flags & socket.AI_ALL:
+ answer = resolve(host, socket.AF_INET, False)
+ if answer.rrset:
+ addrs = ['::ffff:' + rr.address for rr in answer.rrset]
+ else:
+ answer = resolve(host, family, False)
+ if answer.rrset:
+ addrs = [rr.address for rr in answer.rrset]
+ return str(answer.qname), addrs
+
+
+def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
+ """Replacement for Python's socket.getaddrinfo
+
+ This does the A and AAAA lookups asynchronously after which it
+ calls the OS' getaddrinfo(3) using the AI_NUMERICHOST flag. This
+ flag ensures getaddrinfo(3) does not use the network itself and
+ allows us to respect all the other arguments like the native OS.
+ """
+ if isinstance(host, str):
+ host = host.encode('idna').decode('ascii')
+ elif isinstance(host, bytes):
+ host = host.decode("ascii")
+ if host is not None and not is_ip_addr(host):
+ qname, addrs = _getaddrinfo_lookup(host, family, flags)
+ else:
+ qname = host
+ addrs = [host]
+ aiflags = (flags | socket.AI_NUMERICHOST) & (0xffff ^ socket.AI_CANONNAME)
+ res = []
+ err = None
+ for addr in addrs:
+ try:
+ ai = socket.getaddrinfo(addr, port, family,
+ type, proto, aiflags)
+ except OSError as e:
+ if flags & socket.AI_ADDRCONFIG:
+ err = e
+ continue
+ raise
+ res.extend(ai)
+ if not res:
+ if err:
+ raise err
+ raise socket.gaierror(socket.EAI_NONAME, 'No address found')
+ if flags & socket.AI_CANONNAME:
+ if not is_ip_addr(qname):
+ qname = resolve_cname(qname).encode('ascii').decode('idna')
+ ai = res[0]
+ res[0] = (ai[0], ai[1], ai[2], qname, ai[4])
+ return res
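+
+# Editorial usage sketch: this behaves like the stdlib call, but the lookup
+# cooperates with other greenthreads.  Requires working DNS; the hostname is
+# only an example.
+#
+#     for family, socktype, proto, canonname, sockaddr in getaddrinfo('example.com', 80):
+#         print(family, sockaddr)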
+
+
+def gethostbyname(hostname):
+ """Replacement for Python's socket.gethostbyname"""
+ if is_ipv4_addr(hostname):
+ return hostname
+ rrset = resolve(hostname)
+ return rrset[0].address
+
+
+def gethostbyname_ex(hostname):
+ """Replacement for Python's socket.gethostbyname_ex"""
+ if is_ipv4_addr(hostname):
+ return (hostname, [], [hostname])
+ ans = resolve(hostname)
+ aliases = getaliases(hostname)
+ addrs = [rr.address for rr in ans.rrset]
+ qname = str(ans.qname)
+ if qname[-1] == '.':
+ qname = qname[:-1]
+ return (qname, aliases, addrs)
+
+
+def getnameinfo(sockaddr, flags):
+ """Replacement for Python's socket.getnameinfo.
+
+ Currently only supports IPv4.
+ """
+ try:
+ host, port = sockaddr
+ except (ValueError, TypeError):
+ if not isinstance(sockaddr, tuple):
+ del sockaddr # to pass a stdlib test that is
+ # hyper-careful about reference counts
+ raise TypeError('getnameinfo() argument 1 must be a tuple')
+ else:
+ # must be ipv6 sockaddr, pretending we don't know how to resolve it
+ _raise_new_error(EAI_NONAME_ERROR)
+
+ if (flags & socket.NI_NAMEREQD) and (flags & socket.NI_NUMERICHOST):
+ # Conflicting flags. Punt.
+ _raise_new_error(EAI_NONAME_ERROR)
+
+ if is_ipv4_addr(host):
+ try:
+ rrset = resolver.query(
+ dns.reversename.from_address(host), dns.rdatatype.PTR)
+ if len(rrset) > 1:
+ raise OSError('sockaddr resolved to multiple addresses')
+ host = rrset[0].target.to_text(omit_final_dot=True)
+ except dns.exception.Timeout:
+ if flags & socket.NI_NAMEREQD:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ if flags & socket.NI_NAMEREQD:
+ _raise_new_error(EAI_NONAME_ERROR)
+ else:
+ try:
+ rrset = resolver.query(host)
+ if len(rrset) > 1:
+ raise OSError('sockaddr resolved to multiple addresses')
+ if flags & socket.NI_NUMERICHOST:
+ host = rrset[0].address
+ except dns.exception.Timeout:
+ _raise_new_error(EAI_EAGAIN_ERROR)
+ except dns.exception.DNSException:
+ raise socket.gaierror(
+ (socket.EAI_NODATA, 'No address associated with hostname'))
+
+ if not (flags & socket.NI_NUMERICSERV):
+ proto = (flags & socket.NI_DGRAM) and 'udp' or 'tcp'
+ port = socket.getservbyport(port, proto)
+
+ return (host, port)
+
+
+def _net_read(sock, count, expiration):
+ """coro friendly replacement for dns.query._net_read
+ Read the specified number of bytes from sock. Keep trying until we
+ either get the desired amount, or we hit EOF.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ s = bytearray()
+ while count > 0:
+ try:
+ n = sock.recv(count)
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+ eventlet.sleep(0.01)
+ continue
+ if n == b'':
+ raise EOFError
+ count = count - len(n)
+ s += n
+ return s
+
+
+def _net_write(sock, data, expiration):
+ """coro friendly replacement for dns.query._net_write
+ Write the specified data to the socket.
+ A Timeout exception will be raised if the operation is not completed
+ by the expiration time.
+ """
+ current = 0
+ l = len(data)
+ while current < l:
+ try:
+ current += sock.send(data[current:])
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+
+
+# Test if raise_on_truncation is an argument we should handle.
+# It was newly added in dnspython 2.0
+try:
+ dns.message.from_wire("", raise_on_truncation=True)
+except dns.message.ShortHeader:
+ _handle_raise_on_truncation = True
+except TypeError:
+ # Argument error, there is no argument "raise_on_truncation"
+ _handle_raise_on_truncation = False
+
+
+def udp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
+ af=None, source=None, source_port=0, ignore_unexpected=False,
+ one_rr_per_rrset=False, ignore_trailing=False,
+ raise_on_truncation=False, sock=None, ignore_errors=False):
+ """coro friendly replacement for dns.query.udp
+ Return the response obtained after sending a query via UDP.
+
+ @param q: the query
+ @type q: dns.message.Message
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+    @param af: the address family to use. The default is None, which
+    causes the address family to be inferred from the form of where.
+    If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the IPv4 wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param ignore_unexpected: If True, ignore responses from unexpected
+ sources. The default is False.
+ @type ignore_unexpected: bool
+ @param one_rr_per_rrset: If True, put each RR into its own
+ RRset.
+ @type one_rr_per_rrset: bool
+ @param ignore_trailing: If True, ignore trailing
+ junk at end of the received message.
+ @type ignore_trailing: bool
+ @param raise_on_truncation: If True, raise an exception if
+ the TC bit is set.
+ @type raise_on_truncation: bool
+ @param sock: the socket to use for the
+ query. If None, the default, a socket is created. Note that
+ if a socket is provided, it must be a nonblocking datagram socket,
+ and the source and source_port are ignored.
+ @type sock: socket.socket | None
+ @param ignore_errors: if various format errors or response mismatches occur,
+ continue listening.
+ @type ignore_errors: bool"""
+
+ wire = q.to_wire()
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None:
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ # Purge any stray zeroes in source address. When doing the tuple comparison
+ # below, we need to always ensure both our target and where we receive replies
+ # from are compared with all zeroes removed so that we don't erroneously fail.
+ # e.g. ('00::1', 53, 0, 0) != ('::1', 53, 0, 0)
+ where_trunc = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(where))
+ destination = (where_trunc, port, 0, 0)
+ if source is not None:
+ source = (source, source_port, 0, 0)
+
+ if sock:
+ s = sock
+ else:
+ s = socket.socket(af, socket.SOCK_DGRAM)
+ s.settimeout(timeout)
+ try:
+ expiration = compute_expiration(dns.query, timeout)
+ if source is not None:
+ s.bind(source)
+ while True:
+ try:
+ s.sendto(wire, destination)
+ break
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+ eventlet.sleep(0.01)
+ continue
+
+ tried = False
+ while True:
+ # If we've tried to receive at least once, check to see if our
+ # timer expired
+ if tried and (expiration - time.time() <= 0.0):
+ raise dns.exception.Timeout
+ # Sleep if we are retrying the operation due to a bad source
+ # address or a socket timeout.
+ if tried:
+ eventlet.sleep(0.01)
+ tried = True
+
+ try:
+ (wire, from_address) = s.recvfrom(65535)
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ continue
+ if dns.inet.af_for_address(from_address[0]) == dns.inet.AF_INET6:
+ # Purge all possible zeroes for ipv6 to match above logic
+ addr = from_address[0]
+ addr = dns.ipv6.inet_ntoa(dns.ipv6.inet_aton(addr))
+ from_address = (addr, from_address[1], from_address[2], from_address[3])
+ if from_address != destination:
+ if ignore_unexpected:
+ continue
+ else:
+ raise dns.query.UnexpectedSource(
+ 'got a response from %s instead of %s'
+ % (from_address, destination))
+ try:
+ if _handle_raise_on_truncation:
+ r = dns.message.from_wire(wire,
+ keyring=q.keyring,
+ request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing,
+ raise_on_truncation=raise_on_truncation)
+ else:
+ r = dns.message.from_wire(wire,
+ keyring=q.keyring,
+ request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing)
+ if not q.is_response(r):
+ raise dns.query.BadResponse()
+ break
+ except dns.message.Truncated as e:
+ if ignore_errors and not q.is_response(e.message()):
+ continue
+ else:
+ raise
+ except Exception:
+ if ignore_errors:
+ continue
+ else:
+ raise
+ finally:
+ s.close()
+
+ return r
+
+
+def tcp(q, where, timeout=DNS_QUERY_TIMEOUT, port=53,
+ af=None, source=None, source_port=0,
+ one_rr_per_rrset=False, ignore_trailing=False, sock=None):
+ """coro friendly replacement for dns.query.tcp
+ Return the response obtained after sending a query via TCP.
+
+ @param q: the query
+ @type q: dns.message.Message object
+ @param where: where to send the message
+ @type where: string containing an IPv4 or IPv6 address
+ @param timeout: The number of seconds to wait before the query times out.
+ If None, the default, wait forever.
+ @type timeout: float
+ @param port: The port to which to send the message. The default is 53.
+ @type port: int
+    @param af: the address family to use. The default is None, which
+    causes the address family to be inferred from the form of where.
+    If the inference attempt fails, AF_INET is used.
+ @type af: int
+ @rtype: dns.message.Message object
+ @param source: source address. The default is the IPv4 wildcard address.
+ @type source: string
+ @param source_port: The port from which to send the message.
+ The default is 0.
+ @type source_port: int
+ @param one_rr_per_rrset: If True, put each RR into its own
+ RRset.
+ @type one_rr_per_rrset: bool
+ @param ignore_trailing: If True, ignore trailing
+ junk at end of the received message.
+ @type ignore_trailing: bool
+    @param sock: the socket to use for the
+    query. If None, the default, a socket is created. Note that
+    if a socket is provided, it must be a nonblocking connected stream
+    socket, and the source and source_port are ignored.
+    @type sock: socket.socket | None"""
+
+ wire = q.to_wire()
+ if af is None:
+ try:
+ af = dns.inet.af_for_address(where)
+ except:
+ af = dns.inet.AF_INET
+ if af == dns.inet.AF_INET:
+ destination = (where, port)
+ if source is not None:
+ source = (source, source_port)
+ elif af == dns.inet.AF_INET6:
+ destination = (where, port, 0, 0)
+ if source is not None:
+ source = (source, source_port, 0, 0)
+ if sock:
+ s = sock
+ else:
+ s = socket.socket(af, socket.SOCK_STREAM)
+ s.settimeout(timeout)
+ try:
+ expiration = compute_expiration(dns.query, timeout)
+ if source is not None:
+ s.bind(source)
+ while True:
+ try:
+ s.connect(destination)
+ break
+ except socket.timeout:
+ # Q: Do we also need to catch coro.CoroutineSocketWake and pass?
+ if expiration - time.time() <= 0.0:
+ raise dns.exception.Timeout
+ eventlet.sleep(0.01)
+ continue
+
+ l = len(wire)
+ # copying the wire into tcpmsg is inefficient, but lets us
+ # avoid writev() or doing a short write that would get pushed
+ # onto the net
+ tcpmsg = struct.pack("!H", l) + wire
+ _net_write(s, tcpmsg, expiration)
+ ldata = _net_read(s, 2, expiration)
+ (l,) = struct.unpack("!H", ldata)
+ wire = bytes(_net_read(s, l, expiration))
+ finally:
+ s.close()
+ r = dns.message.from_wire(wire, keyring=q.keyring, request_mac=q.mac,
+ one_rr_per_rrset=one_rr_per_rrset,
+ ignore_trailing=ignore_trailing)
+ if not q.is_response(r):
+ raise dns.query.BadResponse()
+ return r
+
+
+def reset():
+ resolver.clear()
+
+
+# Install our coro-friendly replacements for the tcp and udp query methods.
+dns.query.tcp = tcp
+dns.query.udp = udp
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/support/greenlets.py b/tapdown/lib/python3.11/site-packages/eventlet/support/greenlets.py
new file mode 100644
index 0000000..b939328
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/support/greenlets.py
@@ -0,0 +1,4 @@
+import greenlet
+getcurrent = greenlet.greenlet.getcurrent
+GreenletExit = greenlet.greenlet.GreenletExit
+greenlet = greenlet.greenlet
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/support/psycopg2_patcher.py b/tapdown/lib/python3.11/site-packages/eventlet/support/psycopg2_patcher.py
new file mode 100644
index 0000000..2f4034a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/support/psycopg2_patcher.py
@@ -0,0 +1,55 @@
+"""A wait callback to allow psycopg2 cooperation with eventlet.
+
+Use `make_psycopg_green()` to enable eventlet support in Psycopg.
+"""
+
+# Copyright (C) 2010 Daniele Varrazzo
+# and licensed under the MIT license:
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import psycopg2
+from psycopg2 import extensions
+
+import eventlet.hubs
+
+
+def make_psycopg_green():
+ """Configure Psycopg to be used with eventlet in non-blocking way."""
+ if not hasattr(extensions, 'set_wait_callback'):
+ raise ImportError(
+ "support for coroutines not available in this Psycopg version (%s)"
+ % psycopg2.__version__)
+
+ extensions.set_wait_callback(eventlet_wait_callback)
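+
+# Editorial usage sketch (standard green-psycopg2 pattern): call this once at
+# startup, before any connections are opened.  The DSN is hypothetical.
+#
+#     make_psycopg_green()
+#     conn = psycopg2.connect('dbname=test')   # now yields instead of blocking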
+
+
+def eventlet_wait_callback(conn, timeout=-1):
+ """A wait callback useful to allow eventlet to work with Psycopg."""
+ while 1:
+ state = conn.poll()
+ if state == extensions.POLL_OK:
+ break
+ elif state == extensions.POLL_READ:
+ eventlet.hubs.trampoline(conn.fileno(), read=True)
+ elif state == extensions.POLL_WRITE:
+ eventlet.hubs.trampoline(conn.fileno(), write=True)
+ else:
+ raise psycopg2.OperationalError(
+ "Bad result from poll: %r" % state)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/support/pylib.py b/tapdown/lib/python3.11/site-packages/eventlet/support/pylib.py
new file mode 100644
index 0000000..fdb0682
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/support/pylib.py
@@ -0,0 +1,12 @@
+from py.magic import greenlet
+
+import sys
+import types
+
+
+def emulate():
+ module = types.ModuleType('greenlet')
+ sys.modules['greenlet'] = module
+ module.greenlet = greenlet
+ module.getcurrent = greenlet.getcurrent
+ module.GreenletExit = greenlet.GreenletExit
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/support/stacklesspypys.py b/tapdown/lib/python3.11/site-packages/eventlet/support/stacklesspypys.py
new file mode 100644
index 0000000..fe3638a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/support/stacklesspypys.py
@@ -0,0 +1,12 @@
+from stackless import greenlet
+
+import sys
+import types
+
+
+def emulate():
+ module = types.ModuleType('greenlet')
+ sys.modules['greenlet'] = module
+ module.greenlet = greenlet
+ module.getcurrent = greenlet.getcurrent
+ module.GreenletExit = greenlet.GreenletExit
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/support/stacklesss.py b/tapdown/lib/python3.11/site-packages/eventlet/support/stacklesss.py
new file mode 100644
index 0000000..9b3951e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/support/stacklesss.py
@@ -0,0 +1,84 @@
+"""
+Support for using stackless python. Broken and riddled with print statements
+at the moment. Please fix it!
+"""
+
+import sys
+import types
+
+import stackless
+
+caller = None
+coro_args = {}
+tasklet_to_greenlet = {}
+
+
+def getcurrent():
+ return tasklet_to_greenlet[stackless.getcurrent()]
+
+
+class FirstSwitch:
+ def __init__(self, gr):
+ self.gr = gr
+
+ def __call__(self, *args, **kw):
+ # print("first call", args, kw)
+ gr = self.gr
+ del gr.switch
+ run, gr.run = gr.run, None
+ t = stackless.tasklet(run)
+ gr.t = t
+ tasklet_to_greenlet[t] = gr
+ t.setup(*args, **kw)
+ t.run()
+
+
+class greenlet:
+ def __init__(self, run=None, parent=None):
+ self.dead = False
+ if parent is None:
+ parent = getcurrent()
+
+ self.parent = parent
+ if run is not None:
+ self.run = run
+
+ self.switch = FirstSwitch(self)
+
+ def switch(self, *args):
+ # print("switch", args)
+ global caller
+ caller = stackless.getcurrent()
+ coro_args[self] = args
+ self.t.insert()
+ stackless.schedule()
+ if caller is not self.t:
+ caller.remove()
+ rval = coro_args[self]
+ return rval
+
+ def run(self):
+ pass
+
+ def __bool__(self):
+ return self.run is None and not self.dead
+
+
+class GreenletExit(Exception):
+ pass
+
+
+def emulate():
+ module = types.ModuleType('greenlet')
+ sys.modules['greenlet'] = module
+ module.greenlet = greenlet
+ module.getcurrent = getcurrent
+ module.GreenletExit = GreenletExit
+
+ caller = stackless.getcurrent()
+ tasklet_to_greenlet[caller] = None
+ main_coro = greenlet()
+ tasklet_to_greenlet[caller] = main_coro
+ main_coro.t = caller
+ del main_coro.switch # It's already running
+ coro_args[main_coro] = None
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/timeout.py b/tapdown/lib/python3.11/site-packages/eventlet/timeout.py
new file mode 100644
index 0000000..4ab893e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/timeout.py
@@ -0,0 +1,184 @@
+# Copyright (c) 2009-2010 Denis Bilenko, denis.bilenko at gmail com
+# Copyright (c) 2010 Eventlet Contributors (see AUTHORS)
+# and licensed under the MIT license:
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import functools
+import inspect
+
+import eventlet
+from eventlet.support import greenlets as greenlet
+from eventlet.hubs import get_hub
+
+__all__ = ['Timeout', 'with_timeout', 'wrap_is_timeout', 'is_timeout']
+
+_MISSING = object()
+
+# deriving from BaseException so that "except Exception as e" doesn't catch
+# Timeout exceptions.
+
+
+class Timeout(BaseException):
+ """Raises *exception* in the current greenthread after *timeout* seconds.
+
+    When *exception* is omitted or ``None``, the :class:`Timeout` instance
+    itself is raised.  If *seconds* is None, the timer is not scheduled, and
+    the instance is only useful if you plan to raise it directly.
+
+ Timeout objects are context managers, and so can be used in with statements.
+ When used in a with statement, if *exception* is ``False``, the timeout is
+ still raised, but the context manager suppresses it, so the code outside the
+ with-block won't see it.
+ """
+
+ def __init__(self, seconds=None, exception=None):
+ self.seconds = seconds
+ self.exception = exception
+ self.timer = None
+ self.start()
+
+ def start(self):
+ """Schedule the timeout. This is called on construction, so
+ it should not be called explicitly, unless the timer has been
+ canceled."""
+ assert not self.pending, \
+ '%r is already started; to restart it, cancel it first' % self
+ if self.seconds is None: # "fake" timeout (never expires)
+ self.timer = None
+ elif self.exception is None or isinstance(self.exception, bool): # timeout that raises self
+ self.timer = get_hub().schedule_call_global(
+ self.seconds, greenlet.getcurrent().throw, self)
+ else: # regular timeout with user-provided exception
+ self.timer = get_hub().schedule_call_global(
+ self.seconds, greenlet.getcurrent().throw, self.exception)
+ return self
+
+ @property
+ def pending(self):
+ """True if the timeout is scheduled to be raised."""
+ if self.timer is not None:
+ return self.timer.pending
+ else:
+ return False
+
+ def cancel(self):
+ """If the timeout is pending, cancel it. If not using
+ Timeouts in ``with`` statements, always call cancel() in a
+ ``finally`` after the block of code that is getting timed out.
+ If not canceled, the timeout will be raised later on, in some
+ unexpected section of the application."""
+ if self.timer is not None:
+ self.timer.cancel()
+ self.timer = None
+
+ def __repr__(self):
+ classname = self.__class__.__name__
+ if self.pending:
+ pending = ' pending'
+ else:
+ pending = ''
+ if self.exception is None:
+ exception = ''
+ else:
+ exception = ' exception=%r' % self.exception
+ return '<%s at %s seconds=%s%s%s>' % (
+ classname, hex(id(self)), self.seconds, exception, pending)
+
+ def __str__(self):
+ """
+ >>> raise Timeout # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ Timeout
+ """
+ if self.seconds is None:
+ return ''
+ if self.seconds == 1:
+ suffix = ''
+ else:
+ suffix = 's'
+ if self.exception is None or self.exception is True:
+ return '%s second%s' % (self.seconds, suffix)
+ elif self.exception is False:
+ return '%s second%s (silent)' % (self.seconds, suffix)
+ else:
+ return '%s second%s (%s)' % (self.seconds, suffix, self.exception)
+
+ def __enter__(self):
+ if self.timer is None:
+ self.start()
+ return self
+
+ def __exit__(self, typ, value, tb):
+ self.cancel()
+ if value is self and self.exception is False:
+ return True
+
+ @property
+ def is_timeout(self):
+ return True
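+
+# Editorial usage sketch (the documented eventlet pattern): bound a
+# cooperative operation with a Timeout used as a context manager.
+#
+#     import eventlet
+#     try:
+#         with Timeout(0.1):
+#             eventlet.sleep(1)          # would overrun the 0.1s budget
+#     except Timeout:
+#         print('timed out')
+#
+#     with Timeout(0.1, False):          # exception=False: raised, then suppressed
+#         eventlet.sleep(1)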
+
+
+def with_timeout(seconds, function, *args, **kwds):
+ """Wrap a call to some (yielding) function with a timeout; if the called
+ function fails to return before the timeout, cancel it and return a flag
+ value.
+ """
+ timeout_value = kwds.pop("timeout_value", _MISSING)
+ timeout = Timeout(seconds)
+ try:
+ try:
+ return function(*args, **kwds)
+ except Timeout as ex:
+ if ex is timeout and timeout_value is not _MISSING:
+ return timeout_value
+ raise
+ finally:
+ timeout.cancel()
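+
+# Editorial example (documented eventlet pattern): run a green call with a
+# deadline and fall back to a flag value instead of raising.
+#
+#     import eventlet
+#     data = with_timeout(0.5, eventlet.sleep, 1, timeout_value='fallback')
+#     assert data == 'fallback'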
+
+
+def wrap_is_timeout(base):
+    '''Adds an `.is_timeout=True` attribute to objects returned by `base()`.
+
+    When `base` is a class, the attribute is added as a read-only property
+    and `base` is returned.  Otherwise, a function is returned that sets the
+    attribute on the result of each `base()` call.
+
+    Wrappers make a best effort to be transparent.
+    '''
+ if inspect.isclass(base):
+ base.is_timeout = property(lambda _: True)
+ return base
+
+ @functools.wraps(base)
+ def fun(*args, **kwargs):
+ ex = base(*args, **kwargs)
+ ex.is_timeout = True
+ return ex
+ return fun
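+
+# Editorial example: mark a custom exception class (the name is hypothetical)
+# so that is_timeout() below recognizes its instances.
+#
+#     @wrap_is_timeout
+#     class QueueDrainTimeout(Exception):
+#         pass
+#
+#     assert is_timeout(QueueDrainTimeout())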
+
+
+if isinstance(__builtins__, dict): # seen when running tests on py310, but HOW??
+ _timeout_err = __builtins__.get('TimeoutError', Timeout)
+else:
+ _timeout_err = getattr(__builtins__, 'TimeoutError', Timeout)
+
+
+def is_timeout(obj):
+ return bool(getattr(obj, 'is_timeout', False)) or isinstance(obj, _timeout_err)
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/tpool.py b/tapdown/lib/python3.11/site-packages/eventlet/tpool.py
new file mode 100644
index 0000000..1a3f412
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/tpool.py
@@ -0,0 +1,336 @@
+# Copyright (c) 2007-2009, Linden Research, Inc.
+# Copyright (c) 2007, IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import atexit
+try:
+ import _imp as imp
+except ImportError:
+ import imp
+import os
+import sys
+import traceback
+
+import eventlet
+from eventlet import event, greenio, greenthread, patcher, timeout
+
+__all__ = ['execute', 'Proxy', 'killall', 'set_num_threads']
+
+
+EXC_CLASSES = (Exception, timeout.Timeout)
+SYS_EXCS = (GeneratorExit, KeyboardInterrupt, SystemExit)
+
+QUIET = True
+
+socket = patcher.original('socket')
+threading = patcher.original('threading')
+Queue_module = patcher.original('queue')
+
+Empty = Queue_module.Empty
+Queue = Queue_module.Queue
+
+_bytetosend = b' '
+_coro = None
+_nthreads = int(os.environ.get('EVENTLET_THREADPOOL_SIZE', 20))
+_reqq = _rspq = None
+_rsock = _wsock = None
+_setup_already = False
+_threads = []
+
+
+def tpool_trampoline():
+ global _rspq
+ while True:
+ try:
+ _c = _rsock.recv(1)
+ assert _c
+ # FIXME: this is probably redundant since using sockets instead of pipe now
+ except ValueError:
+ break # will be raised when pipe is closed
+ while not _rspq.empty():
+ try:
+ (e, rv) = _rspq.get(block=False)
+ e.send(rv)
+ e = rv = None
+ except Empty:
+ pass
+
+
+def tworker():
+ global _rspq
+ while True:
+ try:
+ msg = _reqq.get()
+ except AttributeError:
+ return # can't get anything off of a dud queue
+ if msg is None:
+ return
+ (e, meth, args, kwargs) = msg
+ rv = None
+ try:
+ rv = meth(*args, **kwargs)
+ except SYS_EXCS:
+ raise
+ except EXC_CLASSES:
+ rv = sys.exc_info()
+ traceback.clear_frames(rv[1].__traceback__)
+ # test_leakage_from_tracebacks verifies that the use of
+ # exc_info does not lead to memory leaks
+ _rspq.put((e, rv))
+ msg = meth = args = kwargs = e = rv = None
+ _wsock.sendall(_bytetosend)
+
+
+def execute(meth, *args, **kwargs):
+ """
+ Execute *meth* in a Python thread, blocking the current coroutine/
+ greenthread until the method completes.
+
+ The primary use case for this is to wrap an object or module that is not
+ amenable to monkeypatching or any of the other tricks that Eventlet uses
+ to achieve cooperative yielding. With tpool, you can force such objects to
+ cooperate with green threads by sticking them in native threads, at the cost
+ of some overhead.
+ """
+ setup()
+ # if already in tpool, don't recurse into the tpool
+ # also, call functions directly if we're inside an import lock, because
+ # if meth does any importing (sadly common), it will hang
+ my_thread = threading.current_thread()
+ if my_thread in _threads or imp.lock_held() or _nthreads == 0:
+ return meth(*args, **kwargs)
+
+ e = event.Event()
+ _reqq.put((e, meth, args, kwargs))
+
+ rv = e.wait()
+ if isinstance(rv, tuple) \
+ and len(rv) == 3 \
+ and isinstance(rv[1], EXC_CLASSES):
+ (c, e, tb) = rv
+ if not QUIET:
+ traceback.print_exception(c, e, tb)
+ traceback.print_stack()
+ raise e.with_traceback(tb)
+ return rv
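+
+# Editorial usage sketch (documented tpool pattern): push a blocking call into
+# a pool thread so the hub keeps servicing other greenthreads meanwhile.
+#
+#     import time
+#     execute(time.sleep, 0.1)   # blocks a pool thread, not the hub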
+
+
+def proxy_call(autowrap, f, *args, **kwargs):
+ """
+ Call a function *f* and returns the value. If the type of the return value
+ is in the *autowrap* collection, then it is wrapped in a :class:`Proxy`
+ object before return.
+
+ Normally *f* will be called in the threadpool with :func:`execute`; if the
+ keyword argument "nonblocking" is set to ``True``, it will simply be
+ executed directly. This is useful if you have an object which has methods
+ that don't need to be called in a separate thread, but which return objects
+ that should be Proxy wrapped.
+ """
+ if kwargs.pop('nonblocking', False):
+ rv = f(*args, **kwargs)
+ else:
+ rv = execute(f, *args, **kwargs)
+ if isinstance(rv, autowrap):
+ return Proxy(rv, autowrap)
+ else:
+ return rv
+
+
+class Proxy:
+ """
+ a simple proxy-wrapper of any object that comes with a
+ methods-only interface, in order to forward every method
+ invocation onto a thread in the native-thread pool. A key
+ restriction is that the object's methods should not switch
+ greenlets or use Eventlet primitives, since they are in a
+ different thread from the main hub, and therefore might behave
+ unexpectedly. This is for running native-threaded code
+ only.
+
+ It's common to want to have some of the attributes or return
+ values also wrapped in Proxy objects (for example, database
+ connection objects produce cursor objects which also should be
+ wrapped in Proxy objects to remain nonblocking). *autowrap*, if
+ supplied, is a collection of types; if an attribute or return
+ value matches one of those types (via isinstance), it will be
+ wrapped in a Proxy. *autowrap_names* is a collection
+ of strings, which represent the names of attributes that should be
+ wrapped in Proxy objects when accessed.
+ """
+
+ def __init__(self, obj, autowrap=(), autowrap_names=()):
+ self._obj = obj
+ self._autowrap = autowrap
+ self._autowrap_names = autowrap_names
+
+ def __getattr__(self, attr_name):
+ f = getattr(self._obj, attr_name)
+ if not hasattr(f, '__call__'):
+ if isinstance(f, self._autowrap) or attr_name in self._autowrap_names:
+ return Proxy(f, self._autowrap)
+ return f
+
+ def doit(*args, **kwargs):
+ result = proxy_call(self._autowrap, f, *args, **kwargs)
+ if attr_name in self._autowrap_names and not isinstance(result, Proxy):
+ return Proxy(result)
+ return result
+ return doit
+
+    # The following are a bunch of methods that the Python interpreter
+    # doesn't use getattr to retrieve and that therefore have to be
+    # defined explicitly.
+ def __getitem__(self, key):
+ return proxy_call(self._autowrap, self._obj.__getitem__, key)
+
+ def __setitem__(self, key, value):
+ return proxy_call(self._autowrap, self._obj.__setitem__, key, value)
+
+ def __deepcopy__(self, memo=None):
+ return proxy_call(self._autowrap, self._obj.__deepcopy__, memo)
+
+ def __copy__(self, memo=None):
+ return proxy_call(self._autowrap, self._obj.__copy__, memo)
+
+ def __call__(self, *a, **kw):
+ if '__call__' in self._autowrap_names:
+ return Proxy(proxy_call(self._autowrap, self._obj, *a, **kw))
+ else:
+ return proxy_call(self._autowrap, self._obj, *a, **kw)
+
+ def __enter__(self):
+ return proxy_call(self._autowrap, self._obj.__enter__)
+
+ def __exit__(self, *exc):
+ return proxy_call(self._autowrap, self._obj.__exit__, *exc)
+
+ # these don't go through a proxy call, because they're likely to
+ # be called often, and are unlikely to be implemented on the
+ # wrapped object in such a way that they would block
+ def __eq__(self, rhs):
+ return self._obj == rhs
+
+ def __hash__(self):
+ return self._obj.__hash__()
+
+ def __repr__(self):
+ return self._obj.__repr__()
+
+ def __str__(self):
+ return self._obj.__str__()
+
+ def __len__(self):
+ return len(self._obj)
+
+ def __nonzero__(self):
+ return bool(self._obj)
+ # Python3
+ __bool__ = __nonzero__
+
+ def __iter__(self):
+ it = iter(self._obj)
+ if it == self._obj:
+ return self
+ else:
+ return Proxy(it)
+
+ def next(self):
+ return proxy_call(self._autowrap, next, self._obj)
+ # Python3
+ __next__ = next
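+
+# Editorial usage sketch (documented tpool pattern): wrap a blocking,
+# methods-only object so its calls run in the pool; sqlite3 is used purely
+# as an illustration.
+#
+#     import sqlite3
+#     conn = Proxy(sqlite3.connect(':memory:', check_same_thread=False),
+#                  autowrap_names=('cursor',))
+#     cur = conn.cursor()        # returns a Proxy-wrapped cursor object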
+
+
+def setup():
+ global _rsock, _wsock, _coro, _setup_already, _rspq, _reqq
+ if _setup_already:
+ return
+ else:
+ _setup_already = True
+
+ assert _nthreads >= 0, "Can't specify negative number of threads"
+ if _nthreads == 0:
+ import warnings
+ warnings.warn("Zero threads in tpool. All tpool.execute calls will\
+ execute in main thread. Check the value of the environment \
+ variable EVENTLET_THREADPOOL_SIZE.", RuntimeWarning)
+ _reqq = Queue(maxsize=-1)
+ _rspq = Queue(maxsize=-1)
+
+ # connected socket pair
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.bind(('127.0.0.1', 0))
+ sock.listen(1)
+ csock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ csock.connect(sock.getsockname())
+ csock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
+ _wsock, _addr = sock.accept()
+ _wsock.settimeout(None)
+ _wsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
+ sock.close()
+ _rsock = greenio.GreenSocket(csock)
+ _rsock.settimeout(None)
+
+ for i in range(_nthreads):
+ t = threading.Thread(target=tworker,
+ name="tpool_thread_%s" % i)
+ t.daemon = True
+ t.start()
+ _threads.append(t)
+
+ _coro = greenthread.spawn_n(tpool_trampoline)
+ # This yield fixes subtle error with GreenSocket.__del__
+ eventlet.sleep(0)
+
+
+# Avoid ResourceWarning unclosed socket on Python3.2+
+@atexit.register
+def killall():
+ global _setup_already, _rspq, _rsock, _wsock
+ if not _setup_already:
+ return
+
+ # This yield fixes freeze in some scenarios
+ eventlet.sleep(0)
+
+ for thr in _threads:
+ _reqq.put(None)
+ for thr in _threads:
+ thr.join()
+ del _threads[:]
+
+ # return any remaining results
+ while (_rspq is not None) and not _rspq.empty():
+ try:
+ (e, rv) = _rspq.get(block=False)
+ e.send(rv)
+ e = rv = None
+ except Empty:
+ pass
+
+ if _coro is not None:
+ greenthread.kill(_coro)
+ if _rsock is not None:
+ _rsock.close()
+ _rsock = None
+ if _wsock is not None:
+ _wsock.close()
+ _wsock = None
+ _rspq = None
+ _setup_already = False
+
+
+def set_num_threads(nthreads):
+ global _nthreads
+ _nthreads = nthreads
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/websocket.py b/tapdown/lib/python3.11/site-packages/eventlet/websocket.py
new file mode 100644
index 0000000..3d50f70
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/websocket.py
@@ -0,0 +1,868 @@
+import base64
+import codecs
+import collections
+import errno
+from random import Random
+from socket import error as SocketError
+import string
+import struct
+import sys
+import time
+
+import zlib
+
+try:
+ from hashlib import md5, sha1
+except ImportError: # pragma NO COVER
+ from md5 import md5
+ from sha import sha as sha1
+
+from eventlet import semaphore
+from eventlet import wsgi
+from eventlet.green import socket
+from eventlet.support import get_errno
+
+# Python 2's utf8 decoding is more lenient than we'd like
+# In order to pass autobahn's testsuite we need stricter validation
+# if available...
+for _mod in ('wsaccel.utf8validator', 'autobahn.utf8validator'):
+    # autobahn has its own Python-based validator; in newer versions it
+    # prefers to use wsaccel, a Cython-based implementation, if available.
+    # wsaccel may also be installed without autobahn, or with an earlier version.
+ try:
+ utf8validator = __import__(_mod, {}, {}, [''])
+ except ImportError:
+ utf8validator = None
+ else:
+ break
+
+ACCEPTABLE_CLIENT_ERRORS = {errno.ECONNRESET, errno.EPIPE, errno.ESHUTDOWN}
+DEFAULT_MAX_FRAME_LENGTH = 8 << 20
+
+__all__ = ["WebSocketWSGI", "WebSocket"]
+PROTOCOL_GUID = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
+VALID_CLOSE_STATUS = set(
+ list(range(1000, 1004)) +
+ list(range(1007, 1012)) +
+ # 3000-3999: reserved for use by libraries, frameworks,
+ # and applications
+ list(range(3000, 4000)) +
+ # 4000-4999: reserved for private use and thus can't
+ # be registered
+ list(range(4000, 5000))
+)
+
+
+class BadRequest(Exception):
+ def __init__(self, status='400 Bad Request', body=None, headers=None):
+ super(Exception, self).__init__()
+ self.status = status
+ self.body = body
+ self.headers = headers
+
+
+class WebSocketWSGI:
+ """Wraps a websocket handler function in a WSGI application.
+
+ Use it like this::
+
+ @websocket.WebSocketWSGI
+ def my_handler(ws):
+ from_browser = ws.wait()
+ ws.send("from server")
+
+ The single argument to the function will be an instance of
+ :class:`WebSocket`. To close the socket, simply return from the
+ function. Note that the server will log the websocket request at
+ the time of closure.
+
+    An optional argument max_frame_length can be given, which sets the
+    maximum incoming *uncompressed* payload length of a frame.  By default,
+    this is 8 MiB.  Note that excessive values here might create a DoS
+    attack vector.
+ """
+
+ def __init__(self, handler, max_frame_length=DEFAULT_MAX_FRAME_LENGTH):
+ self.handler = handler
+ self.protocol_version = None
+ self.support_legacy_versions = True
+ self.supported_protocols = []
+ self.origin_checker = None
+ self.max_frame_length = max_frame_length
+
+ @classmethod
+ def configured(cls,
+ handler=None,
+ supported_protocols=None,
+ origin_checker=None,
+ support_legacy_versions=False):
+ def decorator(handler):
+ inst = cls(handler)
+ inst.support_legacy_versions = support_legacy_versions
+ inst.origin_checker = origin_checker
+ if supported_protocols:
+ inst.supported_protocols = supported_protocols
+ return inst
+ if handler is None:
+ return decorator
+ return decorator(handler)
+
+ def __call__(self, environ, start_response):
+ http_connection_parts = [
+ part.strip()
+ for part in environ.get('HTTP_CONNECTION', '').lower().split(',')]
+ if not ('upgrade' in http_connection_parts and
+ environ.get('HTTP_UPGRADE', '').lower() == 'websocket'):
+ # need to check a few more things here for true compliance
+ start_response('400 Bad Request', [('Connection', 'close')])
+ return []
+
+ try:
+ if 'HTTP_SEC_WEBSOCKET_VERSION' in environ:
+ ws = self._handle_hybi_request(environ)
+ elif self.support_legacy_versions:
+ ws = self._handle_legacy_request(environ)
+ else:
+ raise BadRequest()
+ except BadRequest as e:
+ status = e.status
+ body = e.body or b''
+ headers = e.headers or []
+ start_response(status,
+ [('Connection', 'close'), ] + headers)
+ return [body]
+
+ # We're ready to switch protocols; if running under Eventlet
+ # (this is not always the case) then flag the connection as
+ # idle to play well with a graceful stop
+ if 'eventlet.set_idle' in environ:
+ environ['eventlet.set_idle']()
+ try:
+ self.handler(ws)
+ except OSError as e:
+ if get_errno(e) not in ACCEPTABLE_CLIENT_ERRORS:
+ raise
+ # Make sure we send the closing frame
+ ws._send_closing_frame(True)
+ # use this undocumented feature of eventlet.wsgi to ensure that it
+ # doesn't barf on the fact that we didn't call start_response
+ wsgi.WSGI_LOCAL.already_handled = True
+ return []
+
+ def _handle_legacy_request(self, environ):
+ if 'eventlet.input' in environ:
+ sock = environ['eventlet.input'].get_socket()
+ elif 'gunicorn.socket' in environ:
+ sock = environ['gunicorn.socket']
+ else:
+ raise Exception('No eventlet.input or gunicorn.socket present in environ.')
+
+ if 'HTTP_SEC_WEBSOCKET_KEY1' in environ:
+ self.protocol_version = 76
+ if 'HTTP_SEC_WEBSOCKET_KEY2' not in environ:
+ raise BadRequest()
+ else:
+ self.protocol_version = 75
+
+ if self.protocol_version == 76:
+ key1 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY1'])
+ key2 = self._extract_number(environ['HTTP_SEC_WEBSOCKET_KEY2'])
+ # There's no content-length header in the request, but it has 8
+ # bytes of data.
+ environ['wsgi.input'].content_length = 8
+ key3 = environ['wsgi.input'].read(8)
+ key = struct.pack(">II", key1, key2) + key3
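+            # draft-76 (hixie) handshake: the server proves it read the
+            # challenge by replying with the MD5 digest of the two key
+            # numbers packed as big-endian 32-bit ints plus the 8-byte body.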
+ response = md5(key).digest()
+
+ # Start building the response
+ scheme = 'ws'
+ if environ.get('wsgi.url_scheme') == 'https':
+ scheme = 'wss'
+ location = '%s://%s%s%s' % (
+ scheme,
+ environ.get('HTTP_HOST'),
+ environ.get('SCRIPT_NAME'),
+ environ.get('PATH_INFO')
+ )
+ qs = environ.get('QUERY_STRING')
+ if qs is not None:
+ location += '?' + qs
+ if self.protocol_version == 75:
+ handshake_reply = (
+ b"HTTP/1.1 101 Web Socket Protocol Handshake\r\n"
+ b"Upgrade: WebSocket\r\n"
+ b"Connection: Upgrade\r\n"
+ b"WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
+ b"WebSocket-Location: " + location.encode() + b"\r\n\r\n"
+ )
+ elif self.protocol_version == 76:
+ handshake_reply = (
+ b"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
+ b"Upgrade: WebSocket\r\n"
+ b"Connection: Upgrade\r\n"
+ b"Sec-WebSocket-Origin: " + environ.get('HTTP_ORIGIN').encode() + b"\r\n"
+ b"Sec-WebSocket-Protocol: " +
+ environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'default').encode() + b"\r\n"
+ b"Sec-WebSocket-Location: " + location.encode() + b"\r\n"
+ b"\r\n" + response
+ )
+ else: # pragma NO COVER
+ raise ValueError("Unknown WebSocket protocol version.")
+ sock.sendall(handshake_reply)
+ return WebSocket(sock, environ, self.protocol_version)
+
+ def _parse_extension_header(self, header):
+ if header is None:
+ return None
+ res = {}
+ for ext in header.split(","):
+ parts = ext.split(";")
+ config = {}
+ for part in parts[1:]:
+ key_val = part.split("=")
+ if len(key_val) == 1:
+ config[key_val[0].strip().lower()] = True
+ else:
+ config[key_val[0].strip().lower()] = key_val[1].strip().strip('"').lower()
+ res.setdefault(parts[0].strip().lower(), []).append(config)
+ return res
+
+ def _negotiate_permessage_deflate(self, extensions):
+ if not extensions:
+ return None
+ deflate = extensions.get("permessage-deflate")
+ if deflate is None:
+ return None
+ for config in deflate:
+ # We'll evaluate each config in the client's preferred order and pick
+ # the first that we can support.
+ want_config = {
+ # These are bool options, we can support both
+ "server_no_context_takeover": config.get("server_no_context_takeover", False),
+ "client_no_context_takeover": config.get("client_no_context_takeover", False)
+ }
+ # These are either bool OR int options. True means the client can accept a value
+ # for the option, a number means the client wants that specific value.
+ max_wbits = min(zlib.MAX_WBITS, 15)
+ mwb = config.get("server_max_window_bits")
+ if mwb is not None:
+ if mwb is True:
+ want_config["server_max_window_bits"] = max_wbits
+ else:
+ want_config["server_max_window_bits"] = \
+ int(config.get("server_max_window_bits", max_wbits))
+ if not (8 <= want_config["server_max_window_bits"] <= 15):
+ continue
+ mwb = config.get("client_max_window_bits")
+ if mwb is not None:
+ if mwb is True:
+ want_config["client_max_window_bits"] = max_wbits
+ else:
+ want_config["client_max_window_bits"] = \
+ int(config.get("client_max_window_bits", max_wbits))
+ if not (8 <= want_config["client_max_window_bits"] <= 15):
+ continue
+ return want_config
+ return None
+
+ def _format_extension_header(self, parsed_extensions):
+ if not parsed_extensions:
+ return None
+ parts = []
+ for name, config in parsed_extensions.items():
+ ext_parts = [name.encode()]
+ for key, value in config.items():
+ if value is False:
+ pass
+ elif value is True:
+ ext_parts.append(key.encode())
+ else:
+ ext_parts.append(("%s=%s" % (key, str(value))).encode())
+ parts.append(b"; ".join(ext_parts))
+ return b", ".join(parts)
+
+ def _handle_hybi_request(self, environ):
+ if 'eventlet.input' in environ:
+ sock = environ['eventlet.input'].get_socket()
+ elif 'gunicorn.socket' in environ:
+ sock = environ['gunicorn.socket']
+ else:
+ raise Exception('No eventlet.input or gunicorn.socket present in environ.')
+
+ hybi_version = environ['HTTP_SEC_WEBSOCKET_VERSION']
+ if hybi_version not in ('8', '13', ):
+ raise BadRequest(status='426 Upgrade Required',
+ headers=[('Sec-WebSocket-Version', '8, 13')])
+ self.protocol_version = int(hybi_version)
+ if 'HTTP_SEC_WEBSOCKET_KEY' not in environ:
+ # That's bad.
+ raise BadRequest()
+ origin = environ.get(
+ 'HTTP_ORIGIN',
+ (environ.get('HTTP_SEC_WEBSOCKET_ORIGIN', '')
+ if self.protocol_version <= 8 else ''))
+ if self.origin_checker is not None:
+ if not self.origin_checker(environ.get('HTTP_HOST'), origin):
+ raise BadRequest(status='403 Forbidden')
+ protocols = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', None)
+ negotiated_protocol = None
+ if protocols:
+ for p in (i.strip() for i in protocols.split(',')):
+ if p in self.supported_protocols:
+ negotiated_protocol = p
+ break
+
+ key = environ['HTTP_SEC_WEBSOCKET_KEY']
+ response = base64.b64encode(sha1(key.encode() + PROTOCOL_GUID).digest())
+ handshake_reply = [b"HTTP/1.1 101 Switching Protocols",
+ b"Upgrade: websocket",
+ b"Connection: Upgrade",
+ b"Sec-WebSocket-Accept: " + response]
+ if negotiated_protocol:
+ handshake_reply.append(b"Sec-WebSocket-Protocol: " + negotiated_protocol.encode())
+
+ parsed_extensions = {}
+ extensions = self._parse_extension_header(environ.get("HTTP_SEC_WEBSOCKET_EXTENSIONS"))
+
+ deflate = self._negotiate_permessage_deflate(extensions)
+ if deflate is not None:
+ parsed_extensions["permessage-deflate"] = deflate
+
+ formatted_ext = self._format_extension_header(parsed_extensions)
+ if formatted_ext is not None:
+ handshake_reply.append(b"Sec-WebSocket-Extensions: " + formatted_ext)
+
+ sock.sendall(b'\r\n'.join(handshake_reply) + b'\r\n\r\n')
+ return RFC6455WebSocket(sock, environ, self.protocol_version,
+ protocol=negotiated_protocol,
+ extensions=parsed_extensions,
+ max_frame_length=self.max_frame_length)
+
+ def _extract_number(self, value):
+ """
+ Utility function which, given a string like 'g98sd 5[]221@1', will
+        return 9852211. Used to parse the Sec-WebSocket-Key1/2 headers.
+ """
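+        # draft-76 rule: concatenate the embedded digits, then divide by the
+        # number of space characters in the header value.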
+ out = ""
+ spaces = 0
+ for char in value:
+ if char in string.digits:
+ out += char
+ elif char == " ":
+ spaces += 1
+ return int(out) // spaces
+
+
+class WebSocket:
+ """A websocket object that handles the details of
+ serialization/deserialization to the socket.
+
+ The primary way to interact with a :class:`WebSocket` object is to
+ call :meth:`send` and :meth:`wait` in order to pass messages back
+ and forth with the browser. Also available are the following
+ properties:
+
+ path
+ The path value of the request. This is the same as the WSGI PATH_INFO variable,
+ but more convenient.
+ protocol
+ The value of the Websocket-Protocol header.
+ origin
+ The value of the 'Origin' header.
+ environ
+ The full WSGI environment for this request.
+
+ """
+
+ def __init__(self, sock, environ, version=76):
+ """
+        :param sock: The eventlet socket
+        :type sock: :class:`eventlet.greenio.GreenSocket`
+ :param environ: The wsgi environment
+ :param version: The WebSocket spec version to follow (default is 76)
+ """
+ self.log = environ.get('wsgi.errors', sys.stderr)
+ self.log_context = 'server={shost}/{spath} client={caddr}:{cport}'.format(
+ shost=environ.get('HTTP_HOST'),
+ spath=environ.get('SCRIPT_NAME', '') + environ.get('PATH_INFO', ''),
+ caddr=environ.get('REMOTE_ADDR'), cport=environ.get('REMOTE_PORT'),
+ )
+ self.socket = sock
+ self.origin = environ.get('HTTP_ORIGIN')
+ self.protocol = environ.get('HTTP_WEBSOCKET_PROTOCOL')
+ self.path = environ.get('PATH_INFO')
+ self.environ = environ
+ self.version = version
+ self.websocket_closed = False
+ self._buf = b""
+ self._msgs = collections.deque()
+ self._sendlock = semaphore.Semaphore()
+
+ def _pack_message(self, message):
+ """Pack the message inside ``00`` and ``FF``
+
+ As per the dataframing section (5.3) for the websocket spec
+ """
+ if isinstance(message, str):
+ message = message.encode('utf-8')
+ elif not isinstance(message, bytes):
+ message = str(message).encode()
+ packed = b"\x00" + message + b"\xFF"
+ return packed
+
+ def _parse_messages(self):
+ """ Parses for messages in the buffer *buf*. It is assumed that
+ the buffer contains the start character for a message, but that it
+ may contain only part of the rest of the message.
+
+ Returns an array of messages, and the buffer remainder that
+ didn't contain any full messages."""
+ msgs = []
+ end_idx = 0
+ buf = self._buf
+ while buf:
+ frame_type = buf[0]
+ if frame_type == 0:
+ # Normal message.
+ end_idx = buf.find(b"\xFF")
+ if end_idx == -1: # pragma NO COVER
+ break
+ msgs.append(buf[1:end_idx].decode('utf-8', 'replace'))
+ buf = buf[end_idx + 1:]
+ elif frame_type == 255:
+ # Closing handshake.
+ assert buf[1] == 0, "Unexpected closing handshake: %r" % buf
+ self.websocket_closed = True
+ break
+ else:
+ raise ValueError("Don't understand how to parse this type of message: %r" % buf)
+ self._buf = buf
+ return msgs
+
+ def send(self, message):
+ """Send a message to the browser.
+
+        *message* should be convertible to a string; unicode objects should be
+ encodable as utf-8. Raises socket.error with errno of 32
+ (broken pipe) if the socket has already been closed by the client."""
+ packed = self._pack_message(message)
+ # if two greenthreads are trying to send at the same time
+ # on the same socket, sendlock prevents interleaving and corruption
+ self._sendlock.acquire()
+ try:
+ self.socket.sendall(packed)
+ finally:
+ self._sendlock.release()
+
+ def wait(self):
+ """Waits for and deserializes messages.
+
+ Returns a single message; the oldest not yet processed. If the client
+ has already closed the connection, returns None. This is different
+ from normal socket behavior because the empty string is a valid
+ websocket message."""
+ while not self._msgs:
+ # Websocket might be closed already.
+ if self.websocket_closed:
+ return None
+ # no parsed messages, must mean buf needs more data
+ delta = self.socket.recv(8096)
+ if delta == b'':
+ return None
+ self._buf += delta
+ msgs = self._parse_messages()
+ self._msgs.extend(msgs)
+ return self._msgs.popleft()
+
+ def _send_closing_frame(self, ignore_send_errors=False):
+ """Sends the closing frame to the client, if required."""
+ if self.version == 76 and not self.websocket_closed:
+ try:
+ self.socket.sendall(b"\xff\x00")
+ except OSError:
+ # Sometimes, like when the remote side cuts off the connection,
+ # we don't care about this.
+ if not ignore_send_errors: # pragma NO COVER
+ raise
+ self.websocket_closed = True
+
+ def close(self):
+ """Forcibly close the websocket; generally it is preferable to
+ return from the handler method."""
+ try:
+ self._send_closing_frame(True)
+ self.socket.shutdown(True)
+ except OSError as e:
+ if e.errno != errno.ENOTCONN:
+ self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e))
+ finally:
+ self.socket.close()
+
+
+class ConnectionClosedError(Exception):
+ pass
+
+
+class FailedConnectionError(Exception):
+ def __init__(self, status, message):
+ super().__init__(status, message)
+ self.message = message
+ self.status = status
+
+
+class ProtocolError(ValueError):
+ pass
+
+
+class RFC6455WebSocket(WebSocket):
+ def __init__(self, sock, environ, version=13, protocol=None, client=False, extensions=None,
+ max_frame_length=DEFAULT_MAX_FRAME_LENGTH):
+ super().__init__(sock, environ, version)
+ self.iterator = self._iter_frames()
+ self.client = client
+ self.protocol = protocol
+ self.extensions = extensions or {}
+
+ self._deflate_enc = None
+ self._deflate_dec = None
+ self.max_frame_length = max_frame_length
+ self._remote_close_data = None
+
+ class UTF8Decoder:
+ def __init__(self):
+ if utf8validator:
+ self.validator = utf8validator.Utf8Validator()
+ else:
+ self.validator = None
+ decoderclass = codecs.getincrementaldecoder('utf8')
+ self.decoder = decoderclass()
+
+ def reset(self):
+ if self.validator:
+ self.validator.reset()
+ self.decoder.reset()
+
+ def decode(self, data, final=False):
+ if self.validator:
+ valid, eocp, c_i, t_i = self.validator.validate(data)
+ if not valid:
+ raise ValueError('Data is not valid unicode')
+ return self.decoder.decode(data, final)
+
+ def _get_permessage_deflate_enc(self):
+ options = self.extensions.get("permessage-deflate")
+ if options is None:
+ return None
+
+ def _make():
+ return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED,
+ -options.get("client_max_window_bits" if self.client
+ else "server_max_window_bits",
+ zlib.MAX_WBITS))
+
+ if options.get("client_no_context_takeover" if self.client
+ else "server_no_context_takeover"):
+ # This option means we have to make a new one every time
+ return _make()
+ else:
+ if self._deflate_enc is None:
+ self._deflate_enc = _make()
+ return self._deflate_enc
+
+ def _get_permessage_deflate_dec(self, rsv1):
+ options = self.extensions.get("permessage-deflate")
+ if options is None or not rsv1:
+ return None
+
+ def _make():
+ return zlib.decompressobj(-options.get("server_max_window_bits" if self.client
+ else "client_max_window_bits",
+ zlib.MAX_WBITS))
+
+ if options.get("server_no_context_takeover" if self.client
+ else "client_no_context_takeover"):
+ # This option means we have to make a new one every time
+ return _make()
+ else:
+ if self._deflate_dec is None:
+ self._deflate_dec = _make()
+ return self._deflate_dec
+
+ def _get_bytes(self, numbytes):
+ data = b''
+ while len(data) < numbytes:
+ d = self.socket.recv(numbytes - len(data))
+ if not d:
+ raise ConnectionClosedError()
+ data = data + d
+ return data
+
+ class Message:
+ def __init__(self, opcode, max_frame_length, decoder=None, decompressor=None):
+ self.decoder = decoder
+ self.data = []
+ self.finished = False
+ self.opcode = opcode
+ self.decompressor = decompressor
+ self.max_frame_length = max_frame_length
+
+ def push(self, data, final=False):
+ self.finished = final
+ self.data.append(data)
+
+ def getvalue(self):
+ data = b"".join(self.data)
+ if not self.opcode & 8 and self.decompressor:
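+                # Inflate at most max_frame_length bytes; anything left in
+                # unconsumed_tail means the decompressed message would exceed
+                # the limit, so the connection is failed with close code 1009
+                # (message too big).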
+ data = self.decompressor.decompress(data + b"\x00\x00\xff\xff", self.max_frame_length)
+ if self.decompressor.unconsumed_tail:
+ raise FailedConnectionError(
+ 1009,
+ "Incoming compressed frame exceeds length limit of {} bytes.".format(self.max_frame_length))
+
+ if self.decoder:
+ data = self.decoder.decode(data, self.finished)
+ return data
+
+ @staticmethod
+ def _apply_mask(data, mask, length=None, offset=0):
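+        # RFC 6455 section 5.3: XOR each payload byte with one of the four
+        # mask bytes, chosen by (offset + index) % 4; ``offset`` lets callers
+        # continue the rotation across partial reads of one frame.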
+ if length is None:
+ length = len(data)
+ cnt = range(length)
+ return b''.join(bytes((data[i] ^ mask[(offset + i) % 4],)) for i in cnt)
+
+ def _handle_control_frame(self, opcode, data):
+ if opcode == 8: # connection close
+ self._remote_close_data = data
+ if not data:
+ status = 1000
+ elif len(data) > 1:
+ status = struct.unpack_from('!H', data)[0]
+ if not status or status not in VALID_CLOSE_STATUS:
+ raise FailedConnectionError(
+ 1002,
+ "Unexpected close status code.")
+ try:
+ data = self.UTF8Decoder().decode(data[2:], True)
+ except (UnicodeDecodeError, ValueError):
+ raise FailedConnectionError(
+ 1002,
+ "Close message data should be valid UTF-8.")
+ else:
+ status = 1002
+ self.close(close_data=(status, ''))
+ raise ConnectionClosedError()
+ elif opcode == 9: # ping
+ self.send(data, control_code=0xA)
+ elif opcode == 0xA: # pong
+ pass
+ else:
+ raise FailedConnectionError(
+ 1002, "Unknown control frame received.")
+
+ def _iter_frames(self):
+ fragmented_message = None
+ try:
+ while True:
+ message = self._recv_frame(message=fragmented_message)
+ if message.opcode & 8:
+ self._handle_control_frame(
+ message.opcode, message.getvalue())
+ continue
+ if fragmented_message and message is not fragmented_message:
+ raise RuntimeError('Unexpected message change.')
+ fragmented_message = message
+ if message.finished:
+ data = fragmented_message.getvalue()
+ fragmented_message = None
+ yield data
+ except FailedConnectionError:
+ exc_typ, exc_val, exc_tb = sys.exc_info()
+ self.close(close_data=(exc_val.status, exc_val.message))
+ except ConnectionClosedError:
+ return
+ except Exception:
+ self.close(close_data=(1011, 'Internal Server Error'))
+ raise
+
+ def _recv_frame(self, message=None):
+ recv = self._get_bytes
+
+ # Unpacking the frame described in Section 5.2 of RFC6455
+ # (https://tools.ietf.org/html/rfc6455#section-5.2)
+ header = recv(2)
+ a, b = struct.unpack('!BB', header)
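+        # First byte: FIN flag (bit 7), RSV1-3 (bits 6-4), opcode (bits 3-0).
+        # Second byte: MASK flag (bit 7) and the 7-bit base payload length.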
+ finished = a >> 7 == 1
+ rsv123 = a >> 4 & 7
+ rsv1 = rsv123 & 4
+ if rsv123:
+ if rsv1 and "permessage-deflate" not in self.extensions:
+                # RSV bits must be zero, unless the frame is compressed, in
+                # which case RSV1 is set
+ raise FailedConnectionError(
+ 1002,
+ "RSV1, RSV2, RSV3: MUST be 0 unless an extension is"
+ " negotiated that defines meanings for non-zero values.")
+ opcode = a & 15
+ if opcode not in (0, 1, 2, 8, 9, 0xA):
+ raise FailedConnectionError(1002, "Unknown opcode received.")
+ masked = b & 128 == 128
+ if not masked and not self.client:
+ raise FailedConnectionError(1002, "A client MUST mask all frames"
+ " that it sends to the server")
+ length = b & 127
+ if opcode & 8:
+ if not finished:
+ raise FailedConnectionError(1002, "Control frames must not"
+ " be fragmented.")
+ if length > 125:
+ raise FailedConnectionError(
+ 1002,
+ "All control frames MUST have a payload length of 125"
+ " bytes or less")
+ elif opcode and message:
+ raise FailedConnectionError(
+ 1002,
+ "Received a non-continuation opcode within"
+ " fragmented message.")
+ elif not opcode and not message:
+ raise FailedConnectionError(
+ 1002,
+ "Received continuation opcode with no previous"
+ " fragments received.")
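+        # Base lengths 126 and 127 are escape values: the actual payload
+        # length follows as a 16-bit or 64-bit unsigned integer, respectively.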
+ if length == 126:
+ length = struct.unpack('!H', recv(2))[0]
+ elif length == 127:
+ length = struct.unpack('!Q', recv(8))[0]
+
+ if length > self.max_frame_length:
+ raise FailedConnectionError(1009, "Incoming frame of {} bytes is above length limit of {} bytes.".format(
+ length, self.max_frame_length))
+ if masked:
+ mask = struct.unpack('!BBBB', recv(4))
+ received = 0
+ if not message or opcode & 8:
+ decoder = self.UTF8Decoder() if opcode == 1 else None
+ decompressor = self._get_permessage_deflate_dec(rsv1)
+ message = self.Message(opcode, self.max_frame_length, decoder=decoder, decompressor=decompressor)
+ if not length:
+ message.push(b'', final=finished)
+ else:
+ while received < length:
+ d = self.socket.recv(length - received)
+ if not d:
+ raise ConnectionClosedError()
+ dlen = len(d)
+ if masked:
+ d = self._apply_mask(d, mask, length=dlen, offset=received)
+ received = received + dlen
+ try:
+ message.push(d, final=finished)
+ except (UnicodeDecodeError, ValueError):
+ raise FailedConnectionError(
+ 1007, "Text data must be valid utf-8")
+ return message
+
+ def _pack_message(self, message, masked=False,
+ continuation=False, final=True, control_code=None):
+ is_text = False
+ if isinstance(message, str):
+ message = message.encode('utf-8')
+ is_text = True
+
+ compress_bit = 0
+ compressor = self._get_permessage_deflate_enc()
+ # Control frames are identified by opcodes where the most significant
+ # bit of the opcode is 1. Currently defined opcodes for control frames
+ # include 0x8 (Close), 0x9 (Ping), and 0xA (Pong). Opcodes 0xB-0xF are
+ # reserved for further control frames yet to be defined.
+ # https://datatracker.ietf.org/doc/html/rfc6455#section-5.5
+ is_control_frame = (control_code or 0) & 8
+ # An endpoint MUST NOT set the "Per-Message Compressed" bit of control
+ # frames and non-first fragments of a data message. An endpoint
+ # receiving such a frame MUST _Fail the WebSocket Connection_.
+ # https://datatracker.ietf.org/doc/html/rfc7692#section-6.1
+ if message and compressor and not is_control_frame:
+ message = compressor.compress(message)
+ message += compressor.flush(zlib.Z_SYNC_FLUSH)
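+            # RFC 7692: drop the 0x00 0x00 0xff 0xff tail that Z_SYNC_FLUSH
+            # appends; the receiver re-adds it before inflating (see
+            # Message.getvalue above).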
+ assert message[-4:] == b"\x00\x00\xff\xff"
+ message = message[:-4]
+ compress_bit = 1 << 6
+
+ length = len(message)
+ if not length:
+ # no point masking empty data
+ masked = False
+ if control_code:
+ if control_code not in (8, 9, 0xA):
+ raise ProtocolError('Unknown control opcode.')
+ if continuation or not final:
+ raise ProtocolError('Control frame cannot be a fragment.')
+ if length > 125:
+ raise ProtocolError('Control frame data too large (>125).')
+ header = struct.pack('!B', control_code | 1 << 7)
+ else:
+ opcode = 0 if continuation else ((1 if is_text else 2) | compress_bit)
+ header = struct.pack('!B', opcode | (1 << 7 if final else 0))
+ lengthdata = 1 << 7 if masked else 0
+ if length > 65535:
+ lengthdata = struct.pack('!BQ', lengthdata | 127, length)
+ elif length > 125:
+ lengthdata = struct.pack('!BH', lengthdata | 126, length)
+ else:
+ lengthdata = struct.pack('!B', lengthdata | length)
+ if masked:
+ # NOTE: RFC6455 states:
+ # A server MUST NOT mask any frames that it sends to the client
+ rand = Random(time.time())
+ mask = [rand.getrandbits(8) for _ in range(4)]
+ message = RFC6455WebSocket._apply_mask(message, mask, length)
+ maskdata = struct.pack('!BBBB', *mask)
+ else:
+ maskdata = b''
+
+ return b''.join((header, lengthdata, maskdata, message))
+
+ def wait(self):
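+        # Return the next complete message, or None once the connection is
+        # closed and the frame iterator is exhausted.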
+ for i in self.iterator:
+ return i
+
+ def _send(self, frame):
+ self._sendlock.acquire()
+ try:
+ self.socket.sendall(frame)
+ finally:
+ self._sendlock.release()
+
+ def send(self, message, **kw):
+ kw['masked'] = self.client
+ payload = self._pack_message(message, **kw)
+ self._send(payload)
+
+ def _send_closing_frame(self, ignore_send_errors=False, close_data=None):
+ if self.version in (8, 13) and not self.websocket_closed:
+ if close_data is not None:
+ status, msg = close_data
+ if isinstance(msg, str):
+ msg = msg.encode('utf-8')
+ data = struct.pack('!H', status) + msg
+ else:
+ data = ''
+ try:
+ self.send(data, control_code=8)
+ except OSError:
+ # Sometimes, like when the remote side cuts off the connection,
+ # we don't care about this.
+ if not ignore_send_errors: # pragma NO COVER
+ raise
+ self.websocket_closed = True
+
+ def close(self, close_data=None):
+ """Forcibly close the websocket; generally it is preferable to
+ return from the handler method."""
+ try:
+ self._send_closing_frame(close_data=close_data, ignore_send_errors=True)
+ self.socket.shutdown(socket.SHUT_WR)
+ except OSError as e:
+ if e.errno != errno.ENOTCONN:
+ self.log.write('{ctx} socket shutdown error: {e}'.format(ctx=self.log_context, e=e))
+ finally:
+ self.socket.close()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/wsgi.py b/tapdown/lib/python3.11/site-packages/eventlet/wsgi.py
new file mode 100644
index 0000000..b6b4d0c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/wsgi.py
@@ -0,0 +1,1102 @@
+import errno
+import os
+import sys
+import time
+import traceback
+import types
+import urllib.parse
+import warnings
+
+import eventlet
+from eventlet import greenio
+from eventlet import support
+from eventlet.corolocal import local
+from eventlet.green import BaseHTTPServer
+from eventlet.green import socket
+
+
+DEFAULT_MAX_SIMULTANEOUS_REQUESTS = 1024
+DEFAULT_MAX_HTTP_VERSION = 'HTTP/1.1'
+MAX_REQUEST_LINE = 8192
+MAX_HEADER_LINE = 8192
+MAX_TOTAL_HEADER_SIZE = 65536
+MINIMUM_CHUNK_SIZE = 4096
+# %(client_port)s is also available
+DEFAULT_LOG_FORMAT = ('%(client_ip)s - - [%(date_time)s] "%(request_line)s"'
+ ' %(status_code)s %(body_length)s %(wall_seconds).6f')
+RESPONSE_414 = b'''HTTP/1.0 414 Request URI Too Long\r\n\
+Connection: close\r\n\
+Content-Length: 0\r\n\r\n'''
+is_accepting = True
+
+STATE_IDLE = 'idle'
+STATE_REQUEST = 'request'
+STATE_CLOSE = 'close'
+
+__all__ = ['server', 'format_date_time']
+
+# Weekday and month names for HTTP date/time formatting; always English!
+_weekdayname = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
+_monthname = [None, # Dummy so we can use 1-based month numbers
+ "Jan", "Feb", "Mar", "Apr", "May", "Jun",
+ "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
+
+
+def format_date_time(timestamp):
+ """Formats a unix timestamp into an HTTP standard string."""
+ year, month, day, hh, mm, ss, wd, _y, _z = time.gmtime(timestamp)
+ return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+ _weekdayname[wd], day, _monthname[month], year, hh, mm, ss
+ )
+
+
+def addr_to_host_port(addr):
+ host = 'unix'
+ port = ''
+ if isinstance(addr, tuple):
+ host = addr[0]
+ port = addr[1]
+ return (host, port)
+
+
+# Collections of error codes to compare against. Not all attributes are set
+# on errno module on all platforms, so some are literals :(
+BAD_SOCK = {errno.EBADF, 10053}
+BROKEN_SOCK = {errno.EPIPE, errno.ECONNRESET, errno.ESHUTDOWN}
+
+
+class ChunkReadError(ValueError):
+ pass
+
+
+WSGI_LOCAL = local()
+
+
+class Input:
+
+ def __init__(self,
+ rfile,
+ content_length,
+ sock,
+ wfile=None,
+ wfile_line=None,
+ chunked_input=False):
+
+ self.rfile = rfile
+ self._sock = sock
+ if content_length is not None:
+ content_length = int(content_length)
+ self.content_length = content_length
+
+ self.wfile = wfile
+ self.wfile_line = wfile_line
+
+ self.position = 0
+ self.chunked_input = chunked_input
+ self.chunk_length = -1
+
+        # (optional) headers to send with a "100 Continue" response. Set by
+        # calling set_hundred_continue_response_headers() on env['wsgi.input']
+ self.hundred_continue_headers = None
+ self.is_hundred_continue_response_sent = False
+
+ # handle_one_response should give us a ref to the response state so we
+ # know whether we can still send the 100 Continue; until then, though,
+ # we're flying blind
+ self.headers_sent = None
+
+ def send_hundred_continue_response(self):
+ if self.headers_sent:
+            # Too late; the application has already started sending data back
+ # to the client
+ # TODO: maybe log a warning if self.hundred_continue_headers
+ # is not None?
+ return
+
+ towrite = []
+
+ # 100 Continue status line
+ towrite.append(self.wfile_line)
+
+ # Optional headers
+ if self.hundred_continue_headers is not None:
+ # 100 Continue headers
+ for header in self.hundred_continue_headers:
+ towrite.append(('%s: %s\r\n' % header).encode())
+
+ # Blank line
+ towrite.append(b'\r\n')
+
+ self.wfile.writelines(towrite)
+ self.wfile.flush()
+
+ # Reinitialize chunk_length (expect more data)
+ self.chunk_length = -1
+
+ @property
+ def should_send_hundred_continue(self):
+ return self.wfile is not None and not self.is_hundred_continue_response_sent
+
+ def _do_read(self, reader, length=None):
+ if self.should_send_hundred_continue:
+ # 100 Continue response
+ self.send_hundred_continue_response()
+ self.is_hundred_continue_response_sent = True
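+        # Clamp the read to the remaining Content-Length so the application
+        # can never read past the end of this request's body.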
+ if length is None or length > self.content_length - self.position:
+ length = self.content_length - self.position
+ if not length:
+ return b''
+ try:
+ read = reader(length)
+ except greenio.SSL.ZeroReturnError:
+ read = b''
+ self.position += len(read)
+ return read
+
+ def _discard_trailers(self, rfile):
+ while True:
+ line = rfile.readline()
+ if not line or line in (b'\r\n', b'\n', b''):
+ break
+
+ def _chunked_read(self, rfile, length=None, use_readline=False):
+ if self.should_send_hundred_continue:
+ # 100 Continue response
+ self.send_hundred_continue_response()
+ self.is_hundred_continue_response_sent = True
+ try:
+ if length == 0:
+ return b""
+
+ if length and length < 0:
+ length = None
+
+ if use_readline:
+ reader = self.rfile.readline
+ else:
+ reader = self.rfile.read
+
+ response = []
+ while self.chunk_length != 0:
+ maxreadlen = self.chunk_length - self.position
+ if length is not None and length < maxreadlen:
+ maxreadlen = length
+
+ if maxreadlen > 0:
+ data = reader(maxreadlen)
+ if not data:
+ self.chunk_length = 0
+ raise OSError("unexpected end of file while parsing chunked data")
+
+ datalen = len(data)
+ response.append(data)
+
+ self.position += datalen
+ if self.chunk_length == self.position:
+ rfile.readline()
+
+ if length is not None:
+ length -= datalen
+ if length == 0:
+ break
+ if use_readline and data[-1:] == b"\n":
+ break
+ else:
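+                    # Each chunk starts with its size in hex, optionally
+                    # followed by ";extensions"; a size of zero marks the
+                    # last chunk, after which trailers are discarded.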
+ try:
+ self.chunk_length = int(rfile.readline().split(b";", 1)[0], 16)
+ except ValueError as err:
+ raise ChunkReadError(err)
+ self.position = 0
+ if self.chunk_length == 0:
+ self._discard_trailers(rfile)
+ except greenio.SSL.ZeroReturnError:
+ pass
+ return b''.join(response)
+
+ def read(self, length=None):
+ if self.chunked_input:
+ return self._chunked_read(self.rfile, length)
+ return self._do_read(self.rfile.read, length)
+
+ def readline(self, size=None):
+ if self.chunked_input:
+ return self._chunked_read(self.rfile, size, True)
+ else:
+ return self._do_read(self.rfile.readline, size)
+
+ def readlines(self, hint=None):
+ if self.chunked_input:
+ lines = []
+ for line in iter(self.readline, b''):
+ lines.append(line)
+ if hint and hint > 0:
+ hint -= len(line)
+ if hint <= 0:
+ break
+ return lines
+ else:
+ return self._do_read(self.rfile.readlines, hint)
+
+ def __iter__(self):
+ return iter(self.read, b'')
+
+ def get_socket(self):
+ return self._sock
+
+ def set_hundred_continue_response_headers(self, headers,
+ capitalize_response_headers=True):
+ # Response headers capitalization (default)
+ # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
+        # Per the HTTP RFC, header names are case-insensitive.
+        # Please fix your client to ignore header case if possible.
+ if capitalize_response_headers:
+ headers = [
+ ('-'.join([x.capitalize() for x in key.split('-')]), value)
+ for key, value in headers]
+ self.hundred_continue_headers = headers
+
+ def discard(self, buffer_size=16 << 10):
+ while self.read(buffer_size):
+ pass
+
+
+class HeaderLineTooLong(Exception):
+ pass
+
+
+class HeadersTooLarge(Exception):
+ pass
+
+
+def get_logger(log, debug):
+ if callable(getattr(log, 'info', None)) \
+ and callable(getattr(log, 'debug', None)):
+ return log
+ else:
+ return LoggerFileWrapper(log or sys.stderr, debug)
+
+
+class LoggerNull:
+ def __init__(self):
+ pass
+
+ def error(self, msg, *args, **kwargs):
+ pass
+
+ def info(self, msg, *args, **kwargs):
+ pass
+
+ def debug(self, msg, *args, **kwargs):
+ pass
+
+ def write(self, msg, *args):
+ pass
+
+
+class LoggerFileWrapper(LoggerNull):
+ def __init__(self, log, debug):
+ self.log = log
+ self._debug = debug
+
+ def error(self, msg, *args, **kwargs):
+ self.write(msg, *args)
+
+ def info(self, msg, *args, **kwargs):
+ self.write(msg, *args)
+
+ def debug(self, msg, *args, **kwargs):
+ if self._debug:
+ self.write(msg, *args)
+
+ def write(self, msg, *args):
+ msg = msg + '\n'
+ if args:
+ msg = msg % args
+ self.log.write(msg)
+
+
+class FileObjectForHeaders:
+
+ def __init__(self, fp):
+ self.fp = fp
+ self.total_header_size = 0
+
+ def readline(self, size=-1):
+ sz = size
+ if size < 0:
+ sz = MAX_HEADER_LINE
+ rv = self.fp.readline(sz)
+ if len(rv) >= MAX_HEADER_LINE:
+ raise HeaderLineTooLong()
+ self.total_header_size += len(rv)
+ if self.total_header_size > MAX_TOTAL_HEADER_SIZE:
+ raise HeadersTooLarge()
+ return rv
+
+
+class HttpProtocol(BaseHTTPServer.BaseHTTPRequestHandler):
+ """This class is used to handle the HTTP requests that arrive
+ at the server.
+
+ The handler will parse the request and the headers, then call a method
+ specific to the request type.
+
+    :param conn_state: The connection state, a list of [client_address, socket, state].
+ :param server: The server accessible by the request handler.
+ """
+ protocol_version = 'HTTP/1.1'
+ minimum_chunk_size = MINIMUM_CHUNK_SIZE
+ capitalize_response_headers = True
+ reject_bad_requests = True
+
+ # https://github.com/eventlet/eventlet/issues/295
+    # Stdlib default is 0 (unbuffered), but then `wfile.writelines()` loses data,
+    # so before going back to unbuffered, remove any usage of `writelines`.
+ wbufsize = 16 << 10
+
+ def __init__(self, conn_state, server):
+ self.request = conn_state[1]
+ self.client_address = conn_state[0]
+ self.conn_state = conn_state
+ self.server = server
+ # Want to allow some overrides from the server before running setup
+ if server.minimum_chunk_size is not None:
+ self.minimum_chunk_size = server.minimum_chunk_size
+ self.capitalize_response_headers = server.capitalize_response_headers
+
+ self.setup()
+ try:
+ self.handle()
+ finally:
+ self.finish()
+
+ def setup(self):
+ # overriding SocketServer.setup to correctly handle SSL.Connection objects
+ conn = self.connection = self.request
+
+ # TCP_QUICKACK is a better alternative to disabling Nagle's algorithm
+ # https://news.ycombinator.com/item?id=10607422
+ if getattr(socket, 'TCP_QUICKACK', None):
+ try:
+ conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, True)
+ except OSError:
+ pass
+
+ try:
+ self.rfile = conn.makefile('rb', self.rbufsize)
+ self.wfile = conn.makefile('wb', self.wbufsize)
+ except (AttributeError, NotImplementedError):
+ if hasattr(conn, 'send') and hasattr(conn, 'recv'):
+ # it's an SSL.Connection
+ self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
+ self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
+ else:
+ # it's a SSLObject, or a martian
+ raise NotImplementedError(
+ '''eventlet.wsgi doesn't support sockets of type {}'''.format(type(conn)))
+
+ def handle(self):
+ self.close_connection = True
+
+ while True:
+ self.handle_one_request()
+ if self.conn_state[2] == STATE_CLOSE:
+ self.close_connection = 1
+ else:
+ self.conn_state[2] = STATE_IDLE
+ if self.close_connection:
+ break
+
+ def _read_request_line(self):
+ if self.rfile.closed:
+ self.close_connection = 1
+ return ''
+
+ try:
+ sock = self.connection
+ if self.server.keepalive and not isinstance(self.server.keepalive, bool):
+ sock.settimeout(self.server.keepalive)
+ line = self.rfile.readline(self.server.url_length_limit)
+ sock.settimeout(self.server.socket_timeout)
+ return line
+ except greenio.SSL.ZeroReturnError:
+ pass
+ except OSError as e:
+ last_errno = support.get_errno(e)
+ if last_errno in BROKEN_SOCK:
+ self.server.log.debug('({}) connection reset by peer {!r}'.format(
+ self.server.pid,
+ self.client_address))
+ elif last_errno not in BAD_SOCK:
+ raise
+ return ''
+
+ def handle_one_request(self):
+ if self.server.max_http_version:
+ self.protocol_version = self.server.max_http_version
+
+ self.raw_requestline = self._read_request_line()
+ self.conn_state[2] = STATE_REQUEST
+ if not self.raw_requestline:
+ self.close_connection = 1
+ return
+ if len(self.raw_requestline) >= self.server.url_length_limit:
+ self.wfile.write(RESPONSE_414)
+ self.close_connection = 1
+ return
+
+ orig_rfile = self.rfile
+ try:
+ self.rfile = FileObjectForHeaders(self.rfile)
+ if not self.parse_request():
+ return
+ except HeaderLineTooLong:
+ self.wfile.write(
+ b"HTTP/1.0 400 Header Line Too Long\r\n"
+ b"Connection: close\r\nContent-length: 0\r\n\r\n")
+ self.close_connection = 1
+ return
+ except HeadersTooLarge:
+ self.wfile.write(
+ b"HTTP/1.0 400 Headers Too Large\r\n"
+ b"Connection: close\r\nContent-length: 0\r\n\r\n")
+ self.close_connection = 1
+ return
+ finally:
+ self.rfile = orig_rfile
+
+ content_length = self.headers.get('content-length')
+ transfer_encoding = self.headers.get('transfer-encoding')
+ if content_length is not None:
+ try:
+ if int(content_length) < 0:
+ raise ValueError
+ except ValueError:
+ # Negative, or not an int at all
+ self.wfile.write(
+ b"HTTP/1.0 400 Bad Request\r\n"
+ b"Connection: close\r\nContent-length: 0\r\n\r\n")
+ self.close_connection = 1
+ return
+
+ if transfer_encoding is not None:
+ if self.reject_bad_requests:
+ msg = b"Content-Length and Transfer-Encoding are not allowed together\n"
+ self.wfile.write(
+ b"HTTP/1.0 400 Bad Request\r\n"
+ b"Connection: close\r\n"
+ b"Content-Length: %d\r\n"
+ b"\r\n%s" % (len(msg), msg))
+ self.close_connection = 1
+ return
+
+ self.environ = self.get_environ()
+ self.application = self.server.app
+ try:
+ self.server.outstanding_requests += 1
+ try:
+ self.handle_one_response()
+ except OSError as e:
+ # Broken pipe, connection reset by peer
+ if support.get_errno(e) not in BROKEN_SOCK:
+ raise
+ finally:
+ self.server.outstanding_requests -= 1
+
+ def handle_one_response(self):
+ start = time.time()
+ headers_set = []
+ headers_sent = []
+ # Grab the request input now; app may try to replace it in the environ
+ request_input = self.environ['eventlet.input']
+ # Push the headers-sent state into the Input so it won't send a
+ # 100 Continue response if we've already started a response.
+ request_input.headers_sent = headers_sent
+
+ wfile = self.wfile
+ result = None
+ use_chunked = [False]
+ length = [0]
+ status_code = [200]
+        # A HEAD response, a 1xx/204/304 response, or a 2xx response to a
+        # CONNECT request MUST NOT send a body or the related headers
+ # https://httpwg.org/specs/rfc7230.html#rfc.section.3.3.1
+ bodyless = [False]
+
+ def write(data):
+ towrite = []
+ if not headers_set:
+ raise AssertionError("write() before start_response()")
+ elif not headers_sent:
+ status, response_headers = headers_set
+ headers_sent.append(1)
+ header_list = [header[0].lower() for header in response_headers]
+ towrite.append(('%s %s\r\n' % (self.protocol_version, status)).encode())
+ for header in response_headers:
+ towrite.append(('%s: %s\r\n' % header).encode('latin-1'))
+
+ # send Date header?
+ if 'date' not in header_list:
+ towrite.append(('Date: %s\r\n' % (format_date_time(time.time()),)).encode())
+
+ client_conn = self.headers.get('Connection', '').lower()
+ send_keep_alive = False
+ if self.close_connection == 0 and \
+ self.server.keepalive and (client_conn == 'keep-alive' or
+ (self.request_version == 'HTTP/1.1' and
+ not client_conn == 'close')):
+ # only send keep-alives back to clients that sent them,
+ # it's redundant for 1.1 connections
+ send_keep_alive = (client_conn == 'keep-alive')
+ self.close_connection = 0
+ else:
+ self.close_connection = 1
+
+ if 'content-length' not in header_list:
+ if bodyless[0]:
+ pass # client didn't expect a body anyway
+ elif self.request_version == 'HTTP/1.1':
+ use_chunked[0] = True
+ towrite.append(b'Transfer-Encoding: chunked\r\n')
+ else:
+ # client is 1.0 and therefore must read to EOF
+ self.close_connection = 1
+
+ if self.close_connection:
+ towrite.append(b'Connection: close\r\n')
+ elif send_keep_alive:
+ towrite.append(b'Connection: keep-alive\r\n')
+ # Spec says timeout must be an integer, but we allow sub-second
+ int_timeout = int(self.server.keepalive or 0)
+ if not isinstance(self.server.keepalive, bool) and int_timeout:
+ towrite.append(b'Keep-Alive: timeout=%d\r\n' % int_timeout)
+ towrite.append(b'\r\n')
+ # end of header writing
+
+ if use_chunked[0]:
+ # Write the chunked encoding
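+                    # (each chunk is "<hex length>\r\n<payload>\r\n"; the
+                    # terminating zero-length chunk comes from the final
+                    # write(b'') in handle_one_response)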
+ towrite.append(("%x" % (len(data),)).encode() + b"\r\n" + data + b"\r\n")
+ else:
+ towrite.append(data)
+ wfile.writelines(towrite)
+ wfile.flush()
+ length[0] = length[0] + sum(map(len, towrite))
+
+ def start_response(status, response_headers, exc_info=None):
+ status_code[0] = int(status.split(" ", 1)[0])
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise original exception if headers sent
+ raise exc_info[1].with_traceback(exc_info[2])
+ finally:
+ # Avoid dangling circular ref
+ exc_info = None
+
+ bodyless[0] = (
+ status_code[0] in (204, 304)
+ or self.command == "HEAD"
+ or (100 <= status_code[0] < 200)
+ or (self.command == "CONNECT" and 200 <= status_code[0] < 300)
+ )
+
+ # Response headers capitalization
+ # CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
+            # Per the HTTP RFC, header names are case-insensitive.
+            # Please fix your client to ignore header case if possible.
+ if self.capitalize_response_headers:
+ def cap(x):
+ return x.encode('latin1').capitalize().decode('latin1')
+
+ response_headers = [
+ ('-'.join([cap(x) for x in key.split('-')]), value)
+ for key, value in response_headers]
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ try:
+ try:
+ WSGI_LOCAL.already_handled = False
+ result = self.application(self.environ, start_response)
+
+ # Set content-length if possible
+ if headers_set and not headers_sent and hasattr(result, '__len__'):
+ # We've got a complete final response
+ if not bodyless[0] and 'Content-Length' not in [h for h, _v in headers_set[1]]:
+ headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
+ if request_input.should_send_hundred_continue:
+ # We've got a complete final response, and never sent a 100 Continue.
+ # There's no chance we'll need to read the body as we stream out the
+ # response, so we can be nice and send a Connection: close header.
+ self.close_connection = 1
+
+ towrite = []
+ towrite_size = 0
+ just_written_size = 0
+ minimum_write_chunk_size = int(self.environ.get(
+ 'eventlet.minimum_write_chunk_size', self.minimum_chunk_size))
+ for data in result:
+ if len(data) == 0:
+ continue
+ if isinstance(data, str):
+ data = data.encode('ascii')
+
+ towrite.append(data)
+ towrite_size += len(data)
+ if towrite_size >= minimum_write_chunk_size:
+ write(b''.join(towrite))
+ towrite = []
+ just_written_size = towrite_size
+ towrite_size = 0
+ if WSGI_LOCAL.already_handled:
+ self.close_connection = 1
+ return
+ if towrite:
+ just_written_size = towrite_size
+ write(b''.join(towrite))
+ if not headers_sent or (use_chunked[0] and just_written_size):
+ write(b'')
+ except (Exception, eventlet.Timeout):
+ self.close_connection = 1
+ tb = traceback.format_exc()
+ self.server.log.info(tb)
+ if not headers_sent:
+ err_body = tb.encode() if self.server.debug else b''
+ start_response("500 Internal Server Error",
+ [('Content-type', 'text/plain'),
+ ('Content-length', len(err_body))])
+ write(err_body)
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ if request_input.should_send_hundred_continue:
+ # We just sent the final response, no 100 Continue. Client may or
+ # may not have started to send a body, and if we keep the connection
+ # open we've seen clients either
+ # * send a body, then start a new request
+ # * skip the body and go straight to a new request
+ # Looks like the most broadly compatible option is to close the
+ # connection and let the client retry.
+ # https://curl.se/mail/lib-2004-08/0002.html
+ # Note that we likely *won't* send a Connection: close header at this point
+ self.close_connection = 1
+
+ if (request_input.chunked_input or
+ request_input.position < (request_input.content_length or 0)):
+ # Read and discard body if connection is going to be reused
+ if self.close_connection == 0:
+ try:
+ request_input.discard()
+ except ChunkReadError as e:
+ self.close_connection = 1
+ self.server.log.error((
+ 'chunked encoding error while discarding request body.'
+ + ' client={0} request="{1}" error="{2}"').format(
+ self.get_client_address()[0], self.requestline, e,
+ ))
+ except OSError as e:
+ self.close_connection = 1
+ self.server.log.error((
+ 'I/O error while discarding request body.'
+ + ' client={0} request="{1}" error="{2}"').format(
+ self.get_client_address()[0], self.requestline, e,
+ ))
+ finish = time.time()
+
+ for hook, args, kwargs in self.environ['eventlet.posthooks']:
+ hook(self.environ, *args, **kwargs)
+
+ if self.server.log_output:
+ client_host, client_port = self.get_client_address()
+
+ self.server.log.info(self.server.log_format % {
+ 'client_ip': client_host,
+ 'client_port': client_port,
+ 'date_time': self.log_date_time_string(),
+ 'request_line': self.requestline,
+ 'status_code': status_code[0],
+ 'body_length': length[0],
+ 'wall_seconds': finish - start,
+ })
+
+ def get_client_address(self):
+ host, port = addr_to_host_port(self.client_address)
+
+ if self.server.log_x_forwarded_for:
+ forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
+ if forward:
+ host = forward + ',' + host
+ return (host, port)
+
+ def formalize_key_naming(self, k):
+ """
+        Headers containing underscores are permitted by RFC 9110,
+        but eventlet joining headers of different names into the same
+        environ variable would dangerously confuse applications as to which is which.
+ Cf.
+ - Nginx: http://nginx.org/en/docs/http/ngx_http_core_module.html#underscores_in_headers
+ - Django: https://www.djangoproject.com/weblog/2015/jan/13/security/
+ - Gunicorn: https://github.com/benoitc/gunicorn/commit/72b8970dbf2bf3444eb2e8b12aeff1a3d5922a9a
+ - Werkzeug: https://github.com/pallets/werkzeug/commit/5ee439a692dc4474e0311de2496b567eed2d02cf
+ - ...
+ """
+ if "_" in k:
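+            # Returning None here makes get_environ drop the header entirely,
+            # which is safer than guessing which colliding name was meant.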
+ return
+
+ return k.replace('-', '_').upper()
+
+ def get_environ(self):
+ env = self.server.get_environ()
+ env['REQUEST_METHOD'] = self.command
+ env['SCRIPT_NAME'] = ''
+
+ pq = self.path.split('?', 1)
+ env['RAW_PATH_INFO'] = pq[0]
+ env['PATH_INFO'] = urllib.parse.unquote(pq[0], encoding='latin1')
+ if len(pq) > 1:
+ env['QUERY_STRING'] = pq[1]
+
+ ct = self.headers.get('content-type')
+ if ct is None:
+ try:
+ ct = self.headers.type
+ except AttributeError:
+ ct = self.headers.get_content_type()
+ env['CONTENT_TYPE'] = ct
+
+ length = self.headers.get('content-length')
+ if length:
+ env['CONTENT_LENGTH'] = length
+ env['SERVER_PROTOCOL'] = 'HTTP/1.0'
+
+ sockname = self.request.getsockname()
+ server_addr = addr_to_host_port(sockname)
+ env['SERVER_NAME'] = server_addr[0]
+ env['SERVER_PORT'] = str(server_addr[1])
+ client_addr = addr_to_host_port(self.client_address)
+ env['REMOTE_ADDR'] = client_addr[0]
+ env['REMOTE_PORT'] = str(client_addr[1])
+ env['GATEWAY_INTERFACE'] = 'CGI/1.1'
+
+ try:
+ headers = self.headers.headers
+ except AttributeError:
+ headers = self.headers._headers
+ else:
+ headers = [h.split(':', 1) for h in headers]
+
+ env['headers_raw'] = headers_raw = tuple((k, v.strip(' \t\n\r')) for k, v in headers)
+ for k, v in headers_raw:
+ k = self.formalize_key_naming(k)
+ if not k:
+ continue
+
+ if k in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
+ # These do not get the HTTP_ prefix and were handled above
+ continue
+ envk = 'HTTP_' + k
+ if envk in env:
+ env[envk] += ',' + v
+ else:
+ env[envk] = v
+
+ if env.get('HTTP_EXPECT', '').lower() == '100-continue':
+ wfile = self.wfile
+ wfile_line = b'HTTP/1.1 100 Continue\r\n'
+ else:
+ wfile = None
+ wfile_line = None
+ chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
+ if not chunked and length is None:
+ # https://www.rfc-editor.org/rfc/rfc9112#section-6.3-2.7
+ # "If this is a request message and none of the above are true, then
+ # the message body length is zero (no message body is present)."
+ length = '0'
+ env['wsgi.input'] = env['eventlet.input'] = Input(
+ self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
+ chunked_input=chunked)
+ env['eventlet.posthooks'] = []
+
+ # WebSocketWSGI needs a way to flag the connection as idle,
+ # since it may never fall out of handle_one_request
+ def set_idle():
+ self.conn_state[2] = STATE_IDLE
+ env['eventlet.set_idle'] = set_idle
+
+ return env
+
+ def finish(self):
+ try:
+ BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
+ except OSError as e:
+ # Broken pipe, connection reset by peer
+ if support.get_errno(e) not in BROKEN_SOCK:
+ raise
+ greenio.shutdown_safe(self.connection)
+ self.connection.close()
+
+ def handle_expect_100(self):
+ return True
+
+
+class Server(BaseHTTPServer.HTTPServer):
+
+ def __init__(self,
+ socket,
+ address,
+ app,
+ log=None,
+ environ=None,
+ max_http_version=None,
+ protocol=HttpProtocol,
+ minimum_chunk_size=None,
+ log_x_forwarded_for=True,
+ keepalive=True,
+ log_output=True,
+ log_format=DEFAULT_LOG_FORMAT,
+ url_length_limit=MAX_REQUEST_LINE,
+ debug=True,
+ socket_timeout=None,
+ capitalize_response_headers=True):
+
+ self.outstanding_requests = 0
+ self.socket = socket
+ self.address = address
+ self.log = LoggerNull()
+ if log_output:
+ self.log = get_logger(log, debug)
+ self.app = app
+ self.keepalive = keepalive
+ self.environ = environ
+ self.max_http_version = max_http_version
+ self.protocol = protocol
+ self.pid = os.getpid()
+ self.minimum_chunk_size = minimum_chunk_size
+ self.log_x_forwarded_for = log_x_forwarded_for
+ self.log_output = log_output
+ self.log_format = log_format
+ self.url_length_limit = url_length_limit
+ self.debug = debug
+ self.socket_timeout = socket_timeout
+ self.capitalize_response_headers = capitalize_response_headers
+
+ if not self.capitalize_response_headers:
+ warnings.warn("""capitalize_response_headers is disabled.
+ Please, make sure you know what you are doing.
+ HTTP headers names are case-insensitive per RFC standard.
+ Most likely, you need to fix HTTP parsing in your client software.""",
+ DeprecationWarning, stacklevel=3)
+
+ def get_environ(self):
+ d = {
+ 'wsgi.errors': sys.stderr,
+ 'wsgi.version': (1, 0),
+ 'wsgi.multithread': True,
+ 'wsgi.multiprocess': False,
+ 'wsgi.run_once': False,
+ 'wsgi.url_scheme': 'http',
+ }
+ # detect secure socket
+ if hasattr(self.socket, 'do_handshake'):
+ d['wsgi.url_scheme'] = 'https'
+ d['HTTPS'] = 'on'
+ if self.environ is not None:
+ d.update(self.environ)
+ return d
+
+ def process_request(self, conn_state):
+ try:
+ # protocol is responsible for pulling out any overrides it needs itself
+ # before it starts processing
+ self.protocol(conn_state, self)
+ except socket.timeout:
+ # Expected exceptions are not exceptional
+ conn_state[1].close()
+ # similar to logging "accepted" in server()
+ self.log.debug('({}) timed out {!r}'.format(self.pid, conn_state[0]))
+
+ def log_message(self, message):
+ raise AttributeError('''\
+eventlet.wsgi.server.log_message was deprecated and deleted.
+Please use server.log.info instead.''')
+
+
+try:
+ import ssl
+ ACCEPT_EXCEPTIONS = (socket.error, ssl.SSLError)
+ ACCEPT_ERRNO = {errno.EPIPE, errno.ECONNRESET,
+ errno.ESHUTDOWN, ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_SSL}
+except ImportError:
+ ACCEPT_EXCEPTIONS = (socket.error,)
+ ACCEPT_ERRNO = {errno.EPIPE, errno.ECONNRESET, errno.ESHUTDOWN}
+
+
+def socket_repr(sock):
+ scheme = 'http'
+ if hasattr(sock, 'do_handshake'):
+ scheme = 'https'
+
+ name = sock.getsockname()
+ if sock.family == socket.AF_INET:
+ hier_part = '//{}:{}'.format(*name)
+ elif sock.family == socket.AF_INET6:
+ hier_part = '//[{}]:{}'.format(*name[:2])
+ elif sock.family == socket.AF_UNIX:
+ hier_part = name
+ else:
+ hier_part = repr(name)
+
+ return scheme + ':' + hier_part
+
+
+def server(sock, site,
+ log=None,
+ environ=None,
+ max_size=None,
+ max_http_version=DEFAULT_MAX_HTTP_VERSION,
+ protocol=HttpProtocol,
+ server_event=None,
+ minimum_chunk_size=None,
+ log_x_forwarded_for=True,
+ custom_pool=None,
+ keepalive=True,
+ log_output=True,
+ log_format=DEFAULT_LOG_FORMAT,
+ url_length_limit=MAX_REQUEST_LINE,
+ debug=True,
+ socket_timeout=None,
+ capitalize_response_headers=True):
+ """Start up a WSGI server handling requests from the supplied server
+ socket. This function loops forever. The *sock* object will be
+ closed after server exits, but the underlying file descriptor will
+ remain open, so if you have a dup() of *sock*, it will remain usable.
+
+ .. warning::
+
+ At the moment :func:`server` will always wait for active connections to finish before
+ exiting, even if there's an exception raised inside it
+ (*all* exceptions are handled the same way, including :class:`greenlet.GreenletExit`
+ and those inheriting from `BaseException`).
+
+ While this may not be an issue normally, when it comes to long running HTTP connections
+ (like :mod:`eventlet.websocket`) it will become problematic and calling
+ :meth:`~eventlet.greenthread.GreenThread.wait` on a thread that runs the server may hang,
+ even after using :meth:`~eventlet.greenthread.GreenThread.kill`, as long
+ as there are active connections.
+
+ :param sock: Server socket, must be already bound to a port and listening.
+ :param site: WSGI application function.
+ :param log: logging.Logger instance or file-like object that logs should be written to.
+ If a Logger instance is supplied, messages are sent to the INFO log level.
+ If not specified, sys.stderr is used.
+ :param environ: Additional parameters that go into the environ dictionary of every request.
+ :param max_size: Maximum number of client connections opened at any time by this server.
+ Default is 1024.
+ :param max_http_version: Set to "HTTP/1.0" to make the server pretend it only supports HTTP 1.0.
+ This can help with applications or clients that don't behave properly using HTTP 1.1.
+ :param protocol: Protocol class. Deprecated.
+ :param server_event: Used to collect the Server object. Deprecated.
+ :param minimum_chunk_size: Minimum size in bytes for http chunks. This can be used to improve
+ performance of applications which yield many small strings, though
+ using it technically violates the WSGI spec. This can be overridden
+ on a per request basis by setting environ['eventlet.minimum_write_chunk_size'].
+ :param log_x_forwarded_for: If True (the default), logs the contents of the x-forwarded-for
+ header in addition to the actual client ip address in the 'client_ip' field of the
+ log line.
+ :param custom_pool: A custom GreenPool instance which is used to spawn client green threads.
+ If this is supplied, max_size is ignored.
+ :param keepalive: If set to False or zero, disables keepalives on the server; all connections
+ will be closed after serving one request. If numeric, it will be the timeout used
+ when reading the next request.
+ :param log_output: A Boolean indicating if the server will log data or not.
+ :param log_format: A python format string that is used as the template to generate log lines.
+ The following values can be formatted into it: client_ip, date_time, request_line,
+ status_code, body_length, wall_seconds. The default is a good example of how to
+ use it.
+ :param url_length_limit: A maximum allowed length of the request url. If exceeded, 414 error
+ is returned.
+ :param debug: True if the server should send exception tracebacks to the clients on 500 errors.
+ If False, the server will respond with empty bodies.
+ :param socket_timeout: Timeout for client connections' socket operations. Default None means
+ wait forever.
+ :param capitalize_response_headers: Normalize response headers' names to Foo-Bar.
+ Default is True.
+ """
+ serv = Server(
+ sock, sock.getsockname(),
+ site, log,
+ environ=environ,
+ max_http_version=max_http_version,
+ protocol=protocol,
+ minimum_chunk_size=minimum_chunk_size,
+ log_x_forwarded_for=log_x_forwarded_for,
+ keepalive=keepalive,
+ log_output=log_output,
+ log_format=log_format,
+ url_length_limit=url_length_limit,
+ debug=debug,
+ socket_timeout=socket_timeout,
+ capitalize_response_headers=capitalize_response_headers,
+ )
+ if server_event is not None:
+ warnings.warn(
+ 'eventlet.wsgi.Server() server_event kwarg is deprecated and will be removed soon',
+ DeprecationWarning, stacklevel=2)
+ server_event.send(serv)
+ if max_size is None:
+ max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
+ if custom_pool is not None:
+ pool = custom_pool
+ else:
+ pool = eventlet.GreenPool(max_size)
+
+ if not (hasattr(pool, 'spawn') and hasattr(pool, 'waitall')):
+ raise AttributeError('''\
+eventlet.wsgi.Server pool must provide methods: `spawn`, `waitall`.
+If unsure, use eventlet.GreenPool.''')
+
+ # [addr, socket, state]
+ connections = {}
+
+ def _clean_connection(_, conn):
+ connections.pop(conn[0], None)
+ conn[2] = STATE_CLOSE
+ greenio.shutdown_safe(conn[1])
+ conn[1].close()
+
+ try:
+ serv.log.info('({}) wsgi starting up on {}'.format(serv.pid, socket_repr(sock)))
+ while is_accepting:
+ try:
+ client_socket, client_addr = sock.accept()
+ client_socket.settimeout(serv.socket_timeout)
+ serv.log.debug('({}) accepted {!r}'.format(serv.pid, client_addr))
+ connections[client_addr] = connection = [client_addr, client_socket, STATE_IDLE]
+ (pool.spawn(serv.process_request, connection)
+ .link(_clean_connection, connection))
+ except ACCEPT_EXCEPTIONS as e:
+ if support.get_errno(e) not in ACCEPT_ERRNO:
+ raise
+ else:
+ break
+ except (KeyboardInterrupt, SystemExit):
+ serv.log.info('wsgi exiting')
+ break
+ finally:
+ for cs in connections.values():
+ prev_state = cs[2]
+ cs[2] = STATE_CLOSE
+ if prev_state == STATE_IDLE:
+ greenio.shutdown_safe(cs[1])
+ pool.waitall()
+ serv.log.info('({}) wsgi exited, is_accepting={}'.format(serv.pid, is_accepting))
+ try:
+ # NOTE: It's not clear whether we want this to leave the
+ # socket open or close it. Use cases like Spawning want
+ # the underlying fd to remain open, but if we're going
+ # that far we might as well not bother closing sock at
+ # all.
+ sock.close()
+ except OSError as e:
+ if support.get_errno(e) not in BROKEN_SOCK:
+ traceback.print_exc()
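+
+
+# Editorial usage sketch (an assumption, not part of eventlet): a minimal
+# way to drive the server() loop above. The demo app, address, and port
+# are illustrative.
+if __name__ == '__main__':
+    def _demo_app(environ, start_response):
+        # Simplest possible WSGI application.
+        start_response('200 OK', [('Content-Type', 'text/plain')])
+        return [b'Hello, eventlet\n']
+
+    _demo_sock = eventlet.listen(('127.0.0.1', 8090))  # bound and listening
+    server(_demo_sock, _demo_app)  # blocks until interrupted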
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/README.rst b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/README.rst
new file mode 100644
index 0000000..b094781
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/README.rst
@@ -0,0 +1,130 @@
+eventlet.zipkin
+===============
+
+`Zipkin <http://twitter.github.io/zipkin/>`_ is a distributed tracing system developed at Twitter.
+This package adds a Zipkin-compatible tracing facility to WSGI
+applications served with eventlet.
+
+Why use it? From http://twitter.github.io/zipkin/:
+
+"Collecting traces helps developers gain deeper knowledge about how
+certain requests perform in a distributed system. Let's say we're having
+problems with user requests timing out. We can look up traced requests
+that timed out and display it in the web UI. We'll be able to quickly
+find the service responsible for adding the unexpected response time. If
+the service has been annotated adequately we can also find out where in
+that service the issue is happening."
+
+
+Screenshot
+----------
+
+Zipkin web UI screenshots obtained when applying this module to
+OpenStack swift are in example/.
+
+
+Requirements
+------------
+
+eventlet.zipkin needs the python scribe client
+and thrift (>=0.9),
+because the zipkin collector speaks the scribe protocol.
+The command below will install both the scribe client and thrift.
+
+Install facebook-scribe:
+
+::
+
+ pip install facebook-scribe
+
+**Python**: ``2.7`` (because the current Python Thrift release doesn't support Python 3)
+
+
+How to use
+----------
+
+Add tracing facility to your application
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Apply the monkey patch before you start the wsgi server.
+
+.. code:: python
+
+ # Add only 2 lines to your code
+ from eventlet.zipkin import patcher
+ patcher.enable_trace_patch()
+
+ # existing code
+ from eventlet import wsgi
+ wsgi.server(sock, app)
+
+You can pass several parameters to ``enable_trace_patch()``; an example
+follows the list.
+
+* host: Scribe daemon IP address (default: '127.0.0.1')
+* port: Scribe daemon port (default: 9410)
+* trace_app_log: A Boolean indicating whether the tracer also records the application log. This facility assumes that your application uses the Python standard logging library. (default: False)
+* sampling_rate: A float value (0.0-1.0) that indicates the tracing frequency. If you specify 1.0, all requests are traced and sent to the Zipkin collector. If you specify 0.1, only 1/10 of requests are traced. (default: 1.0)
+
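+
+For example, a front-end server that samples one request in ten and sends
+traces to a remote scribe daemon could be patched like this (the host
+address here is an illustrative assumption):
+
+.. code:: python
+
+    from eventlet.zipkin import patcher
+    patcher.enable_trace_patch(host='192.0.2.10', port=9410,
+                               trace_app_log=True, sampling_rate=0.1)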
+
+(Option) Annotation API
+~~~~~~~~~~~~~~~~~~~~~~~
+If you want to record additional information,
+you can use the API below from anywhere in your code.
+
+.. code:: python
+
+ from eventlet.zipkin import api
+
+ api.put_annotation('Cache miss for %s' % request)
+ api.put_key_value('key', 'value')
+
+
+
+
+Zipkin simple setup
+-------------------
+
+::
+
+ $ git clone https://github.com/twitter/zipkin.git
+ $ cd zipkin
+ # Open 3 terminals
+ (terminal1) $ bin/collector
+ (terminal2) $ bin/query
+ (terminal3) $ bin/web
+
+Access http://localhost:8080 from your browser.
+
+
+(Option) fluentd
+----------------
+If you want to buffer the tracing data for performance,
+the fluentd scribe plugin is available.
+Since the ``out_scribe`` plugin extends fluentd's Buffer plugin,
+you can customize buffering parameters in the usual fluentd manner.
+The scribe plugin is included in td-agent by default.
+
+
+Sample: ``/etc/td-agent/td-agent.conf``
+
+::
+
+    # in_scribe
+    <source>
+      type scribe
+      port 9999
+    </source>
+
+    # out_scribe
+    <match **>
+      type scribe
+      host Zipkin_collector_IP
+      port 9410
+      flush_interval 60s
+      buffer_chunk_limit 256m
+    </match>
+
+
+| You also need to specify ``patcher.enable_trace_patch(port=9999)`` so that your application sends to the local in_scribe.
+| In this case, trace data flows as below.
+| Your application => Local fluentd in_scribe (9999) => Local fluentd out_scribe =====> Remote zipkin collector (9410)
+
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/README.rst b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/README.rst
new file mode 100644
index 0000000..0317d50
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/README.rst
@@ -0,0 +1,8 @@
+_thrift
+========
+
+* This directory is auto-generated by Thrift Compiler by using
+ https://github.com/twitter/zipkin/blob/master/zipkin-thrift/src/main/thrift/com/twitter/zipkin/zipkinCore.thrift
+
+* Do not modify this directory.
+
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore.thrift b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore.thrift
new file mode 100644
index 0000000..0787ca8
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore.thrift
@@ -0,0 +1,55 @@
+# Copyright 2012 Twitter Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+namespace java com.twitter.zipkin.gen
+namespace rb Zipkin
+
+//************** Collection related structs **************
+
+// these are the annotations we always expect to find in a span
+const string CLIENT_SEND = "cs"
+const string CLIENT_RECV = "cr"
+const string SERVER_SEND = "ss"
+const string SERVER_RECV = "sr"
+
+// this represents a host and port in a network
+struct Endpoint {
+ 1: i32 ipv4,
+ 2: i16 port // beware that this will give us negative ports. some conversion needed
+ 3: string service_name // which service did this operation happen on?
+}
+
+// some event took place, either one by the framework or by the user
+struct Annotation {
+ 1: i64 timestamp // microseconds from epoch
+ 2: string value // what happened at the timestamp?
+ 3: optional Endpoint host // host this happened on
+}
+
+enum AnnotationType { BOOL, BYTES, I16, I32, I64, DOUBLE, STRING }
+
+struct BinaryAnnotation {
+ 1: string key,
+ 2: binary value,
+ 3: AnnotationType annotation_type,
+ 4: optional Endpoint host
+}
+
+struct Span {
+ 1: i64 trace_id // unique trace id, use for all spans in trace
+ 3: string name, // span name, rpc method for example
+ 4: i64 id, // unique span id, only used for this span
+ 5: optional i64 parent_id, // parent span id
+  6: list<Annotation> annotations, // list of all annotations/events that occurred
+  8: list<BinaryAnnotation> binary_annotations // any binary annotations
+}
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/__init__.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/__init__.py
new file mode 100644
index 0000000..adefd8e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/__init__.py
@@ -0,0 +1 @@
+__all__ = ['ttypes', 'constants']
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/constants.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/constants.py
new file mode 100644
index 0000000..3e04f77
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/constants.py
@@ -0,0 +1,14 @@
+#
+# Autogenerated by Thrift Compiler (0.8.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#
+
+from thrift.Thrift import TType, TMessageType, TException
+from ttypes import *
+
+CLIENT_SEND = "cs"
+CLIENT_RECV = "cr"
+SERVER_SEND = "ss"
+SERVER_RECV = "sr"
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/ttypes.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/ttypes.py
new file mode 100644
index 0000000..418911f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/_thrift/zipkinCore/ttypes.py
@@ -0,0 +1,452 @@
+#
+# Autogenerated by Thrift Compiler (0.8.0)
+#
+# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+#
+#
+
+from thrift.Thrift import TType, TMessageType, TException
+
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol, TProtocol
+try:
+ from thrift.protocol import fastbinary
+except:
+ fastbinary = None
+
+
+class AnnotationType:
+ BOOL = 0
+ BYTES = 1
+ I16 = 2
+ I32 = 3
+ I64 = 4
+ DOUBLE = 5
+ STRING = 6
+
+ _VALUES_TO_NAMES = {
+ 0: "BOOL",
+ 1: "BYTES",
+ 2: "I16",
+ 3: "I32",
+ 4: "I64",
+ 5: "DOUBLE",
+ 6: "STRING",
+ }
+
+ _NAMES_TO_VALUES = {
+ "BOOL": 0,
+ "BYTES": 1,
+ "I16": 2,
+ "I32": 3,
+ "I64": 4,
+ "DOUBLE": 5,
+ "STRING": 6,
+ }
+
+
+class Endpoint:
+ """
+ Attributes:
+ - ipv4
+ - port
+ - service_name
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.I32, 'ipv4', None, None, ), # 1
+ (2, TType.I16, 'port', None, None, ), # 2
+ (3, TType.STRING, 'service_name', None, None, ), # 3
+ )
+
+ def __init__(self, ipv4=None, port=None, service_name=None,):
+ self.ipv4 = ipv4
+ self.port = port
+ self.service_name = service_name
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I32:
+ self.ipv4 = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.I16:
+ self.port = iprot.readI16();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.service_name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('Endpoint')
+ if self.ipv4 is not None:
+ oprot.writeFieldBegin('ipv4', TType.I32, 1)
+ oprot.writeI32(self.ipv4)
+ oprot.writeFieldEnd()
+ if self.port is not None:
+ oprot.writeFieldBegin('port', TType.I16, 2)
+ oprot.writeI16(self.port)
+ oprot.writeFieldEnd()
+ if self.service_name is not None:
+ oprot.writeFieldBegin('service_name', TType.STRING, 3)
+ oprot.writeString(self.service_name)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class Annotation:
+ """
+ Attributes:
+ - timestamp
+ - value
+ - host
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.I64, 'timestamp', None, None, ), # 1
+ (2, TType.STRING, 'value', None, None, ), # 2
+ (3, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 3
+ )
+
+ def __init__(self, timestamp=None, value=None, host=None,):
+ self.timestamp = timestamp
+ self.value = value
+ self.host = host
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I64:
+ self.timestamp = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.value = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRUCT:
+ self.host = Endpoint()
+ self.host.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('Annotation')
+ if self.timestamp is not None:
+ oprot.writeFieldBegin('timestamp', TType.I64, 1)
+ oprot.writeI64(self.timestamp)
+ oprot.writeFieldEnd()
+ if self.value is not None:
+ oprot.writeFieldBegin('value', TType.STRING, 2)
+ oprot.writeString(self.value)
+ oprot.writeFieldEnd()
+ if self.host is not None:
+ oprot.writeFieldBegin('host', TType.STRUCT, 3)
+ self.host.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class BinaryAnnotation:
+ """
+ Attributes:
+ - key
+ - value
+ - annotation_type
+ - host
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.STRING, 'key', None, None, ), # 1
+ (2, TType.STRING, 'value', None, None, ), # 2
+ (3, TType.I32, 'annotation_type', None, None, ), # 3
+ (4, TType.STRUCT, 'host', (Endpoint, Endpoint.thrift_spec), None, ), # 4
+ )
+
+ def __init__(self, key=None, value=None, annotation_type=None, host=None,):
+ self.key = key
+ self.value = value
+ self.annotation_type = annotation_type
+ self.host = host
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.STRING:
+ self.key = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 2:
+ if ftype == TType.STRING:
+ self.value = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.I32:
+ self.annotation_type = iprot.readI32();
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.STRUCT:
+ self.host = Endpoint()
+ self.host.read(iprot)
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('BinaryAnnotation')
+ if self.key is not None:
+ oprot.writeFieldBegin('key', TType.STRING, 1)
+ oprot.writeString(self.key)
+ oprot.writeFieldEnd()
+ if self.value is not None:
+ oprot.writeFieldBegin('value', TType.STRING, 2)
+ oprot.writeString(self.value)
+ oprot.writeFieldEnd()
+ if self.annotation_type is not None:
+ oprot.writeFieldBegin('annotation_type', TType.I32, 3)
+ oprot.writeI32(self.annotation_type)
+ oprot.writeFieldEnd()
+ if self.host is not None:
+ oprot.writeFieldBegin('host', TType.STRUCT, 4)
+ self.host.write(oprot)
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+class Span:
+ """
+ Attributes:
+ - trace_id
+ - name
+ - id
+ - parent_id
+ - annotations
+ - binary_annotations
+ """
+
+ thrift_spec = (
+ None, # 0
+ (1, TType.I64, 'trace_id', None, None, ), # 1
+ None, # 2
+ (3, TType.STRING, 'name', None, None, ), # 3
+ (4, TType.I64, 'id', None, None, ), # 4
+ (5, TType.I64, 'parent_id', None, None, ), # 5
+ (6, TType.LIST, 'annotations', (TType.STRUCT,(Annotation, Annotation.thrift_spec)), None, ), # 6
+ None, # 7
+ (8, TType.LIST, 'binary_annotations', (TType.STRUCT,(BinaryAnnotation, BinaryAnnotation.thrift_spec)), None, ), # 8
+ )
+
+ def __init__(self, trace_id=None, name=None, id=None, parent_id=None, annotations=None, binary_annotations=None,):
+ self.trace_id = trace_id
+ self.name = name
+ self.id = id
+ self.parent_id = parent_id
+ self.annotations = annotations
+ self.binary_annotations = binary_annotations
+
+ def read(self, iprot):
+ if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+ fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+ return
+ iprot.readStructBegin()
+ while True:
+ (fname, ftype, fid) = iprot.readFieldBegin()
+ if ftype == TType.STOP:
+ break
+ if fid == 1:
+ if ftype == TType.I64:
+ self.trace_id = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 3:
+ if ftype == TType.STRING:
+ self.name = iprot.readString();
+ else:
+ iprot.skip(ftype)
+ elif fid == 4:
+ if ftype == TType.I64:
+ self.id = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 5:
+ if ftype == TType.I64:
+ self.parent_id = iprot.readI64();
+ else:
+ iprot.skip(ftype)
+ elif fid == 6:
+ if ftype == TType.LIST:
+ self.annotations = []
+ (_etype3, _size0) = iprot.readListBegin()
+ for _i4 in xrange(_size0):
+ _elem5 = Annotation()
+ _elem5.read(iprot)
+ self.annotations.append(_elem5)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ elif fid == 8:
+ if ftype == TType.LIST:
+ self.binary_annotations = []
+ (_etype9, _size6) = iprot.readListBegin()
+ for _i10 in xrange(_size6):
+ _elem11 = BinaryAnnotation()
+ _elem11.read(iprot)
+ self.binary_annotations.append(_elem11)
+ iprot.readListEnd()
+ else:
+ iprot.skip(ftype)
+ else:
+ iprot.skip(ftype)
+ iprot.readFieldEnd()
+ iprot.readStructEnd()
+
+ def write(self, oprot):
+ if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+ oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+ return
+ oprot.writeStructBegin('Span')
+ if self.trace_id is not None:
+ oprot.writeFieldBegin('trace_id', TType.I64, 1)
+ oprot.writeI64(self.trace_id)
+ oprot.writeFieldEnd()
+ if self.name is not None:
+ oprot.writeFieldBegin('name', TType.STRING, 3)
+ oprot.writeString(self.name)
+ oprot.writeFieldEnd()
+ if self.id is not None:
+ oprot.writeFieldBegin('id', TType.I64, 4)
+ oprot.writeI64(self.id)
+ oprot.writeFieldEnd()
+ if self.parent_id is not None:
+ oprot.writeFieldBegin('parent_id', TType.I64, 5)
+ oprot.writeI64(self.parent_id)
+ oprot.writeFieldEnd()
+ if self.annotations is not None:
+ oprot.writeFieldBegin('annotations', TType.LIST, 6)
+ oprot.writeListBegin(TType.STRUCT, len(self.annotations))
+ for iter12 in self.annotations:
+ iter12.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ if self.binary_annotations is not None:
+ oprot.writeFieldBegin('binary_annotations', TType.LIST, 8)
+ oprot.writeListBegin(TType.STRUCT, len(self.binary_annotations))
+ for iter13 in self.binary_annotations:
+ iter13.write(oprot)
+ oprot.writeListEnd()
+ oprot.writeFieldEnd()
+ oprot.writeFieldStop()
+ oprot.writeStructEnd()
+
+ def validate(self):
+ return
+
+
+ def __repr__(self):
+ L = ['%s=%r' % (key, value)
+ for key, value in self.__dict__.iteritems()]
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+
+ def __eq__(self, other):
+ return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+
+ def __ne__(self, other):
+ return not (self == other)
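+
+
+# Editorial sketch (not part of the generated module): how the generated
+# types above compose. Ids and values are illustrative assumptions.
+if __name__ == '__main__':
+    _ep = Endpoint(ipv4=0, port=80, service_name='demo')
+    _ann = Annotation(timestamp=1234567890000000, value='sr', host=_ep)
+    _span = Span(trace_id=1, name='GET', id=2, parent_id=None,
+                 annotations=[_ann], binary_annotations=[])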
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/api.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/api.py
new file mode 100644
index 0000000..8edde5c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/api.py
@@ -0,0 +1,187 @@
+import os
+import sys
+import time
+import struct
+import socket
+import random
+
+from eventlet.green import threading
+from eventlet.zipkin._thrift.zipkinCore import ttypes
+from eventlet.zipkin._thrift.zipkinCore.constants import SERVER_SEND
+
+
+client = None
+_tls = threading.local() # thread local storage
+
+
+def put_annotation(msg, endpoint=None):
+    """ This is the annotation API.
+    You can add your own annotations from within your code.
+    Annotations are recorded with a timestamp automatically.
+ e.g.) put_annotation('cache hit for %s' % request)
+
+ :param msg: String message
+ :param endpoint: host info
+ """
+ if is_sample():
+ a = ZipkinDataBuilder.build_annotation(msg, endpoint)
+ trace_data = get_trace_data()
+ trace_data.add_annotation(a)
+
+
+def put_key_value(key, value, endpoint=None):
+    """ This is the binary annotation API.
+    You can attach your own extra key-value information from within your code.
+    Key-value pairs have no time component.
+ e.g.) put_key_value('http.uri', '/hoge/index.html')
+
+ :param key: String
+ :param value: String
+ :param endpoint: host info
+ """
+ if is_sample():
+ b = ZipkinDataBuilder.build_binary_annotation(key, value, endpoint)
+ trace_data = get_trace_data()
+ trace_data.add_binary_annotation(b)
+
+
+def is_tracing():
+    """ Return whether the current thread is tracing or not """
+ return hasattr(_tls, 'trace_data')
+
+
+def is_sample():
+ """ Return whether it should record trace information
+ for the request or not
+ """
+ return is_tracing() and _tls.trace_data.sampled
+
+
+def get_trace_data():
+ if is_tracing():
+ return _tls.trace_data
+
+
+def set_trace_data(trace_data):
+ _tls.trace_data = trace_data
+
+
+def init_trace_data():
+ if is_tracing():
+ del _tls.trace_data
+
+
+def _uniq_id():
+ """
+ Create a random 64-bit signed integer appropriate
+ for use as trace and span IDs.
+ XXX: By experimentation zipkin has trouble recording traces with ids
+ larger than (2 ** 56) - 1
+ """
+ return random.randint(0, (2 ** 56) - 1)
+
+
+def generate_trace_id():
+ return _uniq_id()
+
+
+def generate_span_id():
+ return _uniq_id()
+
+
+class TraceData:
+
+ END_ANNOTATION = SERVER_SEND
+
+ def __init__(self, name, trace_id, span_id, parent_id, sampled, endpoint):
+ """
+ :param name: RPC name (String)
+ :param trace_id: int
+ :param span_id: int
+ :param parent_id: int or None
+        :param sampled: whether trace data should be recorded for this
+            request; also lets downstream servers know (bool)
+        :param endpoint: zipkin._thrift.zipkinCore.ttypes.Endpoint
+ """
+ self.name = name
+ self.trace_id = trace_id
+ self.span_id = span_id
+ self.parent_id = parent_id
+ self.sampled = sampled
+ self.endpoint = endpoint
+ self.annotations = []
+ self.bannotations = []
+ self._done = False
+
+ def add_annotation(self, annotation):
+ if annotation.host is None:
+ annotation.host = self.endpoint
+ if not self._done:
+ self.annotations.append(annotation)
+ if annotation.value == self.END_ANNOTATION:
+ self.flush()
+
+ def add_binary_annotation(self, bannotation):
+ if bannotation.host is None:
+ bannotation.host = self.endpoint
+ if not self._done:
+ self.bannotations.append(bannotation)
+
+ def flush(self):
+ span = ZipkinDataBuilder.build_span(name=self.name,
+ trace_id=self.trace_id,
+ span_id=self.span_id,
+ parent_id=self.parent_id,
+ annotations=self.annotations,
+ bannotations=self.bannotations)
+ client.send_to_collector(span)
+ self.annotations = []
+ self.bannotations = []
+ self._done = True
+
+
+class ZipkinDataBuilder:
+ @staticmethod
+ def build_span(name, trace_id, span_id, parent_id,
+ annotations, bannotations):
+ return ttypes.Span(
+ name=name,
+ trace_id=trace_id,
+ id=span_id,
+ parent_id=parent_id,
+ annotations=annotations,
+ binary_annotations=bannotations
+ )
+
+ @staticmethod
+ def build_annotation(value, endpoint=None):
+ if isinstance(value, str):
+ value = value.encode('utf-8')
+ assert isinstance(value, bytes)
+ return ttypes.Annotation(time.time() * 1000 * 1000,
+ value, endpoint)
+
+ @staticmethod
+ def build_binary_annotation(key, value, endpoint=None):
+ annotation_type = ttypes.AnnotationType.STRING
+ return ttypes.BinaryAnnotation(key, value, annotation_type, endpoint)
+
+ @staticmethod
+ def build_endpoint(ipv4=None, port=None, service_name=None):
+ if ipv4 is not None:
+ ipv4 = ZipkinDataBuilder._ipv4_to_int(ipv4)
+ if service_name is None:
+ service_name = ZipkinDataBuilder._get_script_name()
+ return ttypes.Endpoint(
+ ipv4=ipv4,
+ port=port,
+ service_name=service_name
+ )
+
+ @staticmethod
+ def _ipv4_to_int(ipv4):
+ return struct.unpack('!i', socket.inet_aton(ipv4))[0]
+
+ @staticmethod
+ def _get_script_name():
+ return os.path.basename(sys.argv[0])
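+
+
+# Editorial sketch (an assumption, not part of eventlet): driving the API
+# above by hand, without the WSGI patch. The name, ids, and URI are
+# illustrative; `client` must be set to a real ZipkinClient before the
+# SERVER_SEND annotation is recorded, since that triggers flush() and
+# send_to_collector().
+if __name__ == '__main__':
+    _ep = ZipkinDataBuilder.build_endpoint('127.0.0.1', 80, 'demo')
+    set_trace_data(TraceData(name='GET',
+                             trace_id=generate_trace_id(),
+                             span_id=generate_span_id(),
+                             parent_id=None,
+                             sampled=True,
+                             endpoint=_ep))
+    put_annotation('request started')
+    put_key_value('http.uri', '/index.html')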
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/client.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/client.py
new file mode 100644
index 0000000..faff244
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/client.py
@@ -0,0 +1,56 @@
+import base64
+import warnings
+
+from scribe import scribe
+from thrift.transport import TTransport, TSocket
+from thrift.protocol import TBinaryProtocol
+
+from eventlet import GreenPile
+
+
+CATEGORY = 'zipkin'
+
+
+class ZipkinClient:
+
+ def __init__(self, host='127.0.0.1', port=9410):
+ """
+ :param host: zipkin collector IP address (default '127.0.0.1')
+ :param port: zipkin collector port (default 9410)
+ """
+ self.host = host
+ self.port = port
+ self.pile = GreenPile(1)
+ self._connect()
+
+ def _connect(self):
+ socket = TSocket.TSocket(self.host, self.port)
+ self.transport = TTransport.TFramedTransport(socket)
+ protocol = TBinaryProtocol.TBinaryProtocol(self.transport,
+ False, False)
+ self.scribe_client = scribe.Client(protocol)
+ try:
+ self.transport.open()
+ except TTransport.TTransportException as e:
+            warnings.warn(str(e))
+
+ def _build_message(self, thrift_obj):
+ trans = TTransport.TMemoryBuffer()
+ protocol = TBinaryProtocol.TBinaryProtocolAccelerated(trans=trans)
+ thrift_obj.write(protocol)
+ return base64.b64encode(trans.getvalue())
+
+ def send_to_collector(self, span):
+ self.pile.spawn(self._send, span)
+
+ def _send(self, span):
+ log_entry = scribe.LogEntry(CATEGORY, self._build_message(span))
+ try:
+ self.scribe_client.Log([log_entry])
+ except Exception as e:
+ msg = 'ZipkinClient send error %s' % str(e)
+ warnings.warn(msg)
+ self._connect()
+
+ def close(self):
+ self.transport.close()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex1.png b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex1.png
new file mode 100755
index 0000000..7f7a049
Binary files /dev/null and b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex1.png differ
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex2.png b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex2.png
new file mode 100755
index 0000000..19dbc3a
Binary files /dev/null and b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex2.png differ
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex3.png b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex3.png
new file mode 100755
index 0000000..5ff9860
Binary files /dev/null and b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/example/ex3.png differ
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/greenthread.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/greenthread.py
new file mode 100644
index 0000000..37e12d6
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/greenthread.py
@@ -0,0 +1,33 @@
+from eventlet import greenthread
+
+from eventlet.zipkin import api
+
+
+__original_init__ = greenthread.GreenThread.__init__
+__original_main__ = greenthread.GreenThread.main
+
+
+def _patched__init(self, parent):
+ # parent thread saves current TraceData from tls to self
+ if api.is_tracing():
+ self.trace_data = api.get_trace_data()
+
+ __original_init__(self, parent)
+
+
+def _patched_main(self, function, args, kwargs):
+ # child thread inherits TraceData
+ if hasattr(self, 'trace_data'):
+ api.set_trace_data(self.trace_data)
+
+ __original_main__(self, function, args, kwargs)
+
+
+def patch():
+ greenthread.GreenThread.__init__ = _patched__init
+ greenthread.GreenThread.main = _patched_main
+
+
+def unpatch():
+ greenthread.GreenThread.__init__ = __original_init__
+ greenthread.GreenThread.main = __original_main__
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/http.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/http.py
new file mode 100644
index 0000000..f981a17
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/http.py
@@ -0,0 +1,29 @@
+import warnings
+
+from eventlet.green import httplib
+from eventlet.zipkin import api
+
+
+# see https://twitter.github.io/zipkin/Instrumenting.html
+HDR_TRACE_ID = 'X-B3-TraceId'
+HDR_SPAN_ID = 'X-B3-SpanId'
+HDR_PARENT_SPAN_ID = 'X-B3-ParentSpanId'
+HDR_SAMPLED = 'X-B3-Sampled'
+
+
+def patch():
+    warnings.warn("Since the current Python thrift release \
+        doesn't support Python 3, eventlet.zipkin.http \
+        doesn't support Python 3 (http.client) either")
+
+
+def unpatch():
+ pass
+
+
+def hex_str(n):
+    """
+    Thrift uses a binary representation of trace and span ids;
+    HTTP headers use a hexadecimal representation of the same.
+    """
+ return '%0.16x' % (n,)
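+
+
+# Editorial sketch (not part of eventlet): round-tripping between the
+# binary ids thrift uses and the hexadecimal form carried in the B3
+# headers above.
+if __name__ == '__main__':
+    assert hex_str(12345) == '0000000000003039'
+    assert int(hex_str(12345), 16) == 12345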
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/log.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/log.py
new file mode 100644
index 0000000..b7f9d32
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/log.py
@@ -0,0 +1,19 @@
+import logging
+
+from eventlet.zipkin import api
+
+
+__original_handle__ = logging.Logger.handle
+
+
+def _patched_handle(self, record):
+ __original_handle__(self, record)
+ api.put_annotation(record.getMessage())
+
+
+def patch():
+ logging.Logger.handle = _patched_handle
+
+
+def unpatch():
+ logging.Logger.handle = __original_handle__
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/patcher.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/patcher.py
new file mode 100644
index 0000000..8e7d8ad
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/patcher.py
@@ -0,0 +1,41 @@
+from eventlet.zipkin import http
+from eventlet.zipkin import wsgi
+from eventlet.zipkin import greenthread
+from eventlet.zipkin import log
+from eventlet.zipkin import api
+from eventlet.zipkin.client import ZipkinClient
+
+
+def enable_trace_patch(host='127.0.0.1', port=9410,
+ trace_app_log=False, sampling_rate=1.0):
+    """ Apply monkey patches to trace your WSGI application.
+
+ :param host: Scribe daemon IP address (default: '127.0.0.1')
+ :param port: Scribe daemon port (default: 9410)
+    :param trace_app_log: A Boolean indicating whether the tracer
+        also records the application log. This facility assumes that
+        your application uses the Python standard logging library.
+        (default: False)
+    :param sampling_rate: A float value (0.0-1.0) that indicates
+        the tracing frequency. If you specify 1.0, all requests
+        are traced (and sent to the Zipkin collector).
+        If you specify 0.1, only 1/10 of requests are traced. (default: 1.0)
+ """
+ api.client = ZipkinClient(host, port)
+
+ # monkey patch for adding tracing facility
+ wsgi.patch(sampling_rate)
+ http.patch()
+ greenthread.patch()
+
+ # monkey patch for capturing application log
+ if trace_app_log:
+ log.patch()
+
+
+def disable_trace_patch():
+ http.unpatch()
+ wsgi.unpatch()
+ greenthread.unpatch()
+ log.unpatch()
+ api.client.close()
diff --git a/tapdown/lib/python3.11/site-packages/eventlet/zipkin/wsgi.py b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/wsgi.py
new file mode 100644
index 0000000..402d142
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/eventlet/zipkin/wsgi.py
@@ -0,0 +1,78 @@
+import random
+
+from eventlet import wsgi
+from eventlet.zipkin import api
+from eventlet.zipkin._thrift.zipkinCore.constants import \
+ SERVER_RECV, SERVER_SEND
+from eventlet.zipkin.http import \
+ HDR_TRACE_ID, HDR_SPAN_ID, HDR_PARENT_SPAN_ID, HDR_SAMPLED
+
+
+_sampler = None
+__original_handle_one_response__ = wsgi.HttpProtocol.handle_one_response
+
+
+def _patched_handle_one_response(self):
+ api.init_trace_data()
+ trace_id = int_or_none(self.headers.getheader(HDR_TRACE_ID))
+ span_id = int_or_none(self.headers.getheader(HDR_SPAN_ID))
+ parent_id = int_or_none(self.headers.getheader(HDR_PARENT_SPAN_ID))
+ sampled = bool_or_none(self.headers.getheader(HDR_SAMPLED))
+ if trace_id is None: # front-end server
+ trace_id = span_id = api.generate_trace_id()
+ parent_id = None
+ sampled = _sampler.sampling()
+ ip, port = self.request.getsockname()[:2]
+ ep = api.ZipkinDataBuilder.build_endpoint(ip, port)
+ trace_data = api.TraceData(name=self.command,
+ trace_id=trace_id,
+ span_id=span_id,
+ parent_id=parent_id,
+ sampled=sampled,
+ endpoint=ep)
+ api.set_trace_data(trace_data)
+ api.put_annotation(SERVER_RECV)
+ api.put_key_value('http.uri', self.path)
+
+ __original_handle_one_response__(self)
+
+ if api.is_sample():
+ api.put_annotation(SERVER_SEND)
+
+
+class Sampler:
+ def __init__(self, sampling_rate):
+ self.sampling_rate = sampling_rate
+
+ def sampling(self):
+ # avoid generating unneeded random numbers
+ if self.sampling_rate == 1.0:
+ return True
+ r = random.random()
+ if r < self.sampling_rate:
+ return True
+ return False
+
+
+def int_or_none(val):
+ if val is None:
+ return None
+ return int(val, 16)
+
+
+def bool_or_none(val):
+ if val == '1':
+ return True
+ if val == '0':
+ return False
+ return None
+
+
+def patch(sampling_rate):
+ global _sampler
+ _sampler = Sampler(sampling_rate)
+ wsgi.HttpProtocol.handle_one_response = _patched_handle_one_response
+
+
+def unpatch():
+ wsgi.HttpProtocol.handle_one_response = __original_handle_one_response__
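+
+
+# Editorial sketch (not part of eventlet): the B3 header parsers and the
+# Sampler above in isolation; values are illustrative.
+if __name__ == '__main__':
+    assert int_or_none('0000000000003039') == 12345
+    assert int_or_none(None) is None
+    assert bool_or_none('1') is True and bool_or_none('0') is False
+    assert Sampler(1.0).sampling() is True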
diff --git a/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/INSTALLER b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/METADATA b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/METADATA
new file mode 100644
index 0000000..46028fb
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/METADATA
@@ -0,0 +1,91 @@
+Metadata-Version: 2.4
+Name: Flask
+Version: 3.1.2
+Summary: A simple framework for building complex web applications.
+Maintainer-email: Pallets
+Requires-Python: >=3.9
+Description-Content-Type: text/markdown
+License-Expression: BSD-3-Clause
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Framework :: Flask
+Classifier: Intended Audience :: Developers
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI
+Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application
+Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
+Classifier: Typing :: Typed
+License-File: LICENSE.txt
+Requires-Dist: blinker>=1.9.0
+Requires-Dist: click>=8.1.3
+Requires-Dist: importlib-metadata>=3.6.0; python_version < '3.10'
+Requires-Dist: itsdangerous>=2.2.0
+Requires-Dist: jinja2>=3.1.2
+Requires-Dist: markupsafe>=2.1.1
+Requires-Dist: werkzeug>=3.1.0
+Requires-Dist: asgiref>=3.2 ; extra == "async"
+Requires-Dist: python-dotenv ; extra == "dotenv"
+Project-URL: Changes, https://flask.palletsprojects.com/page/changes/
+Project-URL: Chat, https://discord.gg/pallets
+Project-URL: Documentation, https://flask.palletsprojects.com/
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Source, https://github.com/pallets/flask/
+Provides-Extra: async
+Provides-Extra: dotenv
+
+
+
+# Flask
+
+Flask is a lightweight [WSGI] web application framework. It is designed
+to make getting started quick and easy, with the ability to scale up to
+complex applications. It began as a simple wrapper around [Werkzeug]
+and [Jinja], and has become one of the most popular Python web
+application frameworks.
+
+Flask offers suggestions, but doesn't enforce any dependencies or
+project layout. It is up to the developer to choose the tools and
+libraries they want to use. There are many extensions provided by the
+community that make adding new functionality easy.
+
+[WSGI]: https://wsgi.readthedocs.io/
+[Werkzeug]: https://werkzeug.palletsprojects.com/
+[Jinja]: https://jinja.palletsprojects.com/
+
+## A Simple Example
+
+```python
+# save this as app.py
+from flask import Flask
+
+app = Flask(__name__)
+
+@app.route("/")
+def hello():
+ return "Hello, World!"
+```
+
+```
+$ flask run
+ * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit)
+```
+
+## Donate
+
+The Pallets organization develops and supports Flask and the libraries
+it uses. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, [please
+donate today].
+
+[please donate today]: https://palletsprojects.com/donate
+
+## Contributing
+
+See our [detailed contributing documentation][contrib] for many ways to
+contribute, including reporting issues, requesting features, asking or answering
+questions, and making PRs.
+
+[contrib]: https://palletsprojects.com/contributing/
+
diff --git a/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/RECORD b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/RECORD
new file mode 100644
index 0000000..0174f8f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/RECORD
@@ -0,0 +1,58 @@
+../../../bin/flask,sha256=3YsdpRBia0sH9o6LqvfP1WDLeD1TD3aqyp4JkltIfKI,226
+flask-3.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+flask-3.1.2.dist-info/METADATA,sha256=oRg63DAAIcoLAr7kzTgIEKfm8_4HMTRpmWmIptdY_js,3167
+flask-3.1.2.dist-info/RECORD,,
+flask-3.1.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+flask-3.1.2.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+flask-3.1.2.dist-info/entry_points.txt,sha256=bBP7hTOS5fz9zLtC7sPofBZAlMkEvBxu7KqS6l5lvc4,40
+flask-3.1.2.dist-info/licenses/LICENSE.txt,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
+flask/__init__.py,sha256=mHvJN9Swtl1RDtjCqCIYyIniK_SZ_l_hqUynOzgpJ9o,2701
+flask/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
+flask/__pycache__/__init__.cpython-311.pyc,,
+flask/__pycache__/__main__.cpython-311.pyc,,
+flask/__pycache__/app.cpython-311.pyc,,
+flask/__pycache__/blueprints.cpython-311.pyc,,
+flask/__pycache__/cli.cpython-311.pyc,,
+flask/__pycache__/config.cpython-311.pyc,,
+flask/__pycache__/ctx.cpython-311.pyc,,
+flask/__pycache__/debughelpers.cpython-311.pyc,,
+flask/__pycache__/globals.cpython-311.pyc,,
+flask/__pycache__/helpers.cpython-311.pyc,,
+flask/__pycache__/logging.cpython-311.pyc,,
+flask/__pycache__/sessions.cpython-311.pyc,,
+flask/__pycache__/signals.cpython-311.pyc,,
+flask/__pycache__/templating.cpython-311.pyc,,
+flask/__pycache__/testing.cpython-311.pyc,,
+flask/__pycache__/typing.cpython-311.pyc,,
+flask/__pycache__/views.cpython-311.pyc,,
+flask/__pycache__/wrappers.cpython-311.pyc,,
+flask/app.py,sha256=XGqgFRsLgBhzIoB2HSftoMTIM3hjDiH6rdV7c3g3IKc,61744
+flask/blueprints.py,sha256=p5QE2lY18GItbdr_RKRpZ8Do17g0PvQGIgZkSUDhX2k,4541
+flask/cli.py,sha256=Pfh72-BxlvoH0QHCDOc1HvXG7Kq5Xetf3zzNz2kNSHk,37184
+flask/config.py,sha256=PiqF0DPam6HW0FH4CH1hpXTBe30NSzjPEOwrz1b6kt0,13219
+flask/ctx.py,sha256=sPKzahqtgxaS7O0y9E_NzUJNUDyTD6M4GkDrVu2fU3Y,15064
+flask/debughelpers.py,sha256=PGIDhStW_efRjpaa3zHIpo-htStJOR41Ip3OJWPYBwo,6080
+flask/globals.py,sha256=XdQZmStBmPIs8t93tjx6pO7Bm3gobAaONWkFcUHaGas,1713
+flask/helpers.py,sha256=rJZge7_J288J1UQv5-kNf4oEaw332PP8NTW0QRIBbXE,23517
+flask/json/__init__.py,sha256=hLNR898paqoefdeAhraa5wyJy-bmRB2k2dV4EgVy2Z8,5602
+flask/json/__pycache__/__init__.cpython-311.pyc,,
+flask/json/__pycache__/provider.cpython-311.pyc,,
+flask/json/__pycache__/tag.cpython-311.pyc,,
+flask/json/provider.py,sha256=5imEzY5HjV2HoUVrQbJLqXCzMNpZXfD0Y1XqdLV2XBA,7672
+flask/json/tag.py,sha256=DhaNwuIOhdt2R74oOC9Y4Z8ZprxFYiRb5dUP5byyINw,9281
+flask/logging.py,sha256=8sM3WMTubi1cBb2c_lPkWpN0J8dMAqrgKRYLLi1dCVI,2377
+flask/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+flask/sansio/README.md,sha256=-0X1tECnilmz1cogx-YhNw5d7guK7GKrq_DEV2OzlU0,228
+flask/sansio/__pycache__/app.cpython-311.pyc,,
+flask/sansio/__pycache__/blueprints.cpython-311.pyc,,
+flask/sansio/__pycache__/scaffold.cpython-311.pyc,,
+flask/sansio/app.py,sha256=5EbxwHOchgcpZqQyalA9vyDBopknOvDg6BVwXFyFD2s,38099
+flask/sansio/blueprints.py,sha256=Tqe-7EkZ-tbWchm8iDoCfD848f0_3nLv6NNjeIPvHwM,24637
+flask/sansio/scaffold.py,sha256=wSASXYdFRWJmqcL0Xq-T7N-PDVUSiFGvjO9kPZg58bk,30371
+flask/sessions.py,sha256=duvYGmCGh_H3cgMuy2oeSjrCsCvLylF4CBKOXpN0Qms,15480
+flask/signals.py,sha256=V7lMUww7CqgJ2ThUBn1PiatZtQanOyt7OZpu2GZI-34,750
+flask/templating.py,sha256=IHsdsF-eBJPCJE0AJLCi1VhhnytOGdzHCn3yThz87c4,7536
+flask/testing.py,sha256=zzC7XxhBWOP9H697IV_4SG7Lg3Lzb5PWiyEP93_KQXE,10117
+flask/typing.py,sha256=L-L5t2jKgS0aOmVhioQ_ylqcgiVFnA6yxO-RLNhq-GU,3293
+flask/views.py,sha256=xzJx6oJqGElThtEghZN7ZQGMw5TDFyuRxUkecwRuAoA,6962
+flask/wrappers.py,sha256=jUkv4mVek2Iq4hwxd4RvqrIMb69Bv0PElDgWLmd5ORo,9406
diff --git a/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/REQUESTED b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/WHEEL b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/WHEEL
new file mode 100644
index 0000000..d8b9936
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.12.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/entry_points.txt b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/entry_points.txt
new file mode 100644
index 0000000..eec6733
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+flask=flask.cli:main
+
diff --git a/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/licenses/LICENSE.txt b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/licenses/LICENSE.txt
new file mode 100644
index 0000000..9d227a0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask-3.1.2.dist-info/licenses/LICENSE.txt
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/tapdown/lib/python3.11/site-packages/flask/__init__.py b/tapdown/lib/python3.11/site-packages/flask/__init__.py
new file mode 100644
index 0000000..1fdc50c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/__init__.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import typing as t
+
+from . import json as json
+from .app import Flask as Flask
+from .blueprints import Blueprint as Blueprint
+from .config import Config as Config
+from .ctx import after_this_request as after_this_request
+from .ctx import copy_current_request_context as copy_current_request_context
+from .ctx import has_app_context as has_app_context
+from .ctx import has_request_context as has_request_context
+from .globals import current_app as current_app
+from .globals import g as g
+from .globals import request as request
+from .globals import session as session
+from .helpers import abort as abort
+from .helpers import flash as flash
+from .helpers import get_flashed_messages as get_flashed_messages
+from .helpers import get_template_attribute as get_template_attribute
+from .helpers import make_response as make_response
+from .helpers import redirect as redirect
+from .helpers import send_file as send_file
+from .helpers import send_from_directory as send_from_directory
+from .helpers import stream_with_context as stream_with_context
+from .helpers import url_for as url_for
+from .json import jsonify as jsonify
+from .signals import appcontext_popped as appcontext_popped
+from .signals import appcontext_pushed as appcontext_pushed
+from .signals import appcontext_tearing_down as appcontext_tearing_down
+from .signals import before_render_template as before_render_template
+from .signals import got_request_exception as got_request_exception
+from .signals import message_flashed as message_flashed
+from .signals import request_finished as request_finished
+from .signals import request_started as request_started
+from .signals import request_tearing_down as request_tearing_down
+from .signals import template_rendered as template_rendered
+from .templating import render_template as render_template
+from .templating import render_template_string as render_template_string
+from .templating import stream_template as stream_template
+from .templating import stream_template_string as stream_template_string
+from .wrappers import Request as Request
+from .wrappers import Response as Response
+
+if not t.TYPE_CHECKING:
+
+ def __getattr__(name: str) -> t.Any:
+ if name == "__version__":
+ import importlib.metadata
+ import warnings
+
+ warnings.warn(
+ "The '__version__' attribute is deprecated and will be removed in"
+ " Flask 3.2. Use feature detection or"
+ " 'importlib.metadata.version(\"flask\")' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return importlib.metadata.version("flask")
+
+ raise AttributeError(name)
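+
+# Editorial note: given the deprecation above, the supported way to read
+# the installed Flask version is feature detection or, for example:
+#
+#     import importlib.metadata
+#     importlib.metadata.version("flask")  # e.g. "3.1.2"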
diff --git a/tapdown/lib/python3.11/site-packages/flask/__main__.py b/tapdown/lib/python3.11/site-packages/flask/__main__.py
new file mode 100644
index 0000000..4e28416
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/__main__.py
@@ -0,0 +1,3 @@
+from .cli import main
+
+main()
diff --git a/tapdown/lib/python3.11/site-packages/flask/app.py b/tapdown/lib/python3.11/site-packages/flask/app.py
new file mode 100644
index 0000000..1232b03
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/app.py
@@ -0,0 +1,1536 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import os
+import sys
+import typing as t
+import weakref
+from datetime import timedelta
+from inspect import iscoroutinefunction
+from itertools import chain
+from types import TracebackType
+from urllib.parse import quote as _url_quote
+
+import click
+from werkzeug.datastructures import Headers
+from werkzeug.datastructures import ImmutableDict
+from werkzeug.exceptions import BadRequestKeyError
+from werkzeug.exceptions import HTTPException
+from werkzeug.exceptions import InternalServerError
+from werkzeug.routing import BuildError
+from werkzeug.routing import MapAdapter
+from werkzeug.routing import RequestRedirect
+from werkzeug.routing import RoutingException
+from werkzeug.routing import Rule
+from werkzeug.serving import is_running_from_reloader
+from werkzeug.wrappers import Response as BaseResponse
+from werkzeug.wsgi import get_host
+
+from . import cli
+from . import typing as ft
+from .ctx import AppContext
+from .ctx import RequestContext
+from .globals import _cv_app
+from .globals import _cv_request
+from .globals import current_app
+from .globals import g
+from .globals import request
+from .globals import request_ctx
+from .globals import session
+from .helpers import get_debug_flag
+from .helpers import get_flashed_messages
+from .helpers import get_load_dotenv
+from .helpers import send_from_directory
+from .sansio.app import App
+from .sansio.scaffold import _sentinel
+from .sessions import SecureCookieSessionInterface
+from .sessions import SessionInterface
+from .signals import appcontext_tearing_down
+from .signals import got_request_exception
+from .signals import request_finished
+from .signals import request_started
+from .signals import request_tearing_down
+from .templating import Environment
+from .wrappers import Request
+from .wrappers import Response
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from .testing import FlaskClient
+ from .testing import FlaskCliRunner
+ from .typing import HeadersValue
+
+T_shell_context_processor = t.TypeVar(
+ "T_shell_context_processor", bound=ft.ShellContextProcessorCallable
+)
+T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
+T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable)
+T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable)
+T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable)
+
+
+def _make_timedelta(value: timedelta | int | None) -> timedelta | None:
+ if value is None or isinstance(value, timedelta):
+ return value
+
+ return timedelta(seconds=value)
+
+
+class Flask(App):
+ """The flask object implements a WSGI application and acts as the central
+ object. It is passed the name of the module or package of the
+ application. Once it is created it will act as a central registry for
+ the view functions, the URL rules, template configuration and much more.
+
+ The name of the package is used to resolve resources from inside the
+ package or the folder the module is contained in depending on if the
+ package parameter resolves to an actual python package (a folder with
+ an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
+
+ For more information about resource loading, see :func:`open_resource`.
+
+ Usually you create a :class:`Flask` instance in your main module or
+ in the :file:`__init__.py` file of your package like this::
+
+ from flask import Flask
+ app = Flask(__name__)
+
+ .. admonition:: About the First Parameter
+
+ The idea of the first parameter is to give Flask an idea of what
+ belongs to your application. This name is used to find resources
+ on the filesystem, can be used by extensions to improve debugging
+ information and a lot more.
+
+        So it's important what you provide there. If you are using a single
+        module, `__name__` is always the correct value. If, however, you are
+        using a package, it's usually recommended to hardcode the name of
+        your package there.
+
+ For example if your application is defined in :file:`yourapplication/app.py`
+ you should create it with one of the two versions below::
+
+ app = Flask('yourapplication')
+ app = Flask(__name__.split('.')[0])
+
+        Why is that? The application will work even with `__name__`, thanks
+        to how resources are looked up. However, it will make debugging more
+ painful. Certain extensions can make assumptions based on the
+ import name of your application. For example the Flask-SQLAlchemy
+ extension will look for the code in your application that triggered
+ an SQL query in debug mode. If the import name is not properly set
+ up, that debugging information is lost. (For example it would only
+ pick up SQL queries in `yourapplication.app` and not
+ `yourapplication.views.frontend`)
+
+ .. versionadded:: 0.7
+ The `static_url_path`, `static_folder`, and `template_folder`
+ parameters were added.
+
+ .. versionadded:: 0.8
+ The `instance_path` and `instance_relative_config` parameters were
+ added.
+
+ .. versionadded:: 0.11
+ The `root_path` parameter was added.
+
+ .. versionadded:: 1.0
+ The ``host_matching`` and ``static_host`` parameters were added.
+
+ .. versionadded:: 1.0
+ The ``subdomain_matching`` parameter was added. Subdomain
+ matching needs to be enabled manually now. Setting
+ :data:`SERVER_NAME` does not implicitly enable it.
+
+ :param import_name: the name of the application package
+ :param static_url_path: can be used to specify a different path for the
+ static files on the web. Defaults to the name
+ of the `static_folder` folder.
+ :param static_folder: The folder with static files that is served at
+ ``static_url_path``. Relative to the application ``root_path``
+ or an absolute path. Defaults to ``'static'``.
+ :param static_host: the host to use when adding the static route.
+ Defaults to None. Required when using ``host_matching=True``
+ with a ``static_folder`` configured.
+ :param host_matching: set ``url_map.host_matching`` attribute.
+ Defaults to False.
+ :param subdomain_matching: consider the subdomain relative to
+ :data:`SERVER_NAME` when matching routes. Defaults to False.
+ :param template_folder: the folder that contains the templates that should
+ be used by the application. Defaults to
+ ``'templates'`` folder in the root path of the
+ application.
+ :param instance_path: An alternative instance path for the application.
+ By default the folder ``'instance'`` next to the
+ package or module is assumed to be the instance
+ path.
+ :param instance_relative_config: if set to ``True`` relative filenames
+ for loading the config are assumed to
+ be relative to the instance path instead
+ of the application root.
+ :param root_path: The path to the root of the application files.
+ This should only be set manually when it can't be detected
+ automatically, such as for namespace packages.
+ """
+
+ default_config = ImmutableDict(
+ {
+ "DEBUG": None,
+ "TESTING": False,
+ "PROPAGATE_EXCEPTIONS": None,
+ "SECRET_KEY": None,
+ "SECRET_KEY_FALLBACKS": None,
+ "PERMANENT_SESSION_LIFETIME": timedelta(days=31),
+ "USE_X_SENDFILE": False,
+ "TRUSTED_HOSTS": None,
+ "SERVER_NAME": None,
+ "APPLICATION_ROOT": "/",
+ "SESSION_COOKIE_NAME": "session",
+ "SESSION_COOKIE_DOMAIN": None,
+ "SESSION_COOKIE_PATH": None,
+ "SESSION_COOKIE_HTTPONLY": True,
+ "SESSION_COOKIE_SECURE": False,
+ "SESSION_COOKIE_PARTITIONED": False,
+ "SESSION_COOKIE_SAMESITE": None,
+ "SESSION_REFRESH_EACH_REQUEST": True,
+ "MAX_CONTENT_LENGTH": None,
+ "MAX_FORM_MEMORY_SIZE": 500_000,
+ "MAX_FORM_PARTS": 1_000,
+ "SEND_FILE_MAX_AGE_DEFAULT": None,
+ "TRAP_BAD_REQUEST_ERRORS": None,
+ "TRAP_HTTP_EXCEPTIONS": False,
+ "EXPLAIN_TEMPLATE_LOADING": False,
+ "PREFERRED_URL_SCHEME": "http",
+ "TEMPLATES_AUTO_RELOAD": None,
+ "MAX_COOKIE_SIZE": 4093,
+ "PROVIDE_AUTOMATIC_OPTIONS": True,
+ }
+ )
+
+ #: The class that is used for request objects. See :class:`~flask.Request`
+ #: for more information.
+ request_class: type[Request] = Request
+
+ #: The class that is used for response objects. See
+ #: :class:`~flask.Response` for more information.
+ response_class: type[Response] = Response
+
+ #: the session interface to use. By default an instance of
+ #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here.
+ #:
+ #: .. versionadded:: 0.8
+ session_interface: SessionInterface = SecureCookieSessionInterface()
+
+ def __init__(
+ self,
+ import_name: str,
+ static_url_path: str | None = None,
+ static_folder: str | os.PathLike[str] | None = "static",
+ static_host: str | None = None,
+ host_matching: bool = False,
+ subdomain_matching: bool = False,
+ template_folder: str | os.PathLike[str] | None = "templates",
+ instance_path: str | None = None,
+ instance_relative_config: bool = False,
+ root_path: str | None = None,
+ ):
+ super().__init__(
+ import_name=import_name,
+ static_url_path=static_url_path,
+ static_folder=static_folder,
+ static_host=static_host,
+ host_matching=host_matching,
+ subdomain_matching=subdomain_matching,
+ template_folder=template_folder,
+ instance_path=instance_path,
+ instance_relative_config=instance_relative_config,
+ root_path=root_path,
+ )
+
+ #: The Click command group for registering CLI commands for this
+ #: object. The commands are available from the ``flask`` command
+ #: once the application has been discovered and blueprints have
+ #: been registered.
+ self.cli = cli.AppGroup()
+
+ # Set the name of the Click group in case someone wants to add
+ # the app's commands to another CLI tool.
+ self.cli.name = self.name
+
+ # Add a static route using the provided static_url_path, static_host,
+ # and static_folder if there is a configured static_folder.
+        # Note we do this without checking if static_folder exists.
+        # For one, it might be created while the server is running (e.g. during
+        # development). Also, Google App Engine stores static files elsewhere.
+ if self.has_static_folder:
+ assert bool(static_host) == host_matching, (
+ "Invalid static_host/host_matching combination"
+ )
+ # Use a weakref to avoid creating a reference cycle between the app
+ # and the view function (see #3761).
+ self_ref = weakref.ref(self)
+ self.add_url_rule(
+ f"{self.static_url_path}/",
+ endpoint="static",
+ host=static_host,
+ view_func=lambda **kw: self_ref().send_static_file(**kw), # type: ignore # noqa: B950
+ )
+
+ def get_send_file_max_age(self, filename: str | None) -> int | None:
+ """Used by :func:`send_file` to determine the ``max_age`` cache
+ value for a given file path if it wasn't passed.
+
+ By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from
+ the configuration of :data:`~flask.current_app`. This defaults
+ to ``None``, which tells the browser to use conditional requests
+ instead of a timed cache, which is usually preferable.
+
+        Note this is a duplicate of the same method in the Blueprint
+        class.
+
+ .. versionchanged:: 2.0
+ The default configuration is ``None`` instead of 12 hours.
+
+ .. versionadded:: 0.9
+ """
+ value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"]
+
+ if value is None:
+ return None
+
+ if isinstance(value, timedelta):
+ return int(value.total_seconds())
+
+ return value # type: ignore[no-any-return]
+
+ def send_static_file(self, filename: str) -> Response:
+ """The view function used to serve files from
+ :attr:`static_folder`. A route is automatically registered for
+ this view at :attr:`static_url_path` if :attr:`static_folder` is
+ set.
+
+        Note this is a duplicate of the same method in the Blueprint
+        class.
+
+ .. versionadded:: 0.5
+
+ """
+ if not self.has_static_folder:
+            raise RuntimeError("'static_folder' must be set to serve static files.")
+
+ # send_file only knows to call get_send_file_max_age on the app,
+ # call it here so it works for blueprints too.
+ max_age = self.get_send_file_max_age(filename)
+ return send_from_directory(
+ t.cast(str, self.static_folder), filename, max_age=max_age
+ )
+
+ def open_resource(
+ self, resource: str, mode: str = "rb", encoding: str | None = None
+ ) -> t.IO[t.AnyStr]:
+ """Open a resource file relative to :attr:`root_path` for reading.
+
+ For example, if the file ``schema.sql`` is next to the file
+ ``app.py`` where the ``Flask`` app is defined, it can be opened
+ with:
+
+ .. code-block:: python
+
+ with app.open_resource("schema.sql") as f:
+ conn.executescript(f.read())
+
+ :param resource: Path to the resource relative to :attr:`root_path`.
+ :param mode: Open the file in this mode. Only reading is supported,
+ valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
+ :param encoding: Open the file with this encoding when opening in text
+ mode. This is ignored when opening in binary mode.
+
+ .. versionchanged:: 3.1
+ Added the ``encoding`` parameter.
+ """
+ if mode not in {"r", "rt", "rb"}:
+ raise ValueError("Resources can only be opened for reading.")
+
+ path = os.path.join(self.root_path, resource)
+
+ if mode == "rb":
+ return open(path, mode) # pyright: ignore
+
+ return open(path, mode, encoding=encoding)
+
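+    # Illustrative sketch (not part of the Flask source): reading a bundled
+    # resource in text mode. "data/defaults.json" is a hypothetical file
+    # shipped inside the application package.
+    #
+    #     with app.open_resource("data/defaults.json", "r", encoding="utf-8") as f:
+    #         defaults = f.read()
+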
+ def open_instance_resource(
+ self, resource: str, mode: str = "rb", encoding: str | None = "utf-8"
+ ) -> t.IO[t.AnyStr]:
+ """Open a resource file relative to the application's instance folder
+ :attr:`instance_path`. Unlike :meth:`open_resource`, files in the
+ instance folder can be opened for writing.
+
+ :param resource: Path to the resource relative to :attr:`instance_path`.
+ :param mode: Open the file in this mode.
+ :param encoding: Open the file with this encoding when opening in text
+ mode. This is ignored when opening in binary mode.
+
+ .. versionchanged:: 3.1
+ Added the ``encoding`` parameter.
+ """
+ path = os.path.join(self.instance_path, resource)
+
+ if "b" in mode:
+ return open(path, mode)
+
+ return open(path, mode, encoding=encoding)
+
+ def create_jinja_environment(self) -> Environment:
+ """Create the Jinja environment based on :attr:`jinja_options`
+ and the various Jinja-related methods of the app. Changing
+ :attr:`jinja_options` after this will have no effect. Also adds
+ Flask-related globals and filters to the environment.
+
+ .. versionchanged:: 0.11
+ ``Environment.auto_reload`` set in accordance with
+ ``TEMPLATES_AUTO_RELOAD`` configuration option.
+
+ .. versionadded:: 0.5
+ """
+ options = dict(self.jinja_options)
+
+ if "autoescape" not in options:
+ options["autoescape"] = self.select_jinja_autoescape
+
+ if "auto_reload" not in options:
+ auto_reload = self.config["TEMPLATES_AUTO_RELOAD"]
+
+ if auto_reload is None:
+ auto_reload = self.debug
+
+ options["auto_reload"] = auto_reload
+
+ rv = self.jinja_environment(self, **options)
+ rv.globals.update(
+ url_for=self.url_for,
+ get_flashed_messages=get_flashed_messages,
+ config=self.config,
+ # request, session and g are normally added with the
+ # context processor for efficiency reasons but for imported
+ # templates we also want the proxies in there.
+ request=request,
+ session=session,
+ g=g,
+ )
+ rv.policies["json.dumps_function"] = self.json.dumps
+ return rv
+
+ def create_url_adapter(self, request: Request | None) -> MapAdapter | None:
+ """Creates a URL adapter for the given request. The URL adapter
+ is created at a point where the request context is not yet set
+ up so the request is passed explicitly.
+
+ .. versionchanged:: 3.1
+ If :data:`SERVER_NAME` is set, it does not restrict requests to
+ only that domain, for both ``subdomain_matching`` and
+ ``host_matching``.
+
+ .. versionchanged:: 1.0
+ :data:`SERVER_NAME` no longer implicitly enables subdomain
+ matching. Use :attr:`subdomain_matching` instead.
+
+ .. versionchanged:: 0.9
+ This can be called outside a request when the URL adapter is created
+ for an application context.
+
+ .. versionadded:: 0.6
+ """
+ if request is not None:
+ if (trusted_hosts := self.config["TRUSTED_HOSTS"]) is not None:
+ request.trusted_hosts = trusted_hosts
+
+ # Check trusted_hosts here until bind_to_environ does.
+ request.host = get_host(request.environ, request.trusted_hosts) # pyright: ignore
+ subdomain = None
+ server_name = self.config["SERVER_NAME"]
+
+ if self.url_map.host_matching:
+ # Don't pass SERVER_NAME, otherwise it's used and the actual
+ # host is ignored, which breaks host matching.
+ server_name = None
+ elif not self.subdomain_matching:
+ # Werkzeug doesn't implement subdomain matching yet. Until then,
+ # disable it by forcing the current subdomain to the default, or
+ # the empty string.
+ subdomain = self.url_map.default_subdomain or ""
+
+ return self.url_map.bind_to_environ(
+ request.environ, server_name=server_name, subdomain=subdomain
+ )
+
+ # Need at least SERVER_NAME to match/build outside a request.
+ if self.config["SERVER_NAME"] is not None:
+ return self.url_map.bind(
+ self.config["SERVER_NAME"],
+ script_name=self.config["APPLICATION_ROOT"],
+ url_scheme=self.config["PREFERRED_URL_SCHEME"],
+ )
+
+ return None
+
+ def raise_routing_exception(self, request: Request) -> t.NoReturn:
+ """Intercept routing exceptions and possibly do something else.
+
+ In debug mode, intercept a routing redirect and replace it with
+ an error if the body will be discarded.
+
+ With modern Werkzeug this shouldn't occur, since it now uses a
+ 308 status which tells the browser to resend the method and
+ body.
+
+ .. versionchanged:: 2.1
+ Don't intercept 307 and 308 redirects.
+
+ :meta private:
+ :internal:
+ """
+ if (
+ not self.debug
+ or not isinstance(request.routing_exception, RequestRedirect)
+ or request.routing_exception.code in {307, 308}
+ or request.method in {"GET", "HEAD", "OPTIONS"}
+ ):
+ raise request.routing_exception # type: ignore[misc]
+
+ from .debughelpers import FormDataRoutingRedirect
+
+ raise FormDataRoutingRedirect(request)
+
+ def update_template_context(self, context: dict[str, t.Any]) -> None:
+ """Update the template context with some commonly used variables.
+ This injects request, session, config and g into the template
+ context as well as everything template context processors want
+        to inject. Note that as of Flask 0.6, the original values
+ in the context will not be overridden if a context processor
+ decides to return a value with the same key.
+
+ :param context: the context as a dictionary that is updated in place
+ to add extra variables.
+ """
+ names: t.Iterable[str | None] = (None,)
+
+ # A template may be rendered outside a request context.
+ if request:
+ names = chain(names, reversed(request.blueprints))
+
+ # The values passed to render_template take precedence. Keep a
+ # copy to re-apply after all context functions.
+ orig_ctx = context.copy()
+
+ for name in names:
+ if name in self.template_context_processors:
+ for func in self.template_context_processors[name]:
+ context.update(self.ensure_sync(func)())
+
+ context.update(orig_ctx)
+
+ def make_shell_context(self) -> dict[str, t.Any]:
+ """Returns the shell context for an interactive shell for this
+ application. This runs all the registered shell context
+ processors.
+
+ .. versionadded:: 0.11
+ """
+ rv = {"app": self, "g": g}
+ for processor in self.shell_context_processors:
+ rv.update(processor())
+ return rv
+
+ def run(
+ self,
+ host: str | None = None,
+ port: int | None = None,
+ debug: bool | None = None,
+ load_dotenv: bool = True,
+ **options: t.Any,
+ ) -> None:
+ """Runs the application on a local development server.
+
+ Do not use ``run()`` in a production setting. It is not intended to
+ meet security and performance requirements for a production server.
+ Instead, see :doc:`/deploying/index` for WSGI server recommendations.
+
+ If the :attr:`debug` flag is set the server will automatically reload
+ for code changes and show a debugger in case an exception happened.
+
+ If you want to run the application in debug mode, but disable the
+ code execution on the interactive debugger, you can pass
+ ``use_evalex=False`` as parameter. This will keep the debugger's
+ traceback screen active, but disable code execution.
+
+        It is not recommended to use this function for development with
+        automatic reloading, as that is poorly supported. Instead, use the
+        :command:`flask` command line script's ``run`` support.
+
+ .. admonition:: Keep in Mind
+
+ Flask will suppress any server error with a generic error page
+        unless it is in debug mode. As such, to enable just the
+ interactive debugger without the code reloading, you have to
+ invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``.
+ Setting ``use_debugger`` to ``True`` without being in debug mode
+ won't catch any exceptions because there won't be any to
+ catch.
+
+ :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to
+ have the server available externally as well. Defaults to
+ ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable
+ if present.
+ :param port: the port of the webserver. Defaults to ``5000`` or the
+ port defined in the ``SERVER_NAME`` config variable if present.
+ :param debug: if given, enable or disable debug mode. See
+ :attr:`debug`.
+ :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
+ files to set environment variables. Will also change the working
+ directory to the directory containing the first file found.
+ :param options: the options to be forwarded to the underlying Werkzeug
+ server. See :func:`werkzeug.serving.run_simple` for more
+ information.
+
+ .. versionchanged:: 1.0
+ If installed, python-dotenv will be used to load environment
+ variables from :file:`.env` and :file:`.flaskenv` files.
+
+ The :envvar:`FLASK_DEBUG` environment variable will override :attr:`debug`.
+
+ Threaded mode is enabled by default.
+
+ .. versionchanged:: 0.10
+ The default port is now picked from the ``SERVER_NAME``
+ variable.
+ """
+ # Ignore this call so that it doesn't start another server if
+ # the 'flask run' command is used.
+ if os.environ.get("FLASK_RUN_FROM_CLI") == "true":
+ if not is_running_from_reloader():
+ click.secho(
+ " * Ignoring a call to 'app.run()' that would block"
+ " the current 'flask' CLI command.\n"
+ " Only call 'app.run()' in an 'if __name__ =="
+ ' "__main__"\' guard.',
+ fg="red",
+ )
+
+ return
+
+ if get_load_dotenv(load_dotenv):
+ cli.load_dotenv()
+
+ # if set, env var overrides existing value
+ if "FLASK_DEBUG" in os.environ:
+ self.debug = get_debug_flag()
+
+ # debug passed to method overrides all other sources
+ if debug is not None:
+ self.debug = bool(debug)
+
+ server_name = self.config.get("SERVER_NAME")
+ sn_host = sn_port = None
+
+ if server_name:
+ sn_host, _, sn_port = server_name.partition(":")
+
+ if not host:
+ if sn_host:
+ host = sn_host
+ else:
+ host = "127.0.0.1"
+
+ if port or port == 0:
+ port = int(port)
+ elif sn_port:
+ port = int(sn_port)
+ else:
+ port = 5000
+
+ options.setdefault("use_reloader", self.debug)
+ options.setdefault("use_debugger", self.debug)
+ options.setdefault("threaded", True)
+
+ cli.show_server_banner(self.debug, self.name)
+
+ from werkzeug.serving import run_simple
+
+ try:
+ run_simple(t.cast(str, host), port, self, **options)
+ finally:
+ # reset the first request information if the development server
+ # reset normally. This makes it possible to restart the server
+ # without reloader and that stuff from an interactive shell.
+ self._got_first_request = False
+
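+    # Illustrative sketch (not part of the Flask source): how host and port
+    # fall back to SERVER_NAME in run(). "example.test" is hypothetical.
+    #
+    #     app.config["SERVER_NAME"] = "example.test:8080"
+    #     app.run()              # serves on example.test:8080
+    #     app.run(port=3000)     # explicit port wins: example.test:3000
+    #     app.run("0.0.0.0")     # explicit host wins: 0.0.0.0:8080
+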
+ def test_client(self, use_cookies: bool = True, **kwargs: t.Any) -> FlaskClient:
+ """Creates a test client for this application. For information
+ about unit testing head over to :doc:`/testing`.
+
+ Note that if you are testing for assertions or exceptions in your
+ application code, you must set ``app.testing = True`` in order for the
+ exceptions to propagate to the test client. Otherwise, the exception
+ will be handled by the application (not visible to the test client) and
+ the only indication of an AssertionError or other exception will be a
+ 500 status code response to the test client. See the :attr:`testing`
+ attribute. For example::
+
+ app.testing = True
+ client = app.test_client()
+
+ The test client can be used in a ``with`` block to defer the closing down
+ of the context until the end of the ``with`` block. This is useful if
+ you want to access the context locals for testing::
+
+ with app.test_client() as c:
+ rv = c.get('/?vodka=42')
+ assert request.args['vodka'] == '42'
+
+ Additionally, you may pass optional keyword arguments that will then
+ be passed to the application's :attr:`test_client_class` constructor.
+ For example::
+
+ from flask.testing import FlaskClient
+
+ class CustomClient(FlaskClient):
+ def __init__(self, *args, **kwargs):
+ self._authentication = kwargs.pop("authentication")
+                    super().__init__(*args, **kwargs)
+
+ app.test_client_class = CustomClient
+ client = app.test_client(authentication='Basic ....')
+
+ See :class:`~flask.testing.FlaskClient` for more information.
+
+ .. versionchanged:: 0.4
+ added support for ``with`` block usage for the client.
+
+ .. versionadded:: 0.7
+ The `use_cookies` parameter was added as well as the ability
+ to override the client to be used by setting the
+ :attr:`test_client_class` attribute.
+
+ .. versionchanged:: 0.11
+ Added `**kwargs` to support passing additional keyword arguments to
+ the constructor of :attr:`test_client_class`.
+ """
+ cls = self.test_client_class
+ if cls is None:
+ from .testing import FlaskClient as cls
+
+        return cls(  # type: ignore
+ self, self.response_class, use_cookies=use_cookies, **kwargs
+ )
+
+ def test_cli_runner(self, **kwargs: t.Any) -> FlaskCliRunner:
+ """Create a CLI runner for testing CLI commands.
+ See :ref:`testing-cli`.
+
+ Returns an instance of :attr:`test_cli_runner_class`, by default
+ :class:`~flask.testing.FlaskCliRunner`. The Flask app object is
+ passed as the first argument.
+
+ .. versionadded:: 1.0
+ """
+ cls = self.test_cli_runner_class
+
+ if cls is None:
+ from .testing import FlaskCliRunner as cls
+
+ return cls(self, **kwargs) # type: ignore
+
+ def handle_http_exception(
+ self, e: HTTPException
+ ) -> HTTPException | ft.ResponseReturnValue:
+ """Handles an HTTP exception. By default this will invoke the
+ registered error handlers and fall back to returning the
+ exception as response.
+
+ .. versionchanged:: 1.0.3
+ ``RoutingException``, used internally for actions such as
+ slash redirects during routing, is not passed to error
+ handlers.
+
+ .. versionchanged:: 1.0
+ Exceptions are looked up by code *and* by MRO, so
+ ``HTTPException`` subclasses can be handled with a catch-all
+ handler for the base ``HTTPException``.
+
+ .. versionadded:: 0.3
+ """
+ # Proxy exceptions don't have error codes. We want to always return
+ # those unchanged as errors
+ if e.code is None:
+ return e
+
+ # RoutingExceptions are used internally to trigger routing
+ # actions, such as slash redirects raising RequestRedirect. They
+ # are not raised or handled in user code.
+ if isinstance(e, RoutingException):
+ return e
+
+ handler = self._find_error_handler(e, request.blueprints)
+ if handler is None:
+ return e
+ return self.ensure_sync(handler)(e) # type: ignore[no-any-return]
+
+ def handle_user_exception(
+ self, e: Exception
+ ) -> HTTPException | ft.ResponseReturnValue:
+ """This method is called whenever an exception occurs that
+        should be handled. A special case is
+        :class:`~werkzeug.exceptions.HTTPException` which is forwarded to the
+ :meth:`handle_http_exception` method. This function will either
+ return a response value or reraise the exception with the same
+ traceback.
+
+ .. versionchanged:: 1.0
+ Key errors raised from request data like ``form`` show the
+ bad key in debug mode rather than a generic bad request
+ message.
+
+ .. versionadded:: 0.7
+ """
+ if isinstance(e, BadRequestKeyError) and (
+ self.debug or self.config["TRAP_BAD_REQUEST_ERRORS"]
+ ):
+ e.show_exception = True
+
+ if isinstance(e, HTTPException) and not self.trap_http_exception(e):
+ return self.handle_http_exception(e)
+
+ handler = self._find_error_handler(e, request.blueprints)
+
+ if handler is None:
+ raise
+
+ return self.ensure_sync(handler)(e) # type: ignore[no-any-return]
+
+ def handle_exception(self, e: Exception) -> Response:
+ """Handle an exception that did not have an error handler
+ associated with it, or that was raised from an error handler.
+ This always causes a 500 ``InternalServerError``.
+
+ Always sends the :data:`got_request_exception` signal.
+
+ If :data:`PROPAGATE_EXCEPTIONS` is ``True``, such as in debug
+ mode, the error will be re-raised so that the debugger can
+ display it. Otherwise, the original exception is logged, and
+ an :exc:`~werkzeug.exceptions.InternalServerError` is returned.
+
+ If an error handler is registered for ``InternalServerError`` or
+ ``500``, it will be used. For consistency, the handler will
+ always receive the ``InternalServerError``. The original
+ unhandled exception is available as ``e.original_exception``.
+
+ .. versionchanged:: 1.1.0
+ Always passes the ``InternalServerError`` instance to the
+ handler, setting ``original_exception`` to the unhandled
+ error.
+
+ .. versionchanged:: 1.1.0
+ ``after_request`` functions and other finalization is done
+ even for the default 500 response when there is no handler.
+
+ .. versionadded:: 0.3
+ """
+ exc_info = sys.exc_info()
+ got_request_exception.send(self, _async_wrapper=self.ensure_sync, exception=e)
+ propagate = self.config["PROPAGATE_EXCEPTIONS"]
+
+ if propagate is None:
+ propagate = self.testing or self.debug
+
+ if propagate:
+ # Re-raise if called with an active exception, otherwise
+ # raise the passed in exception.
+ if exc_info[1] is e:
+ raise
+
+ raise e
+
+ self.log_exception(exc_info)
+ server_error: InternalServerError | ft.ResponseReturnValue
+ server_error = InternalServerError(original_exception=e)
+ handler = self._find_error_handler(server_error, request.blueprints)
+
+ if handler is not None:
+ server_error = self.ensure_sync(handler)(server_error)
+
+ return self.finalize_request(server_error, from_error_handler=True)
+
+ def log_exception(
+ self,
+ exc_info: (tuple[type, BaseException, TracebackType] | tuple[None, None, None]),
+ ) -> None:
+ """Logs an exception. This is called by :meth:`handle_exception`
+ if debugging is disabled and right before the handler is called.
+ The default implementation logs the exception as error on the
+ :attr:`logger`.
+
+ .. versionadded:: 0.8
+ """
+ self.logger.error(
+ f"Exception on {request.path} [{request.method}]", exc_info=exc_info
+ )
+
+ def dispatch_request(self) -> ft.ResponseReturnValue:
+ """Does the request dispatching. Matches the URL and returns the
+ return value of the view or error handler. This does not have to
+ be a response object. In order to convert the return value to a
+ proper response object, call :func:`make_response`.
+
+ .. versionchanged:: 0.7
+ This no longer does the exception handling, this code was
+ moved to the new :meth:`full_dispatch_request`.
+ """
+ req = request_ctx.request
+ if req.routing_exception is not None:
+ self.raise_routing_exception(req)
+ rule: Rule = req.url_rule # type: ignore[assignment]
+ # if we provide automatic options for this URL and the
+ # request came with the OPTIONS method, reply automatically
+ if (
+ getattr(rule, "provide_automatic_options", False)
+ and req.method == "OPTIONS"
+ ):
+ return self.make_default_options_response()
+ # otherwise dispatch to the handler for that endpoint
+ view_args: dict[str, t.Any] = req.view_args # type: ignore[assignment]
+ return self.ensure_sync(self.view_functions[rule.endpoint])(**view_args) # type: ignore[no-any-return]
+
+ def full_dispatch_request(self) -> Response:
+ """Dispatches the request and on top of that performs request
+ pre and postprocessing as well as HTTP exception catching and
+ error handling.
+
+ .. versionadded:: 0.7
+ """
+ self._got_first_request = True
+
+ try:
+ request_started.send(self, _async_wrapper=self.ensure_sync)
+ rv = self.preprocess_request()
+ if rv is None:
+ rv = self.dispatch_request()
+ except Exception as e:
+ rv = self.handle_user_exception(e)
+ return self.finalize_request(rv)
+
+ def finalize_request(
+ self,
+ rv: ft.ResponseReturnValue | HTTPException,
+ from_error_handler: bool = False,
+ ) -> Response:
+ """Given the return value from a view function this finalizes
+ the request by converting it into a response and invoking the
+ postprocessing functions. This is invoked for both normal
+ request dispatching as well as error handlers.
+
+ Because this means that it might be called as a result of a
+ failure a special safe mode is available which can be enabled
+ with the `from_error_handler` flag. If enabled, failures in
+ response processing will be logged and otherwise ignored.
+
+ :internal:
+ """
+ response = self.make_response(rv)
+ try:
+ response = self.process_response(response)
+ request_finished.send(
+ self, _async_wrapper=self.ensure_sync, response=response
+ )
+ except Exception:
+ if not from_error_handler:
+ raise
+ self.logger.exception(
+ "Request finalizing failed with an error while handling an error"
+ )
+ return response
+
+ def make_default_options_response(self) -> Response:
+ """This method is called to create the default ``OPTIONS`` response.
+ This can be changed through subclassing to change the default
+ behavior of ``OPTIONS`` responses.
+
+ .. versionadded:: 0.7
+ """
+ adapter = request_ctx.url_adapter
+ methods = adapter.allowed_methods() # type: ignore[union-attr]
+ rv = self.response_class()
+ rv.allow.update(methods)
+ return rv
+
+ def ensure_sync(self, func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]:
+ """Ensure that the function is synchronous for WSGI workers.
+ Plain ``def`` functions are returned as-is. ``async def``
+ functions are wrapped to run and wait for the response.
+
+ Override this method to change how the app runs async views.
+
+ .. versionadded:: 2.0
+ """
+ if iscoroutinefunction(func):
+ return self.async_to_sync(func)
+
+ return func
+
+ def async_to_sync(
+ self, func: t.Callable[..., t.Coroutine[t.Any, t.Any, t.Any]]
+ ) -> t.Callable[..., t.Any]:
+ """Return a sync function that will run the coroutine function.
+
+ .. code-block:: python
+
+ result = app.async_to_sync(func)(*args, **kwargs)
+
+ Override this method to change how the app converts async code
+ to be synchronously callable.
+
+ .. versionadded:: 2.0
+ """
+ try:
+ from asgiref.sync import async_to_sync as asgiref_async_to_sync
+ except ImportError:
+ raise RuntimeError(
+ "Install Flask with the 'async' extra in order to use async views."
+ ) from None
+
+ return asgiref_async_to_sync(func)
+
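+    # Illustrative sketch (not part of the Flask source): ensure_sync lets a
+    # coroutine view run under WSGI workers; it requires the 'async' extra
+    # (asgiref). The route below is hypothetical.
+    #
+    #     @app.route("/slow")
+    #     async def slow():
+    #         await asyncio.sleep(1)   # assumes 'import asyncio'
+    #         return "done"
+    #
+    # Flask wraps the view roughly as app.ensure_sync(slow) before calling it.
+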
+ def url_for(
+ self,
+ /,
+ endpoint: str,
+ *,
+ _anchor: str | None = None,
+ _method: str | None = None,
+ _scheme: str | None = None,
+ _external: bool | None = None,
+ **values: t.Any,
+ ) -> str:
+ """Generate a URL to the given endpoint with the given values.
+
+ This is called by :func:`flask.url_for`, and can be called
+ directly as well.
+
+ An *endpoint* is the name of a URL rule, usually added with
+        :meth:`@app.route() <Flask.route>`, and usually the same name as the
+ view function. A route defined in a :class:`~flask.Blueprint`
+ will prepend the blueprint's name separated by a ``.`` to the
+ endpoint.
+
+ In some cases, such as email messages, you want URLs to include
+ the scheme and domain, like ``https://example.com/hello``. When
+ not in an active request, URLs will be external by default, but
+ this requires setting :data:`SERVER_NAME` so Flask knows what
+ domain to use. :data:`APPLICATION_ROOT` and
+ :data:`PREFERRED_URL_SCHEME` should also be configured as
+ needed. This config is only used when not in an active request.
+
+ Functions can be decorated with :meth:`url_defaults` to modify
+ keyword arguments before the URL is built.
+
+ If building fails for some reason, such as an unknown endpoint
+ or incorrect values, the app's :meth:`handle_url_build_error`
+ method is called. If that returns a string, that is returned,
+ otherwise a :exc:`~werkzeug.routing.BuildError` is raised.
+
+ :param endpoint: The endpoint name associated with the URL to
+ generate. If this starts with a ``.``, the current blueprint
+ name (if any) will be used.
+ :param _anchor: If given, append this as ``#anchor`` to the URL.
+ :param _method: If given, generate the URL associated with this
+ method for the endpoint.
+ :param _scheme: If given, the URL will have this scheme if it
+ is external.
+ :param _external: If given, prefer the URL to be internal
+ (False) or require it to be external (True). External URLs
+ include the scheme and domain. When not in an active
+ request, URLs are external by default.
+ :param values: Values to use for the variable parts of the URL
+ rule. Unknown keys are appended as query string arguments,
+ like ``?a=b&c=d``.
+
+ .. versionadded:: 2.2
+ Moved from ``flask.url_for``, which calls this method.
+ """
+ req_ctx = _cv_request.get(None)
+
+ if req_ctx is not None:
+ url_adapter = req_ctx.url_adapter
+ blueprint_name = req_ctx.request.blueprint
+
+ # If the endpoint starts with "." and the request matches a
+ # blueprint, the endpoint is relative to the blueprint.
+ if endpoint[:1] == ".":
+ if blueprint_name is not None:
+ endpoint = f"{blueprint_name}{endpoint}"
+ else:
+ endpoint = endpoint[1:]
+
+ # When in a request, generate a URL without scheme and
+ # domain by default, unless a scheme is given.
+ if _external is None:
+ _external = _scheme is not None
+ else:
+ app_ctx = _cv_app.get(None)
+
+ # If called by helpers.url_for, an app context is active,
+ # use its url_adapter. Otherwise, app.url_for was called
+ # directly, build an adapter.
+ if app_ctx is not None:
+ url_adapter = app_ctx.url_adapter
+ else:
+ url_adapter = self.create_url_adapter(None)
+
+ if url_adapter is None:
+ raise RuntimeError(
+ "Unable to build URLs outside an active request"
+ " without 'SERVER_NAME' configured. Also configure"
+ " 'APPLICATION_ROOT' and 'PREFERRED_URL_SCHEME' as"
+ " needed."
+ )
+
+ # When outside a request, generate a URL with scheme and
+ # domain by default.
+ if _external is None:
+ _external = True
+
+ # It is an error to set _scheme when _external=False, in order
+ # to avoid accidental insecure URLs.
+ if _scheme is not None and not _external:
+ raise ValueError("When specifying '_scheme', '_external' must be True.")
+
+ self.inject_url_defaults(endpoint, values)
+
+ try:
+ rv = url_adapter.build( # type: ignore[union-attr]
+ endpoint,
+ values,
+ method=_method,
+ url_scheme=_scheme,
+ force_external=_external,
+ )
+ except BuildError as error:
+ values.update(
+ _anchor=_anchor, _method=_method, _scheme=_scheme, _external=_external
+ )
+ return self.handle_url_build_error(error, endpoint, values)
+
+ if _anchor is not None:
+ _anchor = _url_quote(_anchor, safe="%!#$&'()*+,/:;=?@")
+ rv = f"{rv}#{_anchor}"
+
+ return rv
+
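+    # Illustrative sketch (not part of the Flask source): endpoint resolution
+    # in url_for, assuming a hypothetical blueprint "admin" with an "index"
+    # view and an app-level "index" view.
+    #
+    #     app.url_for("admin.index")            # blueprint endpoint
+    #     app.url_for(".index")                 # relative to current blueprint
+    #     app.url_for("index", _external=True)  # include scheme and domain
+    #     app.url_for("index", page=2)          # unknown keys -> ?page=2
+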
+ def make_response(self, rv: ft.ResponseReturnValue) -> Response:
+ """Convert the return value from a view function to an instance of
+ :attr:`response_class`.
+
+ :param rv: the return value from the view function. The view function
+ must return a response. Returning ``None``, or the view ending
+ without returning, is not allowed. The following types are allowed
+ for ``view_rv``:
+
+ ``str``
+ A response object is created with the string encoded to UTF-8
+ as the body.
+
+ ``bytes``
+ A response object is created with the bytes as the body.
+
+ ``dict``
+ A dictionary that will be jsonify'd before being returned.
+
+ ``list``
+ A list that will be jsonify'd before being returned.
+
+ ``generator`` or ``iterator``
+ A generator that returns ``str`` or ``bytes`` to be
+ streamed as the response.
+
+ ``tuple``
+ Either ``(body, status, headers)``, ``(body, status)``, or
+ ``(body, headers)``, where ``body`` is any of the other types
+ allowed here, ``status`` is a string or an integer, and
+ ``headers`` is a dictionary or a list of ``(key, value)``
+ tuples. If ``body`` is a :attr:`response_class` instance,
+                ``status`` overwrites the existing value and ``headers`` are
+ extended.
+
+ :attr:`response_class`
+ The object is returned unchanged.
+
+ other :class:`~werkzeug.wrappers.Response` class
+ The object is coerced to :attr:`response_class`.
+
+ :func:`callable`
+ The function is called as a WSGI application. The result is
+ used to create a response object.
+
+ .. versionchanged:: 2.2
+ A generator will be converted to a streaming response.
+ A list will be converted to a JSON response.
+
+ .. versionchanged:: 1.1
+ A dict will be converted to a JSON response.
+
+ .. versionchanged:: 0.9
+ Previously a tuple was interpreted as the arguments for the
+ response object.
+ """
+
+ status: int | None = None
+ headers: HeadersValue | None = None
+
+ # unpack tuple returns
+ if isinstance(rv, tuple):
+ len_rv = len(rv)
+
+ # a 3-tuple is unpacked directly
+ if len_rv == 3:
+ rv, status, headers = rv # type: ignore[misc]
+ # decide if a 2-tuple has status or headers
+ elif len_rv == 2:
+ if isinstance(rv[1], (Headers, dict, tuple, list)):
+ rv, headers = rv # pyright: ignore
+ else:
+ rv, status = rv # type: ignore[assignment,misc]
+ # other sized tuples are not allowed
+ else:
+ raise TypeError(
+ "The view function did not return a valid response tuple."
+ " The tuple must have the form (body, status, headers),"
+ " (body, status), or (body, headers)."
+ )
+
+ # the body must not be None
+ if rv is None:
+ raise TypeError(
+ f"The view function for {request.endpoint!r} did not"
+ " return a valid response. The function either returned"
+ " None or ended without a return statement."
+ )
+
+ # make sure the body is an instance of the response class
+ if not isinstance(rv, self.response_class):
+ if isinstance(rv, (str, bytes, bytearray)) or isinstance(rv, cabc.Iterator):
+ # let the response class set the status and headers instead of
+ # waiting to do it manually, so that the class can handle any
+ # special logic
+ rv = self.response_class(
+ rv, # pyright: ignore
+ status=status,
+ headers=headers, # type: ignore[arg-type]
+ )
+ status = headers = None
+ elif isinstance(rv, (dict, list)):
+ rv = self.json.response(rv)
+ elif isinstance(rv, BaseResponse) or callable(rv):
+ # evaluate a WSGI callable, or coerce a different response
+ # class to the correct type
+ try:
+ rv = self.response_class.force_type(
+ rv, # type: ignore[arg-type]
+ request.environ,
+ )
+ except TypeError as e:
+ raise TypeError(
+ f"{e}\nThe view function did not return a valid"
+ " response. The return type must be a string,"
+ " dict, list, tuple with headers or status,"
+ " Response instance, or WSGI callable, but it"
+ f" was a {type(rv).__name__}."
+ ).with_traceback(sys.exc_info()[2]) from None
+ else:
+ raise TypeError(
+ "The view function did not return a valid"
+ " response. The return type must be a string,"
+ " dict, list, tuple with headers or status,"
+ " Response instance, or WSGI callable, but it was a"
+ f" {type(rv).__name__}."
+ )
+
+ rv = t.cast(Response, rv)
+ # prefer the status if it was provided
+ if status is not None:
+ if isinstance(status, (str, bytes, bytearray)):
+ rv.status = status
+ else:
+ rv.status_code = status
+
+ # extend existing headers with provided headers
+ if headers:
+ rv.headers.update(headers)
+
+ return rv
+
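+    # Illustrative sketch (not part of the Flask source): return shapes a view
+    # may use, all normalized by make_response. Bodies are hypothetical.
+    #
+    #     return "hello"                         # str body, status 200
+    #     return {"ok": True}                    # dict -> JSON response
+    #     return "created", 201                  # (body, status)
+    #     return "hi", {"X-Extra": "1"}          # (body, headers)
+    #     return "hi", 202, {"X-Extra": "1"}     # (body, status, headers)
+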
+ def preprocess_request(self) -> ft.ResponseReturnValue | None:
+ """Called before the request is dispatched. Calls
+ :attr:`url_value_preprocessors` registered with the app and the
+ current blueprint (if any). Then calls :attr:`before_request_funcs`
+ registered with the app and the blueprint.
+
+ If any :meth:`before_request` handler returns a non-None value, the
+ value is handled as if it was the return value from the view, and
+ further request handling is stopped.
+ """
+ names = (None, *reversed(request.blueprints))
+
+ for name in names:
+ if name in self.url_value_preprocessors:
+ for url_func in self.url_value_preprocessors[name]:
+ url_func(request.endpoint, request.view_args)
+
+ for name in names:
+ if name in self.before_request_funcs:
+ for before_func in self.before_request_funcs[name]:
+ rv = self.ensure_sync(before_func)()
+
+ if rv is not None:
+ return rv # type: ignore[no-any-return]
+
+ return None
+
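+    # Illustrative sketch (not part of the Flask source): a before_request
+    # function returning non-None short-circuits dispatch. The token check
+    # below is hypothetical.
+    #
+    #     @app.before_request
+    #     def require_token():
+    #         if "X-Token" not in request.headers:
+    #             return "missing token", 401   # the view is never called
+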
+ def process_response(self, response: Response) -> Response:
+ """Can be overridden in order to modify the response object
+ before it's sent to the WSGI server. By default this will
+ call all the :meth:`after_request` decorated functions.
+
+ .. versionchanged:: 0.5
+ As of Flask 0.5 the functions registered for after request
+ execution are called in reverse order of registration.
+
+ :param response: a :attr:`response_class` object.
+ :return: a new response object or the same, has to be an
+ instance of :attr:`response_class`.
+ """
+ ctx = request_ctx._get_current_object() # type: ignore[attr-defined]
+
+ for func in ctx._after_request_functions:
+ response = self.ensure_sync(func)(response)
+
+ for name in chain(request.blueprints, (None,)):
+ if name in self.after_request_funcs:
+ for func in reversed(self.after_request_funcs[name]):
+ response = self.ensure_sync(func)(response)
+
+ if not self.session_interface.is_null_session(ctx.session):
+ self.session_interface.save_session(self, ctx.session, response)
+
+ return response
+
+ def do_teardown_request(
+ self,
+ exc: BaseException | None = _sentinel, # type: ignore[assignment]
+ ) -> None:
+ """Called after the request is dispatched and the response is
+ returned, right before the request context is popped.
+
+ This calls all functions decorated with
+ :meth:`teardown_request`, and :meth:`Blueprint.teardown_request`
+ if a blueprint handled the request. Finally, the
+ :data:`request_tearing_down` signal is sent.
+
+ This is called by
+        :meth:`RequestContext.pop() <flask.ctx.RequestContext.pop>`,
+ which may be delayed during testing to maintain access to
+ resources.
+
+ :param exc: An unhandled exception raised while dispatching the
+ request. Detected from the current exception information if
+ not passed. Passed to each teardown function.
+
+ .. versionchanged:: 0.9
+ Added the ``exc`` argument.
+ """
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+
+ for name in chain(request.blueprints, (None,)):
+ if name in self.teardown_request_funcs:
+ for func in reversed(self.teardown_request_funcs[name]):
+ self.ensure_sync(func)(exc)
+
+ request_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc)
+
+ def do_teardown_appcontext(
+ self,
+ exc: BaseException | None = _sentinel, # type: ignore[assignment]
+ ) -> None:
+ """Called right before the application context is popped.
+
+ When handling a request, the application context is popped
+ after the request context. See :meth:`do_teardown_request`.
+
+ This calls all functions decorated with
+ :meth:`teardown_appcontext`. Then the
+ :data:`appcontext_tearing_down` signal is sent.
+
+ This is called by
+        :meth:`AppContext.pop() <flask.ctx.AppContext.pop>`.
+
+ .. versionadded:: 0.9
+ """
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+
+ for func in reversed(self.teardown_appcontext_funcs):
+ self.ensure_sync(func)(exc)
+
+ appcontext_tearing_down.send(self, _async_wrapper=self.ensure_sync, exc=exc)
+
+ def app_context(self) -> AppContext:
+ """Create an :class:`~flask.ctx.AppContext`. Use as a ``with``
+ block to push the context, which will make :data:`current_app`
+ point at this application.
+
+ An application context is automatically pushed by
+        :meth:`RequestContext.push() <flask.ctx.RequestContext.push>`
+ when handling a request, and when running a CLI command. Use
+ this to manually create a context outside of these situations.
+
+ ::
+
+ with app.app_context():
+ init_db()
+
+ See :doc:`/appcontext`.
+
+ .. versionadded:: 0.9
+ """
+ return AppContext(self)
+
+ def request_context(self, environ: WSGIEnvironment) -> RequestContext:
+ """Create a :class:`~flask.ctx.RequestContext` representing a
+ WSGI environment. Use a ``with`` block to push the context,
+ which will make :data:`request` point at this request.
+
+ See :doc:`/reqcontext`.
+
+ Typically you should not call this from your own code. A request
+ context is automatically pushed by the :meth:`wsgi_app` when
+ handling a request. Use :meth:`test_request_context` to create
+ an environment and context instead of this method.
+
+ :param environ: a WSGI environment
+ """
+ return RequestContext(self, environ)
+
+ def test_request_context(self, *args: t.Any, **kwargs: t.Any) -> RequestContext:
+ """Create a :class:`~flask.ctx.RequestContext` for a WSGI
+ environment created from the given values. This is mostly useful
+ during testing, where you may want to run a function that uses
+ request data without dispatching a full request.
+
+ See :doc:`/reqcontext`.
+
+ Use a ``with`` block to push the context, which will make
+ :data:`request` point at the request for the created
+ environment. ::
+
+ with app.test_request_context(...):
+ generate_report()
+
+ When using the shell, it may be easier to push and pop the
+ context manually to avoid indentation. ::
+
+ ctx = app.test_request_context(...)
+ ctx.push()
+ ...
+ ctx.pop()
+
+ Takes the same arguments as Werkzeug's
+ :class:`~werkzeug.test.EnvironBuilder`, with some defaults from
+ the application. See the linked Werkzeug docs for most of the
+ available arguments. Flask-specific behavior is listed here.
+
+ :param path: URL path being requested.
+ :param base_url: Base URL where the app is being served, which
+ ``path`` is relative to. If not given, built from
+ :data:`PREFERRED_URL_SCHEME`, ``subdomain``,
+ :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
+ :param subdomain: Subdomain name to append to
+ :data:`SERVER_NAME`.
+ :param url_scheme: Scheme to use instead of
+ :data:`PREFERRED_URL_SCHEME`.
+ :param data: The request body, either as a string or a dict of
+ form keys and values.
+ :param json: If given, this is serialized as JSON and passed as
+ ``data``. Also defaults ``content_type`` to
+ ``application/json``.
+ :param args: other positional arguments passed to
+ :class:`~werkzeug.test.EnvironBuilder`.
+ :param kwargs: other keyword arguments passed to
+ :class:`~werkzeug.test.EnvironBuilder`.
+ """
+ from .testing import EnvironBuilder
+
+ builder = EnvironBuilder(self, *args, **kwargs)
+
+ try:
+ return self.request_context(builder.get_environ())
+ finally:
+ builder.close()
+
+ def wsgi_app(
+ self, environ: WSGIEnvironment, start_response: StartResponse
+ ) -> cabc.Iterable[bytes]:
+ """The actual WSGI application. This is not implemented in
+ :meth:`__call__` so that middlewares can be applied without
+ losing a reference to the app object. Instead of doing this::
+
+ app = MyMiddleware(app)
+
+ It's a better idea to do this instead::
+
+ app.wsgi_app = MyMiddleware(app.wsgi_app)
+
+ Then you still have the original application object around and
+ can continue to call methods on it.
+
+ .. versionchanged:: 0.7
+ Teardown events for the request and app contexts are called
+ even if an unhandled error occurs. Other events may not be
+ called depending on when an error occurs during dispatch.
+ See :ref:`callbacks-and-errors`.
+
+ :param environ: A WSGI environment.
+ :param start_response: A callable accepting a status code,
+ a list of headers, and an optional exception context to
+ start the response.
+ """
+ ctx = self.request_context(environ)
+ error: BaseException | None = None
+ try:
+ try:
+ ctx.push()
+ response = self.full_dispatch_request()
+ except Exception as e:
+ error = e
+ response = self.handle_exception(e)
+ except: # noqa: B001
+ error = sys.exc_info()[1]
+ raise
+ return response(environ, start_response)
+ finally:
+ if "werkzeug.debug.preserve_context" in environ:
+ environ["werkzeug.debug.preserve_context"](_cv_app.get())
+ environ["werkzeug.debug.preserve_context"](_cv_request.get())
+
+ if error is not None and self.should_ignore_error(error):
+ error = None
+
+ ctx.pop(error)
+
+ def __call__(
+ self, environ: WSGIEnvironment, start_response: StartResponse
+ ) -> cabc.Iterable[bytes]:
+ """The WSGI server calls the Flask application object as the
+ WSGI application. This calls :meth:`wsgi_app`, which can be
+ wrapped to apply middleware.
+ """
+ return self.wsgi_app(environ, start_response)
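+
+# Illustrative sketch (not part of the Flask source): wrapping wsgi_app, as
+# the docstring above recommends, keeps the app object intact while applying
+# WSGI middleware such as werkzeug's ProxyFix.
+#
+#     from werkzeug.middleware.proxy_fix import ProxyFix
+#     app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1)
+#     app.run()   # app itself was not replaced, so its methods remain usable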
diff --git a/tapdown/lib/python3.11/site-packages/flask/blueprints.py b/tapdown/lib/python3.11/site-packages/flask/blueprints.py
new file mode 100644
index 0000000..b6d4e43
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/blueprints.py
@@ -0,0 +1,128 @@
+from __future__ import annotations
+
+import os
+import typing as t
+from datetime import timedelta
+
+from .cli import AppGroup
+from .globals import current_app
+from .helpers import send_from_directory
+from .sansio.blueprints import Blueprint as SansioBlueprint
+from .sansio.blueprints import BlueprintSetupState as BlueprintSetupState # noqa
+from .sansio.scaffold import _sentinel
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from .wrappers import Response
+
+
+class Blueprint(SansioBlueprint):
+ def __init__(
+ self,
+ name: str,
+ import_name: str,
+ static_folder: str | os.PathLike[str] | None = None,
+ static_url_path: str | None = None,
+ template_folder: str | os.PathLike[str] | None = None,
+ url_prefix: str | None = None,
+ subdomain: str | None = None,
+ url_defaults: dict[str, t.Any] | None = None,
+ root_path: str | None = None,
+ cli_group: str | None = _sentinel, # type: ignore
+ ) -> None:
+ super().__init__(
+ name,
+ import_name,
+ static_folder,
+ static_url_path,
+ template_folder,
+ url_prefix,
+ subdomain,
+ url_defaults,
+ root_path,
+ cli_group,
+ )
+
+ #: The Click command group for registering CLI commands for this
+ #: object. The commands are available from the ``flask`` command
+ #: once the application has been discovered and blueprints have
+ #: been registered.
+ self.cli = AppGroup()
+
+ # Set the name of the Click group in case someone wants to add
+ # the app's commands to another CLI tool.
+ self.cli.name = self.name
+
+ def get_send_file_max_age(self, filename: str | None) -> int | None:
+ """Used by :func:`send_file` to determine the ``max_age`` cache
+ value for a given file path if it wasn't passed.
+
+ By default, this returns :data:`SEND_FILE_MAX_AGE_DEFAULT` from
+ the configuration of :data:`~flask.current_app`. This defaults
+ to ``None``, which tells the browser to use conditional requests
+ instead of a timed cache, which is usually preferable.
+
+ Note this is a duplicate of the same method in the Flask
+ class.
+
+ .. versionchanged:: 2.0
+ The default configuration is ``None`` instead of 12 hours.
+
+ .. versionadded:: 0.9
+ """
+ value = current_app.config["SEND_FILE_MAX_AGE_DEFAULT"]
+
+ if value is None:
+ return None
+
+ if isinstance(value, timedelta):
+ return int(value.total_seconds())
+
+ return value # type: ignore[no-any-return]
+
+ def send_static_file(self, filename: str) -> Response:
+ """The view function used to serve files from
+ :attr:`static_folder`. A route is automatically registered for
+ this view at :attr:`static_url_path` if :attr:`static_folder` is
+ set.
+
+ Note this is a duplicate of the same method in the Flask
+ class.
+
+ .. versionadded:: 0.5
+
+ """
+ if not self.has_static_folder:
+            raise RuntimeError("'static_folder' must be set to serve static files.")
+
+ # send_file only knows to call get_send_file_max_age on the app,
+ # call it here so it works for blueprints too.
+ max_age = self.get_send_file_max_age(filename)
+ return send_from_directory(
+ t.cast(str, self.static_folder), filename, max_age=max_age
+ )
+
+ def open_resource(
+ self, resource: str, mode: str = "rb", encoding: str | None = "utf-8"
+ ) -> t.IO[t.AnyStr]:
+ """Open a resource file relative to :attr:`root_path` for reading. The
+ blueprint-relative equivalent of the app's :meth:`~.Flask.open_resource`
+ method.
+
+ :param resource: Path to the resource relative to :attr:`root_path`.
+ :param mode: Open the file in this mode. Only reading is supported,
+ valid values are ``"r"`` (or ``"rt"``) and ``"rb"``.
+ :param encoding: Open the file with this encoding when opening in text
+ mode. This is ignored when opening in binary mode.
+
+ .. versionchanged:: 3.1
+ Added the ``encoding`` parameter.
+ """
+ if mode not in {"r", "rt", "rb"}:
+ raise ValueError("Resources can only be opened for reading.")
+
+ path = os.path.join(self.root_path, resource)
+
+ if mode == "rb":
+ return open(path, mode) # pyright: ignore
+
+ return open(path, mode, encoding=encoding)
diff --git a/tapdown/lib/python3.11/site-packages/flask/cli.py b/tapdown/lib/python3.11/site-packages/flask/cli.py
new file mode 100644
index 0000000..ed11f25
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/cli.py
@@ -0,0 +1,1135 @@
+from __future__ import annotations
+
+import ast
+import collections.abc as cabc
+import importlib.metadata
+import inspect
+import os
+import platform
+import re
+import sys
+import traceback
+import typing as t
+from functools import update_wrapper
+from operator import itemgetter
+from types import ModuleType
+
+import click
+from click.core import ParameterSource
+from werkzeug import run_simple
+from werkzeug.serving import is_running_from_reloader
+from werkzeug.utils import import_string
+
+from .globals import current_app
+from .helpers import get_debug_flag
+from .helpers import get_load_dotenv
+
+if t.TYPE_CHECKING:
+ import ssl
+
+ from _typeshed.wsgi import StartResponse
+ from _typeshed.wsgi import WSGIApplication
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from .app import Flask
+
+
+class NoAppException(click.UsageError):
+ """Raised if an application cannot be found or loaded."""
+
+
+def find_best_app(module: ModuleType) -> Flask:
+ """Given a module instance this tries to find the best possible
+ application in the module or raises an exception.
+ """
+ from . import Flask
+
+ # Search for the most common names first.
+ for attr_name in ("app", "application"):
+ app = getattr(module, attr_name, None)
+
+ if isinstance(app, Flask):
+ return app
+
+ # Otherwise find the only object that is a Flask instance.
+ matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]
+
+ if len(matches) == 1:
+ return matches[0]
+ elif len(matches) > 1:
+ raise NoAppException(
+ "Detected multiple Flask applications in module"
+ f" '{module.__name__}'. Use '{module.__name__}:name'"
+ " to specify the correct one."
+ )
+
+ # Search for app factory functions.
+ for attr_name in ("create_app", "make_app"):
+ app_factory = getattr(module, attr_name, None)
+
+ if inspect.isfunction(app_factory):
+ try:
+ app = app_factory()
+
+ if isinstance(app, Flask):
+ return app
+ except TypeError as e:
+ if not _called_with_wrong_args(app_factory):
+ raise
+
+ raise NoAppException(
+ f"Detected factory '{attr_name}' in module '{module.__name__}',"
+ " but could not call it without arguments. Use"
+ f" '{module.__name__}:{attr_name}(args)'"
+ " to specify arguments."
+ ) from e
+
+ raise NoAppException(
+ "Failed to find Flask application or factory in module"
+ f" '{module.__name__}'. Use '{module.__name__}:name'"
+ " to specify one."
+ )
+
+
+def _called_with_wrong_args(f: t.Callable[..., Flask]) -> bool:
+ """Check whether calling a function raised a ``TypeError`` because
+ the call failed or because something in the factory raised the
+ error.
+
+ :param f: The function that was called.
+ :return: ``True`` if the call failed.
+ """
+ tb = sys.exc_info()[2]
+
+ try:
+ while tb is not None:
+ if tb.tb_frame.f_code is f.__code__:
+ # In the function, it was called successfully.
+ return False
+
+ tb = tb.tb_next
+
+ # Didn't reach the function.
+ return True
+ finally:
+ # Delete tb to break a circular reference.
+ # https://docs.python.org/2/library/sys.html#sys.exc_info
+ del tb
+
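+# Illustrative sketch (not part of the Flask source): the two TypeError cases
+# this helper distinguishes, using a hypothetical factory.
+#
+#     def create_app():            # takes no arguments
+#         raise TypeError("boom")  # raised *inside* the factory -> False
+#
+#     create_app(1, 2)  # TypeError raised *before* entering the body -> True
+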
+
+def find_app_by_string(module: ModuleType, app_name: str) -> Flask:
+ """Check if the given string is a variable name or a function. Call
+ a function to get the app instance, or return the variable directly.
+ """
+ from . import Flask
+
+ # Parse app_name as a single expression to determine if it's a valid
+ # attribute name or function call.
+ try:
+ expr = ast.parse(app_name.strip(), mode="eval").body
+ except SyntaxError:
+ raise NoAppException(
+ f"Failed to parse {app_name!r} as an attribute name or function call."
+ ) from None
+
+ if isinstance(expr, ast.Name):
+ name = expr.id
+ args = []
+ kwargs = {}
+ elif isinstance(expr, ast.Call):
+ # Ensure the function name is an attribute name only.
+ if not isinstance(expr.func, ast.Name):
+ raise NoAppException(
+ f"Function reference must be a simple name: {app_name!r}."
+ )
+
+ name = expr.func.id
+
+ # Parse the positional and keyword arguments as literals.
+ try:
+ args = [ast.literal_eval(arg) for arg in expr.args]
+ kwargs = {
+ kw.arg: ast.literal_eval(kw.value)
+ for kw in expr.keywords
+ if kw.arg is not None
+ }
+ except ValueError:
+ # literal_eval gives cryptic error messages, show a generic
+ # message with the full expression instead.
+ raise NoAppException(
+ f"Failed to parse arguments as literal values: {app_name!r}."
+ ) from None
+ else:
+ raise NoAppException(
+ f"Failed to parse {app_name!r} as an attribute name or function call."
+ )
+
+ try:
+ attr = getattr(module, name)
+ except AttributeError as e:
+ raise NoAppException(
+ f"Failed to find attribute {name!r} in {module.__name__!r}."
+ ) from e
+
+ # If the attribute is a function, call it with any args and kwargs
+ # to get the real application.
+ if inspect.isfunction(attr):
+ try:
+ app = attr(*args, **kwargs)
+ except TypeError as e:
+ if not _called_with_wrong_args(attr):
+ raise
+
+ raise NoAppException(
+ f"The factory {app_name!r} in module"
+ f" {module.__name__!r} could not be called with the"
+ " specified arguments."
+ ) from e
+ else:
+ app = attr
+
+ if isinstance(app, Flask):
+ return app
+
+ raise NoAppException(
+ "A valid Flask application was not obtained from"
+ f" '{module.__name__}:{app_name}'."
+ )
+
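+# Illustrative examples (not part of the Flask source): strings this function
+# accepts for the name part of '--app module:name'.
+#
+#     "app"                            # plain attribute lookup
+#     "create_app()"                   # factory called without arguments
+#     "create_app('dev', debug=True)"  # arguments parsed as literals only
+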
+
+def prepare_import(path: str) -> str:
+ """Given a filename this will try to calculate the python path, add it
+ to the search path and return the actual module name that is expected.
+ """
+ path = os.path.realpath(path)
+
+ fname, ext = os.path.splitext(path)
+ if ext == ".py":
+ path = fname
+
+ if os.path.basename(path) == "__init__":
+ path = os.path.dirname(path)
+
+ module_name = []
+
+ # move up until outside package structure (no __init__.py)
+ while True:
+ path, name = os.path.split(path)
+ module_name.append(name)
+
+ if not os.path.exists(os.path.join(path, "__init__.py")):
+ break
+
+ if sys.path[0] != path:
+ sys.path.insert(0, path)
+
+ return ".".join(module_name[::-1])
+
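+# Illustrative examples (not part of the Flask source): how prepare_import
+# maps file paths to module names, assuming a hypothetical layout where
+# src/pkg/ and src/pkg/web/ contain __init__.py but src/ does not.
+#
+#     prepare_import("app.py")               # -> "app"
+#     prepare_import("src/pkg/__init__.py")  # -> "pkg"
+#     prepare_import("src/pkg/web/app.py")   # -> "pkg.web.app"
+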
+
+@t.overload
+def locate_app(
+ module_name: str, app_name: str | None, raise_if_not_found: t.Literal[True] = True
+) -> Flask: ...
+
+
+@t.overload
+def locate_app(
+ module_name: str, app_name: str | None, raise_if_not_found: t.Literal[False] = ...
+) -> Flask | None: ...
+
+
+def locate_app(
+ module_name: str, app_name: str | None, raise_if_not_found: bool = True
+) -> Flask | None:
+ try:
+ __import__(module_name)
+ except ImportError:
+ # Reraise the ImportError if it occurred within the imported module.
+ # Determine this by checking whether the trace has a depth > 1.
+ if sys.exc_info()[2].tb_next: # type: ignore[union-attr]
+ raise NoAppException(
+ f"While importing {module_name!r}, an ImportError was"
+ f" raised:\n\n{traceback.format_exc()}"
+ ) from None
+ elif raise_if_not_found:
+ raise NoAppException(f"Could not import {module_name!r}.") from None
+ else:
+ return None
+
+ module = sys.modules[module_name]
+
+ if app_name is None:
+ return find_best_app(module)
+ else:
+ return find_app_by_string(module, app_name)
+
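+# Usage sketch combining the helpers above (file name hypothetical):
+#
+#     import_name = prepare_import("hello.py")        # -> "hello"
+#     app = locate_app(import_name, "create_app()")   # import + call factory
+#
+# With app_name=None, find_best_app instead looks for a conventional "app"
+# or "application" variable, or a "create_app"/"make_app" factory.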
+
+def get_version(ctx: click.Context, param: click.Parameter, value: t.Any) -> None:
+ if not value or ctx.resilient_parsing:
+ return
+
+ flask_version = importlib.metadata.version("flask")
+ werkzeug_version = importlib.metadata.version("werkzeug")
+
+ click.echo(
+ f"Python {platform.python_version()}\n"
+ f"Flask {flask_version}\n"
+ f"Werkzeug {werkzeug_version}",
+ color=ctx.color,
+ )
+ ctx.exit()
+
+
+version_option = click.Option(
+ ["--version"],
+ help="Show the Flask version.",
+ expose_value=False,
+ callback=get_version,
+ is_flag=True,
+ is_eager=True,
+)
+
+
+class ScriptInfo:
+ """Helper object to deal with Flask applications. This is usually not
+ necessary to interface with as it's used internally in the dispatching
+ to click. In future versions of Flask this object will most likely play
+ a bigger role. Typically it's created automatically by the
+ :class:`FlaskGroup` but you can also manually create it and pass it
+ onwards as click object.
+
+ .. versionchanged:: 3.1
+ Added the ``load_dotenv_defaults`` parameter and attribute.
+ """
+
+ def __init__(
+ self,
+ app_import_path: str | None = None,
+ create_app: t.Callable[..., Flask] | None = None,
+ set_debug_flag: bool = True,
+ load_dotenv_defaults: bool = True,
+ ) -> None:
+ #: Optionally the import path for the Flask application.
+ self.app_import_path = app_import_path
+        #: Optionally a function, called with no arguments, that creates
+        #: the instance of the application.
+ self.create_app = create_app
+ #: A dictionary with arbitrary data that can be associated with
+ #: this script info.
+ self.data: dict[t.Any, t.Any] = {}
+ self.set_debug_flag = set_debug_flag
+
+ self.load_dotenv_defaults = get_load_dotenv(load_dotenv_defaults)
+ """Whether default ``.flaskenv`` and ``.env`` files should be loaded.
+
+        ``ScriptInfo`` doesn't load anything itself; this is for reference
+        when the load is done elsewhere during processing.
+
+ .. versionadded:: 3.1
+ """
+
+ self._loaded_app: Flask | None = None
+
+ def load_app(self) -> Flask:
+ """Loads the Flask app (if not yet loaded) and returns it. Calling
+ this multiple times will just result in the already loaded app to
+ be returned.
+ """
+ if self._loaded_app is not None:
+ return self._loaded_app
+ app: Flask | None = None
+ if self.create_app is not None:
+ app = self.create_app()
+ else:
+ if self.app_import_path:
+ path, name = (
+ re.split(r":(?![\\/])", self.app_import_path, maxsplit=1) + [None]
+ )[:2]
+ import_name = prepare_import(path)
+ app = locate_app(import_name, name)
+ else:
+ for path in ("wsgi.py", "app.py"):
+ import_name = prepare_import(path)
+ app = locate_app(import_name, None, raise_if_not_found=False)
+
+ if app is not None:
+ break
+
+ if app is None:
+ raise NoAppException(
+ "Could not locate a Flask application. Use the"
+ " 'flask --app' option, 'FLASK_APP' environment"
+ " variable, or a 'wsgi.py' or 'app.py' file in the"
+ " current directory."
+ )
+
+ if self.set_debug_flag:
+ # Update the app's debug flag through the descriptor so that
+ # other values repopulate as well.
+ app.debug = get_debug_flag()
+
+ self._loaded_app = app
+ return app
+
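+# Minimal sketch of manual use (normally FlaskGroup creates this object;
+# "hello" is a hypothetical module):
+#
+#     info = ScriptInfo(app_import_path="hello:create_app()")
+#     app = info.load_app()    # imports "hello" and calls the factory
+#     app is info.load_app()   # True -- the loaded app is cached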
+
+pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+
+def with_appcontext(f: F) -> F:
+ """Wraps a callback so that it's guaranteed to be executed with the
+ script's application context.
+
+    Custom commands (and their options) registered under ``app.cli`` or
+    ``blueprint.cli`` will always have an app context available, so this
+    decorator is not required in that case.
+
+ .. versionchanged:: 2.2
+ The app context is active for subcommands as well as the
+ decorated callback. The app context is always available to
+ ``app.cli`` command and parameter callbacks.
+ """
+
+ @click.pass_context
+ def decorator(ctx: click.Context, /, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ if not current_app:
+ app = ctx.ensure_object(ScriptInfo).load_app()
+ ctx.with_resource(app.app_context())
+
+ return ctx.invoke(f, *args, **kwargs)
+
+ return update_wrapper(decorator, f) # type: ignore[return-value]
+
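+# Usage sketch (a hypothetical command on a plain click group; commands
+# registered under app.cli get this wrapping automatically via AppGroup):
+#
+#     @click.command()
+#     @with_appcontext
+#     def seed():
+#         click.echo(current_app.name)  # app context is active here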
+
+class AppGroup(click.Group):
+ """This works similar to a regular click :class:`~click.Group` but it
+ changes the behavior of the :meth:`command` decorator so that it
+ automatically wraps the functions in :func:`with_appcontext`.
+
+ Not to be confused with :class:`FlaskGroup`.
+ """
+
+ def command( # type: ignore[override]
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.Callable[[t.Callable[..., t.Any]], click.Command]:
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
+ unless it's disabled by passing ``with_appcontext=False``.
+ """
+ wrap_for_ctx = kwargs.pop("with_appcontext", True)
+
+ def decorator(f: t.Callable[..., t.Any]) -> click.Command:
+ if wrap_for_ctx:
+ f = with_appcontext(f)
+ return super(AppGroup, self).command(*args, **kwargs)(f) # type: ignore[no-any-return]
+
+ return decorator
+
+ def group( # type: ignore[override]
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.Callable[[t.Callable[..., t.Any]], click.Group]:
+ """This works exactly like the method of the same name on a regular
+ :class:`click.Group` but it defaults the group class to
+ :class:`AppGroup`.
+ """
+ kwargs.setdefault("cls", AppGroup)
+ return super().group(*args, **kwargs) # type: ignore[no-any-return]
+
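+# Sketch: commands declared on an AppGroup get the app context by default.
+#
+#     tools = AppGroup("tools")
+#
+#     @tools.command()                       # wrapped in with_appcontext
+#     def sync(): ...
+#
+#     @tools.command(with_appcontext=False)  # opt out of the wrapping
+#     def ping(): ...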
+
+def _set_app(ctx: click.Context, param: click.Option, value: str | None) -> str | None:
+ if value is None:
+ return None
+
+ info = ctx.ensure_object(ScriptInfo)
+ info.app_import_path = value
+ return value
+
+
+# This option is eager so the app will be available if --help is given.
+# --help is also eager, so --app must be before it in the param list.
+# no_args_is_help bypasses eager processing, so this option must be
+# processed manually in that case to ensure FLASK_APP gets picked up.
+_app_option = click.Option(
+ ["-A", "--app"],
+ metavar="IMPORT",
+ help=(
+ "The Flask application or factory function to load, in the form 'module:name'."
+ " Module can be a dotted import or file path. Name is not required if it is"
+ " 'app', 'application', 'create_app', or 'make_app', and can be 'name(args)' to"
+ " pass arguments."
+ ),
+ is_eager=True,
+ expose_value=False,
+ callback=_set_app,
+)
+
+
+def _set_debug(ctx: click.Context, param: click.Option, value: bool) -> bool | None:
+    # If the flag isn't provided, it will default to False. Don't use
+    # that; let debug be set by the env var in that case.
+ source = ctx.get_parameter_source(param.name) # type: ignore[arg-type]
+
+ if source is not None and source in (
+ ParameterSource.DEFAULT,
+ ParameterSource.DEFAULT_MAP,
+ ):
+ return None
+
+ # Set with env var instead of ScriptInfo.load so that it can be
+ # accessed early during a factory function.
+ os.environ["FLASK_DEBUG"] = "1" if value else "0"
+ return value
+
+
+_debug_option = click.Option(
+ ["--debug/--no-debug"],
+ help="Set debug mode.",
+ expose_value=False,
+ callback=_set_debug,
+)
+
+
+def _env_file_callback(
+ ctx: click.Context, param: click.Option, value: str | None
+) -> str | None:
+ try:
+ import dotenv # noqa: F401
+ except ImportError:
+ # Only show an error if a value was passed, otherwise we still want to
+ # call load_dotenv and show a message without exiting.
+ if value is not None:
+ raise click.BadParameter(
+ "python-dotenv must be installed to load an env file.",
+ ctx=ctx,
+ param=param,
+ ) from None
+
+ # Load if a value was passed, or we want to load default files, or both.
+ if value is not None or ctx.obj.load_dotenv_defaults:
+ load_dotenv(value, load_defaults=ctx.obj.load_dotenv_defaults)
+
+ return value
+
+
+# This option is eager so env vars are loaded as early as possible to be
+# used by other options.
+_env_file_option = click.Option(
+ ["-e", "--env-file"],
+ type=click.Path(exists=True, dir_okay=False),
+ help=(
+ "Load environment variables from this file, taking precedence over"
+ " those set by '.env' and '.flaskenv'. Variables set directly in the"
+ " environment take highest precedence. python-dotenv must be installed."
+ ),
+ is_eager=True,
+ expose_value=False,
+ callback=_env_file_callback,
+)
+
+
+class FlaskGroup(AppGroup):
+ """Special subclass of the :class:`AppGroup` group that supports
+ loading more commands from the configured Flask app. Normally a
+    developer does not have to interface with this class, but there are
+    some very advanced use cases for which it makes sense to create an
+    instance of it. See :ref:`custom-scripts`.
+
+    :param add_default_commands: if this is True then the default ``run``,
+        ``shell``, and ``routes`` commands will be added.
+ :param add_version_option: adds the ``--version`` option.
+    :param create_app: an optional callback, called with no arguments, that
+        returns the loaded app.
+    :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
+        files to set environment variables.
+ :param set_debug_flag: Set the app's debug flag.
+
+ .. versionchanged:: 3.1
+ ``-e path`` takes precedence over default ``.env`` and ``.flaskenv`` files.
+
+ .. versionchanged:: 2.2
+ Added the ``-A/--app``, ``--debug/--no-debug``, ``-e/--env-file`` options.
+
+ .. versionchanged:: 2.2
+ An app context is pushed when running ``app.cli`` commands, so
+ ``@with_appcontext`` is no longer required for those commands.
+
+ .. versionchanged:: 1.0
+ If installed, python-dotenv will be used to load environment variables
+ from :file:`.env` and :file:`.flaskenv` files.
+ """
+
+ def __init__(
+ self,
+ add_default_commands: bool = True,
+ create_app: t.Callable[..., Flask] | None = None,
+ add_version_option: bool = True,
+ load_dotenv: bool = True,
+ set_debug_flag: bool = True,
+ **extra: t.Any,
+ ) -> None:
+ params: list[click.Parameter] = list(extra.pop("params", None) or ())
+ # Processing is done with option callbacks instead of a group
+ # callback. This allows users to make a custom group callback
+ # without losing the behavior. --env-file must come first so
+ # that it is eagerly evaluated before --app.
+ params.extend((_env_file_option, _app_option, _debug_option))
+
+ if add_version_option:
+ params.append(version_option)
+
+ if "context_settings" not in extra:
+ extra["context_settings"] = {}
+
+ extra["context_settings"].setdefault("auto_envvar_prefix", "FLASK")
+
+ super().__init__(params=params, **extra)
+
+ self.create_app = create_app
+ self.load_dotenv = load_dotenv
+ self.set_debug_flag = set_debug_flag
+
+ if add_default_commands:
+ self.add_command(run_command)
+ self.add_command(shell_command)
+ self.add_command(routes_command)
+
+ self._loaded_plugin_commands = False
+
+ def _load_plugin_commands(self) -> None:
+ if self._loaded_plugin_commands:
+ return
+
+ if sys.version_info >= (3, 10):
+ from importlib import metadata
+ else:
+ # Use a backport on Python < 3.10. We technically have
+ # importlib.metadata on 3.8+, but the API changed in 3.10,
+ # so use the backport for consistency.
+ import importlib_metadata as metadata # pyright: ignore
+
+ for ep in metadata.entry_points(group="flask.commands"):
+ self.add_command(ep.load(), ep.name)
+
+ self._loaded_plugin_commands = True
+
+ def get_command(self, ctx: click.Context, name: str) -> click.Command | None:
+ self._load_plugin_commands()
+ # Look up built-in and plugin commands, which should be
+ # available even if the app fails to load.
+ rv = super().get_command(ctx, name)
+
+ if rv is not None:
+ return rv
+
+ info = ctx.ensure_object(ScriptInfo)
+
+ # Look up commands provided by the app, showing an error and
+ # continuing if the app couldn't be loaded.
+ try:
+ app = info.load_app()
+ except NoAppException as e:
+ click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
+ return None
+
+ # Push an app context for the loaded app unless it is already
+ # active somehow. This makes the context available to parameter
+ # and command callbacks without needing @with_appcontext.
+ if not current_app or current_app._get_current_object() is not app: # type: ignore[attr-defined]
+ ctx.with_resource(app.app_context())
+
+ return app.cli.get_command(ctx, name)
+
+ def list_commands(self, ctx: click.Context) -> list[str]:
+ self._load_plugin_commands()
+ # Start with the built-in and plugin commands.
+ rv = set(super().list_commands(ctx))
+ info = ctx.ensure_object(ScriptInfo)
+
+ # Add commands provided by the app, showing an error and
+ # continuing if the app couldn't be loaded.
+ try:
+ rv.update(info.load_app().cli.list_commands(ctx))
+ except NoAppException as e:
+ # When an app couldn't be loaded, show the error message
+ # without the traceback.
+ click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
+ except Exception:
+ # When any other errors occurred during loading, show the
+ # full traceback.
+ click.secho(f"{traceback.format_exc()}\n", err=True, fg="red")
+
+ return sorted(rv)
+
+ def make_context(
+ self,
+ info_name: str | None,
+ args: list[str],
+ parent: click.Context | None = None,
+ **extra: t.Any,
+ ) -> click.Context:
+ # Set a flag to tell app.run to become a no-op. If app.run was
+ # not in a __name__ == __main__ guard, it would start the server
+ # when importing, blocking whatever command is being called.
+ os.environ["FLASK_RUN_FROM_CLI"] = "true"
+
+ if "obj" not in extra and "obj" not in self.context_settings:
+ extra["obj"] = ScriptInfo(
+ create_app=self.create_app,
+ set_debug_flag=self.set_debug_flag,
+ load_dotenv_defaults=self.load_dotenv,
+ )
+
+ return super().make_context(info_name, args, parent=parent, **extra)
+
+ def parse_args(self, ctx: click.Context, args: list[str]) -> list[str]:
+ if (not args and self.no_args_is_help) or (
+ len(args) == 1 and args[0] in self.get_help_option_names(ctx)
+ ):
+ # Attempt to load --env-file and --app early in case they
+ # were given as env vars. Otherwise no_args_is_help will not
+ # see commands from app.cli.
+ _env_file_option.handle_parse_result(ctx, {}, [])
+ _app_option.handle_parse_result(ctx, {}, [])
+
+ return super().parse_args(ctx, args)
+
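+# Custom-script sketch for the "very advanced use cases" the class docstring
+# mentions ("make_my_app" is a hypothetical no-arg factory):
+#
+#     from flask.cli import FlaskGroup
+#
+#     cli = FlaskGroup(create_app=make_my_app)
+#
+#     if __name__ == "__main__":
+#         cli()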
+
+def _path_is_ancestor(path: str, other: str) -> bool:
+ """Take ``other`` and remove the length of ``path`` from it. Then join it
+ to ``path``. If it is the original value, ``path`` is an ancestor of
+ ``other``."""
+ return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
+
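+# e.g. _path_is_ancestor("/a/b", "/a/b/c") is True, while
+#      _path_is_ancestor("/a/b", "/a/bc") is False.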
+
+def load_dotenv(
+ path: str | os.PathLike[str] | None = None, load_defaults: bool = True
+) -> bool:
+ """Load "dotenv" files to set environment variables. A given path takes
+ precedence over ``.env``, which takes precedence over ``.flaskenv``. After
+ loading and combining these files, values are only set if the key is not
+ already set in ``os.environ``.
+
+ This is a no-op if `python-dotenv`_ is not installed.
+
+ .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
+
+ :param path: Load the file at this location.
+ :param load_defaults: Search for and load the default ``.flaskenv`` and
+ ``.env`` files.
+ :return: ``True`` if at least one env var was loaded.
+
+ .. versionchanged:: 3.1
+ Added the ``load_defaults`` parameter. A given path takes precedence
+ over default files.
+
+ .. versionchanged:: 2.0
+ The current directory is not changed to the location of the
+ loaded file.
+
+ .. versionchanged:: 2.0
+ When loading the env files, set the default encoding to UTF-8.
+
+ .. versionchanged:: 1.1.0
+ Returns ``False`` when python-dotenv is not installed, or when
+ the given path isn't a file.
+
+ .. versionadded:: 1.0
+ """
+ try:
+ import dotenv
+ except ImportError:
+ if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
+ click.secho(
+ " * Tip: There are .env files present. Install python-dotenv"
+ " to use them.",
+ fg="yellow",
+ err=True,
+ )
+
+ return False
+
+ data: dict[str, str | None] = {}
+
+ if load_defaults:
+ for default_name in (".flaskenv", ".env"):
+ if not (default_path := dotenv.find_dotenv(default_name, usecwd=True)):
+ continue
+
+ data |= dotenv.dotenv_values(default_path, encoding="utf-8")
+
+ if path is not None and os.path.isfile(path):
+ data |= dotenv.dotenv_values(path, encoding="utf-8")
+
+ for key, value in data.items():
+ if key in os.environ or value is None:
+ continue
+
+ os.environ[key] = value
+
+ return bool(data) # True if at least one env var was loaded.
+
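+# Precedence sketch (hypothetical values): if .flaskenv sets FOO=defaults,
+# .env sets FOO=local, and an explicit path sets FOO=given, the dicts are
+# merged in that order and FOO becomes "given" -- unless FOO already exists
+# in os.environ, in which case it is never overwritten.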
+
+def show_server_banner(debug: bool, app_import_path: str | None) -> None:
+ """Show extra startup messages the first time the server is run,
+ ignoring the reloader.
+ """
+ if is_running_from_reloader():
+ return
+
+ if app_import_path is not None:
+ click.echo(f" * Serving Flask app '{app_import_path}'")
+
+ if debug is not None:
+ click.echo(f" * Debug mode: {'on' if debug else 'off'}")
+
+
+class CertParamType(click.ParamType):
+ """Click option type for the ``--cert`` option. Allows either an
+ existing file, the string ``'adhoc'``, or an import for a
+ :class:`~ssl.SSLContext` object.
+ """
+
+ name = "path"
+
+ def __init__(self) -> None:
+ self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
+
+ def convert(
+ self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None
+ ) -> t.Any:
+ try:
+ import ssl
+ except ImportError:
+ raise click.BadParameter(
+ 'Using "--cert" requires Python to be compiled with SSL support.',
+ ctx,
+ param,
+ ) from None
+
+ try:
+ return self.path_type(value, param, ctx)
+ except click.BadParameter:
+ value = click.STRING(value, param, ctx).lower()
+
+ if value == "adhoc":
+ try:
+ import cryptography # noqa: F401
+ except ImportError:
+ raise click.BadParameter(
+ "Using ad-hoc certificates requires the cryptography library.",
+ ctx,
+ param,
+ ) from None
+
+ return value
+
+ obj = import_string(value, silent=True)
+
+ if isinstance(obj, ssl.SSLContext):
+ return obj
+
+ raise
+
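+# Accepted --cert forms (sketch; names hypothetical): an existing file path,
+# the literal string "adhoc" (requires the cryptography library), or an
+# import string such as "myapp.ssl_ctx" resolving to an ssl.SSLContext.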
+
+def _validate_key(ctx: click.Context, param: click.Parameter, value: t.Any) -> t.Any:
+ """The ``--key`` option must be specified when ``--cert`` is a file.
+ Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
+ """
+ cert = ctx.params.get("cert")
+ is_adhoc = cert == "adhoc"
+
+ try:
+ import ssl
+ except ImportError:
+ is_context = False
+ else:
+ is_context = isinstance(cert, ssl.SSLContext)
+
+ if value is not None:
+ if is_adhoc:
+ raise click.BadParameter(
+ 'When "--cert" is "adhoc", "--key" is not used.', ctx, param
+ )
+
+ if is_context:
+ raise click.BadParameter(
+ 'When "--cert" is an SSLContext object, "--key" is not used.',
+ ctx,
+ param,
+ )
+
+ if not cert:
+ raise click.BadParameter('"--cert" must also be specified.', ctx, param)
+
+ ctx.params["cert"] = cert, value
+
+ else:
+ if cert and not (is_adhoc or is_context):
+ raise click.BadParameter('Required when using "--cert".', ctx, param)
+
+ return value
+
+
+class SeparatedPathType(click.Path):
+ """Click option type that accepts a list of values separated by the
+ OS's path separator (``:``, ``;`` on Windows). Each value is
+ validated as a :class:`click.Path` type.
+ """
+
+ def convert(
+ self, value: t.Any, param: click.Parameter | None, ctx: click.Context | None
+ ) -> t.Any:
+ items = self.split_envvar_value(value)
+ # can't call no-arg super() inside list comprehension until Python 3.12
+ super_convert = super().convert
+ return [super_convert(item, param, ctx) for item in items]
+
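+# e.g. on POSIX, --extra-files "a.cfg:b.cfg" converts to ["a.cfg", "b.cfg"],
+# each entry validated as a click.Path (file names hypothetical).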
+
+@click.command("run", short_help="Run a development server.")
+@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
+@click.option("--port", "-p", default=5000, help="The port to bind to.")
+@click.option(
+ "--cert",
+ type=CertParamType(),
+ help="Specify a certificate file to use HTTPS.",
+ is_eager=True,
+)
+@click.option(
+ "--key",
+ type=click.Path(exists=True, dir_okay=False, resolve_path=True),
+ callback=_validate_key,
+ expose_value=False,
+ help="The key file to use when specifying a certificate.",
+)
+@click.option(
+ "--reload/--no-reload",
+ default=None,
+ help="Enable or disable the reloader. By default the reloader "
+ "is active if debug is enabled.",
+)
+@click.option(
+ "--debugger/--no-debugger",
+ default=None,
+ help="Enable or disable the debugger. By default the debugger "
+ "is active if debug is enabled.",
+)
+@click.option(
+ "--with-threads/--without-threads",
+ default=True,
+ help="Enable or disable multithreading.",
+)
+@click.option(
+ "--extra-files",
+ default=None,
+ type=SeparatedPathType(),
+ help=(
+ "Extra files that trigger a reload on change. Multiple paths"
+ f" are separated by {os.path.pathsep!r}."
+ ),
+)
+@click.option(
+ "--exclude-patterns",
+ default=None,
+ type=SeparatedPathType(),
+ help=(
+ "Files matching these fnmatch patterns will not trigger a reload"
+ " on change. Multiple patterns are separated by"
+ f" {os.path.pathsep!r}."
+ ),
+)
+@pass_script_info
+def run_command(
+ info: ScriptInfo,
+ host: str,
+ port: int,
+ reload: bool,
+ debugger: bool,
+ with_threads: bool,
+ cert: ssl.SSLContext | tuple[str, str | None] | t.Literal["adhoc"] | None,
+ extra_files: list[str] | None,
+ exclude_patterns: list[str] | None,
+) -> None:
+ """Run a local development server.
+
+ This server is for development purposes only. It does not provide
+ the stability, security, or performance of production WSGI servers.
+
+ The reloader and debugger are enabled by default with the '--debug'
+ option.
+ """
+ try:
+ app: WSGIApplication = info.load_app() # pyright: ignore
+ except Exception as e:
+ if is_running_from_reloader():
+ # When reloading, print out the error immediately, but raise
+ # it later so the debugger or server can handle it.
+ traceback.print_exc()
+ err = e
+
+ def app(
+ environ: WSGIEnvironment, start_response: StartResponse
+ ) -> cabc.Iterable[bytes]:
+ raise err from None
+
+ else:
+ # When not reloading, raise the error immediately so the
+ # command fails.
+ raise e from None
+
+ debug = get_debug_flag()
+
+ if reload is None:
+ reload = debug
+
+ if debugger is None:
+ debugger = debug
+
+ show_server_banner(debug, info.app_import_path)
+
+ run_simple(
+ host,
+ port,
+ app,
+ use_reloader=reload,
+ use_debugger=debugger,
+ threaded=with_threads,
+ ssl_context=cert,
+ extra_files=extra_files,
+ exclude_patterns=exclude_patterns,
+ )
+
+
+run_command.params.insert(0, _debug_option)
+
+
+@click.command("shell", short_help="Run a shell in the app context.")
+@with_appcontext
+def shell_command() -> None:
+ """Run an interactive Python shell in the context of a given
+ Flask application. The application will populate the default
+ namespace of this shell according to its configuration.
+
+ This is useful for executing small snippets of management code
+ without having to manually configure the application.
+ """
+ import code
+
+ banner = (
+ f"Python {sys.version} on {sys.platform}\n"
+ f"App: {current_app.import_name}\n"
+ f"Instance: {current_app.instance_path}"
+ )
+ ctx: dict[str, t.Any] = {}
+
+ # Support the regular Python interpreter startup script if someone
+ # is using it.
+ startup = os.environ.get("PYTHONSTARTUP")
+ if startup and os.path.isfile(startup):
+ with open(startup) as f:
+ eval(compile(f.read(), startup, "exec"), ctx)
+
+ ctx.update(current_app.make_shell_context())
+
+ # Site, customize, or startup script can set a hook to call when
+ # entering interactive mode. The default one sets up readline with
+ # tab and history completion.
+ interactive_hook = getattr(sys, "__interactivehook__", None)
+
+ if interactive_hook is not None:
+ try:
+ import readline
+ from rlcompleter import Completer
+ except ImportError:
+ pass
+ else:
+ # rlcompleter uses __main__.__dict__ by default, which is
+ # flask.__main__. Use the shell context instead.
+ readline.set_completer(Completer(ctx).complete)
+
+ interactive_hook()
+
+ code.interact(banner=banner, local=ctx)
+
+
+@click.command("routes", short_help="Show the routes for the app.")
+@click.option(
+ "--sort",
+ "-s",
+ type=click.Choice(("endpoint", "methods", "domain", "rule", "match")),
+ default="endpoint",
+ help=(
+ "Method to sort routes by. 'match' is the order that Flask will match routes"
+ " when dispatching a request."
+ ),
+)
+@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
+@with_appcontext
+def routes_command(sort: str, all_methods: bool) -> None:
+ """Show all registered routes with endpoints and methods."""
+ rules = list(current_app.url_map.iter_rules())
+
+ if not rules:
+ click.echo("No routes were registered.")
+ return
+
+ ignored_methods = set() if all_methods else {"HEAD", "OPTIONS"}
+ host_matching = current_app.url_map.host_matching
+ has_domain = any(rule.host if host_matching else rule.subdomain for rule in rules)
+ rows = []
+
+ for rule in rules:
+ row = [
+ rule.endpoint,
+ ", ".join(sorted((rule.methods or set()) - ignored_methods)),
+ ]
+
+ if has_domain:
+ row.append((rule.host if host_matching else rule.subdomain) or "")
+
+ row.append(rule.rule)
+ rows.append(row)
+
+ headers = ["Endpoint", "Methods"]
+ sorts = ["endpoint", "methods"]
+
+ if has_domain:
+ headers.append("Host" if host_matching else "Subdomain")
+ sorts.append("domain")
+
+ headers.append("Rule")
+ sorts.append("rule")
+
+ try:
+ rows.sort(key=itemgetter(sorts.index(sort)))
+ except ValueError:
+ pass
+
+ rows.insert(0, headers)
+ widths = [max(len(row[i]) for row in rows) for i in range(len(headers))]
+ rows.insert(1, ["-" * w for w in widths])
+ template = " ".join(f"{{{i}:<{w}}}" for i, w in enumerate(widths))
+
+ for row in rows:
+ click.echo(template.format(*row))
+
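+# Output sketch for a bare app (only the default static route registered):
+#
+#     Endpoint  Methods  Rule
+#     --------  -------  -----------------------
+#     static    GET      /static/<path:filename>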
+
+cli = FlaskGroup(
+ name="flask",
+ help="""\
+A general utility script for Flask applications.
+
+An application to load must be given with the '--app' option,
+'FLASK_APP' environment variable, or with a 'wsgi.py' or 'app.py' file
+in the current directory.
+""",
+)
+
+
+def main() -> None:
+ cli.main()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tapdown/lib/python3.11/site-packages/flask/config.py b/tapdown/lib/python3.11/site-packages/flask/config.py
new file mode 100644
index 0000000..34ef1a5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/config.py
@@ -0,0 +1,367 @@
+from __future__ import annotations
+
+import errno
+import json
+import os
+import types
+import typing as t
+
+from werkzeug.utils import import_string
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ from .sansio.app import App
+
+
+T = t.TypeVar("T")
+
+
+class ConfigAttribute(t.Generic[T]):
+ """Makes an attribute forward to the config"""
+
+ def __init__(
+ self, name: str, get_converter: t.Callable[[t.Any], T] | None = None
+ ) -> None:
+ self.__name__ = name
+ self.get_converter = get_converter
+
+ @t.overload
+ def __get__(self, obj: None, owner: None) -> te.Self: ...
+
+ @t.overload
+ def __get__(self, obj: App, owner: type[App]) -> T: ...
+
+ def __get__(self, obj: App | None, owner: type[App] | None = None) -> T | te.Self:
+ if obj is None:
+ return self
+
+ rv = obj.config[self.__name__]
+
+ if self.get_converter is not None:
+ rv = self.get_converter(rv)
+
+ return rv # type: ignore[no-any-return]
+
+ def __set__(self, obj: App, value: t.Any) -> None:
+ obj.config[self.__name__] = value
+
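+# Sketch of how the descriptor is used (pattern from Flask's app classes;
+# "MyApp" is hypothetical):
+#
+#     class MyApp:
+#         config: dict[str, t.Any]
+#         testing = ConfigAttribute[bool]("TESTING")
+#
+#     # my_app.testing reads my_app.config["TESTING"];
+#     # my_app.testing = True writes it back.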
+
+class Config(dict): # type: ignore[type-arg]
+ """Works exactly like a dict but provides ways to fill it from files
+ or special dictionaries. There are two common patterns to populate the
+ config.
+
+ Either you can fill the config from a config file::
+
+ app.config.from_pyfile('yourconfig.cfg')
+
+ Or alternatively you can define the configuration options in the
+ module that calls :meth:`from_object` or provide an import path to
+ a module that should be loaded. It is also possible to tell it to
+ use the same module and with that provide the configuration values
+ just before the call::
+
+ DEBUG = True
+ SECRET_KEY = 'development key'
+ app.config.from_object(__name__)
+
+    In both cases (loading from any Python file or loading from modules),
+    only uppercase keys are added to the config. This makes it possible to
+    use lowercase variables in the config file for temporary values that are
+    not added to the config, or to define the config keys in the same file
+    that implements the application.
+
+ Probably the most interesting way to load configurations is from an
+ environment variable pointing to a file::
+
+ app.config.from_envvar('YOURAPPLICATION_SETTINGS')
+
+ In this case before launching the application you have to set this
+ environment variable to the file you want to use. On Linux and OS X
+ use the export statement::
+
+ export YOURAPPLICATION_SETTINGS='/path/to/config/file'
+
+    On Windows use ``set`` instead.
+
+ :param root_path: path to which files are read relative from. When the
+ config object is created by the application, this is
+ the application's :attr:`~flask.Flask.root_path`.
+ :param defaults: an optional dictionary of default values
+ """
+
+ def __init__(
+ self,
+ root_path: str | os.PathLike[str],
+ defaults: dict[str, t.Any] | None = None,
+ ) -> None:
+ super().__init__(defaults or {})
+ self.root_path = root_path
+
+ def from_envvar(self, variable_name: str, silent: bool = False) -> bool:
+ """Loads a configuration from an environment variable pointing to
+ a configuration file. This is basically just a shortcut with nicer
+ error messages for this line of code::
+
+ app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
+
+ :param variable_name: name of the environment variable
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+ :return: ``True`` if the file was loaded successfully.
+ """
+ rv = os.environ.get(variable_name)
+ if not rv:
+ if silent:
+ return False
+ raise RuntimeError(
+ f"The environment variable {variable_name!r} is not set"
+ " and as such configuration could not be loaded. Set"
+ " this variable and make it point to a configuration"
+ " file"
+ )
+ return self.from_pyfile(rv, silent=silent)
+
+ def from_prefixed_env(
+ self, prefix: str = "FLASK", *, loads: t.Callable[[str], t.Any] = json.loads
+ ) -> bool:
+ """Load any environment variables that start with ``FLASK_``,
+ dropping the prefix from the env key for the config key. Values
+ are passed through a loading function to attempt to convert them
+ to more specific types than strings.
+
+ Keys are loaded in :func:`sorted` order.
+
+ The default loading function attempts to parse values as any
+ valid JSON type, including dicts and lists.
+
+ Specific items in nested dicts can be set by separating the
+ keys with double underscores (``__``). If an intermediate key
+ doesn't exist, it will be initialized to an empty dict.
+
+ :param prefix: Load env vars that start with this prefix,
+ separated with an underscore (``_``).
+ :param loads: Pass each string value to this function and use
+ the returned value as the config value. If any error is
+ raised it is ignored and the value remains a string. The
+ default is :func:`json.loads`.
+
+ .. versionadded:: 2.1
+ """
+ prefix = f"{prefix}_"
+
+ for key in sorted(os.environ):
+ if not key.startswith(prefix):
+ continue
+
+ value = os.environ[key]
+ key = key.removeprefix(prefix)
+
+ try:
+ value = loads(value)
+ except Exception:
+ # Keep the value as a string if loading failed.
+ pass
+
+ if "__" not in key:
+ # A non-nested key, set directly.
+ self[key] = value
+ continue
+
+ # Traverse nested dictionaries with keys separated by "__".
+ current = self
+ *parts, tail = key.split("__")
+
+ for part in parts:
+ # If an intermediate dict does not exist, create it.
+ if part not in current:
+ current[part] = {}
+
+ current = current[part]
+
+ current[tail] = value
+
+ return True
+
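+    # Nested-key sketch (hypothetical env vars):
+    #
+    #     FLASK_SECRET_KEY=dev
+    #     FLASK_MAIL__PORT=25
+    #
+    # from_prefixed_env() sets config["SECRET_KEY"] = "dev" and
+    # config["MAIL"]["PORT"] = 25 (the value parses as JSON).
+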
+ def from_pyfile(
+ self, filename: str | os.PathLike[str], silent: bool = False
+ ) -> bool:
+ """Updates the values in the config from a Python file. This function
+ behaves as if the file was imported as module with the
+ :meth:`from_object` function.
+
+ :param filename: the filename of the config. This can either be an
+ absolute filename or a filename relative to the
+ root path.
+ :param silent: set to ``True`` if you want silent failure for missing
+ files.
+ :return: ``True`` if the file was loaded successfully.
+
+ .. versionadded:: 0.7
+ `silent` parameter.
+ """
+ filename = os.path.join(self.root_path, filename)
+ d = types.ModuleType("config")
+ d.__file__ = filename
+ try:
+ with open(filename, mode="rb") as config_file:
+ exec(compile(config_file.read(), filename, "exec"), d.__dict__)
+ except OSError as e:
+ if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
+ return False
+ e.strerror = f"Unable to load configuration file ({e.strerror})"
+ raise
+ self.from_object(d)
+ return True
+
+ def from_object(self, obj: object | str) -> None:
+ """Updates the values from the given object. An object can be of one
+ of the following two types:
+
+ - a string: in this case the object with that name will be imported
+ - an actual object reference: that object is used directly
+
+ Objects are usually either modules or classes. :meth:`from_object`
+ loads only the uppercase attributes of the module/class. A ``dict``
+ object will not work with :meth:`from_object` because the keys of a
+ ``dict`` are not attributes of the ``dict`` class.
+
+ Example of module-based configuration::
+
+ app.config.from_object('yourapplication.default_config')
+ from yourapplication import default_config
+ app.config.from_object(default_config)
+
+ Nothing is done to the object before loading. If the object is a
+ class and has ``@property`` attributes, it needs to be
+ instantiated before being passed to this method.
+
+ You should not use this function to load the actual configuration but
+ rather configuration defaults. The actual config should be loaded
+ with :meth:`from_pyfile` and ideally from a location not within the
+ package because the package might be installed system wide.
+
+ See :ref:`config-dev-prod` for an example of class-based configuration
+ using :meth:`from_object`.
+
+ :param obj: an import name or object
+ """
+ if isinstance(obj, str):
+ obj = import_string(obj)
+ for key in dir(obj):
+ if key.isupper():
+ self[key] = getattr(obj, key)
+
+ def from_file(
+ self,
+ filename: str | os.PathLike[str],
+ load: t.Callable[[t.IO[t.Any]], t.Mapping[str, t.Any]],
+ silent: bool = False,
+ text: bool = True,
+ ) -> bool:
+ """Update the values in the config from a file that is loaded
+ using the ``load`` parameter. The loaded data is passed to the
+ :meth:`from_mapping` method.
+
+ .. code-block:: python
+
+ import json
+ app.config.from_file("config.json", load=json.load)
+
+ import tomllib
+ app.config.from_file("config.toml", load=tomllib.load, text=False)
+
+ :param filename: The path to the data file. This can be an
+ absolute path or relative to the config root path.
+ :param load: A callable that takes a file handle and returns a
+ mapping of loaded data from the file.
+ :type load: ``Callable[[Reader], Mapping]`` where ``Reader``
+ implements a ``read`` method.
+ :param silent: Ignore the file if it doesn't exist.
+ :param text: Open the file in text or binary mode.
+ :return: ``True`` if the file was loaded successfully.
+
+ .. versionchanged:: 2.3
+ The ``text`` parameter was added.
+
+ .. versionadded:: 2.0
+ """
+ filename = os.path.join(self.root_path, filename)
+
+ try:
+ with open(filename, "r" if text else "rb") as f:
+ obj = load(f)
+ except OSError as e:
+ if silent and e.errno in (errno.ENOENT, errno.EISDIR):
+ return False
+
+ e.strerror = f"Unable to load configuration file ({e.strerror})"
+ raise
+
+ return self.from_mapping(obj)
+
+ def from_mapping(
+ self, mapping: t.Mapping[str, t.Any] | None = None, **kwargs: t.Any
+ ) -> bool:
+ """Updates the config like :meth:`update` ignoring items with
+ non-upper keys.
+
+ :return: Always returns ``True``.
+
+ .. versionadded:: 0.11
+ """
+ mappings: dict[str, t.Any] = {}
+ if mapping is not None:
+ mappings.update(mapping)
+ mappings.update(kwargs)
+ for key, value in mappings.items():
+ if key.isupper():
+ self[key] = value
+ return True
+
+ def get_namespace(
+ self, namespace: str, lowercase: bool = True, trim_namespace: bool = True
+ ) -> dict[str, t.Any]:
+ """Returns a dictionary containing a subset of configuration options
+ that match the specified namespace/prefix. Example usage::
+
+ app.config['IMAGE_STORE_TYPE'] = 'fs'
+ app.config['IMAGE_STORE_PATH'] = '/var/app/images'
+ app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com'
+ image_store_config = app.config.get_namespace('IMAGE_STORE_')
+
+ The resulting dictionary `image_store_config` would look like::
+
+ {
+ 'type': 'fs',
+ 'path': '/var/app/images',
+ 'base_url': 'http://img.website.com'
+ }
+
+ This is often useful when configuration options map directly to
+ keyword arguments in functions or class constructors.
+
+ :param namespace: a configuration namespace
+ :param lowercase: a flag indicating if the keys of the resulting
+ dictionary should be lowercase
+ :param trim_namespace: a flag indicating if the keys of the resulting
+ dictionary should not include the namespace
+
+ .. versionadded:: 0.11
+ """
+ rv = {}
+ for k, v in self.items():
+ if not k.startswith(namespace):
+ continue
+ if trim_namespace:
+ key = k[len(namespace) :]
+ else:
+ key = k
+ if lowercase:
+ key = key.lower()
+ rv[key] = v
+ return rv
+
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {dict.__repr__(self)}>"
diff --git a/tapdown/lib/python3.11/site-packages/flask/ctx.py b/tapdown/lib/python3.11/site-packages/flask/ctx.py
new file mode 100644
index 0000000..222e818
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/ctx.py
@@ -0,0 +1,449 @@
+from __future__ import annotations
+
+import contextvars
+import sys
+import typing as t
+from functools import update_wrapper
+from types import TracebackType
+
+from werkzeug.exceptions import HTTPException
+
+from . import typing as ft
+from .globals import _cv_app
+from .globals import _cv_request
+from .signals import appcontext_popped
+from .signals import appcontext_pushed
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from _typeshed.wsgi import WSGIEnvironment
+
+ from .app import Flask
+ from .sessions import SessionMixin
+ from .wrappers import Request
+
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
+
+class _AppCtxGlobals:
+ """A plain object. Used as a namespace for storing data during an
+ application context.
+
+ Creating an app context automatically creates this object, which is
+ made available as the :data:`g` proxy.
+
+ .. describe:: 'key' in g
+
+ Check whether an attribute is present.
+
+ .. versionadded:: 0.10
+
+ .. describe:: iter(g)
+
+ Return an iterator over the attribute names.
+
+ .. versionadded:: 0.10
+ """
+
+ # Define attr methods to let mypy know this is a namespace object
+ # that has arbitrary attributes.
+
+ def __getattr__(self, name: str) -> t.Any:
+ try:
+ return self.__dict__[name]
+ except KeyError:
+ raise AttributeError(name) from None
+
+ def __setattr__(self, name: str, value: t.Any) -> None:
+ self.__dict__[name] = value
+
+ def __delattr__(self, name: str) -> None:
+ try:
+ del self.__dict__[name]
+ except KeyError:
+ raise AttributeError(name) from None
+
+ def get(self, name: str, default: t.Any | None = None) -> t.Any:
+ """Get an attribute by name, or a default value. Like
+ :meth:`dict.get`.
+
+ :param name: Name of attribute to get.
+ :param default: Value to return if the attribute is not present.
+
+ .. versionadded:: 0.10
+ """
+ return self.__dict__.get(name, default)
+
+ def pop(self, name: str, default: t.Any = _sentinel) -> t.Any:
+ """Get and remove an attribute by name. Like :meth:`dict.pop`.
+
+ :param name: Name of attribute to pop.
+ :param default: Value to return if the attribute is not present,
+ instead of raising a ``KeyError``.
+
+ .. versionadded:: 0.11
+ """
+ if default is _sentinel:
+ return self.__dict__.pop(name)
+ else:
+ return self.__dict__.pop(name, default)
+
+ def setdefault(self, name: str, default: t.Any = None) -> t.Any:
+ """Get the value of an attribute if it is present, otherwise
+ set and return a default value. Like :meth:`dict.setdefault`.
+
+ :param name: Name of attribute to get.
+ :param default: Value to set and return if the attribute is not
+ present.
+
+ .. versionadded:: 0.11
+ """
+ return self.__dict__.setdefault(name, default)
+
+ def __contains__(self, item: str) -> bool:
+ return item in self.__dict__
+
+ def __iter__(self) -> t.Iterator[str]:
+ return iter(self.__dict__)
+
+ def __repr__(self) -> str:
+ ctx = _cv_app.get(None)
+ if ctx is not None:
+            return f"<flask.g of '{ctx.app.name}'>"
+ return object.__repr__(self)
+
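+# Usage sketch: the g proxy points at the current _AppCtxGlobals instance.
+#
+#     from flask import g
+#
+#     g.db = object()       # hypothetical per-context resource
+#     g.get("db") is g.db   # dict-like helpers defined above
+#     "db" in g             # True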
+
+def after_this_request(
+ f: ft.AfterRequestCallable[t.Any],
+) -> ft.AfterRequestCallable[t.Any]:
+ """Executes a function after this request. This is useful to modify
+ response objects. The function is passed the response object and has
+ to return the same or a new one.
+
+ Example::
+
+ @app.route('/')
+ def index():
+ @after_this_request
+ def add_header(response):
+ response.headers['X-Foo'] = 'Parachute'
+ return response
+ return 'Hello World!'
+
+ This is more useful if a function other than the view function wants to
+ modify a response. For instance think of a decorator that wants to add
+ some headers without converting the return value into a response object.
+
+ .. versionadded:: 0.9
+ """
+ ctx = _cv_request.get(None)
+
+ if ctx is None:
+ raise RuntimeError(
+ "'after_this_request' can only be used when a request"
+ " context is active, such as in a view function."
+ )
+
+ ctx._after_request_functions.append(f)
+ return f
+
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+
+def copy_current_request_context(f: F) -> F:
+ """A helper function that decorates a function to retain the current
+ request context. This is useful when working with greenlets. The moment
+ the function is decorated a copy of the request context is created and
+ then pushed when the function is called. The current session is also
+ included in the copied request context.
+
+ Example::
+
+ import gevent
+ from flask import copy_current_request_context
+
+ @app.route('/')
+ def index():
+ @copy_current_request_context
+ def do_some_work():
+ # do some work here, it can access flask.request or
+ # flask.session like you would otherwise in the view function.
+ ...
+ gevent.spawn(do_some_work)
+ return 'Regular response'
+
+ .. versionadded:: 0.10
+ """
+ ctx = _cv_request.get(None)
+
+ if ctx is None:
+ raise RuntimeError(
+ "'copy_current_request_context' can only be used when a"
+ " request context is active, such as in a view function."
+ )
+
+ ctx = ctx.copy()
+
+ def wrapper(*args: t.Any, **kwargs: t.Any) -> t.Any:
+ with ctx:
+ return ctx.app.ensure_sync(f)(*args, **kwargs)
+
+ return update_wrapper(wrapper, f) # type: ignore[return-value]
+
+
+def has_request_context() -> bool:
+ """If you have code that wants to test if a request context is there or
+ not this function can be used. For instance, you may want to take advantage
+ of request information if the request object is available, but fail
+ silently if it is unavailable.
+
+ ::
+
+ class User(db.Model):
+
+ def __init__(self, username, remote_addr=None):
+ self.username = username
+ if remote_addr is None and has_request_context():
+ remote_addr = request.remote_addr
+ self.remote_addr = remote_addr
+
+ Alternatively you can also just test any of the context bound objects
+ (such as :class:`request` or :class:`g`) for truthness::
+
+ class User(db.Model):
+
+ def __init__(self, username, remote_addr=None):
+ self.username = username
+ if remote_addr is None and request:
+ remote_addr = request.remote_addr
+ self.remote_addr = remote_addr
+
+ .. versionadded:: 0.7
+ """
+ return _cv_request.get(None) is not None
+
+
+def has_app_context() -> bool:
+ """Works like :func:`has_request_context` but for the application
+ context. You can also just do a boolean check on the
+ :data:`current_app` object instead.
+
+ .. versionadded:: 0.9
+ """
+ return _cv_app.get(None) is not None
+
+
+class AppContext:
+ """The app context contains application-specific information. An app
+ context is created and pushed at the beginning of each request if
+ one is not already active. An app context is also pushed when
+ running CLI commands.
+ """
+
+ def __init__(self, app: Flask) -> None:
+ self.app = app
+ self.url_adapter = app.create_url_adapter(None)
+ self.g: _AppCtxGlobals = app.app_ctx_globals_class()
+ self._cv_tokens: list[contextvars.Token[AppContext]] = []
+
+ def push(self) -> None:
+ """Binds the app context to the current context."""
+ self._cv_tokens.append(_cv_app.set(self))
+ appcontext_pushed.send(self.app, _async_wrapper=self.app.ensure_sync)
+
+ def pop(self, exc: BaseException | None = _sentinel) -> None: # type: ignore
+ """Pops the app context."""
+ try:
+ if len(self._cv_tokens) == 1:
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ self.app.do_teardown_appcontext(exc)
+ finally:
+ ctx = _cv_app.get()
+ _cv_app.reset(self._cv_tokens.pop())
+
+ if ctx is not self:
+ raise AssertionError(
+ f"Popped wrong app context. ({ctx!r} instead of {self!r})"
+ )
+
+ appcontext_popped.send(self.app, _async_wrapper=self.app.ensure_sync)
+
+ def __enter__(self) -> AppContext:
+ self.push()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> None:
+ self.pop(exc_value)
+
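+# Manual use goes through the context-manager protocol defined above:
+#
+#     with app.app_context():
+#         ...  # current_app and g are bound here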
+
+class RequestContext:
+ """The request context contains per-request information. The Flask
+ app creates and pushes it at the beginning of the request, then pops
+ it at the end of the request. It will create the URL adapter and
+ request object for the WSGI environment provided.
+
+ Do not attempt to use this class directly, instead use
+ :meth:`~flask.Flask.test_request_context` and
+ :meth:`~flask.Flask.request_context` to create this object.
+
+ When the request context is popped, it will evaluate all the
+ functions registered on the application for teardown execution
+ (:meth:`~flask.Flask.teardown_request`).
+
+ The request context is automatically popped at the end of the
+ request. When using the interactive debugger, the context will be
+ restored so ``request`` is still accessible. Similarly, the test
+ client can preserve the context after the request ends. However,
+ teardown functions may already have closed some resources such as
+ database connections.
+ """
+
+ def __init__(
+ self,
+ app: Flask,
+ environ: WSGIEnvironment,
+ request: Request | None = None,
+ session: SessionMixin | None = None,
+ ) -> None:
+ self.app = app
+ if request is None:
+ request = app.request_class(environ)
+ request.json_module = app.json
+ self.request: Request = request
+ self.url_adapter = None
+ try:
+ self.url_adapter = app.create_url_adapter(self.request)
+ except HTTPException as e:
+ self.request.routing_exception = e
+ self.flashes: list[tuple[str, str]] | None = None
+ self.session: SessionMixin | None = session
+ # Functions that should be executed after the request on the response
+ # object. These will be called before the regular "after_request"
+ # functions.
+ self._after_request_functions: list[ft.AfterRequestCallable[t.Any]] = []
+
+ self._cv_tokens: list[
+ tuple[contextvars.Token[RequestContext], AppContext | None]
+ ] = []
+
+ def copy(self) -> RequestContext:
+ """Creates a copy of this request context with the same request object.
+ This can be used to move a request context to a different greenlet.
+ Because the actual request object is the same this cannot be used to
+ move a request context to a different thread unless access to the
+ request object is locked.
+
+ .. versionadded:: 0.10
+
+ .. versionchanged:: 1.1
+ The current session object is used instead of reloading the original
+ data. This prevents `flask.session` pointing to an out-of-date object.
+ """
+ return self.__class__(
+ self.app,
+ environ=self.request.environ,
+ request=self.request,
+ session=self.session,
+ )
+
+ def match_request(self) -> None:
+ """Can be overridden by a subclass to hook into the matching
+ of the request.
+ """
+ try:
+ result = self.url_adapter.match(return_rule=True) # type: ignore
+ self.request.url_rule, self.request.view_args = result # type: ignore
+ except HTTPException as e:
+ self.request.routing_exception = e
+
+ def push(self) -> None:
+ # Before we push the request context we have to ensure that there
+ # is an application context.
+ app_ctx = _cv_app.get(None)
+
+ if app_ctx is None or app_ctx.app is not self.app:
+ app_ctx = self.app.app_context()
+ app_ctx.push()
+ else:
+ app_ctx = None
+
+ self._cv_tokens.append((_cv_request.set(self), app_ctx))
+
+ # Open the session at the moment that the request context is available.
+ # This allows a custom open_session method to use the request context.
+ # Only open a new session if this is the first time the request was
+ # pushed, otherwise stream_with_context loses the session.
+ if self.session is None:
+ session_interface = self.app.session_interface
+ self.session = session_interface.open_session(self.app, self.request)
+
+ if self.session is None:
+ self.session = session_interface.make_null_session(self.app)
+
+ # Match the request URL after loading the session, so that the
+ # session is available in custom URL converters.
+ if self.url_adapter is not None:
+ self.match_request()
+
+ def pop(self, exc: BaseException | None = _sentinel) -> None: # type: ignore
+ """Pops the request context and unbinds it by doing that. This will
+ also trigger the execution of functions registered by the
+ :meth:`~flask.Flask.teardown_request` decorator.
+
+ .. versionchanged:: 0.9
+ Added the `exc` argument.
+ """
+ clear_request = len(self._cv_tokens) == 1
+
+ try:
+ if clear_request:
+ if exc is _sentinel:
+ exc = sys.exc_info()[1]
+ self.app.do_teardown_request(exc)
+
+ request_close = getattr(self.request, "close", None)
+ if request_close is not None:
+ request_close()
+ finally:
+ ctx = _cv_request.get()
+ token, app_ctx = self._cv_tokens.pop()
+ _cv_request.reset(token)
+
+ # get rid of circular dependencies at the end of the request
+ # so that we don't require the GC to be active.
+ if clear_request:
+ ctx.request.environ["werkzeug.request"] = None
+
+ if app_ctx is not None:
+ app_ctx.pop(exc)
+
+ if ctx is not self:
+ raise AssertionError(
+ f"Popped wrong request context. ({ctx!r} instead of {self!r})"
+ )
+
+ def __enter__(self) -> RequestContext:
+ self.push()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> None:
+ self.pop(exc_value)
+
+ def __repr__(self) -> str:
+ return (
+ f"<{type(self).__name__} {self.request.url!r}"
+ f" [{self.request.method}] of {self.app.name}>"
+ )
diff --git a/tapdown/lib/python3.11/site-packages/flask/debughelpers.py b/tapdown/lib/python3.11/site-packages/flask/debughelpers.py
new file mode 100644
index 0000000..2c8c4c4
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/debughelpers.py
@@ -0,0 +1,178 @@
+from __future__ import annotations
+
+import typing as t
+
+from jinja2.loaders import BaseLoader
+from werkzeug.routing import RequestRedirect
+
+from .blueprints import Blueprint
+from .globals import request_ctx
+from .sansio.app import App
+
+if t.TYPE_CHECKING:
+ from .sansio.scaffold import Scaffold
+ from .wrappers import Request
+
+
+class UnexpectedUnicodeError(AssertionError, UnicodeError):
+ """Raised in places where we want some better error reporting for
+ unexpected unicode or binary data.
+ """
+
+
+class DebugFilesKeyError(KeyError, AssertionError):
+ """Raised from request.files during debugging. The idea is that it can
+ provide a better error message than just a generic KeyError/BadRequest.
+ """
+
+ def __init__(self, request: Request, key: str) -> None:
+ form_matches = request.form.getlist(key)
+ buf = [
+ f"You tried to access the file {key!r} in the request.files"
+ " dictionary but it does not exist. The mimetype for the"
+ f" request is {request.mimetype!r} instead of"
+ " 'multipart/form-data' which means that no file contents"
+ " were transmitted. To fix this error you should provide"
+ ' enctype="multipart/form-data" in your form.'
+ ]
+ if form_matches:
+ names = ", ".join(repr(x) for x in form_matches)
+ buf.append(
+ "\n\nThe browser instead transmitted some file names. "
+ f"This was submitted: {names}"
+ )
+ self.msg = "".join(buf)
+
+ def __str__(self) -> str:
+ return self.msg
+
+
+class FormDataRoutingRedirect(AssertionError):
+ """This exception is raised in debug mode if a routing redirect
+ would cause the browser to drop the method or body. This happens
+    when the method is not GET, HEAD or OPTIONS and the status code is not
+    307 or 308.
+ """
+
+ def __init__(self, request: Request) -> None:
+ exc = request.routing_exception
+ assert isinstance(exc, RequestRedirect)
+ buf = [
+ f"A request was sent to '{request.url}', but routing issued"
+ f" a redirect to the canonical URL '{exc.new_url}'."
+ ]
+
+ if f"{request.base_url}/" == exc.new_url.partition("?")[0]:
+ buf.append(
+ " The URL was defined with a trailing slash. Flask"
+ " will redirect to the URL with a trailing slash if it"
+ " was accessed without one."
+ )
+
+ buf.append(
+ " Send requests to the canonical URL, or use 307 or 308 for"
+ " routing redirects. Otherwise, browsers will drop form"
+ " data.\n\n"
+ "This exception is only raised in debug mode."
+ )
+ super().__init__("".join(buf))
+
+
+def attach_enctype_error_multidict(request: Request) -> None:
+ """Patch ``request.files.__getitem__`` to raise a descriptive error
+ about ``enctype=multipart/form-data``.
+
+ :param request: The request to patch.
+ :meta private:
+ """
+ oldcls = request.files.__class__
+
+ class newcls(oldcls): # type: ignore[valid-type, misc]
+ def __getitem__(self, key: str) -> t.Any:
+ try:
+ return super().__getitem__(key)
+ except KeyError as e:
+ if key not in request.form:
+ raise
+
+ raise DebugFilesKeyError(request, key).with_traceback(
+ e.__traceback__
+ ) from None
+
+ newcls.__name__ = oldcls.__name__
+ newcls.__module__ = oldcls.__module__
+ request.files.__class__ = newcls
+
+
+def _dump_loader_info(loader: BaseLoader) -> t.Iterator[str]:
+ yield f"class: {type(loader).__module__}.{type(loader).__name__}"
+ for key, value in sorted(loader.__dict__.items()):
+ if key.startswith("_"):
+ continue
+ if isinstance(value, (tuple, list)):
+ if not all(isinstance(x, str) for x in value):
+ continue
+ yield f"{key}:"
+ for item in value:
+ yield f" - {item}"
+ continue
+ elif not isinstance(value, (str, int, float, bool)):
+ continue
+ yield f"{key}: {value!r}"
+
+
+def explain_template_loading_attempts(
+ app: App,
+ template: str,
+ attempts: list[
+ tuple[
+ BaseLoader,
+ Scaffold,
+ tuple[str, str | None, t.Callable[[], bool] | None] | None,
+ ]
+ ],
+) -> None:
+ """This should help developers understand what failed"""
+ info = [f"Locating template {template!r}:"]
+ total_found = 0
+ blueprint = None
+ if request_ctx and request_ctx.request.blueprint is not None:
+ blueprint = request_ctx.request.blueprint
+
+ for idx, (loader, srcobj, triple) in enumerate(attempts):
+ if isinstance(srcobj, App):
+ src_info = f"application {srcobj.import_name!r}"
+ elif isinstance(srcobj, Blueprint):
+ src_info = f"blueprint {srcobj.name!r} ({srcobj.import_name})"
+ else:
+ src_info = repr(srcobj)
+
+ info.append(f"{idx + 1:5}: trying loader of {src_info}")
+
+ for line in _dump_loader_info(loader):
+ info.append(f" {line}")
+
+ if triple is None:
+ detail = "no match"
+ else:
+            detail = f"found ({triple[1] or '<string>'!r})"
+ total_found += 1
+ info.append(f" -> {detail}")
+
+ seems_fishy = False
+ if total_found == 0:
+ info.append("Error: the template could not be found.")
+ seems_fishy = True
+ elif total_found > 1:
+ info.append("Warning: multiple loaders returned a match for the template.")
+ seems_fishy = True
+
+ if blueprint is not None and seems_fishy:
+ info.append(
+ " The template was looked up from an endpoint that belongs"
+ f" to the blueprint {blueprint!r}."
+ )
+ info.append(" Maybe you did not place a template in the right folder?")
+ info.append(" See https://flask.palletsprojects.com/blueprints/#templates")
+
+ app.logger.info("\n".join(info))
diff --git a/tapdown/lib/python3.11/site-packages/flask/globals.py b/tapdown/lib/python3.11/site-packages/flask/globals.py
new file mode 100644
index 0000000..e2c410c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/globals.py
@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+import typing as t
+from contextvars import ContextVar
+
+from werkzeug.local import LocalProxy
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from .app import Flask
+ from .ctx import _AppCtxGlobals
+ from .ctx import AppContext
+ from .ctx import RequestContext
+ from .sessions import SessionMixin
+ from .wrappers import Request
+
+
+_no_app_msg = """\
+Working outside of application context.
+
+This typically means that you attempted to use functionality that needed
+the current application. To solve this, set up an application context
+with app.app_context(). See the documentation for more information.\
+"""
+_cv_app: ContextVar[AppContext] = ContextVar("flask.app_ctx")
+app_ctx: AppContext = LocalProxy( # type: ignore[assignment]
+ _cv_app, unbound_message=_no_app_msg
+)
+current_app: Flask = LocalProxy( # type: ignore[assignment]
+ _cv_app, "app", unbound_message=_no_app_msg
+)
+g: _AppCtxGlobals = LocalProxy( # type: ignore[assignment]
+ _cv_app, "g", unbound_message=_no_app_msg
+)
+
+_no_req_msg = """\
+Working outside of request context.
+
+This typically means that you attempted to use functionality that needed
+an active HTTP request. Consult the documentation on testing for
+information about how to avoid this problem.\
+"""
+_cv_request: ContextVar[RequestContext] = ContextVar("flask.request_ctx")
+request_ctx: RequestContext = LocalProxy( # type: ignore[assignment]
+ _cv_request, unbound_message=_no_req_msg
+)
+request: Request = LocalProxy( # type: ignore[assignment]
+ _cv_request, "request", unbound_message=_no_req_msg
+)
+session: SessionMixin = LocalProxy( # type: ignore[assignment]
+ _cv_request, "session", unbound_message=_no_req_msg
+)
diff --git a/tapdown/lib/python3.11/site-packages/flask/helpers.py b/tapdown/lib/python3.11/site-packages/flask/helpers.py
new file mode 100644
index 0000000..5d412c9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/helpers.py
@@ -0,0 +1,641 @@
+from __future__ import annotations
+
+import importlib.util
+import os
+import sys
+import typing as t
+from datetime import datetime
+from functools import cache
+from functools import update_wrapper
+
+import werkzeug.utils
+from werkzeug.exceptions import abort as _wz_abort
+from werkzeug.utils import redirect as _wz_redirect
+from werkzeug.wrappers import Response as BaseResponse
+
+from .globals import _cv_app
+from .globals import _cv_request
+from .globals import current_app
+from .globals import request
+from .globals import request_ctx
+from .globals import session
+from .signals import message_flashed
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from .wrappers import Response
+
+
+def get_debug_flag() -> bool:
+ """Get whether debug mode should be enabled for the app, indicated by the
+ :envvar:`FLASK_DEBUG` environment variable. The default is ``False``.
+ """
+ val = os.environ.get("FLASK_DEBUG")
+ return bool(val and val.lower() not in {"0", "false", "no"})
+
+
+def get_load_dotenv(default: bool = True) -> bool:
+ """Get whether the user has disabled loading default dotenv files by
+    setting :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, meaning
+    the files are loaded.
+
+ :param default: What to return if the env var isn't set.
+ """
+ val = os.environ.get("FLASK_SKIP_DOTENV")
+
+ if not val:
+ return default
+
+ return val.lower() in ("0", "false", "no")
+
+
+@t.overload
+def stream_with_context(
+ generator_or_function: t.Iterator[t.AnyStr],
+) -> t.Iterator[t.AnyStr]: ...
+
+
+@t.overload
+def stream_with_context(
+ generator_or_function: t.Callable[..., t.Iterator[t.AnyStr]],
+) -> t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]: ...
+
+
+def stream_with_context(
+ generator_or_function: t.Iterator[t.AnyStr] | t.Callable[..., t.Iterator[t.AnyStr]],
+) -> t.Iterator[t.AnyStr] | t.Callable[[t.Iterator[t.AnyStr]], t.Iterator[t.AnyStr]]:
+ """Wrap a response generator function so that it runs inside the current
+ request context. This keeps :data:`request`, :data:`session`, and :data:`g`
+ available, even though at the point the generator runs the request context
+ will typically have ended.
+
+ Use it as a decorator on a generator function:
+
+ .. code-block:: python
+
+ from flask import stream_with_context, request, Response
+
+ @app.get("/stream")
+ def streamed_response():
+ @stream_with_context
+ def generate():
+ yield "Hello "
+ yield request.args["name"]
+ yield "!"
+
+ return Response(generate())
+
+ Or use it as a wrapper around a created generator:
+
+ .. code-block:: python
+
+ from flask import stream_with_context, request, Response
+
+ @app.get("/stream")
+ def streamed_response():
+ def generate():
+ yield "Hello "
+ yield request.args["name"]
+ yield "!"
+
+ return Response(stream_with_context(generate()))
+
+ .. versionadded:: 0.9
+ """
+ try:
+ gen = iter(generator_or_function) # type: ignore[arg-type]
+ except TypeError:
+
+ def decorator(*args: t.Any, **kwargs: t.Any) -> t.Any:
+ gen = generator_or_function(*args, **kwargs) # type: ignore[operator]
+ return stream_with_context(gen)
+
+ return update_wrapper(decorator, generator_or_function) # type: ignore[arg-type]
+
+ def generator() -> t.Iterator[t.AnyStr]:
+ if (req_ctx := _cv_request.get(None)) is None:
+ raise RuntimeError(
+ "'stream_with_context' can only be used when a request"
+ " context is active, such as in a view function."
+ )
+
+ app_ctx = _cv_app.get()
+ # Setup code below will run the generator to this point, so that the
+ # current contexts are recorded. The contexts must be pushed after,
+ # otherwise their ContextVar will record the wrong event loop during
+ # async view functions.
+ yield None # type: ignore[misc]
+
+ # Push the app context first, so that the request context does not
+ # automatically create and push a different app context.
+ with app_ctx, req_ctx:
+ try:
+ yield from gen
+ finally:
+ # Clean up in case the user wrapped a WSGI iterator.
+ if hasattr(gen, "close"):
+ gen.close()
+
+ # Execute the generator to the sentinel value. This ensures the context is
+ # preserved in the generator's state. Further iteration will push the
+ # context and yield from the original iterator.
+ wrapped_g = generator()
+ next(wrapped_g)
+ return wrapped_g
+
+
+def make_response(*args: t.Any) -> Response:
+ """Sometimes it is necessary to set additional headers in a view. Because
+ views do not have to return response objects but can return a value that
+ is converted into a response object by Flask itself, it becomes tricky to
+ add headers to it. This function can be called instead of using a return
+ and you will get a response object which you can use to attach headers.
+
+ If view looked like this and you want to add a new header::
+
+ def index():
+ return render_template('index.html', foo=42)
+
+ You can now do something like this::
+
+ def index():
+ response = make_response(render_template('index.html', foo=42))
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+ return response
+
+ This function accepts the very same arguments you can return from a
+ view function. This for example creates a response with a 404 error
+ code::
+
+ response = make_response(render_template('not_found.html'), 404)
+
+ The other use case of this function is to force the return value of a
+ view function into a response which is helpful with view
+ decorators::
+
+ response = make_response(view_function())
+ response.headers['X-Parachutes'] = 'parachutes are cool'
+
+ Internally this function does the following things:
+
+    - if no arguments are passed, it creates a new response object.
+    - if one argument is passed, :meth:`flask.Flask.make_response`
+      is invoked with it.
+    - if more than one argument is passed, the arguments are passed
+      to the :meth:`flask.Flask.make_response` function as a tuple.
+
+ .. versionadded:: 0.6
+ """
+ if not args:
+ return current_app.response_class()
+ if len(args) == 1:
+ args = args[0]
+ return current_app.make_response(args)
+
+
+def url_for(
+ endpoint: str,
+ *,
+ _anchor: str | None = None,
+ _method: str | None = None,
+ _scheme: str | None = None,
+ _external: bool | None = None,
+ **values: t.Any,
+) -> str:
+ """Generate a URL to the given endpoint with the given values.
+
+    This requires an active request or application context, and calls
+    :meth:`current_app.url_for() <flask.Flask.url_for>`. See that
+    method for full documentation.
+
+ :param endpoint: The endpoint name associated with the URL to
+ generate. If this starts with a ``.``, the current blueprint
+ name (if any) will be used.
+ :param _anchor: If given, append this as ``#anchor`` to the URL.
+ :param _method: If given, generate the URL associated with this
+ method for the endpoint.
+ :param _scheme: If given, the URL will have this scheme if it is
+ external.
+ :param _external: If given, prefer the URL to be internal (False) or
+ require it to be external (True). External URLs include the
+ scheme and domain. When not in an active request, URLs are
+ external by default.
+ :param values: Values to use for the variable parts of the URL rule.
+ Unknown keys are appended as query string arguments, like
+ ``?a=b&c=d``.
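+
+    For example, with a hypothetical ``show_user`` endpoint whose rule
+    is ``/user/<username>`` (the endpoint and values are illustrative):
+
+    .. code-block:: python
+
+        url_for("show_user", username="alice")          # /user/alice
+        url_for("show_user", username="alice", page=2)  # /user/alice?page=2
+        url_for("show_user", username="alice", _external=True)
+        # http://example.test/user/alice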
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.url_for``, allowing an app to override the
+ behavior.
+
+ .. versionchanged:: 0.10
+ The ``_scheme`` parameter was added.
+
+ .. versionchanged:: 0.9
+ The ``_anchor`` and ``_method`` parameters were added.
+
+ .. versionchanged:: 0.9
+ Calls ``app.handle_url_build_error`` on build errors.
+ """
+ return current_app.url_for(
+ endpoint,
+ _anchor=_anchor,
+ _method=_method,
+ _scheme=_scheme,
+ _external=_external,
+ **values,
+ )
+
+
+def redirect(
+ location: str, code: int = 302, Response: type[BaseResponse] | None = None
+) -> BaseResponse:
+ """Create a redirect response object.
+
+ If :data:`~flask.current_app` is available, it will use its
+ :meth:`~flask.Flask.redirect` method, otherwise it will use
+ :func:`werkzeug.utils.redirect`.
+
+ :param location: The URL to redirect to.
+ :param code: The status code for the redirect.
+ :param Response: The response class to use. Not used when
+ ``current_app`` is active, which uses ``app.response_class``.
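+
+    A typical use redirects to another endpoint after handling a form
+    (a minimal sketch; ``index`` is an illustrative endpoint name):
+
+    .. code-block:: python
+
+        @app.post("/submit")
+        def submit():
+            ...  # process the form
+            return redirect(url_for("index"))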
+
+ .. versionadded:: 2.2
+ Calls ``current_app.redirect`` if available instead of always
+ using Werkzeug's default ``redirect``.
+ """
+ if current_app:
+ return current_app.redirect(location, code=code)
+
+ return _wz_redirect(location, code=code, Response=Response)
+
+
+def abort(code: int | BaseResponse, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
+ """Raise an :exc:`~werkzeug.exceptions.HTTPException` for the given
+ status code.
+
+ If :data:`~flask.current_app` is available, it will call its
+ :attr:`~flask.Flask.aborter` object, otherwise it will use
+ :func:`werkzeug.exceptions.abort`.
+
+ :param code: The status code for the exception, which must be
+ registered in ``app.aborter``.
+ :param args: Passed to the exception.
+ :param kwargs: Passed to the exception.
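+
+    For example, returning a 404 when a record is missing (a minimal
+    sketch; ``lookup_user`` is a hypothetical helper):
+
+    .. code-block:: python
+
+        @app.get("/user/<int:user_id>")
+        def show_user(user_id):
+            user = lookup_user(user_id)  # hypothetical lookup helper
+            if user is None:
+                abort(404)
+            return f"Hello, {user.name}!"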
+
+ .. versionadded:: 2.2
+ Calls ``current_app.aborter`` if available instead of always
+ using Werkzeug's default ``abort``.
+ """
+ if current_app:
+ current_app.aborter(code, *args, **kwargs)
+
+ _wz_abort(code, *args, **kwargs)
+
+
+def get_template_attribute(template_name: str, attribute: str) -> t.Any:
+ """Loads a macro (or variable) a template exports. This can be used to
+ invoke a macro from within Python code. If you for example have a
+ template named :file:`_cider.html` with the following contents:
+
+ .. sourcecode:: html+jinja
+
+ {% macro hello(name) %}Hello {{ name }}!{% endmacro %}
+
+ You can access this from Python code like this::
+
+ hello = get_template_attribute('_cider.html', 'hello')
+ return hello('World')
+
+ .. versionadded:: 0.2
+
+ :param template_name: the name of the template
+    :param attribute: the name of the variable or macro to access
+ """
+ return getattr(current_app.jinja_env.get_template(template_name).module, attribute)
+
+
+def flash(message: str, category: str = "message") -> None:
+ """Flashes a message to the next request. In order to remove the
+ flashed message from the session and to display it to the user,
+ the template has to call :func:`get_flashed_messages`.
+
+ .. versionchanged:: 0.3
+ `category` parameter added.
+
+ :param message: the message to be flashed.
+ :param category: the category for the message. The following values
+ are recommended: ``'message'`` for any kind of message,
+ ``'error'`` for errors, ``'info'`` for information
+ messages and ``'warning'`` for warnings. However any
+ kind of string can be used as category.
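+
+    For example (a minimal sketch; the endpoint names are illustrative):
+
+    .. code-block:: python
+
+        @app.post("/login")
+        def login():
+            flash("You were logged in.", "info")
+            return redirect(url_for("index"))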
+ """
+ # Original implementation:
+ #
+ # session.setdefault('_flashes', []).append((category, message))
+ #
+ # This assumed that changes made to mutable structures in the session are
+ # always in sync with the session object, which is not true for session
+ # implementations that use external storage for keeping their keys/values.
+ flashes = session.get("_flashes", [])
+ flashes.append((category, message))
+ session["_flashes"] = flashes
+ app = current_app._get_current_object() # type: ignore
+ message_flashed.send(
+ app,
+ _async_wrapper=app.ensure_sync,
+ message=message,
+ category=category,
+ )
+
+
+def get_flashed_messages(
+ with_categories: bool = False, category_filter: t.Iterable[str] = ()
+) -> list[str] | list[tuple[str, str]]:
+ """Pulls all flashed messages from the session and returns them.
+ Further calls in the same request to the function will return
+ the same messages. By default just the messages are returned,
+ but when `with_categories` is set to ``True``, the return value will
+ be a list of tuples in the form ``(category, message)`` instead.
+
+ Filter the flashed messages to one or more categories by providing those
+ categories in `category_filter`. This allows rendering categories in
+    separate HTML blocks. The `with_categories` and `category_filter`
+ arguments are distinct:
+
+    * `with_categories` controls whether categories are returned with message
+      text (``True`` gives a tuple, while ``False`` gives just the message text).
+ * `category_filter` filters the messages down to only those matching the
+ provided categories.
+
+ See :doc:`/patterns/flashing` for examples.
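+
+    For example, pulling only error messages in a view (a minimal
+    sketch; the category name is illustrative):
+
+    .. code-block:: python
+
+        errors = get_flashed_messages(
+            with_categories=True, category_filter=["error"]
+        )
+        # e.g. [("error", "Invalid credentials")]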
+
+ .. versionchanged:: 0.3
+ `with_categories` parameter added.
+
+ .. versionchanged:: 0.9
+ `category_filter` parameter added.
+
+ :param with_categories: set to ``True`` to also receive categories.
+ :param category_filter: filter of categories to limit return values. Only
+ categories in the list will be returned.
+ """
+ flashes = request_ctx.flashes
+ if flashes is None:
+ flashes = session.pop("_flashes") if "_flashes" in session else []
+ request_ctx.flashes = flashes
+ if category_filter:
+ flashes = list(filter(lambda f: f[0] in category_filter, flashes))
+ if not with_categories:
+ return [x[1] for x in flashes]
+ return flashes
+
+
+def _prepare_send_file_kwargs(**kwargs: t.Any) -> dict[str, t.Any]:
+ if kwargs.get("max_age") is None:
+ kwargs["max_age"] = current_app.get_send_file_max_age
+
+ kwargs.update(
+ environ=request.environ,
+ use_x_sendfile=current_app.config["USE_X_SENDFILE"],
+ response_class=current_app.response_class,
+ _root_path=current_app.root_path,
+ )
+ return kwargs
+
+
+def send_file(
+ path_or_file: os.PathLike[t.AnyStr] | str | t.IO[bytes],
+ mimetype: str | None = None,
+ as_attachment: bool = False,
+ download_name: str | None = None,
+ conditional: bool = True,
+ etag: bool | str = True,
+ last_modified: datetime | int | float | None = None,
+ max_age: None | (int | t.Callable[[str | None], int | None]) = None,
+) -> Response:
+ """Send the contents of a file to the client.
+
+ The first argument can be a file path or a file-like object. Paths
+ are preferred in most cases because Werkzeug can manage the file and
+ get extra information from the path. Passing a file-like object
+ requires that the file is opened in binary mode, and is mostly
+ useful when building a file in memory with :class:`io.BytesIO`.
+
+ Never pass file paths provided by a user. The path is assumed to be
+ trusted, so a user could craft a path to access a file you didn't
+ intend. Use :func:`send_from_directory` to safely serve
+ user-requested paths from within a directory.
+
+ If the WSGI server sets a ``file_wrapper`` in ``environ``, it is
+ used, otherwise Werkzeug's built-in wrapper is used. Alternatively,
+ if the HTTP server supports ``X-Sendfile``, configuring Flask with
+ ``USE_X_SENDFILE = True`` will tell the server to send the given
+ path, which is much more efficient than reading it in Python.
+
+ :param path_or_file: The path to the file to send, relative to the
+ current working directory if a relative path is given.
+ Alternatively, a file-like object opened in binary mode. Make
+ sure the file pointer is seeked to the start of the data.
+ :param mimetype: The MIME type to send for the file. If not
+ provided, it will try to detect it from the file name.
+ :param as_attachment: Indicate to a browser that it should offer to
+ save the file instead of displaying it.
+ :param download_name: The default name browsers will use when saving
+ the file. Defaults to the passed file name.
+ :param conditional: Enable conditional and range responses based on
+ request headers. Requires passing a file path and ``environ``.
+ :param etag: Calculate an ETag for the file, which requires passing
+ a file path. Can also be a string to use instead.
+ :param last_modified: The last modified time to send for the file,
+ in seconds. If not provided, it will try to detect it from the
+ file path.
+ :param max_age: How long the client should cache the file, in
+ seconds. If set, ``Cache-Control`` will be ``public``, otherwise
+ it will be ``no-cache`` to prefer conditional caching.
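+
+    For example (a minimal sketch; the file names are illustrative):
+
+    .. code-block:: python
+
+        import io
+
+        @app.get("/report")
+        def report():
+            return send_file("report.pdf", as_attachment=True)
+
+        @app.get("/generated")
+        def generated():
+            buf = io.BytesIO(b"hello")
+            return send_file(
+                buf, mimetype="text/plain", download_name="hello.txt"
+            )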
+
+ .. versionchanged:: 2.0
+ ``download_name`` replaces the ``attachment_filename``
+ parameter. If ``as_attachment=False``, it is passed with
+ ``Content-Disposition: inline`` instead.
+
+ .. versionchanged:: 2.0
+ ``max_age`` replaces the ``cache_timeout`` parameter.
+ ``conditional`` is enabled and ``max_age`` is not set by
+ default.
+
+ .. versionchanged:: 2.0
+ ``etag`` replaces the ``add_etags`` parameter. It can be a
+ string to use instead of generating one.
+
+ .. versionchanged:: 2.0
+ Passing a file-like object that inherits from
+ :class:`~io.TextIOBase` will raise a :exc:`ValueError` rather
+ than sending an empty file.
+
+ .. versionadded:: 2.0
+ Moved the implementation to Werkzeug. This is now a wrapper to
+ pass some Flask-specific arguments.
+
+ .. versionchanged:: 1.1
+ ``filename`` may be a :class:`~os.PathLike` object.
+
+ .. versionchanged:: 1.1
+ Passing a :class:`~io.BytesIO` object supports range requests.
+
+ .. versionchanged:: 1.0.3
+ Filenames are encoded with ASCII instead of Latin-1 for broader
+ compatibility with WSGI servers.
+
+ .. versionchanged:: 1.0
+ UTF-8 filenames as specified in :rfc:`2231` are supported.
+
+ .. versionchanged:: 0.12
+ The filename is no longer automatically inferred from file
+ objects. If you want to use automatic MIME and etag support,
+ pass a filename via ``filename_or_fp`` or
+ ``attachment_filename``.
+
+ .. versionchanged:: 0.12
+ ``attachment_filename`` is preferred over ``filename`` for MIME
+ detection.
+
+ .. versionchanged:: 0.9
+ ``cache_timeout`` defaults to
+ :meth:`Flask.get_send_file_max_age`.
+
+ .. versionchanged:: 0.7
+ MIME guessing and etag support for file-like objects was
+ removed because it was unreliable. Pass a filename if you are
+ able to, otherwise attach an etag yourself.
+
+ .. versionchanged:: 0.5
+ The ``add_etags``, ``cache_timeout`` and ``conditional``
+ parameters were added. The default behavior is to add etags.
+
+ .. versionadded:: 0.2
+ """
+ return werkzeug.utils.send_file( # type: ignore[return-value]
+ **_prepare_send_file_kwargs(
+ path_or_file=path_or_file,
+ environ=request.environ,
+ mimetype=mimetype,
+ as_attachment=as_attachment,
+ download_name=download_name,
+ conditional=conditional,
+ etag=etag,
+ last_modified=last_modified,
+ max_age=max_age,
+ )
+ )
+
+
+def send_from_directory(
+ directory: os.PathLike[str] | str,
+ path: os.PathLike[str] | str,
+ **kwargs: t.Any,
+) -> Response:
+ """Send a file from within a directory using :func:`send_file`.
+
+ .. code-block:: python
+
+        @app.route("/uploads/<path:name>")
+ def download_file(name):
+ return send_from_directory(
+ app.config['UPLOAD_FOLDER'], name, as_attachment=True
+ )
+
+ This is a secure way to serve files from a folder, such as static
+ files or uploads. Uses :func:`~werkzeug.security.safe_join` to
+ ensure the path coming from the client is not maliciously crafted to
+ point outside the specified directory.
+
+ If the final path does not point to an existing regular file,
+ raises a 404 :exc:`~werkzeug.exceptions.NotFound` error.
+
+ :param directory: The directory that ``path`` must be located under,
+ relative to the current application's root path. This *must not*
+ be a value provided by the client, otherwise it becomes insecure.
+ :param path: The path to the file to send, relative to
+ ``directory``.
+ :param kwargs: Arguments to pass to :func:`send_file`.
+
+ .. versionchanged:: 2.0
+ ``path`` replaces the ``filename`` parameter.
+
+ .. versionadded:: 2.0
+ Moved the implementation to Werkzeug. This is now a wrapper to
+ pass some Flask-specific arguments.
+
+ .. versionadded:: 0.5
+ """
+ return werkzeug.utils.send_from_directory( # type: ignore[return-value]
+ directory, path, **_prepare_send_file_kwargs(**kwargs)
+ )
+
+
+def get_root_path(import_name: str) -> str:
+ """Find the root path of a package, or the path that contains a
+ module. If it cannot be found, returns the current working
+ directory.
+
+ Not to be confused with the value returned by :func:`find_package`.
+
+ :meta private:
+ """
+ # Module already imported and has a file attribute. Use that first.
+ mod = sys.modules.get(import_name)
+
+ if mod is not None and hasattr(mod, "__file__") and mod.__file__ is not None:
+ return os.path.dirname(os.path.abspath(mod.__file__))
+
+ # Next attempt: check the loader.
+ try:
+ spec = importlib.util.find_spec(import_name)
+
+ if spec is None:
+ raise ValueError
+ except (ImportError, ValueError):
+ loader = None
+ else:
+ loader = spec.loader
+
+ # Loader does not exist or we're referring to an unloaded main
+ # module or a main module without path (interactive sessions), go
+ # with the current working directory.
+ if loader is None:
+ return os.getcwd()
+
+ if hasattr(loader, "get_filename"):
+ filepath = loader.get_filename(import_name) # pyright: ignore
+ else:
+ # Fall back to imports.
+ __import__(import_name)
+ mod = sys.modules[import_name]
+ filepath = getattr(mod, "__file__", None)
+
+ # If we don't have a file path it might be because it is a
+ # namespace package. In this case pick the root path from the
+ # first module that is contained in the package.
+ if filepath is None:
+ raise RuntimeError(
+ "No root path can be found for the provided module"
+ f" {import_name!r}. This can happen because the module"
+ " came from an import hook that does not provide file"
+ " name information or because it's a namespace package."
+ " In this case the root path needs to be explicitly"
+ " provided."
+ )
+
+ # filepath is import_name.py for a module, or __init__.py for a package.
+ return os.path.dirname(os.path.abspath(filepath)) # type: ignore[no-any-return]
+
+
+@cache
+def _split_blueprint_path(name: str) -> list[str]:
+ out: list[str] = [name]
+
+ if "." in name:
+ out.extend(_split_blueprint_path(name.rpartition(".")[0]))
+
+ return out
diff --git a/tapdown/lib/python3.11/site-packages/flask/json/__init__.py b/tapdown/lib/python3.11/site-packages/flask/json/__init__.py
new file mode 100644
index 0000000..c0941d0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/json/__init__.py
@@ -0,0 +1,170 @@
+from __future__ import annotations
+
+import json as _json
+import typing as t
+
+from ..globals import current_app
+from .provider import _default
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from ..wrappers import Response
+
+
+def dumps(obj: t.Any, **kwargs: t.Any) -> str:
+ """Serialize data as JSON.
+
+    If :data:`~flask.current_app` is available, it will use its
+    :meth:`app.json.dumps() <flask.json.provider.JSONProvider.dumps>`
+    method, otherwise it will use :func:`json.dumps`.
+
+ :param obj: The data to serialize.
+ :param kwargs: Arguments passed to the ``dumps`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.dumps``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0.2
+ :class:`decimal.Decimal` is supported by converting to a string.
+
+ .. versionchanged:: 2.0
+ ``encoding`` will be removed in Flask 2.1.
+
+ .. versionchanged:: 1.0.3
+ ``app`` can be passed directly, rather than requiring an app
+ context for configuration.
+ """
+ if current_app:
+ return current_app.json.dumps(obj, **kwargs)
+
+ kwargs.setdefault("default", _default)
+ return _json.dumps(obj, **kwargs)
+
+
+def dump(obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
+ """Serialize data as JSON and write to a file.
+
+    If :data:`~flask.current_app` is available, it will use its
+    :meth:`app.json.dump() <flask.json.provider.JSONProvider.dump>`
+    method, otherwise it will use :func:`json.dump`.
+
+ :param obj: The data to serialize.
+ :param fp: A file opened for writing text. Should use the UTF-8
+ encoding to be valid JSON.
+ :param kwargs: Arguments passed to the ``dump`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.dump``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0
+ Writing to a binary file, and the ``encoding`` argument, will be
+ removed in Flask 2.1.
+ """
+ if current_app:
+ current_app.json.dump(obj, fp, **kwargs)
+ else:
+ kwargs.setdefault("default", _default)
+ _json.dump(obj, fp, **kwargs)
+
+
+def loads(s: str | bytes, **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON.
+
+    If :data:`~flask.current_app` is available, it will use its
+    :meth:`app.json.loads() <flask.json.provider.JSONProvider.loads>`
+    method, otherwise it will use :func:`json.loads`.
+
+ :param s: Text or UTF-8 bytes.
+ :param kwargs: Arguments passed to the ``loads`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.loads``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0
+ ``encoding`` will be removed in Flask 2.1. The data must be a
+ string or UTF-8 bytes.
+
+ .. versionchanged:: 1.0.3
+ ``app`` can be passed directly, rather than requiring an app
+ context for configuration.
+ """
+ if current_app:
+ return current_app.json.loads(s, **kwargs)
+
+ return _json.loads(s, **kwargs)
+
+
+def load(fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON read from a file.
+
+    If :data:`~flask.current_app` is available, it will use its
+    :meth:`app.json.load() <flask.json.provider.JSONProvider.load>`
+    method, otherwise it will use :func:`json.load`.
+
+ :param fp: A file opened for reading text or UTF-8 bytes.
+ :param kwargs: Arguments passed to the ``load`` implementation.
+
+ .. versionchanged:: 2.3
+ The ``app`` parameter was removed.
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.load``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.2
+ The ``app`` parameter will be removed in Flask 2.3.
+
+ .. versionchanged:: 2.0
+ ``encoding`` will be removed in Flask 2.1. The file must be text
+ mode, or binary mode with UTF-8 bytes.
+ """
+ if current_app:
+ return current_app.json.load(fp, **kwargs)
+
+ return _json.load(fp, **kwargs)
+
+
+def jsonify(*args: t.Any, **kwargs: t.Any) -> Response:
+ """Serialize the given arguments as JSON, and return a
+ :class:`~flask.Response` object with the ``application/json``
+ mimetype. A dict or list returned from a view will be converted to a
+ JSON response automatically without needing to call this.
+
+    This requires an active request or application context, and calls
+    :meth:`app.json.response() <flask.json.provider.JSONProvider.response>`.
+
+ In debug mode, the output is formatted with indentation to make it
+ easier to read. This may also be controlled by the provider.
+
+ Either positional or keyword arguments can be given, not both.
+ If no arguments are given, ``None`` is serialized.
+
+ :param args: A single value to serialize, or multiple values to
+ treat as a list to serialize.
+ :param kwargs: Treat as a dict to serialize.
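+
+    For example (a minimal sketch with illustrative values):
+
+    .. code-block:: python
+
+        @app.get("/user")
+        def user():
+            return jsonify(id=1, name="alice")
+        # body: {"id": 1, "name": "alice"}
+        # header: Content-Type: application/json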
+
+ .. versionchanged:: 2.2
+ Calls ``current_app.json.response``, allowing an app to override
+ the behavior.
+
+ .. versionchanged:: 2.0.2
+ :class:`decimal.Decimal` is supported by converting to a string.
+
+ .. versionchanged:: 0.11
+ Added support for serializing top-level arrays. This was a
+ security risk in ancient browsers. See :ref:`security-json`.
+
+ .. versionadded:: 0.2
+ """
+ return current_app.json.response(*args, **kwargs) # type: ignore[return-value]
diff --git a/tapdown/lib/python3.11/site-packages/flask/json/provider.py b/tapdown/lib/python3.11/site-packages/flask/json/provider.py
new file mode 100644
index 0000000..ea7e475
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/json/provider.py
@@ -0,0 +1,215 @@
+from __future__ import annotations
+
+import dataclasses
+import decimal
+import json
+import typing as t
+import uuid
+import weakref
+from datetime import date
+
+from werkzeug.http import http_date
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from werkzeug.sansio.response import Response
+
+ from ..sansio.app import App
+
+
+class JSONProvider:
+ """A standard set of JSON operations for an application. Subclasses
+ of this can be used to customize JSON behavior or use different
+ JSON libraries.
+
+ To implement a provider for a specific library, subclass this base
+ class and implement at least :meth:`dumps` and :meth:`loads`. All
+ other methods have default implementations.
+
+    To use a different provider, either subclass ``Flask`` and set
+    :attr:`~flask.Flask.json_provider_class` to a provider class, or set
+    :attr:`app.json <flask.Flask.json>` to an instance of the class.
+
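+    A minimal sketch of a provider that delegates to the standard
+    :mod:`json` module (``StdlibJSONProvider`` and ``MyFlask`` are
+    illustrative names):
+
+    .. code-block:: python
+
+        import json
+
+        from flask import Flask
+        from flask.json.provider import JSONProvider
+
+        class StdlibJSONProvider(JSONProvider):
+            def dumps(self, obj, **kwargs):
+                return json.dumps(obj, **kwargs)
+
+            def loads(self, s, **kwargs):
+                return json.loads(s, **kwargs)
+
+        class MyFlask(Flask):
+            json_provider_class = StdlibJSONProvider
+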
+ :param app: An application instance. This will be stored as a
+ :class:`weakref.proxy` on the :attr:`_app` attribute.
+
+ .. versionadded:: 2.2
+ """
+
+ def __init__(self, app: App) -> None:
+ self._app: App = weakref.proxy(app)
+
+ def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
+ """Serialize data as JSON.
+
+ :param obj: The data to serialize.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
+ raise NotImplementedError
+
+ def dump(self, obj: t.Any, fp: t.IO[str], **kwargs: t.Any) -> None:
+ """Serialize data as JSON and write to a file.
+
+ :param obj: The data to serialize.
+ :param fp: A file opened for writing text. Should use the UTF-8
+ encoding to be valid JSON.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
+ fp.write(self.dumps(obj, **kwargs))
+
+ def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON.
+
+ :param s: Text or UTF-8 bytes.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
+ raise NotImplementedError
+
+ def load(self, fp: t.IO[t.AnyStr], **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON read from a file.
+
+ :param fp: A file opened for reading text or UTF-8 bytes.
+ :param kwargs: May be passed to the underlying JSON library.
+ """
+ return self.loads(fp.read(), **kwargs)
+
+ def _prepare_response_obj(
+ self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any]
+ ) -> t.Any:
+ if args and kwargs:
+ raise TypeError("app.json.response() takes either args or kwargs, not both")
+
+ if not args and not kwargs:
+ return None
+
+ if len(args) == 1:
+ return args[0]
+
+ return args or kwargs
+
+ def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
+ """Serialize the given arguments as JSON, and return a
+ :class:`~flask.Response` object with the ``application/json``
+ mimetype.
+
+ The :func:`~flask.json.jsonify` function calls this method for
+ the current application.
+
+ Either positional or keyword arguments can be given, not both.
+ If no arguments are given, ``None`` is serialized.
+
+ :param args: A single value to serialize, or multiple values to
+ treat as a list to serialize.
+ :param kwargs: Treat as a dict to serialize.
+ """
+ obj = self._prepare_response_obj(args, kwargs)
+ return self._app.response_class(self.dumps(obj), mimetype="application/json")
+
+
+def _default(o: t.Any) -> t.Any:
+ if isinstance(o, date):
+ return http_date(o)
+
+ if isinstance(o, (decimal.Decimal, uuid.UUID)):
+ return str(o)
+
+ if dataclasses and dataclasses.is_dataclass(o):
+ return dataclasses.asdict(o) # type: ignore[arg-type]
+
+ if hasattr(o, "__html__"):
+ return str(o.__html__())
+
+ raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
+
+
+class DefaultJSONProvider(JSONProvider):
+ """Provide JSON operations using Python's built-in :mod:`json`
+ library. Serializes the following additional data types:
+
+ - :class:`datetime.datetime` and :class:`datetime.date` are
+ serialized to :rfc:`822` strings. This is the same as the HTTP
+ date format.
+ - :class:`uuid.UUID` is serialized to a string.
+ - :class:`dataclasses.dataclass` is passed to
+ :func:`dataclasses.asdict`.
+ - :class:`~markupsafe.Markup` (or any object with a ``__html__``
+ method) will call the ``__html__`` method to get a string.
+ """
+
+    default: t.Callable[[t.Any], t.Any] = staticmethod(_default)  # type: ignore[assignment]
+    """Apply this function to any object that :func:`json.dumps` does
+ not know how to serialize. It should return a valid JSON type or
+ raise a ``TypeError``.
+ """
+
+ ensure_ascii = True
+ """Replace non-ASCII characters with escape sequences. This may be
+ more compatible with some clients, but can be disabled for better
+ performance and size.
+ """
+
+ sort_keys = True
+ """Sort the keys in any serialized dicts. This may be useful for
+ some caching situations, but can be disabled for better performance.
+    When enabled, keys must all be strings; they are not converted
+ before sorting.
+ """
+
+ compact: bool | None = None
+ """If ``True``, or ``None`` out of debug mode, the :meth:`response`
+ output will not add indentation, newlines, or spaces. If ``False``,
+ or ``None`` in debug mode, it will use a non-compact representation.
+ """
+
+ mimetype = "application/json"
+ """The mimetype set in :meth:`response`."""
+
+ def dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
+ """Serialize data as JSON to a string.
+
+ Keyword arguments are passed to :func:`json.dumps`. Sets some
+ parameter defaults from the :attr:`default`,
+ :attr:`ensure_ascii`, and :attr:`sort_keys` attributes.
+
+ :param obj: The data to serialize.
+ :param kwargs: Passed to :func:`json.dumps`.
+ """
+ kwargs.setdefault("default", self.default)
+ kwargs.setdefault("ensure_ascii", self.ensure_ascii)
+ kwargs.setdefault("sort_keys", self.sort_keys)
+ return json.dumps(obj, **kwargs)
+
+ def loads(self, s: str | bytes, **kwargs: t.Any) -> t.Any:
+ """Deserialize data as JSON from a string or bytes.
+
+ :param s: Text or UTF-8 bytes.
+ :param kwargs: Passed to :func:`json.loads`.
+ """
+ return json.loads(s, **kwargs)
+
+ def response(self, *args: t.Any, **kwargs: t.Any) -> Response:
+ """Serialize the given arguments as JSON, and return a
+ :class:`~flask.Response` object with it. The response mimetype
+ will be "application/json" and can be changed with
+ :attr:`mimetype`.
+
+ If :attr:`compact` is ``False`` or debug mode is enabled, the
+ output will be formatted to be easier to read.
+
+ Either positional or keyword arguments can be given, not both.
+ If no arguments are given, ``None`` is serialized.
+
+ :param args: A single value to serialize, or multiple values to
+ treat as a list to serialize.
+ :param kwargs: Treat as a dict to serialize.
+ """
+ obj = self._prepare_response_obj(args, kwargs)
+ dump_args: dict[str, t.Any] = {}
+
+ if (self.compact is None and self._app.debug) or self.compact is False:
+ dump_args.setdefault("indent", 2)
+ else:
+ dump_args.setdefault("separators", (",", ":"))
+
+ return self._app.response_class(
+ f"{self.dumps(obj, **dump_args)}\n", mimetype=self.mimetype
+ )
diff --git a/tapdown/lib/python3.11/site-packages/flask/json/tag.py b/tapdown/lib/python3.11/site-packages/flask/json/tag.py
new file mode 100644
index 0000000..8dc3629
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/json/tag.py
@@ -0,0 +1,327 @@
+"""
+Tagged JSON
+~~~~~~~~~~~
+
+A compact representation for lossless serialization of non-standard JSON
+types. :class:`~flask.sessions.SecureCookieSessionInterface` uses this
+to serialize the session data, but it may be useful in other places. It
+can be extended to support other types.
+
+.. autoclass:: TaggedJSONSerializer
+ :members:
+
+.. autoclass:: JSONTag
+ :members:
+
+Let's see an example that adds support for
+:class:`~collections.OrderedDict`. Dicts don't have an order in JSON, so
+to handle this we will dump the items as a list of ``[key, value]``
+pairs. Subclass :class:`JSONTag` and give it the new key ``' od'`` to
+identify the type. The session serializer processes dicts first, so
+insert the new tag at the front of the order since ``OrderedDict`` must
+be processed before ``dict``.
+
+.. code-block:: python
+
+    from collections import OrderedDict
+
+    from flask.json.tag import JSONTag
+
+ class TagOrderedDict(JSONTag):
+ __slots__ = ('serializer',)
+ key = ' od'
+
+ def check(self, value):
+ return isinstance(value, OrderedDict)
+
+ def to_json(self, value):
+            return [[k, self.serializer.tag(v)] for k, v in value.items()]
+
+ def to_python(self, value):
+ return OrderedDict(value)
+
+ app.session_interface.serializer.register(TagOrderedDict, index=0)
+"""
+
+from __future__ import annotations
+
+import typing as t
+from base64 import b64decode
+from base64 import b64encode
+from datetime import datetime
+from uuid import UUID
+
+from markupsafe import Markup
+from werkzeug.http import http_date
+from werkzeug.http import parse_date
+
+from ..json import dumps
+from ..json import loads
+
+
+class JSONTag:
+ """Base class for defining type tags for :class:`TaggedJSONSerializer`."""
+
+ __slots__ = ("serializer",)
+
+ #: The tag to mark the serialized object with. If empty, this tag is
+ #: only used as an intermediate step during tagging.
+ key: str = ""
+
+ def __init__(self, serializer: TaggedJSONSerializer) -> None:
+ """Create a tagger for the given serializer."""
+ self.serializer = serializer
+
+ def check(self, value: t.Any) -> bool:
+ """Check if the given value should be tagged by this tag."""
+ raise NotImplementedError
+
+ def to_json(self, value: t.Any) -> t.Any:
+ """Convert the Python object to an object that is a valid JSON type.
+ The tag will be added later."""
+ raise NotImplementedError
+
+ def to_python(self, value: t.Any) -> t.Any:
+ """Convert the JSON representation back to the correct type. The tag
+ will already be removed."""
+ raise NotImplementedError
+
+ def tag(self, value: t.Any) -> dict[str, t.Any]:
+ """Convert the value to a valid JSON type and add the tag structure
+ around it."""
+ return {self.key: self.to_json(value)}
+
+
+class TagDict(JSONTag):
+ """Tag for 1-item dicts whose only key matches a registered tag.
+
+ Internally, the dict key is suffixed with `__`, and the suffix is removed
+ when deserializing.
+ """
+
+ __slots__ = ()
+ key = " di"
+
+ def check(self, value: t.Any) -> bool:
+ return (
+ isinstance(value, dict)
+ and len(value) == 1
+ and next(iter(value)) in self.serializer.tags
+ )
+
+ def to_json(self, value: t.Any) -> t.Any:
+ key = next(iter(value))
+ return {f"{key}__": self.serializer.tag(value[key])}
+
+ def to_python(self, value: t.Any) -> t.Any:
+ key = next(iter(value))
+ return {key[:-2]: value[key]}
+
+
+class PassDict(JSONTag):
+ __slots__ = ()
+
+ def check(self, value: t.Any) -> bool:
+ return isinstance(value, dict)
+
+ def to_json(self, value: t.Any) -> t.Any:
+ # JSON objects may only have string keys, so don't bother tagging the
+ # key here.
+ return {k: self.serializer.tag(v) for k, v in value.items()}
+
+ tag = to_json
+
+
+class TagTuple(JSONTag):
+ __slots__ = ()
+ key = " t"
+
+ def check(self, value: t.Any) -> bool:
+ return isinstance(value, tuple)
+
+ def to_json(self, value: t.Any) -> t.Any:
+ return [self.serializer.tag(item) for item in value]
+
+ def to_python(self, value: t.Any) -> t.Any:
+ return tuple(value)
+
+
+class PassList(JSONTag):
+ __slots__ = ()
+
+ def check(self, value: t.Any) -> bool:
+ return isinstance(value, list)
+
+ def to_json(self, value: t.Any) -> t.Any:
+ return [self.serializer.tag(item) for item in value]
+
+ tag = to_json
+
+
+class TagBytes(JSONTag):
+ __slots__ = ()
+ key = " b"
+
+ def check(self, value: t.Any) -> bool:
+ return isinstance(value, bytes)
+
+ def to_json(self, value: t.Any) -> t.Any:
+ return b64encode(value).decode("ascii")
+
+ def to_python(self, value: t.Any) -> t.Any:
+ return b64decode(value)
+
+
+class TagMarkup(JSONTag):
+    """Serialize anything matching the :class:`~markupsafe.Markup` API,
+    that is, anything with a ``__html__`` method, to the result of
+    calling that method. Always deserializes to an instance of
+    :class:`~markupsafe.Markup`."""
+
+ __slots__ = ()
+ key = " m"
+
+ def check(self, value: t.Any) -> bool:
+ return callable(getattr(value, "__html__", None))
+
+ def to_json(self, value: t.Any) -> t.Any:
+ return str(value.__html__())
+
+ def to_python(self, value: t.Any) -> t.Any:
+ return Markup(value)
+
+
+class TagUUID(JSONTag):
+ __slots__ = ()
+ key = " u"
+
+ def check(self, value: t.Any) -> bool:
+ return isinstance(value, UUID)
+
+ def to_json(self, value: t.Any) -> t.Any:
+ return value.hex
+
+ def to_python(self, value: t.Any) -> t.Any:
+ return UUID(value)
+
+
+class TagDateTime(JSONTag):
+ __slots__ = ()
+ key = " d"
+
+ def check(self, value: t.Any) -> bool:
+ return isinstance(value, datetime)
+
+ def to_json(self, value: t.Any) -> t.Any:
+ return http_date(value)
+
+ def to_python(self, value: t.Any) -> t.Any:
+ return parse_date(value)
+
+
+class TaggedJSONSerializer:
+ """Serializer that uses a tag system to compactly represent objects that
+ are not JSON types. Passed as the intermediate serializer to
+ :class:`itsdangerous.Serializer`.
+
+ The following extra types are supported:
+
+ * :class:`dict`
+ * :class:`tuple`
+ * :class:`bytes`
+ * :class:`~markupsafe.Markup`
+ * :class:`~uuid.UUID`
+ * :class:`~datetime.datetime`
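+
+    A quick round trip (a minimal sketch; values are illustrative):
+
+    .. code-block:: python
+
+        s = TaggedJSONSerializer()
+        s.loads(s.dumps({"pair": (1, 2), "blob": b"\x00"}))
+        # -> {"pair": (1, 2), "blob": b"\x00"}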
+ """
+
+ __slots__ = ("tags", "order")
+
+ #: Tag classes to bind when creating the serializer. Other tags can be
+ #: added later using :meth:`~register`.
+ default_tags = [
+ TagDict,
+ PassDict,
+ TagTuple,
+ PassList,
+ TagBytes,
+ TagMarkup,
+ TagUUID,
+ TagDateTime,
+ ]
+
+ def __init__(self) -> None:
+ self.tags: dict[str, JSONTag] = {}
+ self.order: list[JSONTag] = []
+
+ for cls in self.default_tags:
+ self.register(cls)
+
+ def register(
+ self,
+ tag_class: type[JSONTag],
+ force: bool = False,
+ index: int | None = None,
+ ) -> None:
+ """Register a new tag with this serializer.
+
+ :param tag_class: tag class to register. Will be instantiated with this
+ serializer instance.
+ :param force: overwrite an existing tag. If false (default), a
+ :exc:`KeyError` is raised.
+ :param index: index to insert the new tag in the tag order. Useful when
+ the new tag is a special case of an existing tag. If ``None``
+ (default), the tag is appended to the end of the order.
+
+ :raise KeyError: if the tag key is already registered and ``force`` is
+ not true.
+ """
+ tag = tag_class(self)
+ key = tag.key
+
+ if key:
+ if not force and key in self.tags:
+ raise KeyError(f"Tag '{key}' is already registered.")
+
+ self.tags[key] = tag
+
+ if index is None:
+ self.order.append(tag)
+ else:
+ self.order.insert(index, tag)
+
+ def tag(self, value: t.Any) -> t.Any:
+ """Convert a value to a tagged representation if necessary."""
+ for tag in self.order:
+ if tag.check(value):
+ return tag.tag(value)
+
+ return value
+
+ def untag(self, value: dict[str, t.Any]) -> t.Any:
+ """Convert a tagged representation back to the original type."""
+ if len(value) != 1:
+ return value
+
+ key = next(iter(value))
+
+ if key not in self.tags:
+ return value
+
+ return self.tags[key].to_python(value[key])
+
+ def _untag_scan(self, value: t.Any) -> t.Any:
+ if isinstance(value, dict):
+ # untag each item recursively
+ value = {k: self._untag_scan(v) for k, v in value.items()}
+ # untag the dict itself
+ value = self.untag(value)
+ elif isinstance(value, list):
+ # untag each item recursively
+ value = [self._untag_scan(item) for item in value]
+
+ return value
+
+ def dumps(self, value: t.Any) -> str:
+ """Tag the value and dump it to a compact JSON string."""
+ return dumps(self.tag(value), separators=(",", ":"))
+
+    def loads(self, value: str) -> t.Any:
+        """Load data from a JSON string and deserialize any tagged objects."""
+ return self._untag_scan(loads(value))
diff --git a/tapdown/lib/python3.11/site-packages/flask/logging.py b/tapdown/lib/python3.11/site-packages/flask/logging.py
new file mode 100644
index 0000000..0cb8f43
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/logging.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import logging
+import sys
+import typing as t
+
+from werkzeug.local import LocalProxy
+
+from .globals import request
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from .sansio.app import App
+
+
+@LocalProxy
+def wsgi_errors_stream() -> t.TextIO:
+ """Find the most appropriate error stream for the application. If a request
+ is active, log to ``wsgi.errors``, otherwise use ``sys.stderr``.
+
+ If you configure your own :class:`logging.StreamHandler`, you may want to
+ use this for the stream. If you are using file or dict configuration and
+ can't import this directly, you can refer to it as
+ ``ext://flask.logging.wsgi_errors_stream``.
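+
+    For example, with :func:`logging.config.dictConfig` (a minimal
+    sketch):
+
+    .. code-block:: python
+
+        from logging.config import dictConfig
+
+        dictConfig({
+            "version": 1,
+            "handlers": {
+                "wsgi": {
+                    "class": "logging.StreamHandler",
+                    "stream": "ext://flask.logging.wsgi_errors_stream",
+                }
+            },
+            "root": {"level": "INFO", "handlers": ["wsgi"]},
+        })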
+ """
+ if request:
+ return request.environ["wsgi.errors"] # type: ignore[no-any-return]
+
+ return sys.stderr
+
+
+def has_level_handler(logger: logging.Logger) -> bool:
+ """Check if there is a handler in the logging chain that will handle the
+ given logger's :meth:`effective level <~logging.Logger.getEffectiveLevel>`.
+ """
+ level = logger.getEffectiveLevel()
+ current = logger
+
+ while current:
+ if any(handler.level <= level for handler in current.handlers):
+ return True
+
+ if not current.propagate:
+ break
+
+ current = current.parent # type: ignore
+
+ return False
+
+
+#: Log messages to :func:`~flask.logging.wsgi_errors_stream` with the format
+#: ``[%(asctime)s] %(levelname)s in %(module)s: %(message)s``.
+default_handler = logging.StreamHandler(wsgi_errors_stream) # type: ignore
+default_handler.setFormatter(
+ logging.Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s")
+)
+
+
+def create_logger(app: App) -> logging.Logger:
+ """Get the Flask app's logger and configure it if needed.
+
+    The logger name will be the same as
+    :attr:`app.import_name <flask.Flask.import_name>`.
+
+ When :attr:`~flask.Flask.debug` is enabled, set the logger level to
+ :data:`logging.DEBUG` if it is not set.
+
+ If there is no handler for the logger's effective level, add a
+ :class:`~logging.StreamHandler` for
+ :func:`~flask.logging.wsgi_errors_stream` with a basic format.
+ """
+ logger = logging.getLogger(app.name)
+
+ if app.debug and not logger.level:
+ logger.setLevel(logging.DEBUG)
+
+ if not has_level_handler(logger):
+ logger.addHandler(default_handler)
+
+ return logger
diff --git a/tapdown/lib/python3.11/site-packages/flask/py.typed b/tapdown/lib/python3.11/site-packages/flask/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/flask/sansio/README.md b/tapdown/lib/python3.11/site-packages/flask/sansio/README.md
new file mode 100644
index 0000000..623ac19
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/sansio/README.md
@@ -0,0 +1,6 @@
+# Sansio
+
+This folder contains code that can be used by alternative Flask
+implementations, for example Quart. The code therefore cannot do any
+IO, nor be part of a likely IO path. Finally, this code cannot use the
+Flask globals.
diff --git a/tapdown/lib/python3.11/site-packages/flask/sansio/app.py b/tapdown/lib/python3.11/site-packages/flask/sansio/app.py
new file mode 100644
index 0000000..a2592fe
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/sansio/app.py
@@ -0,0 +1,964 @@
+from __future__ import annotations
+
+import logging
+import os
+import sys
+import typing as t
+from datetime import timedelta
+from itertools import chain
+
+from werkzeug.exceptions import Aborter
+from werkzeug.exceptions import BadRequest
+from werkzeug.exceptions import BadRequestKeyError
+from werkzeug.routing import BuildError
+from werkzeug.routing import Map
+from werkzeug.routing import Rule
+from werkzeug.sansio.response import Response
+from werkzeug.utils import cached_property
+from werkzeug.utils import redirect as _wz_redirect
+
+from .. import typing as ft
+from ..config import Config
+from ..config import ConfigAttribute
+from ..ctx import _AppCtxGlobals
+from ..helpers import _split_blueprint_path
+from ..helpers import get_debug_flag
+from ..json.provider import DefaultJSONProvider
+from ..json.provider import JSONProvider
+from ..logging import create_logger
+from ..templating import DispatchingJinjaLoader
+from ..templating import Environment
+from .scaffold import _endpoint_from_view_func
+from .scaffold import find_package
+from .scaffold import Scaffold
+from .scaffold import setupmethod
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from werkzeug.wrappers import Response as BaseResponse
+
+ from ..testing import FlaskClient
+ from ..testing import FlaskCliRunner
+ from .blueprints import Blueprint
+
+T_shell_context_processor = t.TypeVar(
+ "T_shell_context_processor", bound=ft.ShellContextProcessorCallable
+)
+T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
+T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable)
+T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable)
+T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable)
+
+
+def _make_timedelta(value: timedelta | int | None) -> timedelta | None:
+ if value is None or isinstance(value, timedelta):
+ return value
+
+ return timedelta(seconds=value)
+
+
+class App(Scaffold):
+ """The flask object implements a WSGI application and acts as the central
+ object. It is passed the name of the module or package of the
+ application. Once it is created it will act as a central registry for
+ the view functions, the URL rules, template configuration and much more.
+
+ The name of the package is used to resolve resources from inside the
+    package or the folder the module is contained in, depending on whether
+    the package parameter resolves to an actual Python package (a folder with
+ an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file).
+
+ For more information about resource loading, see :func:`open_resource`.
+
+ Usually you create a :class:`Flask` instance in your main module or
+ in the :file:`__init__.py` file of your package like this::
+
+ from flask import Flask
+ app = Flask(__name__)
+
+ .. admonition:: About the First Parameter
+
+ The idea of the first parameter is to give Flask an idea of what
+ belongs to your application. This name is used to find resources
+ on the filesystem, can be used by extensions to improve debugging
+ information and a lot more.
+
+ So it's important what you provide there. If you are using a single
+        module, `__name__` is always the correct value. If, however, you are
+ using a package, it's usually recommended to hardcode the name of
+ your package there.
+
+ For example if your application is defined in :file:`yourapplication/app.py`
+ you should create it with one of the two versions below::
+
+ app = Flask('yourapplication')
+ app = Flask(__name__.split('.')[0])
+
+ Why is that? The application will work even with `__name__`, thanks
+ to how resources are looked up. However it will make debugging more
+ painful. Certain extensions can make assumptions based on the
+ import name of your application. For example the Flask-SQLAlchemy
+ extension will look for the code in your application that triggered
+ an SQL query in debug mode. If the import name is not properly set
+ up, that debugging information is lost. (For example it would only
+ pick up SQL queries in `yourapplication.app` and not
+ `yourapplication.views.frontend`)
+
+ .. versionadded:: 0.7
+ The `static_url_path`, `static_folder`, and `template_folder`
+ parameters were added.
+
+ .. versionadded:: 0.8
+ The `instance_path` and `instance_relative_config` parameters were
+ added.
+
+ .. versionadded:: 0.11
+ The `root_path` parameter was added.
+
+ .. versionadded:: 1.0
+ The ``host_matching`` and ``static_host`` parameters were added.
+
+ .. versionadded:: 1.0
+ The ``subdomain_matching`` parameter was added. Subdomain
+ matching needs to be enabled manually now. Setting
+ :data:`SERVER_NAME` does not implicitly enable it.
+
+ :param import_name: the name of the application package
+ :param static_url_path: can be used to specify a different path for the
+ static files on the web. Defaults to the name
+ of the `static_folder` folder.
+ :param static_folder: The folder with static files that is served at
+ ``static_url_path``. Relative to the application ``root_path``
+ or an absolute path. Defaults to ``'static'``.
+ :param static_host: the host to use when adding the static route.
+ Defaults to None. Required when using ``host_matching=True``
+ with a ``static_folder`` configured.
+ :param host_matching: set ``url_map.host_matching`` attribute.
+ Defaults to False.
+ :param subdomain_matching: consider the subdomain relative to
+ :data:`SERVER_NAME` when matching routes. Defaults to False.
+ :param template_folder: the folder that contains the templates that should
+ be used by the application. Defaults to
+ ``'templates'`` folder in the root path of the
+ application.
+ :param instance_path: An alternative instance path for the application.
+ By default the folder ``'instance'`` next to the
+ package or module is assumed to be the instance
+ path.
+ :param instance_relative_config: if set to ``True`` relative filenames
+ for loading the config are assumed to
+ be relative to the instance path instead
+ of the application root.
+ :param root_path: The path to the root of the application files.
+ This should only be set manually when it can't be detected
+ automatically, such as for namespace packages.
+ """
+
+ #: The class of the object assigned to :attr:`aborter`, created by
+ #: :meth:`create_aborter`. That object is called by
+ #: :func:`flask.abort` to raise HTTP errors, and can be
+ #: called directly as well.
+ #:
+ #: Defaults to :class:`werkzeug.exceptions.Aborter`.
+ #:
+ #: .. versionadded:: 2.2
+ aborter_class = Aborter
+
+ #: The class that is used for the Jinja environment.
+ #:
+ #: .. versionadded:: 0.11
+ jinja_environment = Environment
+
+ #: The class that is used for the :data:`~flask.g` instance.
+ #:
+ #: Example use cases for a custom class:
+ #:
+ #: 1. Store arbitrary attributes on flask.g.
+ #: 2. Add a property for lazy per-request database connectors.
+ #: 3. Return None instead of AttributeError on unexpected attributes.
+    #: 4. Raise an exception if an unexpected attr is set, a "controlled" flask.g.
+ #:
+ #: In Flask 0.9 this property was called `request_globals_class` but it
+ #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the
+ #: flask.g object is now application context scoped.
+ #:
+ #: .. versionadded:: 0.10
+ app_ctx_globals_class = _AppCtxGlobals
+
+ #: The class that is used for the ``config`` attribute of this app.
+ #: Defaults to :class:`~flask.Config`.
+ #:
+ #: Example use cases for a custom class:
+ #:
+ #: 1. Default values for certain config options.
+ #: 2. Access to config values through attributes in addition to keys.
+ #:
+ #: .. versionadded:: 0.11
+ config_class = Config
+
+ #: The testing flag. Set this to ``True`` to enable the test mode of
+ #: Flask extensions (and in the future probably also Flask itself).
+ #: For example this might activate test helpers that have an
+ #: additional runtime cost which should not be enabled by default.
+ #:
+    #: If this is enabled and ``PROPAGATE_EXCEPTIONS`` is not changed from
+    #: its default, exception propagation is implicitly enabled.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``TESTING`` configuration key. Defaults to ``False``.
+ testing = ConfigAttribute[bool]("TESTING")
+
+ #: If a secret key is set, cryptographic components can use this to
+ #: sign cookies and other things. Set this to a complex random value
+ #: when you want to use the secure cookie for instance.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: :data:`SECRET_KEY` configuration key. Defaults to ``None``.
+ secret_key = ConfigAttribute[t.Union[str, bytes, None]]("SECRET_KEY")
+
+ #: A :class:`~datetime.timedelta` which is used to set the expiration
+ #: date of a permanent session. The default is 31 days which makes a
+ #: permanent session survive for roughly one month.
+ #:
+ #: This attribute can also be configured from the config with the
+ #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to
+    #: ``timedelta(days=31)``.
+ permanent_session_lifetime = ConfigAttribute[timedelta](
+ "PERMANENT_SESSION_LIFETIME",
+ get_converter=_make_timedelta, # type: ignore[arg-type]
+ )
+
+ json_provider_class: type[JSONProvider] = DefaultJSONProvider
+ """A subclass of :class:`~flask.json.provider.JSONProvider`. An
+ instance is created and assigned to :attr:`app.json` when creating
+ the app.
+
+ The default, :class:`~flask.json.provider.DefaultJSONProvider`, uses
+ Python's built-in :mod:`json` library. A different provider can use
+ a different JSON library.
+
+ .. versionadded:: 2.2
+ """
+
+ #: Options that are passed to the Jinja environment in
+ #: :meth:`create_jinja_environment`. Changing these options after
+ #: the environment is created (accessing :attr:`jinja_env`) will
+ #: have no effect.
+ #:
+ #: .. versionchanged:: 1.1.0
+ #: This is a ``dict`` instead of an ``ImmutableDict`` to allow
+ #: easier configuration.
+ #:
+ jinja_options: dict[str, t.Any] = {}
+
+ #: The rule object to use for URL rules created. This is used by
+ #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`.
+ #:
+ #: .. versionadded:: 0.7
+ url_rule_class = Rule
+
+ #: The map object to use for storing the URL rules and routing
+ #: configuration parameters. Defaults to :class:`werkzeug.routing.Map`.
+ #:
+ #: .. versionadded:: 1.1.0
+ url_map_class = Map
+
+ #: The :meth:`test_client` method creates an instance of this test
+ #: client class. Defaults to :class:`~flask.testing.FlaskClient`.
+ #:
+ #: .. versionadded:: 0.7
+ test_client_class: type[FlaskClient] | None = None
+
+ #: The :class:`~click.testing.CliRunner` subclass, by default
+ #: :class:`~flask.testing.FlaskCliRunner` that is used by
+ #: :meth:`test_cli_runner`. Its ``__init__`` method should take a
+ #: Flask app object as the first argument.
+ #:
+ #: .. versionadded:: 1.0
+ test_cli_runner_class: type[FlaskCliRunner] | None = None
+
+ default_config: dict[str, t.Any]
+ response_class: type[Response]
+
+ def __init__(
+ self,
+ import_name: str,
+ static_url_path: str | None = None,
+ static_folder: str | os.PathLike[str] | None = "static",
+ static_host: str | None = None,
+ host_matching: bool = False,
+ subdomain_matching: bool = False,
+ template_folder: str | os.PathLike[str] | None = "templates",
+ instance_path: str | None = None,
+ instance_relative_config: bool = False,
+ root_path: str | None = None,
+ ) -> None:
+ super().__init__(
+ import_name=import_name,
+ static_folder=static_folder,
+ static_url_path=static_url_path,
+ template_folder=template_folder,
+ root_path=root_path,
+ )
+
+ if instance_path is None:
+ instance_path = self.auto_find_instance_path()
+ elif not os.path.isabs(instance_path):
+ raise ValueError(
+ "If an instance path is provided it must be absolute."
+ " A relative path was given instead."
+ )
+
+ #: Holds the path to the instance folder.
+ #:
+ #: .. versionadded:: 0.8
+ self.instance_path = instance_path
+
+ #: The configuration dictionary as :class:`Config`. This behaves
+ #: exactly like a regular dictionary but supports additional methods
+ #: to load a config from files.
+ self.config = self.make_config(instance_relative_config)
+
+ #: An instance of :attr:`aborter_class` created by
+ #: :meth:`make_aborter`. This is called by :func:`flask.abort`
+ #: to raise HTTP errors, and can be called directly as well.
+ #:
+ #: .. versionadded:: 2.2
+ #: Moved from ``flask.abort``, which calls this object.
+ self.aborter = self.make_aborter()
+
+ self.json: JSONProvider = self.json_provider_class(self)
+ """Provides access to JSON methods. Functions in ``flask.json``
+ will call methods on this provider when the application context
+ is active. Used for handling JSON requests and responses.
+
+ An instance of :attr:`json_provider_class`. Can be customized by
+ changing that attribute on a subclass, or by assigning to this
+ attribute afterwards.
+
+ The default, :class:`~flask.json.provider.DefaultJSONProvider`,
+ uses Python's built-in :mod:`json` library. A different provider
+ can use a different JSON library.
+
+ .. versionadded:: 2.2
+ """
+
+ #: A list of functions that are called by
+ #: :meth:`handle_url_build_error` when :meth:`.url_for` raises a
+ #: :exc:`~werkzeug.routing.BuildError`. Each function is called
+ #: with ``error``, ``endpoint`` and ``values``. If a function
+ #: returns ``None`` or raises a ``BuildError``, it is skipped.
+ #: Otherwise, its return value is returned by ``url_for``.
+ #:
+ #: .. versionadded:: 0.9
+ self.url_build_error_handlers: list[
+ t.Callable[[Exception, str, dict[str, t.Any]], str]
+ ] = []
+
+ #: A list of functions that are called when the application context
+        #: is destroyed. Since the application context is also torn down
+        #: when the request ends, this is the place to put code that
+        #: disconnects from databases.
+ #:
+ #: .. versionadded:: 0.9
+ self.teardown_appcontext_funcs: list[ft.TeardownCallable] = []
+
+ #: A list of shell context processor functions that should be run
+ #: when a shell context is created.
+ #:
+ #: .. versionadded:: 0.11
+ self.shell_context_processors: list[ft.ShellContextProcessorCallable] = []
+
+ #: Maps registered blueprint names to blueprint objects. The
+ #: dict retains the order the blueprints were registered in.
+        #: Blueprints can be registered multiple times; this dict does
+        #: not track how often they were attached.
+ #:
+ #: .. versionadded:: 0.7
+ self.blueprints: dict[str, Blueprint] = {}
+
+        #: A place where extensions can store application-specific state. For
+        #: example, this is where an extension could store database engines
+        #: and similar things.
+        #:
+        #: The key must match the name of the extension module. For example,
+        #: in the case of a "Flask-Foo" extension in ``flask_foo``, the key
+        #: would be ``'foo'``.
+ #:
+ #: .. versionadded:: 0.7
+ self.extensions: dict[str, t.Any] = {}
+
+ #: The :class:`~werkzeug.routing.Map` for this instance. You can use
+ #: this to change the routing converters after the class was created
+ #: but before any routes are connected. Example::
+ #:
+ #: from werkzeug.routing import BaseConverter
+ #:
+ #: class ListConverter(BaseConverter):
+ #: def to_python(self, value):
+ #: return value.split(',')
+ #: def to_url(self, values):
+ #: return ','.join(super(ListConverter, self).to_url(value)
+ #: for value in values)
+ #:
+ #: app = Flask(__name__)
+ #: app.url_map.converters['list'] = ListConverter
+ self.url_map = self.url_map_class(host_matching=host_matching)
+
+ self.subdomain_matching = subdomain_matching
+
+ # tracks internally if the application already handled at least one
+ # request.
+ self._got_first_request = False
+
+ def _check_setup_finished(self, f_name: str) -> None:
+ if self._got_first_request:
+ raise AssertionError(
+ f"The setup method '{f_name}' can no longer be called"
+ " on the application. It has already handled its first"
+ " request, any changes will not be applied"
+ " consistently.\n"
+ "Make sure all imports, decorators, functions, etc."
+ " needed to set up the application are done before"
+ " running it."
+ )
+
+ @cached_property
+ def name(self) -> str:
+ """The name of the application. This is usually the import name
+ with the difference that it's guessed from the run file if the
+ import name is main. This name is used as a display name when
+ Flask needs the name of the application. It can be set and overridden
+ to change the value.
+
+ .. versionadded:: 0.8
+ """
+ if self.import_name == "__main__":
+ fn: str | None = getattr(sys.modules["__main__"], "__file__", None)
+ if fn is None:
+ return "__main__"
+ return os.path.splitext(os.path.basename(fn))[0]
+ return self.import_name
+
+ @cached_property
+ def logger(self) -> logging.Logger:
+ """A standard Python :class:`~logging.Logger` for the app, with
+ the same name as :attr:`name`.
+
+ In debug mode, the logger's :attr:`~logging.Logger.level` will
+ be set to :data:`~logging.DEBUG`.
+
+ If there are no handlers configured, a default handler will be
+ added. See :doc:`/logging` for more information.
+
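+        A quick usage sketch (``app`` is an application instance)::
+
+            app.logger.info("processing request")
+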
+ .. versionchanged:: 1.1.0
+ The logger takes the same name as :attr:`name` rather than
+ hard-coding ``"flask.app"``.
+
+ .. versionchanged:: 1.0.0
+ Behavior was simplified. The logger is always named
+ ``"flask.app"``. The level is only set during configuration,
+ it doesn't check ``app.debug`` each time. Only one format is
+ used, not different ones depending on ``app.debug``. No
+ handlers are removed, and a handler is only added if no
+ handlers are already configured.
+
+ .. versionadded:: 0.3
+ """
+ return create_logger(self)
+
+ @cached_property
+ def jinja_env(self) -> Environment:
+ """The Jinja environment used to load templates.
+
+ The environment is created the first time this property is
+ accessed. Changing :attr:`jinja_options` after that will have no
+ effect.
+ """
+ return self.create_jinja_environment()
+
+ def create_jinja_environment(self) -> Environment:
+ raise NotImplementedError()
+
+ def make_config(self, instance_relative: bool = False) -> Config:
+ """Used to create the config attribute by the Flask constructor.
+        The ``instance_relative`` parameter is passed in from the constructor
+        of Flask (there named ``instance_relative_config``) and indicates if
+ the config should be relative to the instance path or the root path
+ of the application.
+
+ .. versionadded:: 0.8
+ """
+ root_path = self.root_path
+ if instance_relative:
+ root_path = self.instance_path
+ defaults = dict(self.default_config)
+ defaults["DEBUG"] = get_debug_flag()
+ return self.config_class(root_path, defaults)
+
+ def make_aborter(self) -> Aborter:
+ """Create the object to assign to :attr:`aborter`. That object
+ is called by :func:`flask.abort` to raise HTTP errors, and can
+ be called directly as well.
+
+ By default, this creates an instance of :attr:`aborter_class`,
+ which defaults to :class:`werkzeug.exceptions.Aborter`.
+
+ .. versionadded:: 2.2
+ """
+ return self.aborter_class()
+
+ def auto_find_instance_path(self) -> str:
+ """Tries to locate the instance path if it was not provided to the
+ constructor of the application class. It will basically calculate
+ the path to a folder named ``instance`` next to your main file or
+ the package.
+
+ .. versionadded:: 0.8
+ """
+ prefix, package_path = find_package(self.import_name)
+ if prefix is None:
+ return os.path.join(package_path, "instance")
+ return os.path.join(prefix, "var", f"{self.name}-instance")
+
+ def create_global_jinja_loader(self) -> DispatchingJinjaLoader:
+ """Creates the loader for the Jinja environment. Can be used to
+ override just the loader and keeping the rest unchanged. It's
+ discouraged to override this function. Instead one should override
+ the :meth:`jinja_loader` function instead.
+
+ The global loader dispatches between the loaders of the application
+ and the individual blueprints.
+
+ .. versionadded:: 0.7
+ """
+ return DispatchingJinjaLoader(self)
+
+ def select_jinja_autoescape(self, filename: str) -> bool:
+ """Returns ``True`` if autoescaping should be active for the given
+        template name. If no template name is given, returns ``True``.
+
+ .. versionchanged:: 2.2
+ Autoescaping is now enabled by default for ``.svg`` files.
+
+ .. versionadded:: 0.5
+ """
+ if filename is None:
+ return True
+ return filename.endswith((".html", ".htm", ".xml", ".xhtml", ".svg"))
+
+ @property
+ def debug(self) -> bool:
+ """Whether debug mode is enabled. When using ``flask run`` to start the
+ development server, an interactive debugger will be shown for unhandled
+ exceptions, and the server will be reloaded when code changes. This maps to the
+ :data:`DEBUG` config key. It may not behave as expected if set late.
+
+ **Do not enable debug mode when deploying in production.**
+
+ Default: ``False``
+ """
+ return self.config["DEBUG"] # type: ignore[no-any-return]
+
+ @debug.setter
+ def debug(self, value: bool) -> None:
+ self.config["DEBUG"] = value
+
+ if self.config["TEMPLATES_AUTO_RELOAD"] is None:
+ self.jinja_env.auto_reload = value
+
+ @setupmethod
+ def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
+ """Register a :class:`~flask.Blueprint` on the application. Keyword
+ arguments passed to this method will override the defaults set on the
+ blueprint.
+
+ Calls the blueprint's :meth:`~flask.Blueprint.register` method after
+ recording the blueprint in the application's :attr:`blueprints`.
+
+ :param blueprint: The blueprint to register.
+ :param url_prefix: Blueprint routes will be prefixed with this.
+ :param subdomain: Blueprint routes will match on this subdomain.
+ :param url_defaults: Blueprint routes will use these default values for
+ view arguments.
+ :param options: Additional keyword arguments are passed to
+ :class:`~flask.blueprints.BlueprintSetupState`. They can be
+ accessed in :meth:`~flask.Blueprint.record` callbacks.
+
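+        A registration sketch (the blueprint name and prefix are
+        illustrative)::
+
+            from flask import Blueprint
+
+            bp = Blueprint("admin", __name__)
+            app.register_blueprint(bp, url_prefix="/admin")
+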
+ .. versionchanged:: 2.0.1
+ The ``name`` option can be used to change the (pre-dotted)
+ name the blueprint is registered with. This allows the same
+ blueprint to be registered multiple times with unique names
+ for ``url_for``.
+
+ .. versionadded:: 0.7
+ """
+ blueprint.register(self, options)
+
+ def iter_blueprints(self) -> t.ValuesView[Blueprint]:
+ """Iterates over all blueprints by the order they were registered.
+
+ .. versionadded:: 0.11
+ """
+ return self.blueprints.values()
+
+ @setupmethod
+ def add_url_rule(
+ self,
+ rule: str,
+ endpoint: str | None = None,
+ view_func: ft.RouteCallable | None = None,
+ provide_automatic_options: bool | None = None,
+ **options: t.Any,
+ ) -> None:
+ if endpoint is None:
+ endpoint = _endpoint_from_view_func(view_func) # type: ignore
+ options["endpoint"] = endpoint
+ methods = options.pop("methods", None)
+
+ # if the methods are not given and the view_func object knows its
+ # methods we can use that instead. If neither exists, we go with
+ # a tuple of only ``GET`` as default.
+ if methods is None:
+ methods = getattr(view_func, "methods", None) or ("GET",)
+ if isinstance(methods, str):
+ raise TypeError(
+ "Allowed methods must be a list of strings, for"
+ ' example: @app.route(..., methods=["POST"])'
+ )
+ methods = {item.upper() for item in methods}
+
+ # Methods that should always be added
+ required_methods: set[str] = set(getattr(view_func, "required_methods", ()))
+
+ # starting with Flask 0.8 the view_func object can disable and
+ # force-enable the automatic options handling.
+ if provide_automatic_options is None:
+ provide_automatic_options = getattr(
+ view_func, "provide_automatic_options", None
+ )
+
+ if provide_automatic_options is None:
+ if "OPTIONS" not in methods and self.config["PROVIDE_AUTOMATIC_OPTIONS"]:
+ provide_automatic_options = True
+ required_methods.add("OPTIONS")
+ else:
+ provide_automatic_options = False
+
+ # Add the required methods now.
+ methods |= required_methods
+
+ rule_obj = self.url_rule_class(rule, methods=methods, **options)
+ rule_obj.provide_automatic_options = provide_automatic_options # type: ignore[attr-defined]
+
+ self.url_map.add(rule_obj)
+ if view_func is not None:
+ old_func = self.view_functions.get(endpoint)
+ if old_func is not None and old_func != view_func:
+ raise AssertionError(
+ "View function mapping is overwriting an existing"
+ f" endpoint function: {endpoint}"
+ )
+ self.view_functions[endpoint] = view_func
+
+ @setupmethod
+ def template_filter(
+ self, name: str | None = None
+ ) -> t.Callable[[T_template_filter], T_template_filter]:
+ """A decorator that is used to register custom template filter.
+ You can specify a name for the filter, otherwise the function
+ name will be used. Example::
+
+ @app.template_filter()
+ def reverse(s):
+ return s[::-1]
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+
+ def decorator(f: T_template_filter) -> T_template_filter:
+ self.add_template_filter(f, name=name)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def add_template_filter(
+ self, f: ft.TemplateFilterCallable, name: str | None = None
+ ) -> None:
+ """Register a custom template filter. Works exactly like the
+ :meth:`template_filter` decorator.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+ self.jinja_env.filters[name or f.__name__] = f
+
+ @setupmethod
+ def template_test(
+ self, name: str | None = None
+ ) -> t.Callable[[T_template_test], T_template_test]:
+ """A decorator that is used to register custom template test.
+ You can specify a name for the test, otherwise the function
+ name will be used. Example::
+
+ @app.template_test()
+ def is_prime(n):
+ if n == 2:
+ return True
+ for i in range(2, int(math.ceil(math.sqrt(n))) + 1):
+ if n % i == 0:
+ return False
+ return True
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+
+ def decorator(f: T_template_test) -> T_template_test:
+ self.add_template_test(f, name=name)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def add_template_test(
+ self, f: ft.TemplateTestCallable, name: str | None = None
+ ) -> None:
+ """Register a custom template test. Works exactly like the
+ :meth:`template_test` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+ self.jinja_env.tests[name or f.__name__] = f
+
+ @setupmethod
+ def template_global(
+ self, name: str | None = None
+ ) -> t.Callable[[T_template_global], T_template_global]:
+ """A decorator that is used to register a custom template global function.
+ You can specify a name for the global function, otherwise the function
+ name will be used. Example::
+
+ @app.template_global()
+ def double(n):
+ return 2 * n
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global function, otherwise the
+ function name will be used.
+ """
+
+ def decorator(f: T_template_global) -> T_template_global:
+ self.add_template_global(f, name=name)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def add_template_global(
+ self, f: ft.TemplateGlobalCallable, name: str | None = None
+ ) -> None:
+ """Register a custom template global function. Works exactly like the
+ :meth:`template_global` decorator.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global function, otherwise the
+ function name will be used.
+ """
+ self.jinja_env.globals[name or f.__name__] = f
+
+ @setupmethod
+ def teardown_appcontext(self, f: T_teardown) -> T_teardown:
+ """Registers a function to be called when the application
+ context is popped. The application context is typically popped
+ after the request context for each request, at the end of CLI
+ commands, or after a manually pushed context ends.
+
+ .. code-block:: python
+
+ with app.app_context():
+ ...
+
+ When the ``with`` block exits (or ``ctx.pop()`` is called), the
+ teardown functions are called just before the app context is
+        made inactive. Since a request context typically also manages an
+        application context, they are also called when you pop a
+        request context.
+
+        When a teardown function is called because of an unhandled
+        exception, it will be passed an error object. If an
+ :meth:`errorhandler` is registered, it will handle the exception
+ and the teardown will not receive it.
+
+ Teardown functions must avoid raising exceptions. If they
+ execute code that might fail they must surround that code with a
+ ``try``/``except`` block and log any errors.
+
+ The return values of teardown functions are ignored.
+
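+        A registration sketch (``close_db`` and the ``"db"`` key are
+        illustrative)::
+
+            @app.teardown_appcontext
+            def close_db(exception):
+                db = g.pop("db", None)
+                if db is not None:
+                    db.close()
+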
+ .. versionadded:: 0.9
+ """
+ self.teardown_appcontext_funcs.append(f)
+ return f
+
+ @setupmethod
+ def shell_context_processor(
+ self, f: T_shell_context_processor
+ ) -> T_shell_context_processor:
+ """Registers a shell context processor function.
+
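+        A sketch of a processor (the returned names are illustrative)::
+
+            @app.shell_context_processor
+            def make_shell_context():
+                return {"app": app}
+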
+ .. versionadded:: 0.11
+ """
+ self.shell_context_processors.append(f)
+ return f
+
+ def _find_error_handler(
+ self, e: Exception, blueprints: list[str]
+ ) -> ft.ErrorHandlerCallable | None:
+ """Return a registered error handler for an exception in this order:
+ blueprint handler for a specific code, app handler for a specific code,
+ blueprint handler for an exception class, app handler for an exception
+ class, or ``None`` if a suitable handler is not found.
+ """
+ exc_class, code = self._get_exc_class_and_code(type(e))
+ names = (*blueprints, None)
+
+ for c in (code, None) if code is not None else (None,):
+ for name in names:
+ handler_map = self.error_handler_spec[name][c]
+
+ if not handler_map:
+ continue
+
+ for cls in exc_class.__mro__:
+ handler = handler_map.get(cls)
+
+ if handler is not None:
+ return handler
+ return None
+
+ def trap_http_exception(self, e: Exception) -> bool:
+ """Checks if an HTTP exception should be trapped or not. By default
+ this will return ``False`` for all exceptions except for a bad request
+ key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It
+ also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``.
+
+ This is called for all HTTP exceptions raised by a view function.
+ If it returns ``True`` for any exception the error handler for this
+        exception is not called and it shows up as a regular exception in the
+ traceback. This is helpful for debugging implicitly raised HTTP
+ exceptions.
+
+ .. versionchanged:: 1.0
+ Bad request errors are not trapped by default in debug mode.
+
+ .. versionadded:: 0.8
+ """
+ if self.config["TRAP_HTTP_EXCEPTIONS"]:
+ return True
+
+ trap_bad_request = self.config["TRAP_BAD_REQUEST_ERRORS"]
+
+ # if unset, trap key errors in debug mode
+ if (
+ trap_bad_request is None
+ and self.debug
+ and isinstance(e, BadRequestKeyError)
+ ):
+ return True
+
+ if trap_bad_request:
+ return isinstance(e, BadRequest)
+
+ return False
+
+ def should_ignore_error(self, error: BaseException | None) -> bool:
+ """This is called to figure out if an error should be ignored
+ or not as far as the teardown system is concerned. If this
+ function returns ``True`` then the teardown handlers will not be
+ passed the error.
+
+ .. versionadded:: 0.10
+ """
+ return False
+
+ def redirect(self, location: str, code: int = 302) -> BaseResponse:
+ """Create a redirect response object.
+
+ This is called by :func:`flask.redirect`, and can be called
+ directly as well.
+
+ :param location: The URL to redirect to.
+ :param code: The status code for the redirect.
+
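+        A usage sketch (the location and code are illustrative)::
+
+            response = app.redirect("/login", code=303)
+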
+ .. versionadded:: 2.2
+ Moved from ``flask.redirect``, which calls this method.
+ """
+ return _wz_redirect(
+ location,
+ code=code,
+ Response=self.response_class, # type: ignore[arg-type]
+ )
+
+ def inject_url_defaults(self, endpoint: str, values: dict[str, t.Any]) -> None:
+ """Injects the URL defaults for the given endpoint directly into
+ the values dictionary passed. This is used internally and
+ automatically called on URL building.
+
+ .. versionadded:: 0.7
+ """
+ names: t.Iterable[str | None] = (None,)
+
+ # url_for may be called outside a request context, parse the
+ # passed endpoint instead of using request.blueprints.
+ if "." in endpoint:
+ names = chain(
+ names, reversed(_split_blueprint_path(endpoint.rpartition(".")[0]))
+ )
+
+ for name in names:
+ if name in self.url_default_functions:
+ for func in self.url_default_functions[name]:
+ func(endpoint, values)
+
+ def handle_url_build_error(
+ self, error: BuildError, endpoint: str, values: dict[str, t.Any]
+ ) -> str:
+ """Called by :meth:`.url_for` if a
+ :exc:`~werkzeug.routing.BuildError` was raised. If this returns
+ a value, it will be returned by ``url_for``, otherwise the error
+ will be re-raised.
+
+ Each function in :attr:`url_build_error_handlers` is called with
+ ``error``, ``endpoint`` and ``values``. If a function returns
+ ``None`` or raises a ``BuildError``, it is skipped. Otherwise,
+ its return value is returned by ``url_for``.
+
+ :param error: The active ``BuildError`` being handled.
+ :param endpoint: The endpoint being built.
+ :param values: The keyword arguments passed to ``url_for``.
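+
+        A handler sketch (the endpoint name and fallback URL are
+        illustrative)::
+
+            def external_handler(error, endpoint, values):
+                if endpoint == "docs":
+                    return "https://example.com/docs"
+                return None  # skipped; the error is re-raised if none match
+
+            app.url_build_error_handlers.append(external_handler)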
+ """
+ for handler in self.url_build_error_handlers:
+ try:
+ rv = handler(error, endpoint, values)
+ except BuildError as e:
+ # make error available outside except block
+ error = e
+ else:
+ if rv is not None:
+ return rv
+
+ # Re-raise if called with an active exception, otherwise raise
+ # the passed in exception.
+ if error is sys.exc_info()[1]:
+ raise
+
+ raise error
diff --git a/tapdown/lib/python3.11/site-packages/flask/sansio/blueprints.py b/tapdown/lib/python3.11/site-packages/flask/sansio/blueprints.py
new file mode 100644
index 0000000..4f912cc
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/sansio/blueprints.py
@@ -0,0 +1,632 @@
+from __future__ import annotations
+
+import os
+import typing as t
+from collections import defaultdict
+from functools import update_wrapper
+
+from .. import typing as ft
+from .scaffold import _endpoint_from_view_func
+from .scaffold import _sentinel
+from .scaffold import Scaffold
+from .scaffold import setupmethod
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from .app import App
+
+DeferredSetupFunction = t.Callable[["BlueprintSetupState"], None]
+T_after_request = t.TypeVar("T_after_request", bound=ft.AfterRequestCallable[t.Any])
+T_before_request = t.TypeVar("T_before_request", bound=ft.BeforeRequestCallable)
+T_error_handler = t.TypeVar("T_error_handler", bound=ft.ErrorHandlerCallable)
+T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
+T_template_context_processor = t.TypeVar(
+ "T_template_context_processor", bound=ft.TemplateContextProcessorCallable
+)
+T_template_filter = t.TypeVar("T_template_filter", bound=ft.TemplateFilterCallable)
+T_template_global = t.TypeVar("T_template_global", bound=ft.TemplateGlobalCallable)
+T_template_test = t.TypeVar("T_template_test", bound=ft.TemplateTestCallable)
+T_url_defaults = t.TypeVar("T_url_defaults", bound=ft.URLDefaultCallable)
+T_url_value_preprocessor = t.TypeVar(
+ "T_url_value_preprocessor", bound=ft.URLValuePreprocessorCallable
+)
+
+
+class BlueprintSetupState:
+ """Temporary holder object for registering a blueprint with the
+ application. An instance of this class is created by the
+ :meth:`~flask.Blueprint.make_setup_state` method and later passed
+ to all register callback functions.
+ """
+
+ def __init__(
+ self,
+ blueprint: Blueprint,
+ app: App,
+ options: t.Any,
+ first_registration: bool,
+ ) -> None:
+ #: a reference to the current application
+ self.app = app
+
+ #: a reference to the blueprint that created this setup state.
+ self.blueprint = blueprint
+
+ #: a dictionary with all options that were passed to the
+ #: :meth:`~flask.Flask.register_blueprint` method.
+ self.options = options
+
+        #: Because blueprints can be registered multiple times with the
+        #: application, and not everything should be registered more than
+        #: once, this attribute can be used to figure out if the blueprint
+        #: was already registered in the past.
+ self.first_registration = first_registration
+
+ subdomain = self.options.get("subdomain")
+ if subdomain is None:
+ subdomain = self.blueprint.subdomain
+
+ #: The subdomain that the blueprint should be active for, ``None``
+ #: otherwise.
+ self.subdomain = subdomain
+
+ url_prefix = self.options.get("url_prefix")
+ if url_prefix is None:
+ url_prefix = self.blueprint.url_prefix
+ #: The prefix that should be used for all URLs defined on the
+ #: blueprint.
+ self.url_prefix = url_prefix
+
+ self.name = self.options.get("name", blueprint.name)
+ self.name_prefix = self.options.get("name_prefix", "")
+
+ #: A dictionary with URL defaults that is added to each and every
+ #: URL that was defined with the blueprint.
+ self.url_defaults = dict(self.blueprint.url_values_defaults)
+ self.url_defaults.update(self.options.get("url_defaults", ()))
+
+ def add_url_rule(
+ self,
+ rule: str,
+ endpoint: str | None = None,
+ view_func: ft.RouteCallable | None = None,
+ **options: t.Any,
+ ) -> None:
+ """A helper method to register a rule (and optionally a view function)
+ to the application. The endpoint is automatically prefixed with the
+ blueprint's name.
+ """
+ if self.url_prefix is not None:
+ if rule:
+ rule = "/".join((self.url_prefix.rstrip("/"), rule.lstrip("/")))
+ else:
+ rule = self.url_prefix
+ options.setdefault("subdomain", self.subdomain)
+ if endpoint is None:
+ endpoint = _endpoint_from_view_func(view_func) # type: ignore
+ defaults = self.url_defaults
+ if "defaults" in options:
+ defaults = dict(defaults, **options.pop("defaults"))
+
+ self.app.add_url_rule(
+ rule,
+ f"{self.name_prefix}.{self.name}.{endpoint}".lstrip("."),
+ view_func,
+ defaults=defaults,
+ **options,
+ )
+
+
+class Blueprint(Scaffold):
+ """Represents a blueprint, a collection of routes and other
+ app-related functions that can be registered on a real application
+ later.
+
+ A blueprint is an object that allows defining application functions
+ without requiring an application object ahead of time. It uses the
+ same decorators as :class:`~flask.Flask`, but defers the need for an
+ application by recording them for later registration.
+
+ Decorating a function with a blueprint creates a deferred function
+ that is called with :class:`~flask.blueprints.BlueprintSetupState`
+ when the blueprint is registered on an application.
+
+ See :doc:`/blueprints` for more information.
+
+ :param name: The name of the blueprint. Will be prepended to each
+ endpoint name.
+ :param import_name: The name of the blueprint package, usually
+ ``__name__``. This helps locate the ``root_path`` for the
+ blueprint.
+ :param static_folder: A folder with static files that should be
+ served by the blueprint's static route. The path is relative to
+ the blueprint's root path. Blueprint static files are disabled
+ by default.
+ :param static_url_path: The url to serve static files from.
+ Defaults to ``static_folder``. If the blueprint does not have
+ a ``url_prefix``, the app's static route will take precedence,
+ and the blueprint's static files won't be accessible.
+ :param template_folder: A folder with templates that should be added
+ to the app's template search path. The path is relative to the
+ blueprint's root path. Blueprint templates are disabled by
+ default. Blueprint templates have a lower precedence than those
+ in the app's templates folder.
+ :param url_prefix: A path to prepend to all of the blueprint's URLs,
+ to make them distinct from the rest of the app's routes.
+ :param subdomain: A subdomain that blueprint routes will match on by
+ default.
+    :param url_defaults: A dict of default values that blueprint
+        routes will receive.
+ :param root_path: By default, the blueprint will automatically set
+ this based on ``import_name``. In certain situations this
+ automatic detection can fail, so the path can be specified
+ manually instead.
+
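+    A construction sketch (the name, prefix, and view are illustrative)::
+
+        bp = Blueprint("admin", __name__, url_prefix="/admin")
+
+        @bp.route("/")
+        def index():
+            return "Admin index"
+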
+ .. versionchanged:: 1.1.0
+ Blueprints have a ``cli`` group to register nested CLI commands.
+ The ``cli_group`` parameter controls the name of the group under
+ the ``flask`` command.
+
+ .. versionadded:: 0.7
+ """
+
+ _got_registered_once = False
+
+ def __init__(
+ self,
+ name: str,
+ import_name: str,
+ static_folder: str | os.PathLike[str] | None = None,
+ static_url_path: str | None = None,
+ template_folder: str | os.PathLike[str] | None = None,
+ url_prefix: str | None = None,
+ subdomain: str | None = None,
+ url_defaults: dict[str, t.Any] | None = None,
+ root_path: str | None = None,
+ cli_group: str | None = _sentinel, # type: ignore[assignment]
+ ):
+ super().__init__(
+ import_name=import_name,
+ static_folder=static_folder,
+ static_url_path=static_url_path,
+ template_folder=template_folder,
+ root_path=root_path,
+ )
+
+ if not name:
+ raise ValueError("'name' may not be empty.")
+
+ if "." in name:
+ raise ValueError("'name' may not contain a dot '.' character.")
+
+ self.name = name
+ self.url_prefix = url_prefix
+ self.subdomain = subdomain
+ self.deferred_functions: list[DeferredSetupFunction] = []
+
+ if url_defaults is None:
+ url_defaults = {}
+
+ self.url_values_defaults = url_defaults
+ self.cli_group = cli_group
+ self._blueprints: list[tuple[Blueprint, dict[str, t.Any]]] = []
+
+ def _check_setup_finished(self, f_name: str) -> None:
+ if self._got_registered_once:
+ raise AssertionError(
+ f"The setup method '{f_name}' can no longer be called on the blueprint"
+ f" '{self.name}'. It has already been registered at least once, any"
+ " changes will not be applied consistently.\n"
+ "Make sure all imports, decorators, functions, etc. needed to set up"
+ " the blueprint are done before registering it."
+ )
+
+ @setupmethod
+ def record(self, func: DeferredSetupFunction) -> None:
+ """Registers a function that is called when the blueprint is
+ registered on the application. This function is called with the
+ state as argument as returned by the :meth:`make_setup_state`
+ method.
+ """
+ self.deferred_functions.append(func)
+
+ @setupmethod
+ def record_once(self, func: DeferredSetupFunction) -> None:
+ """Works like :meth:`record` but wraps the function in another
+ function that will ensure the function is only called once. If the
+ blueprint is registered a second time on the application, the
+ function passed is not called.
+ """
+
+ def wrapper(state: BlueprintSetupState) -> None:
+ if state.first_registration:
+ func(state)
+
+ self.record(update_wrapper(wrapper, func))
+
+ def make_setup_state(
+ self, app: App, options: dict[str, t.Any], first_registration: bool = False
+ ) -> BlueprintSetupState:
+ """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
+ object that is later passed to the register callback functions.
+ Subclasses can override this to return a subclass of the setup state.
+ """
+ return BlueprintSetupState(self, app, options, first_registration)
+
+ @setupmethod
+ def register_blueprint(self, blueprint: Blueprint, **options: t.Any) -> None:
+ """Register a :class:`~flask.Blueprint` on this blueprint. Keyword
+ arguments passed to this method will override the defaults set
+ on the blueprint.
+
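+        A nesting sketch (the names and prefixes are illustrative)::
+
+            parent = Blueprint("parent", __name__, url_prefix="/parent")
+            child = Blueprint("child", __name__, url_prefix="/child")
+            parent.register_blueprint(child)
+            app.register_blueprint(parent)
+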
+ .. versionchanged:: 2.0.1
+ The ``name`` option can be used to change the (pre-dotted)
+ name the blueprint is registered with. This allows the same
+ blueprint to be registered multiple times with unique names
+ for ``url_for``.
+
+ .. versionadded:: 2.0
+ """
+ if blueprint is self:
+ raise ValueError("Cannot register a blueprint on itself")
+ self._blueprints.append((blueprint, options))
+
+ def register(self, app: App, options: dict[str, t.Any]) -> None:
+ """Called by :meth:`Flask.register_blueprint` to register all
+ views and callbacks registered on the blueprint with the
+ application. Creates a :class:`.BlueprintSetupState` and calls
+ each :meth:`record` callback with it.
+
+ :param app: The application this blueprint is being registered
+ with.
+ :param options: Keyword arguments forwarded from
+ :meth:`~Flask.register_blueprint`.
+
+ .. versionchanged:: 2.3
+ Nested blueprints now correctly apply subdomains.
+
+ .. versionchanged:: 2.1
+ Registering the same blueprint with the same name multiple
+ times is an error.
+
+ .. versionchanged:: 2.0.1
+ Nested blueprints are registered with their dotted name.
+ This allows different blueprints with the same name to be
+ nested at different locations.
+
+ .. versionchanged:: 2.0.1
+ The ``name`` option can be used to change the (pre-dotted)
+ name the blueprint is registered with. This allows the same
+ blueprint to be registered multiple times with unique names
+ for ``url_for``.
+ """
+ name_prefix = options.get("name_prefix", "")
+ self_name = options.get("name", self.name)
+ name = f"{name_prefix}.{self_name}".lstrip(".")
+
+ if name in app.blueprints:
+ bp_desc = "this" if app.blueprints[name] is self else "a different"
+ existing_at = f" '{name}'" if self_name != name else ""
+
+ raise ValueError(
+ f"The name '{self_name}' is already registered for"
+ f" {bp_desc} blueprint{existing_at}. Use 'name=' to"
+ f" provide a unique name."
+ )
+
+ first_bp_registration = not any(bp is self for bp in app.blueprints.values())
+ first_name_registration = name not in app.blueprints
+
+ app.blueprints[name] = self
+ self._got_registered_once = True
+ state = self.make_setup_state(app, options, first_bp_registration)
+
+ if self.has_static_folder:
+ state.add_url_rule(
+ f"{self.static_url_path}/",
+ view_func=self.send_static_file, # type: ignore[attr-defined]
+ endpoint="static",
+ )
+
+ # Merge blueprint data into parent.
+ if first_bp_registration or first_name_registration:
+ self._merge_blueprint_funcs(app, name)
+
+ for deferred in self.deferred_functions:
+ deferred(state)
+
+ cli_resolved_group = options.get("cli_group", self.cli_group)
+
+ if self.cli.commands:
+ if cli_resolved_group is None:
+ app.cli.commands.update(self.cli.commands)
+ elif cli_resolved_group is _sentinel:
+ self.cli.name = name
+ app.cli.add_command(self.cli)
+ else:
+ self.cli.name = cli_resolved_group
+ app.cli.add_command(self.cli)
+
+ for blueprint, bp_options in self._blueprints:
+ bp_options = bp_options.copy()
+ bp_url_prefix = bp_options.get("url_prefix")
+ bp_subdomain = bp_options.get("subdomain")
+
+ if bp_subdomain is None:
+ bp_subdomain = blueprint.subdomain
+
+ if state.subdomain is not None and bp_subdomain is not None:
+ bp_options["subdomain"] = bp_subdomain + "." + state.subdomain
+ elif bp_subdomain is not None:
+ bp_options["subdomain"] = bp_subdomain
+ elif state.subdomain is not None:
+ bp_options["subdomain"] = state.subdomain
+
+ if bp_url_prefix is None:
+ bp_url_prefix = blueprint.url_prefix
+
+ if state.url_prefix is not None and bp_url_prefix is not None:
+ bp_options["url_prefix"] = (
+ state.url_prefix.rstrip("/") + "/" + bp_url_prefix.lstrip("/")
+ )
+ elif bp_url_prefix is not None:
+ bp_options["url_prefix"] = bp_url_prefix
+ elif state.url_prefix is not None:
+ bp_options["url_prefix"] = state.url_prefix
+
+ bp_options["name_prefix"] = name
+ blueprint.register(app, bp_options)
+
+ def _merge_blueprint_funcs(self, app: App, name: str) -> None:
+ def extend(
+ bp_dict: dict[ft.AppOrBlueprintKey, list[t.Any]],
+ parent_dict: dict[ft.AppOrBlueprintKey, list[t.Any]],
+ ) -> None:
+ for key, values in bp_dict.items():
+ key = name if key is None else f"{name}.{key}"
+ parent_dict[key].extend(values)
+
+ for key, value in self.error_handler_spec.items():
+ key = name if key is None else f"{name}.{key}"
+ value = defaultdict(
+ dict,
+ {
+ code: {exc_class: func for exc_class, func in code_values.items()}
+ for code, code_values in value.items()
+ },
+ )
+ app.error_handler_spec[key] = value
+
+ for endpoint, func in self.view_functions.items():
+ app.view_functions[endpoint] = func
+
+ extend(self.before_request_funcs, app.before_request_funcs)
+ extend(self.after_request_funcs, app.after_request_funcs)
+ extend(
+ self.teardown_request_funcs,
+ app.teardown_request_funcs,
+ )
+ extend(self.url_default_functions, app.url_default_functions)
+ extend(self.url_value_preprocessors, app.url_value_preprocessors)
+ extend(self.template_context_processors, app.template_context_processors)
+
+ @setupmethod
+ def add_url_rule(
+ self,
+ rule: str,
+ endpoint: str | None = None,
+ view_func: ft.RouteCallable | None = None,
+ provide_automatic_options: bool | None = None,
+ **options: t.Any,
+ ) -> None:
+ """Register a URL rule with the blueprint. See :meth:`.Flask.add_url_rule` for
+ full documentation.
+
+ The URL rule is prefixed with the blueprint's URL prefix. The endpoint name,
+ used with :func:`url_for`, is prefixed with the blueprint's name.
+ """
+ if endpoint and "." in endpoint:
+ raise ValueError("'endpoint' may not contain a dot '.' character.")
+
+ if view_func and hasattr(view_func, "__name__") and "." in view_func.__name__:
+ raise ValueError("'view_func' name may not contain a dot '.' character.")
+
+ self.record(
+ lambda s: s.add_url_rule(
+ rule,
+ endpoint,
+ view_func,
+ provide_automatic_options=provide_automatic_options,
+ **options,
+ )
+ )
+
+ @setupmethod
+ def app_template_filter(
+ self, name: str | None = None
+ ) -> t.Callable[[T_template_filter], T_template_filter]:
+ """Register a template filter, available in any template rendered by the
+ application. Equivalent to :meth:`.Flask.template_filter`.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+
+ def decorator(f: T_template_filter) -> T_template_filter:
+ self.add_app_template_filter(f, name=name)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def add_app_template_filter(
+ self, f: ft.TemplateFilterCallable, name: str | None = None
+ ) -> None:
+ """Register a template filter, available in any template rendered by the
+ application. Works like the :meth:`app_template_filter` decorator. Equivalent to
+ :meth:`.Flask.add_template_filter`.
+
+ :param name: the optional name of the filter, otherwise the
+ function name will be used.
+ """
+
+ def register_template(state: BlueprintSetupState) -> None:
+ state.app.jinja_env.filters[name or f.__name__] = f
+
+ self.record_once(register_template)
+
+ @setupmethod
+ def app_template_test(
+ self, name: str | None = None
+ ) -> t.Callable[[T_template_test], T_template_test]:
+ """Register a template test, available in any template rendered by the
+ application. Equivalent to :meth:`.Flask.template_test`.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+
+ def decorator(f: T_template_test) -> T_template_test:
+ self.add_app_template_test(f, name=name)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def add_app_template_test(
+ self, f: ft.TemplateTestCallable, name: str | None = None
+ ) -> None:
+ """Register a template test, available in any template rendered by the
+ application. Works like the :meth:`app_template_test` decorator. Equivalent to
+ :meth:`.Flask.add_template_test`.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the test, otherwise the
+ function name will be used.
+ """
+
+ def register_template(state: BlueprintSetupState) -> None:
+ state.app.jinja_env.tests[name or f.__name__] = f
+
+ self.record_once(register_template)
+
+ @setupmethod
+ def app_template_global(
+ self, name: str | None = None
+ ) -> t.Callable[[T_template_global], T_template_global]:
+ """Register a template global, available in any template rendered by the
+ application. Equivalent to :meth:`.Flask.template_global`.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global, otherwise the
+ function name will be used.
+ """
+
+ def decorator(f: T_template_global) -> T_template_global:
+ self.add_app_template_global(f, name=name)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def add_app_template_global(
+ self, f: ft.TemplateGlobalCallable, name: str | None = None
+ ) -> None:
+ """Register a template global, available in any template rendered by the
+ application. Works like the :meth:`app_template_global` decorator. Equivalent to
+ :meth:`.Flask.add_template_global`.
+
+ .. versionadded:: 0.10
+
+ :param name: the optional name of the global, otherwise the
+ function name will be used.
+ """
+
+ def register_template(state: BlueprintSetupState) -> None:
+ state.app.jinja_env.globals[name or f.__name__] = f
+
+ self.record_once(register_template)
+
+ @setupmethod
+ def before_app_request(self, f: T_before_request) -> T_before_request:
+ """Like :meth:`before_request`, but before every request, not only those handled
+ by the blueprint. Equivalent to :meth:`.Flask.before_request`.
+ """
+ self.record_once(
+ lambda s: s.app.before_request_funcs.setdefault(None, []).append(f)
+ )
+ return f
+
+ @setupmethod
+ def after_app_request(self, f: T_after_request) -> T_after_request:
+ """Like :meth:`after_request`, but after every request, not only those handled
+ by the blueprint. Equivalent to :meth:`.Flask.after_request`.
+ """
+ self.record_once(
+ lambda s: s.app.after_request_funcs.setdefault(None, []).append(f)
+ )
+ return f
+
+ @setupmethod
+ def teardown_app_request(self, f: T_teardown) -> T_teardown:
+ """Like :meth:`teardown_request`, but after every request, not only those
+ handled by the blueprint. Equivalent to :meth:`.Flask.teardown_request`.
+ """
+ self.record_once(
+ lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f)
+ )
+ return f
+
+ @setupmethod
+ def app_context_processor(
+ self, f: T_template_context_processor
+ ) -> T_template_context_processor:
+ """Like :meth:`context_processor`, but for templates rendered by every view, not
+ only by the blueprint. Equivalent to :meth:`.Flask.context_processor`.
+ """
+ self.record_once(
+ lambda s: s.app.template_context_processors.setdefault(None, []).append(f)
+ )
+ return f
+
+ @setupmethod
+ def app_errorhandler(
+ self, code: type[Exception] | int
+ ) -> t.Callable[[T_error_handler], T_error_handler]:
+ """Like :meth:`errorhandler`, but for every request, not only those handled by
+ the blueprint. Equivalent to :meth:`.Flask.errorhandler`.
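+
+        A usage sketch (the handler body is illustrative)::
+
+            @bp.app_errorhandler(404)
+            def not_found(e):
+                return "Not Found", 404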
+ """
+
+ def decorator(f: T_error_handler) -> T_error_handler:
+ def from_blueprint(state: BlueprintSetupState) -> None:
+ state.app.errorhandler(code)(f)
+
+ self.record_once(from_blueprint)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def app_url_value_preprocessor(
+ self, f: T_url_value_preprocessor
+ ) -> T_url_value_preprocessor:
+ """Like :meth:`url_value_preprocessor`, but for every request, not only those
+ handled by the blueprint. Equivalent to :meth:`.Flask.url_value_preprocessor`.
+ """
+ self.record_once(
+ lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f)
+ )
+ return f
+
+ @setupmethod
+ def app_url_defaults(self, f: T_url_defaults) -> T_url_defaults:
+ """Like :meth:`url_defaults`, but for every request, not only those handled by
+ the blueprint. Equivalent to :meth:`.Flask.url_defaults`.
+ """
+ self.record_once(
+ lambda s: s.app.url_default_functions.setdefault(None, []).append(f)
+ )
+ return f
diff --git a/tapdown/lib/python3.11/site-packages/flask/sansio/scaffold.py b/tapdown/lib/python3.11/site-packages/flask/sansio/scaffold.py
new file mode 100644
index 0000000..0e96f15
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/sansio/scaffold.py
@@ -0,0 +1,792 @@
+from __future__ import annotations
+
+import importlib.util
+import os
+import pathlib
+import sys
+import typing as t
+from collections import defaultdict
+from functools import update_wrapper
+
+from jinja2 import BaseLoader
+from jinja2 import FileSystemLoader
+from werkzeug.exceptions import default_exceptions
+from werkzeug.exceptions import HTTPException
+from werkzeug.utils import cached_property
+
+from .. import typing as ft
+from ..helpers import get_root_path
+from ..templating import _default_template_ctx_processor
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from click import Group
+
+# a singleton sentinel value for parameter defaults
+_sentinel = object()
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+T_after_request = t.TypeVar("T_after_request", bound=ft.AfterRequestCallable[t.Any])
+T_before_request = t.TypeVar("T_before_request", bound=ft.BeforeRequestCallable)
+T_error_handler = t.TypeVar("T_error_handler", bound=ft.ErrorHandlerCallable)
+T_teardown = t.TypeVar("T_teardown", bound=ft.TeardownCallable)
+T_template_context_processor = t.TypeVar(
+ "T_template_context_processor", bound=ft.TemplateContextProcessorCallable
+)
+T_url_defaults = t.TypeVar("T_url_defaults", bound=ft.URLDefaultCallable)
+T_url_value_preprocessor = t.TypeVar(
+ "T_url_value_preprocessor", bound=ft.URLValuePreprocessorCallable
+)
+T_route = t.TypeVar("T_route", bound=ft.RouteCallable)
+
+
+def setupmethod(f: F) -> F:
+ f_name = f.__name__
+
+ def wrapper_func(self: Scaffold, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ self._check_setup_finished(f_name)
+ return f(self, *args, **kwargs)
+
+ return t.cast(F, update_wrapper(wrapper_func, f))
+
+
+class Scaffold:
+ """Common behavior shared between :class:`~flask.Flask` and
+ :class:`~flask.blueprints.Blueprint`.
+
+ :param import_name: The import name of the module where this object
+ is defined. Usually :attr:`__name__` should be used.
+ :param static_folder: Path to a folder of static files to serve.
+ If this is set, a static route will be added.
+ :param static_url_path: URL prefix for the static route.
+    :param template_folder: Path to a folder containing template files
+        for rendering. If this is set, a Jinja loader will be added.
+ :param root_path: The path that static, template, and resource files
+ are relative to. Typically not set, it is discovered based on
+ the ``import_name``.
+
+ .. versionadded:: 2.0
+ """
+
+ cli: Group
+ name: str
+ _static_folder: str | None = None
+ _static_url_path: str | None = None
+
+ def __init__(
+ self,
+ import_name: str,
+ static_folder: str | os.PathLike[str] | None = None,
+ static_url_path: str | None = None,
+ template_folder: str | os.PathLike[str] | None = None,
+ root_path: str | None = None,
+ ):
+ #: The name of the package or module that this object belongs
+ #: to. Do not change this once it is set by the constructor.
+ self.import_name = import_name
+
+ self.static_folder = static_folder
+ self.static_url_path = static_url_path
+
+ #: The path to the templates folder, relative to
+ #: :attr:`root_path`, to add to the template loader. ``None`` if
+ #: templates should not be added.
+ self.template_folder = template_folder
+
+ if root_path is None:
+ root_path = get_root_path(self.import_name)
+
+ #: Absolute path to the package on the filesystem. Used to look
+ #: up resources contained in the package.
+ self.root_path = root_path
+
+ #: A dictionary mapping endpoint names to view functions.
+ #:
+ #: To register a view function, use the :meth:`route` decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.view_functions: dict[str, ft.RouteCallable] = {}
+
+ #: A data structure of registered error handlers, in the format
+ #: ``{scope: {code: {class: handler}}}``. The ``scope`` key is
+ #: the name of a blueprint the handlers are active for, or
+ #: ``None`` for all requests. The ``code`` key is the HTTP
+ #: status code for ``HTTPException``, or ``None`` for
+ #: other exceptions. The innermost dictionary maps exception
+ #: classes to handler functions.
+ #:
+ #: To register an error handler, use the :meth:`errorhandler`
+ #: decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.error_handler_spec: dict[
+ ft.AppOrBlueprintKey,
+ dict[int | None, dict[type[Exception], ft.ErrorHandlerCallable]],
+ ] = defaultdict(lambda: defaultdict(dict))
+
+ #: A data structure of functions to call at the beginning of
+ #: each request, in the format ``{scope: [functions]}``. The
+ #: ``scope`` key is the name of a blueprint the functions are
+ #: active for, or ``None`` for all requests.
+ #:
+ #: To register a function, use the :meth:`before_request`
+ #: decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.before_request_funcs: dict[
+ ft.AppOrBlueprintKey, list[ft.BeforeRequestCallable]
+ ] = defaultdict(list)
+
+ #: A data structure of functions to call at the end of each
+ #: request, in the format ``{scope: [functions]}``. The
+ #: ``scope`` key is the name of a blueprint the functions are
+ #: active for, or ``None`` for all requests.
+ #:
+ #: To register a function, use the :meth:`after_request`
+ #: decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.after_request_funcs: dict[
+ ft.AppOrBlueprintKey, list[ft.AfterRequestCallable[t.Any]]
+ ] = defaultdict(list)
+
+ #: A data structure of functions to call at the end of each
+ #: request even if an exception is raised, in the format
+ #: ``{scope: [functions]}``. The ``scope`` key is the name of a
+ #: blueprint the functions are active for, or ``None`` for all
+ #: requests.
+ #:
+ #: To register a function, use the :meth:`teardown_request`
+ #: decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.teardown_request_funcs: dict[
+ ft.AppOrBlueprintKey, list[ft.TeardownCallable]
+ ] = defaultdict(list)
+
+ #: A data structure of functions to call to pass extra context
+ #: values when rendering templates, in the format
+ #: ``{scope: [functions]}``. The ``scope`` key is the name of a
+ #: blueprint the functions are active for, or ``None`` for all
+ #: requests.
+ #:
+ #: To register a function, use the :meth:`context_processor`
+ #: decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.template_context_processors: dict[
+ ft.AppOrBlueprintKey, list[ft.TemplateContextProcessorCallable]
+ ] = defaultdict(list, {None: [_default_template_ctx_processor]})
+
+ #: A data structure of functions to call to modify the keyword
+ #: arguments passed to the view function, in the format
+ #: ``{scope: [functions]}``. The ``scope`` key is the name of a
+ #: blueprint the functions are active for, or ``None`` for all
+ #: requests.
+ #:
+ #: To register a function, use the
+ #: :meth:`url_value_preprocessor` decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.url_value_preprocessors: dict[
+ ft.AppOrBlueprintKey,
+ list[ft.URLValuePreprocessorCallable],
+ ] = defaultdict(list)
+
+ #: A data structure of functions to call to modify the keyword
+ #: arguments when generating URLs, in the format
+ #: ``{scope: [functions]}``. The ``scope`` key is the name of a
+ #: blueprint the functions are active for, or ``None`` for all
+ #: requests.
+ #:
+ #: To register a function, use the :meth:`url_defaults`
+ #: decorator.
+ #:
+ #: This data structure is internal. It should not be modified
+ #: directly and its format may change at any time.
+ self.url_default_functions: dict[
+ ft.AppOrBlueprintKey, list[ft.URLDefaultCallable]
+ ] = defaultdict(list)
+
+ def __repr__(self) -> str:
+ return f"<{type(self).__name__} {self.name!r}>"
+
+ def _check_setup_finished(self, f_name: str) -> None:
+ raise NotImplementedError
+
+ @property
+ def static_folder(self) -> str | None:
+ """The absolute path to the configured static folder. ``None``
+ if no static folder is set.
+ """
+ if self._static_folder is not None:
+ return os.path.join(self.root_path, self._static_folder)
+ else:
+ return None
+
+ @static_folder.setter
+ def static_folder(self, value: str | os.PathLike[str] | None) -> None:
+ if value is not None:
+ value = os.fspath(value).rstrip(r"\/")
+
+ self._static_folder = value
+
+ @property
+ def has_static_folder(self) -> bool:
+ """``True`` if :attr:`static_folder` is set.
+
+ .. versionadded:: 0.5
+ """
+ return self.static_folder is not None
+
+ @property
+ def static_url_path(self) -> str | None:
+ """The URL prefix that the static route will be accessible from.
+
+ If it was not configured during init, it is derived from
+ :attr:`static_folder`.
+ """
+ if self._static_url_path is not None:
+ return self._static_url_path
+
+ if self.static_folder is not None:
+ basename = os.path.basename(self.static_folder)
+ return f"/{basename}".rstrip("/")
+
+ return None
+
+ @static_url_path.setter
+ def static_url_path(self, value: str | None) -> None:
+ if value is not None:
+ value = value.rstrip("/")
+
+ self._static_url_path = value
+
+ @cached_property
+ def jinja_loader(self) -> BaseLoader | None:
+ """The Jinja loader for this object's templates. By default this
+ is a class :class:`jinja2.loaders.FileSystemLoader` to
+ :attr:`template_folder` if it is set.
+
+ .. versionadded:: 0.5
+ """
+ if self.template_folder is not None:
+ return FileSystemLoader(os.path.join(self.root_path, self.template_folder))
+ else:
+ return None
+
+ def _method_route(
+ self,
+ method: str,
+ rule: str,
+ options: dict[str, t.Any],
+ ) -> t.Callable[[T_route], T_route]:
+ if "methods" in options:
+ raise TypeError("Use the 'route' decorator to use the 'methods' argument.")
+
+ return self.route(rule, methods=[method], **options)
+
+ @setupmethod
+ def get(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["GET"]``.
+
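+ For example:
+
+ .. code-block:: python
+
+     @app.get("/items")
+     def list_items():
+         return ["a", "b"]
+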
+ .. versionadded:: 2.0
+ """
+ return self._method_route("GET", rule, options)
+
+ @setupmethod
+ def post(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["POST"]``.
+
+ .. versionadded:: 2.0
+ """
+ return self._method_route("POST", rule, options)
+
+ @setupmethod
+ def put(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["PUT"]``.
+
+ .. versionadded:: 2.0
+ """
+ return self._method_route("PUT", rule, options)
+
+ @setupmethod
+ def delete(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["DELETE"]``.
+
+ .. versionadded:: 2.0
+ """
+ return self._method_route("DELETE", rule, options)
+
+ @setupmethod
+ def patch(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Shortcut for :meth:`route` with ``methods=["PATCH"]``.
+
+ .. versionadded:: 2.0
+ """
+ return self._method_route("PATCH", rule, options)
+
+ @setupmethod
+ def route(self, rule: str, **options: t.Any) -> t.Callable[[T_route], T_route]:
+ """Decorate a view function to register it with the given URL
+ rule and options. Calls :meth:`add_url_rule`, which has more
+ details about the implementation.
+
+ .. code-block:: python
+
+ @app.route("/")
+ def index():
+ return "Hello, World!"
+
+ See :ref:`url-route-registrations`.
+
+ The endpoint name for the route defaults to the name of the view
+ function if the ``endpoint`` parameter isn't passed.
+
+ The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` and
+ ``OPTIONS`` are added automatically.
+
+ :param rule: The URL rule string.
+ :param options: Extra options passed to the
+ :class:`~werkzeug.routing.Rule` object.
+ """
+
+ def decorator(f: T_route) -> T_route:
+ endpoint = options.pop("endpoint", None)
+ self.add_url_rule(rule, endpoint, f, **options)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def add_url_rule(
+ self,
+ rule: str,
+ endpoint: str | None = None,
+ view_func: ft.RouteCallable | None = None,
+ provide_automatic_options: bool | None = None,
+ **options: t.Any,
+ ) -> None:
+ """Register a rule for routing incoming requests and building
+ URLs. The :meth:`route` decorator is a shortcut to call this
+ with the ``view_func`` argument. These are equivalent:
+
+ .. code-block:: python
+
+ @app.route("/")
+ def index():
+ ...
+
+ .. code-block:: python
+
+ def index():
+ ...
+
+ app.add_url_rule("/", view_func=index)
+
+ See :ref:`url-route-registrations`.
+
+ The endpoint name for the route defaults to the name of the view
+ function if the ``endpoint`` parameter isn't passed. An error
+ will be raised if a function has already been registered for the
+ endpoint.
+
+ The ``methods`` parameter defaults to ``["GET"]``. ``HEAD`` is
+ always added automatically, and ``OPTIONS`` is added
+ automatically by default.
+
+ ``view_func`` does not necessarily need to be passed, but if the
+ rule should participate in routing, an endpoint name must be
+ associated with a view function at some point with the
+ :meth:`endpoint` decorator.
+
+ .. code-block:: python
+
+ app.add_url_rule("/", endpoint="index")
+
+ @app.endpoint("index")
+ def index():
+ ...
+
+ If ``view_func`` has a ``required_methods`` attribute, those
+ methods are added to the passed and automatic methods. If it
+ has a ``provide_automatic_options`` attribute, it is used as the
+ default if the parameter is not passed.
+
+ :param rule: The URL rule string.
+ :param endpoint: The endpoint name to associate with the rule
+ and view function. Used when routing and building URLs.
+ Defaults to ``view_func.__name__``.
+ :param view_func: The view function to associate with the
+ endpoint name.
+ :param provide_automatic_options: Add the ``OPTIONS`` method and
+ respond to ``OPTIONS`` requests automatically.
+ :param options: Extra options passed to the
+ :class:`~werkzeug.routing.Rule` object.
+ """
+ raise NotImplementedError
+
+ @setupmethod
+ def endpoint(self, endpoint: str) -> t.Callable[[F], F]:
+ """Decorate a view function to register it for the given
+ endpoint. Used if a rule is added without a ``view_func`` with
+ :meth:`add_url_rule`.
+
+ .. code-block:: python
+
+ app.add_url_rule("/ex", endpoint="example")
+
+ @app.endpoint("example")
+ def example():
+ ...
+
+ :param endpoint: The endpoint name to associate with the view
+ function.
+ """
+
+ def decorator(f: F) -> F:
+ self.view_functions[endpoint] = f
+ return f
+
+ return decorator
+
+ @setupmethod
+ def before_request(self, f: T_before_request) -> T_before_request:
+ """Register a function to run before each request.
+
+ For example, this can be used to open a database connection, or
+ to load the logged in user from the session.
+
+ .. code-block:: python
+
+ @app.before_request
+ def load_user():
+ if "user_id" in session:
+ g.user = db.session.get(session["user_id"])
+
+ The function will be called without any arguments. If it returns
+ a non-``None`` value, the value is handled as if it was the
+ return value from the view, and further request handling is
+ stopped.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ executes before every request. When used on a blueprint, this executes before
+ every request that the blueprint handles. To register with a blueprint and
+ execute before every request, use :meth:`.Blueprint.before_app_request`.
+ """
+ self.before_request_funcs.setdefault(None, []).append(f)
+ return f
+
+ @setupmethod
+ def after_request(self, f: T_after_request) -> T_after_request:
+ """Register a function to run after each request to this object.
+
+ The function is called with the response object, and must return
+ a response object. This allows the functions to modify or
+ replace the response before it is sent.
+
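+ For example, a sketch that adds a response header (the header name
+ here is arbitrary):
+
+ .. code-block:: python
+
+     @app.after_request
+     def add_header(response):
+         response.headers["X-Processed"] = "yes"
+         return response
+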
+ If a function raises an exception, any remaining
+ ``after_request`` functions will not be called. Therefore, this
+ should not be used for actions that must execute, such as to
+ close resources. Use :meth:`teardown_request` for that.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ executes after every request. When used on a blueprint, this executes after
+ every request that the blueprint handles. To register with a blueprint and
+ execute after every request, use :meth:`.Blueprint.after_app_request`.
+ """
+ self.after_request_funcs.setdefault(None, []).append(f)
+ return f
+
+ @setupmethod
+ def teardown_request(self, f: T_teardown) -> T_teardown:
+ """Register a function to be called when the request context is
+ popped. Typically this happens at the end of each request, but
+ contexts may be pushed manually as well during testing.
+
+ .. code-block:: python
+
+ with app.test_request_context():
+ ...
+
+ When the ``with`` block exits (or ``ctx.pop()`` is called), the
+ teardown functions are called just before the request context is
+ made inactive.
+
+ When a teardown function was called because of an unhandled
+ exception it will be passed an error object. If an
+ :meth:`errorhandler` is registered, it will handle the exception
+ and the teardown will not receive it.
+
+ Teardown functions must avoid raising exceptions. If they
+ execute code that might fail they must surround that code with a
+ ``try``/``except`` block and log any errors.
+
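+ For example (a sketch; ``g.db`` is a hypothetical resource opened
+ in a ``before_request`` function):
+
+ .. code-block:: python
+
+     @app.teardown_request
+     def close_db(exc):
+         db = g.pop("db", None)
+
+         if db is not None:
+             db.close()
+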
+ The return values of teardown functions are ignored.
+
+ This is available on both app and blueprint objects. When used on an app, this
+ executes after every request. When used on a blueprint, this executes after
+ every request that the blueprint handles. To register with a blueprint and
+ execute after every request, use :meth:`.Blueprint.teardown_app_request`.
+ """
+ self.teardown_request_funcs.setdefault(None, []).append(f)
+ return f
+
+ @setupmethod
+ def context_processor(
+ self,
+ f: T_template_context_processor,
+ ) -> T_template_context_processor:
+ """Registers a template context processor function. These functions run before
+ rendering a template. The keys of the returned dict are added as variables
+ available in the template.
+
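+ For example (a minimal sketch; the variable name is illustrative):
+
+ .. code-block:: python
+
+     @app.context_processor
+     def inject_site_name():
+         return {"site_name": "Example"}
+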
+ This is available on both app and blueprint objects. When used on an app, this
+ is called for every rendered template. When used on a blueprint, this is called
+ for templates rendered from the blueprint's views. To register with a blueprint
+ and affect every template, use :meth:`.Blueprint.app_context_processor`.
+ """
+ self.template_context_processors[None].append(f)
+ return f
+
+ @setupmethod
+ def url_value_preprocessor(
+ self,
+ f: T_url_value_preprocessor,
+ ) -> T_url_value_preprocessor:
+ """Register a URL value preprocessor function for all view
+ functions in the application. These functions will be called before the
+ :meth:`before_request` functions.
+
+ The function can modify the values captured from the matched url before
+ they are passed to the view. For example, this can be used to pop a
+ common language code value and place it in ``g`` rather than pass it to
+ every view.
+
+ The function is passed the endpoint name and values dict. The return
+ value is ignored.
+
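+ For example, a sketch of the language-code pattern above, assuming
+ the URL rules include a ``lang_code`` variable:
+
+ .. code-block:: python
+
+     @app.url_value_preprocessor
+     def pull_lang_code(endpoint, values):
+         if values is not None:
+             g.lang_code = values.pop("lang_code", None)
+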
+ This is available on both app and blueprint objects. When used on an app, this
+ is called for every request. When used on a blueprint, this is called for
+ requests that the blueprint handles. To register with a blueprint and affect
+ every request, use :meth:`.Blueprint.app_url_value_preprocessor`.
+ """
+ self.url_value_preprocessors[None].append(f)
+ return f
+
+ @setupmethod
+ def url_defaults(self, f: T_url_defaults) -> T_url_defaults:
+ """Callback function for URL defaults for all view functions of the
+ application. It's called with the endpoint and values and should
+ update the values passed in place.
+
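+ For example, a sketch pairing with the ``lang_code`` preprocessor
+ above:
+
+ .. code-block:: python
+
+     @app.url_defaults
+     def add_lang_code(endpoint, values):
+         values.setdefault("lang_code", g.get("lang_code"))
+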
+ This is available on both app and blueprint objects. When used on an app, this
+ is called for every request. When used on a blueprint, this is called for
+ requests that the blueprint handles. To register with a blueprint and affect
+ every request, use :meth:`.Blueprint.app_url_defaults`.
+ """
+ self.url_default_functions[None].append(f)
+ return f
+
+ @setupmethod
+ def errorhandler(
+ self, code_or_exception: type[Exception] | int
+ ) -> t.Callable[[T_error_handler], T_error_handler]:
+ """Register a function to handle errors by code or exception class.
+
+ A decorator that is used to register a function given an
+ error code. Example::
+
+ @app.errorhandler(404)
+ def page_not_found(error):
+ return 'This page does not exist', 404
+
+ You can also register handlers for arbitrary exceptions::
+
+ @app.errorhandler(DatabaseError)
+ def special_exception_handler(error):
+ return 'Database connection failed', 500
+
+ This is available on both app and blueprint objects. When used on an app, this
+ can handle errors from every request. When used on a blueprint, this can handle
+ errors from requests that the blueprint handles. To register with a blueprint
+ and affect every request, use :meth:`.Blueprint.app_errorhandler`.
+
+ .. versionadded:: 0.7
+ Use :meth:`register_error_handler` instead of modifying
+ :attr:`error_handler_spec` directly, for application-wide error
+ handlers.
+
+ .. versionadded:: 0.7
+ One can now additionally also register custom exception types
+ that do not necessarily have to be a subclass of the
+ :class:`~werkzeug.exceptions.HTTPException` class.
+
+ :param code_or_exception: the code as integer for the handler, or
+ an arbitrary exception
+ """
+
+ def decorator(f: T_error_handler) -> T_error_handler:
+ self.register_error_handler(code_or_exception, f)
+ return f
+
+ return decorator
+
+ @setupmethod
+ def register_error_handler(
+ self,
+ code_or_exception: type[Exception] | int,
+ f: ft.ErrorHandlerCallable,
+ ) -> None:
+ """Alternative error attach function to the :meth:`errorhandler`
+ decorator that is more straightforward to use for non decorator
+ usage.
+
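+ For example::
+
+     def handle_not_found(error):
+         return "Not Found", 404
+
+     app.register_error_handler(404, handle_not_found)
+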
+ .. versionadded:: 0.7
+ """
+ exc_class, code = self._get_exc_class_and_code(code_or_exception)
+ self.error_handler_spec[None][code][exc_class] = f
+
+ @staticmethod
+ def _get_exc_class_and_code(
+ exc_class_or_code: type[Exception] | int,
+ ) -> tuple[type[Exception], int | None]:
+ """Get the exception class being handled. For HTTP status codes
+ or ``HTTPException`` subclasses, return both the exception and
+ status code.
+
+ :param exc_class_or_code: Any exception class, or an HTTP status
+ code as an integer.
+ """
+ exc_class: type[Exception]
+
+ if isinstance(exc_class_or_code, int):
+ try:
+ exc_class = default_exceptions[exc_class_or_code]
+ except KeyError:
+ raise ValueError(
+ f"'{exc_class_or_code}' is not a recognized HTTP"
+ " error code. Use a subclass of HTTPException with"
+ " that code instead."
+ ) from None
+ else:
+ exc_class = exc_class_or_code
+
+ if isinstance(exc_class, Exception):
+ raise TypeError(
+ f"{exc_class!r} is an instance, not a class. Handlers"
+ " can only be registered for Exception classes or HTTP"
+ " error codes."
+ )
+
+ if not issubclass(exc_class, Exception):
+ raise ValueError(
+ f"'{exc_class.__name__}' is not a subclass of Exception."
+ " Handlers can only be registered for Exception classes"
+ " or HTTP error codes."
+ )
+
+ if issubclass(exc_class, HTTPException):
+ return exc_class, exc_class.code
+ else:
+ return exc_class, None
+
+
+def _endpoint_from_view_func(view_func: ft.RouteCallable) -> str:
+ """Internal helper that returns the default endpoint for a given
+ function. This is always the function name.
+ """
+ assert view_func is not None, "expected view func if endpoint is not provided."
+ return view_func.__name__
+
+
+def _find_package_path(import_name: str) -> str:
+ """Find the path that contains the package or module."""
+ root_mod_name, _, _ = import_name.partition(".")
+
+ try:
+ root_spec = importlib.util.find_spec(root_mod_name)
+
+ if root_spec is None:
+ raise ValueError("not found")
+ except (ImportError, ValueError):
+ # ImportError: the machinery told us it does not exist
+ # ValueError:
+ # - the module name was invalid
+ # - the module name is __main__
+ # - we raised `ValueError` due to `root_spec` being `None`
+ return os.getcwd()
+
+ if root_spec.submodule_search_locations:
+ if root_spec.origin is None or root_spec.origin == "namespace":
+ # namespace package
+ package_spec = importlib.util.find_spec(import_name)
+
+ if package_spec is not None and package_spec.submodule_search_locations:
+ # Pick the path in the namespace that contains the submodule.
+ package_path = pathlib.Path(
+ os.path.commonpath(package_spec.submodule_search_locations)
+ )
+ search_location = next(
+ location
+ for location in root_spec.submodule_search_locations
+ if package_path.is_relative_to(location)
+ )
+ else:
+ # Pick the first path.
+ search_location = root_spec.submodule_search_locations[0]
+
+ return os.path.dirname(search_location)
+ else:
+ # package with __init__.py
+ return os.path.dirname(os.path.dirname(root_spec.origin))
+ else:
+ # module
+ return os.path.dirname(root_spec.origin) # type: ignore[type-var, return-value]
+
+
+def find_package(import_name: str) -> tuple[str | None, str]:
+ """Find the prefix that a package is installed under, and the path
+ that it would be imported from.
+
+ The prefix is the directory containing the standard directory
+ hierarchy (lib, bin, etc.). If the package is not installed to the
+ system (:attr:`sys.prefix`) or a virtualenv (``site-packages``),
+ ``None`` is returned.
+
+ The path is the entry in :attr:`sys.path` that contains the package
+ for import. If the package is not installed, it's assumed that the
+ package was imported from the current working directory.
+ """
+ package_path = _find_package_path(import_name)
+ py_prefix = os.path.abspath(sys.prefix)
+
+ # installed to the system
+ if pathlib.PurePath(package_path).is_relative_to(py_prefix):
+ return py_prefix, package_path
+
+ site_parent, site_folder = os.path.split(package_path)
+
+ # installed to a virtualenv
+ if site_folder.lower() == "site-packages":
+ parent, folder = os.path.split(site_parent)
+
+ # Windows (prefix/lib/site-packages)
+ if folder.lower() == "lib":
+ return parent, package_path
+
+ # Unix (prefix/lib/pythonX.Y/site-packages)
+ if os.path.basename(parent).lower() == "lib":
+ return os.path.dirname(parent), package_path
+
+ # something else (prefix/site-packages)
+ return site_parent, package_path
+
+ # not installed
+ return None, package_path
diff --git a/tapdown/lib/python3.11/site-packages/flask/sessions.py b/tapdown/lib/python3.11/site-packages/flask/sessions.py
new file mode 100644
index 0000000..0a357d9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/sessions.py
@@ -0,0 +1,399 @@
+from __future__ import annotations
+
+import collections.abc as c
+import hashlib
+import typing as t
+from collections.abc import MutableMapping
+from datetime import datetime
+from datetime import timezone
+
+from itsdangerous import BadSignature
+from itsdangerous import URLSafeTimedSerializer
+from werkzeug.datastructures import CallbackDict
+
+from .json.tag import TaggedJSONSerializer
+
+if t.TYPE_CHECKING: # pragma: no cover
+ import typing_extensions as te
+
+ from .app import Flask
+ from .wrappers import Request
+ from .wrappers import Response
+
+
+class SessionMixin(MutableMapping[str, t.Any]):
+ """Expands a basic dictionary with session attributes."""
+
+ @property
+ def permanent(self) -> bool:
+ """This reflects the ``'_permanent'`` key in the dict."""
+ return self.get("_permanent", False)
+
+ @permanent.setter
+ def permanent(self, value: bool) -> None:
+ self["_permanent"] = bool(value)
+
+ #: Some implementations can detect whether a session is newly
+ #: created, but that is not guaranteed. Use with caution. The mixin
+ #: default is hard coded to ``False``.
+ new = False
+
+ #: Some implementations can detect changes to the session and set
+ #: this when that happens. The mixin default is hard coded to
+ #: ``True``.
+ modified = True
+
+ #: Some implementations can detect when session data is read or
+ #: written and set this when that happens. The mixin default is hard
+ #: coded to ``True``.
+ accessed = True
+
+
+class SecureCookieSession(CallbackDict[str, t.Any], SessionMixin):
+ """Base class for sessions based on signed cookies.
+
+ This session backend will set the :attr:`modified` and
+ :attr:`accessed` attributes. It cannot reliably track whether a
+ session is new (vs. empty), so :attr:`new` remains hard coded to
+ ``False``.
+ """
+
+ #: When data is changed, this is set to ``True``. Only the session
+ #: dictionary itself is tracked; if the session contains mutable
+ #: data (for example a nested dict) then this must be set to
+ #: ``True`` manually when modifying that data. The session cookie
+ #: will only be written to the response if this is ``True``.
+ modified = False
+
+ #: When data is read or written, this is set to ``True``. Used by
+ #: :class:`.SecureCookieSessionInterface` to add a ``Vary: Cookie``
+ #: header, which allows caching proxies to cache different pages for
+ #: different users.
+ accessed = False
+
+ def __init__(
+ self,
+ initial: c.Mapping[str, t.Any] | c.Iterable[tuple[str, t.Any]] | None = None,
+ ) -> None:
+ def on_update(self: te.Self) -> None:
+ self.modified = True
+ self.accessed = True
+
+ super().__init__(initial, on_update)
+
+ def __getitem__(self, key: str) -> t.Any:
+ self.accessed = True
+ return super().__getitem__(key)
+
+ def get(self, key: str, default: t.Any = None) -> t.Any:
+ self.accessed = True
+ return super().get(key, default)
+
+ def setdefault(self, key: str, default: t.Any = None) -> t.Any:
+ self.accessed = True
+ return super().setdefault(key, default)
+
+
+class NullSession(SecureCookieSession):
+ """Class used to generate nicer error messages if sessions are not
+ available. Will still allow read-only access to the empty session
+ but fail on setting.
+ """
+
+ def _fail(self, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
+ raise RuntimeError(
+ "The session is unavailable because no secret "
+ "key was set. Set the secret_key on the "
+ "application to something unique and secret."
+ )
+
+ __setitem__ = __delitem__ = clear = pop = popitem = update = setdefault = _fail # noqa: B950
+ del _fail
+
+
+class SessionInterface:
+ """The basic interface you have to implement in order to replace the
+ default session interface which uses werkzeug's securecookie
+ implementation. The only methods you have to implement are
+ :meth:`open_session` and :meth:`save_session`; the others have
+ useful defaults which you don't need to change.
+
+ The session object returned by the :meth:`open_session` method has to
+ provide a dictionary like interface plus the properties and methods
+ from the :class:`SessionMixin`. We recommend just subclassing a dict
+ and adding that mixin::
+
+ class Session(dict, SessionMixin):
+ pass
+
+ If :meth:`open_session` returns ``None``, Flask will call into
+ :meth:`make_null_session` to create a session that acts as a replacement
+ if the session support cannot work because some requirement is not
+ fulfilled. The default :class:`NullSession` class that is created
+ will complain that the secret key was not set.
+
+ To replace the session interface on an application, all you have to do
+ is to assign :attr:`flask.Flask.session_interface`::
+
+ app = Flask(__name__)
+ app.session_interface = MySessionInterface()
+
+ Multiple requests with the same session may be sent and handled
+ concurrently. When implementing a new session interface, consider
+ whether reads or writes to the backing store must be synchronized.
+ There is no guarantee on the order in which the session for each
+ request is opened or saved; it will occur in the order that requests
+ begin and end processing.
+
+ .. versionadded:: 0.8
+ """
+
+ #: :meth:`make_null_session` will look here for the class that should
+ #: be created when a null session is requested. Likewise the
+ #: :meth:`is_null_session` method will perform a typecheck against
+ #: this type.
+ null_session_class = NullSession
+
+ #: A flag that indicates if the session interface is pickle based.
+ #: This can be used by Flask extensions to make a decision in regards
+ #: to how to deal with the session object.
+ #:
+ #: .. versionadded:: 0.10
+ pickle_based = False
+
+ def make_null_session(self, app: Flask) -> NullSession:
+ """Creates a null session which acts as a replacement object if the
+ real session support could not be loaded due to a configuration
+ error. This mainly aids the user experience: the null session
+ still supports lookups without complaining, while modifications
+ are answered with a helpful error message about what failed.
+
+ This creates an instance of :attr:`null_session_class` by default.
+ """
+ return self.null_session_class()
+
+ def is_null_session(self, obj: object) -> bool:
+ """Checks if a given object is a null session. Null sessions are
+ not asked to be saved.
+
+ This checks if the object is an instance of :attr:`null_session_class`
+ by default.
+ """
+ return isinstance(obj, self.null_session_class)
+
+ def get_cookie_name(self, app: Flask) -> str:
+ """The name of the session cookie. Uses``app.config["SESSION_COOKIE_NAME"]``."""
+ return app.config["SESSION_COOKIE_NAME"] # type: ignore[no-any-return]
+
+ def get_cookie_domain(self, app: Flask) -> str | None:
+ """The value of the ``Domain`` parameter on the session cookie. If not set,
+ browsers will only send the cookie to the exact domain it was set from.
+ Otherwise, they will send it to any subdomain of the given value as well.
+
+ Uses the :data:`SESSION_COOKIE_DOMAIN` config.
+
+ .. versionchanged:: 2.3
+ Not set by default, does not fall back to ``SERVER_NAME``.
+ """
+ return app.config["SESSION_COOKIE_DOMAIN"] # type: ignore[no-any-return]
+
+ def get_cookie_path(self, app: Flask) -> str:
+ """Returns the path for which the cookie should be valid. The
+ default implementation uses the value from the ``SESSION_COOKIE_PATH``
+ config var if it's set, and falls back to ``APPLICATION_ROOT`` or
+ uses ``/`` if it's ``None``.
+ """
+ return app.config["SESSION_COOKIE_PATH"] or app.config["APPLICATION_ROOT"] # type: ignore[no-any-return]
+
+ def get_cookie_httponly(self, app: Flask) -> bool:
+ """Returns True if the session cookie should be httponly. This
+ currently just returns the value of the ``SESSION_COOKIE_HTTPONLY``
+ config var.
+ """
+ return app.config["SESSION_COOKIE_HTTPONLY"] # type: ignore[no-any-return]
+
+ def get_cookie_secure(self, app: Flask) -> bool:
+ """Returns True if the cookie should be secure. This currently
+ just returns the value of the ``SESSION_COOKIE_SECURE`` setting.
+ """
+ return app.config["SESSION_COOKIE_SECURE"] # type: ignore[no-any-return]
+
+ def get_cookie_samesite(self, app: Flask) -> str | None:
+ """Return ``'Strict'`` or ``'Lax'`` if the cookie should use the
+ ``SameSite`` attribute. This currently just returns the value of
+ the :data:`SESSION_COOKIE_SAMESITE` setting.
+ """
+ return app.config["SESSION_COOKIE_SAMESITE"] # type: ignore[no-any-return]
+
+ def get_cookie_partitioned(self, app: Flask) -> bool:
+ """Returns True if the cookie should be partitioned. By default, uses
+ the value of :data:`SESSION_COOKIE_PARTITIONED`.
+
+ .. versionadded:: 3.1
+ """
+ return app.config["SESSION_COOKIE_PARTITIONED"] # type: ignore[no-any-return]
+
+ def get_expiration_time(self, app: Flask, session: SessionMixin) -> datetime | None:
+ """A helper method that returns an expiration date for the session
+ or ``None`` if the session is linked to the browser session. The
+ default implementation returns now + the permanent session
+ lifetime configured on the application.
+ """
+ if session.permanent:
+ return datetime.now(timezone.utc) + app.permanent_session_lifetime
+ return None
+
+ def should_set_cookie(self, app: Flask, session: SessionMixin) -> bool:
+ """Used by session backends to determine if a ``Set-Cookie`` header
+ should be set for this session cookie for this response. If the session
+ has been modified, the cookie is set. If the session is permanent and
+ the ``SESSION_REFRESH_EACH_REQUEST`` config is true, the cookie is
+ always set.
+
+ This check is usually skipped if the session was deleted.
+
+ .. versionadded:: 0.11
+ """
+
+ return session.modified or (
+ session.permanent and app.config["SESSION_REFRESH_EACH_REQUEST"]
+ )
+
+ def open_session(self, app: Flask, request: Request) -> SessionMixin | None:
+ """This is called at the beginning of each request, after
+ pushing the request context, before matching the URL.
+
+ This must return an object which implements a dictionary-like
+ interface as well as the :class:`SessionMixin` interface.
+
+ This will return ``None`` to indicate that loading failed in
+ some way that is not immediately an error. The request
+ context will fall back to using :meth:`make_null_session`
+ in this case.
+ """
+ raise NotImplementedError()
+
+ def save_session(
+ self, app: Flask, session: SessionMixin, response: Response
+ ) -> None:
+ """This is called at the end of each request, after generating
+ a response, before removing the request context. It is skipped
+ if :meth:`is_null_session` returns ``True``.
+ """
+ raise NotImplementedError()
+
+
+session_json_serializer = TaggedJSONSerializer()
+
+
+def _lazy_sha1(string: bytes = b"") -> t.Any:
+ """Don't access ``hashlib.sha1`` until runtime. FIPS builds may not include
+ SHA-1, in which case the import and use as a default would fail before the
+ developer can configure something else.
+ """
+ return hashlib.sha1(string)
+
+
+class SecureCookieSessionInterface(SessionInterface):
+ """The default session interface that stores sessions in signed cookies
+ through the :mod:`itsdangerous` module.
+ """
+
+ #: the salt that should be applied on top of the secret key for the
+ #: signing of cookie based sessions.
+ salt = "cookie-session"
+ #: the hash function to use for the signature. The default is SHA-1.
+ digest_method = staticmethod(_lazy_sha1)
+ #: the name of the itsdangerous supported key derivation. The default
+ #: is hmac.
+ key_derivation = "hmac"
+ #: A python serializer for the payload. The default is a compact
+ #: JSON derived serializer with support for some extra Python types
+ #: such as datetime objects or tuples.
+ serializer = session_json_serializer
+ session_class = SecureCookieSession
+
+ def get_signing_serializer(self, app: Flask) -> URLSafeTimedSerializer | None:
+ if not app.secret_key:
+ return None
+
+ keys: list[str | bytes] = []
+
+ if fallbacks := app.config["SECRET_KEY_FALLBACKS"]:
+ keys.extend(fallbacks)
+
+ keys.append(app.secret_key) # itsdangerous expects current key at top
+ return URLSafeTimedSerializer(
+ keys, # type: ignore[arg-type]
+ salt=self.salt,
+ serializer=self.serializer,
+ signer_kwargs={
+ "key_derivation": self.key_derivation,
+ "digest_method": self.digest_method,
+ },
+ )
+
+ def open_session(self, app: Flask, request: Request) -> SecureCookieSession | None:
+ s = self.get_signing_serializer(app)
+ if s is None:
+ return None
+ val = request.cookies.get(self.get_cookie_name(app))
+ if not val:
+ return self.session_class()
+ max_age = int(app.permanent_session_lifetime.total_seconds())
+ try:
+ data = s.loads(val, max_age=max_age)
+ return self.session_class(data)
+ except BadSignature:
+ return self.session_class()
+
+ def save_session(
+ self, app: Flask, session: SessionMixin, response: Response
+ ) -> None:
+ name = self.get_cookie_name(app)
+ domain = self.get_cookie_domain(app)
+ path = self.get_cookie_path(app)
+ secure = self.get_cookie_secure(app)
+ partitioned = self.get_cookie_partitioned(app)
+ samesite = self.get_cookie_samesite(app)
+ httponly = self.get_cookie_httponly(app)
+
+ # Add a "Vary: Cookie" header if the session was accessed at all.
+ if session.accessed:
+ response.vary.add("Cookie")
+
+ # If the session is modified to be empty, remove the cookie.
+ # If the session is empty, return without setting the cookie.
+ if not session:
+ if session.modified:
+ response.delete_cookie(
+ name,
+ domain=domain,
+ path=path,
+ secure=secure,
+ partitioned=partitioned,
+ samesite=samesite,
+ httponly=httponly,
+ )
+ response.vary.add("Cookie")
+
+ return
+
+ if not self.should_set_cookie(app, session):
+ return
+
+ expires = self.get_expiration_time(app, session)
+ val = self.get_signing_serializer(app).dumps(dict(session)) # type: ignore[union-attr]
+ response.set_cookie(
+ name,
+ val,
+ expires=expires,
+ httponly=httponly,
+ domain=domain,
+ path=path,
+ secure=secure,
+ partitioned=partitioned,
+ samesite=samesite,
+ )
+ response.vary.add("Cookie")
diff --git a/tapdown/lib/python3.11/site-packages/flask/signals.py b/tapdown/lib/python3.11/site-packages/flask/signals.py
new file mode 100644
index 0000000..444fda9
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/signals.py
@@ -0,0 +1,17 @@
+from __future__ import annotations
+
+from blinker import Namespace
+
+# This namespace is only for signals provided by Flask itself.
+_signals = Namespace()
+
+template_rendered = _signals.signal("template-rendered")
+before_render_template = _signals.signal("before-render-template")
+request_started = _signals.signal("request-started")
+request_finished = _signals.signal("request-finished")
+request_tearing_down = _signals.signal("request-tearing-down")
+got_request_exception = _signals.signal("got-request-exception")
+appcontext_tearing_down = _signals.signal("appcontext-tearing-down")
+appcontext_pushed = _signals.signal("appcontext-pushed")
+appcontext_popped = _signals.signal("appcontext-popped")
+message_flashed = _signals.signal("message-flashed")
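+
+# Receivers connect through blinker's ``connect`` method. A minimal
+# sketch (the receiver name is illustrative):
+#
+#     def log_template(sender, template, context, **extra):
+#         sender.logger.debug("Rendered %s", template.name)
+#
+#     template_rendered.connect(log_template, app)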
diff --git a/tapdown/lib/python3.11/site-packages/flask/templating.py b/tapdown/lib/python3.11/site-packages/flask/templating.py
new file mode 100644
index 0000000..16d480f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/templating.py
@@ -0,0 +1,219 @@
+from __future__ import annotations
+
+import typing as t
+
+from jinja2 import BaseLoader
+from jinja2 import Environment as BaseEnvironment
+from jinja2 import Template
+from jinja2 import TemplateNotFound
+
+from .globals import _cv_app
+from .globals import _cv_request
+from .globals import current_app
+from .globals import request
+from .helpers import stream_with_context
+from .signals import before_render_template
+from .signals import template_rendered
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from .app import Flask
+ from .sansio.app import App
+ from .sansio.scaffold import Scaffold
+
+
+def _default_template_ctx_processor() -> dict[str, t.Any]:
+ """Default template context processor. Injects `request`,
+ `session` and `g`.
+ """
+ appctx = _cv_app.get(None)
+ reqctx = _cv_request.get(None)
+ rv: dict[str, t.Any] = {}
+ if appctx is not None:
+ rv["g"] = appctx.g
+ if reqctx is not None:
+ rv["request"] = reqctx.request
+ rv["session"] = reqctx.session
+ return rv
+
+
+class Environment(BaseEnvironment):
+ """Works like a regular Jinja environment but has some additional
+ knowledge of how Flask's blueprint works so that it can prepend the
+ name of the blueprint to referenced templates if necessary.
+ """
+
+ def __init__(self, app: App, **options: t.Any) -> None:
+ if "loader" not in options:
+ options["loader"] = app.create_global_jinja_loader()
+ BaseEnvironment.__init__(self, **options)
+ self.app = app
+
+
+class DispatchingJinjaLoader(BaseLoader):
+ """A loader that looks for templates in the application and all
+ the blueprint folders.
+ """
+
+ def __init__(self, app: App) -> None:
+ self.app = app
+
+ def get_source(
+ self, environment: BaseEnvironment, template: str
+ ) -> tuple[str, str | None, t.Callable[[], bool] | None]:
+ if self.app.config["EXPLAIN_TEMPLATE_LOADING"]:
+ return self._get_source_explained(environment, template)
+ return self._get_source_fast(environment, template)
+
+ def _get_source_explained(
+ self, environment: BaseEnvironment, template: str
+ ) -> tuple[str, str | None, t.Callable[[], bool] | None]:
+ attempts = []
+ rv: tuple[str, str | None, t.Callable[[], bool] | None] | None
+ trv: None | (tuple[str, str | None, t.Callable[[], bool] | None]) = None
+
+ for srcobj, loader in self._iter_loaders(template):
+ try:
+ rv = loader.get_source(environment, template)
+ if trv is None:
+ trv = rv
+ except TemplateNotFound:
+ rv = None
+ attempts.append((loader, srcobj, rv))
+
+ from .debughelpers import explain_template_loading_attempts
+
+ explain_template_loading_attempts(self.app, template, attempts)
+
+ if trv is not None:
+ return trv
+ raise TemplateNotFound(template)
+
+ def _get_source_fast(
+ self, environment: BaseEnvironment, template: str
+ ) -> tuple[str, str | None, t.Callable[[], bool] | None]:
+ for _srcobj, loader in self._iter_loaders(template):
+ try:
+ return loader.get_source(environment, template)
+ except TemplateNotFound:
+ continue
+ raise TemplateNotFound(template)
+
+ def _iter_loaders(self, template: str) -> t.Iterator[tuple[Scaffold, BaseLoader]]:
+ loader = self.app.jinja_loader
+ if loader is not None:
+ yield self.app, loader
+
+ for blueprint in self.app.iter_blueprints():
+ loader = blueprint.jinja_loader
+ if loader is not None:
+ yield blueprint, loader
+
+ def list_templates(self) -> list[str]:
+ result = set()
+ loader = self.app.jinja_loader
+ if loader is not None:
+ result.update(loader.list_templates())
+
+ for blueprint in self.app.iter_blueprints():
+ loader = blueprint.jinja_loader
+ if loader is not None:
+ for template in loader.list_templates():
+ result.add(template)
+
+ return list(result)
+
+
+def _render(app: Flask, template: Template, context: dict[str, t.Any]) -> str:
+ app.update_template_context(context)
+ before_render_template.send(
+ app, _async_wrapper=app.ensure_sync, template=template, context=context
+ )
+ rv = template.render(context)
+ template_rendered.send(
+ app, _async_wrapper=app.ensure_sync, template=template, context=context
+ )
+ return rv
+
+
+def render_template(
+ template_name_or_list: str | Template | list[str | Template],
+ **context: t.Any,
+) -> str:
+ """Render a template by name with the given context.
+
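+ For example, in a view (assuming ``hello.html`` exists in the
+ application's template folder)::
+
+     return render_template("hello.html", name="World")
+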
+ :param template_name_or_list: The name of the template to render. If
+ a list is given, the first name to exist will be rendered.
+ :param context: The variables to make available in the template.
+ """
+ app = current_app._get_current_object() # type: ignore[attr-defined]
+ template = app.jinja_env.get_or_select_template(template_name_or_list)
+ return _render(app, template, context)
+
+
+def render_template_string(source: str, **context: t.Any) -> str:
+ """Render a template from the given source string with the given
+ context.
+
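+ For example::
+
+     render_template_string("Hello, {{ name }}!", name="World")
+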
+ :param source: The source code of the template to render.
+ :param context: The variables to make available in the template.
+ """
+ app = current_app._get_current_object() # type: ignore[attr-defined]
+ template = app.jinja_env.from_string(source)
+ return _render(app, template, context)
+
+
+def _stream(
+ app: Flask, template: Template, context: dict[str, t.Any]
+) -> t.Iterator[str]:
+ app.update_template_context(context)
+ before_render_template.send(
+ app, _async_wrapper=app.ensure_sync, template=template, context=context
+ )
+
+ def generate() -> t.Iterator[str]:
+ yield from template.generate(context)
+ template_rendered.send(
+ app, _async_wrapper=app.ensure_sync, template=template, context=context
+ )
+
+ rv = generate()
+
+ # If a request context is active, keep it while generating.
+ if request:
+ rv = stream_with_context(rv)
+
+ return rv
+
+
+def stream_template(
+ template_name_or_list: str | Template | list[str | Template],
+ **context: t.Any,
+) -> t.Iterator[str]:
+ """Render a template by name with the given context as a stream.
+ This returns an iterator of strings, which can be used as a
+ streaming response from a view.
+
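+ For example (a sketch; the template name and ``iter_rows`` helper
+ are hypothetical)::
+
+     @app.get("/report")
+     def report():
+         return stream_template("report.html", rows=iter_rows())
+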
+ :param template_name_or_list: The name of the template to render. If
+ a list is given, the first name to exist will be rendered.
+ :param context: The variables to make available in the template.
+
+ .. versionadded:: 2.2
+ """
+ app = current_app._get_current_object() # type: ignore[attr-defined]
+ template = app.jinja_env.get_or_select_template(template_name_or_list)
+ return _stream(app, template, context)
+
+
+def stream_template_string(source: str, **context: t.Any) -> t.Iterator[str]:
+ """Render a template from the given source string with the given
+ context as a stream. This returns an iterator of strings, which can
+ be used as a streaming response from a view.
+
+ :param source: The source code of the template to render.
+ :param context: The variables to make available in the template.
+
+ .. versionadded:: 2.2
+ """
+ app = current_app._get_current_object() # type: ignore[attr-defined]
+ template = app.jinja_env.from_string(source)
+ return _stream(app, template, context)
diff --git a/tapdown/lib/python3.11/site-packages/flask/testing.py b/tapdown/lib/python3.11/site-packages/flask/testing.py
new file mode 100644
index 0000000..55eb12f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/testing.py
@@ -0,0 +1,298 @@
+from __future__ import annotations
+
+import importlib.metadata
+import typing as t
+from contextlib import contextmanager
+from contextlib import ExitStack
+from copy import copy
+from types import TracebackType
+from urllib.parse import urlsplit
+
+import werkzeug.test
+from click.testing import CliRunner
+from click.testing import Result
+from werkzeug.test import Client
+from werkzeug.wrappers import Request as BaseRequest
+
+from .cli import ScriptInfo
+from .sessions import SessionMixin
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from _typeshed.wsgi import WSGIEnvironment
+ from werkzeug.test import TestResponse
+
+ from .app import Flask
+
+
+class EnvironBuilder(werkzeug.test.EnvironBuilder):
+ """An :class:`~werkzeug.test.EnvironBuilder`, that takes defaults from the
+ application.
+
+ :param app: The Flask application to configure the environment from.
+ :param path: URL path being requested.
+ :param base_url: Base URL where the app is being served, which
+ ``path`` is relative to. If not given, built from
+ :data:`PREFERRED_URL_SCHEME`, ``subdomain``,
+ :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`.
+ :param subdomain: Subdomain name to append to :data:`SERVER_NAME`.
+ :param url_scheme: Scheme to use instead of
+ :data:`PREFERRED_URL_SCHEME`.
+ :param json: If given, this is serialized as JSON and passed as
+ ``data``. Also defaults ``content_type`` to
+ ``application/json``.
+ :param args: other positional arguments passed to
+ :class:`~werkzeug.test.EnvironBuilder`.
+ :param kwargs: other keyword arguments passed to
+ :class:`~werkzeug.test.EnvironBuilder`.
+ """
+
+ def __init__(
+ self,
+ app: Flask,
+ path: str = "/",
+ base_url: str | None = None,
+ subdomain: str | None = None,
+ url_scheme: str | None = None,
+ *args: t.Any,
+ **kwargs: t.Any,
+ ) -> None:
+ assert not (base_url or subdomain or url_scheme) or (
+ base_url is not None
+ ) != bool(subdomain or url_scheme), (
+ 'Cannot pass "subdomain" or "url_scheme" with "base_url".'
+ )
+
+ if base_url is None:
+ http_host = app.config.get("SERVER_NAME") or "localhost"
+ app_root = app.config["APPLICATION_ROOT"]
+
+ if subdomain:
+ http_host = f"{subdomain}.{http_host}"
+
+ if url_scheme is None:
+ url_scheme = app.config["PREFERRED_URL_SCHEME"]
+
+ url = urlsplit(path)
+ base_url = (
+ f"{url.scheme or url_scheme}://{url.netloc or http_host}"
+ f"/{app_root.lstrip('/')}"
+ )
+ path = url.path
+
+ if url.query:
+ path = f"{path}?{url.query}"
+
+ self.app = app
+ super().__init__(path, base_url, *args, **kwargs)
+
+ def json_dumps(self, obj: t.Any, **kwargs: t.Any) -> str:
+ """Serialize ``obj`` to a JSON-formatted string.
+
+ The serialization will be configured according to the config associated
+ with this EnvironBuilder's ``app``.
+ """
+ return self.app.json.dumps(obj, **kwargs)
+
+
+_werkzeug_version = ""
+
+
+def _get_werkzeug_version() -> str:
+ global _werkzeug_version
+
+ if not _werkzeug_version:
+ _werkzeug_version = importlib.metadata.version("werkzeug")
+
+ return _werkzeug_version
+
+
+class FlaskClient(Client):
+ """Works like a regular Werkzeug test client but has knowledge about
+ Flask's contexts to defer the cleanup of the request context until
+ the end of a ``with`` block. For general information about how to
+ use this class refer to :class:`werkzeug.test.Client`.
+
+ .. versionchanged:: 0.12
+ `app.test_client()` includes a preset default environment, which
+ can be set after instantiating the `app.test_client()` object via
+ `client.environ_base`.
+
+ Basic usage is outlined in the :doc:`/testing` chapter.
+ """
+
+ application: Flask
+
+ def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
+ super().__init__(*args, **kwargs)
+ self.preserve_context = False
+ self._new_contexts: list[t.ContextManager[t.Any]] = []
+ self._context_stack = ExitStack()
+ self.environ_base = {
+ "REMOTE_ADDR": "127.0.0.1",
+ "HTTP_USER_AGENT": f"Werkzeug/{_get_werkzeug_version()}",
+ }
+
+ @contextmanager
+ def session_transaction(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> t.Iterator[SessionMixin]:
+ """When used in combination with a ``with`` statement this opens a
+ session transaction. This can be used to modify the session that
+ the test client uses. Once the ``with`` block is left the session is
+ stored back.
+
+ ::
+
+ with client.session_transaction() as session:
+ session['value'] = 42
+
+ Internally this is implemented by going through a temporary test
+ request context, and since session handling could depend on
+ request variables, this function accepts the same arguments as
+ :meth:`~flask.Flask.test_request_context` which are directly
+ passed through.
+ """
+ if self._cookies is None:
+ raise TypeError(
+ "Cookies are disabled. Create a client with 'use_cookies=True'."
+ )
+
+ app = self.application
+ ctx = app.test_request_context(*args, **kwargs)
+ self._add_cookies_to_wsgi(ctx.request.environ)
+
+ with ctx:
+ sess = app.session_interface.open_session(app, ctx.request)
+
+ if sess is None:
+ raise RuntimeError("Session backend did not open a session.")
+
+ yield sess
+ resp = app.response_class()
+
+ if app.session_interface.is_null_session(sess):
+ return
+
+ with ctx:
+ app.session_interface.save_session(app, sess, resp)
+
+ self._update_cookies_from_response(
+ ctx.request.host.partition(":")[0],
+ ctx.request.path,
+ resp.headers.getlist("Set-Cookie"),
+ )
+
+ def _copy_environ(self, other: WSGIEnvironment) -> WSGIEnvironment:
+ out = {**self.environ_base, **other}
+
+ if self.preserve_context:
+ out["werkzeug.debug.preserve_context"] = self._new_contexts.append
+
+ return out
+
+ def _request_from_builder_args(
+ self, args: tuple[t.Any, ...], kwargs: dict[str, t.Any]
+ ) -> BaseRequest:
+ kwargs["environ_base"] = self._copy_environ(kwargs.get("environ_base", {}))
+ builder = EnvironBuilder(self.application, *args, **kwargs)
+
+ try:
+ return builder.get_request()
+ finally:
+ builder.close()
+
+ def open(
+ self,
+ *args: t.Any,
+ buffered: bool = False,
+ follow_redirects: bool = False,
+ **kwargs: t.Any,
+ ) -> TestResponse:
+ if args and isinstance(
+ args[0], (werkzeug.test.EnvironBuilder, dict, BaseRequest)
+ ):
+ if isinstance(args[0], werkzeug.test.EnvironBuilder):
+ builder = copy(args[0])
+ builder.environ_base = self._copy_environ(builder.environ_base or {}) # type: ignore[arg-type]
+ request = builder.get_request()
+ elif isinstance(args[0], dict):
+ request = EnvironBuilder.from_environ(
+ args[0], app=self.application, environ_base=self._copy_environ({})
+ ).get_request()
+ else:
+ # isinstance(args[0], BaseRequest)
+ request = copy(args[0])
+ request.environ = self._copy_environ(request.environ)
+ else:
+ # request is None
+ request = self._request_from_builder_args(args, kwargs)
+
+ # Pop any previously preserved contexts. This prevents contexts
+ # from being preserved across redirects or multiple requests
+ # within a single block.
+ self._context_stack.close()
+
+ response = super().open(
+ request,
+ buffered=buffered,
+ follow_redirects=follow_redirects,
+ )
+ response.json_module = self.application.json # type: ignore[assignment]
+
+ # Re-push contexts that were preserved during the request.
+ for cm in self._new_contexts:
+ self._context_stack.enter_context(cm)
+
+ self._new_contexts.clear()
+ return response
+
+ def __enter__(self) -> FlaskClient:
+ if self.preserve_context:
+ raise RuntimeError("Cannot nest client invocations")
+ self.preserve_context = True
+ return self
+
+ def __exit__(
+ self,
+ exc_type: type | None,
+ exc_value: BaseException | None,
+ tb: TracebackType | None,
+ ) -> None:
+ self.preserve_context = False
+ self._context_stack.close()
+
+
+class FlaskCliRunner(CliRunner):
+ """A :class:`~click.testing.CliRunner` for testing a Flask app's
+ CLI commands. Typically created using
+ :meth:`~flask.Flask.test_cli_runner`. See :ref:`testing-cli`.
+ """
+
+ def __init__(self, app: Flask, **kwargs: t.Any) -> None:
+ self.app = app
+ super().__init__(**kwargs)
+
+ def invoke( # type: ignore
+ self, cli: t.Any = None, args: t.Any = None, **kwargs: t.Any
+ ) -> Result:
+ """Invokes a CLI command in an isolated environment. See
+ :meth:`CliRunner.invoke <click.testing.CliRunner.invoke>` for
+ full method documentation. See :ref:`testing-cli` for examples.
+
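+ For example (a sketch; ``hello`` is a hypothetical command
+ registered on the app)::
+
+     runner = app.test_cli_runner()
+     result = runner.invoke(args=["hello"])
+     assert result.exit_code == 0
+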
+ If the ``obj`` argument is not given, passes an instance of
+ :class:`~flask.cli.ScriptInfo` that knows how to load the Flask
+ app being tested.
+
+ :param cli: Command object to invoke. Default is the app's
+ :attr:`~flask.app.Flask.cli` group.
+ :param args: List of strings to invoke the command with.
+
+ :return: a :class:`~click.testing.Result` object.
+ """
+ if cli is None:
+ cli = self.app.cli
+
+ if "obj" not in kwargs:
+ kwargs["obj"] = ScriptInfo(create_app=lambda: self.app)
+
+ return super().invoke(cli, args, **kwargs)
diff --git a/tapdown/lib/python3.11/site-packages/flask/typing.py b/tapdown/lib/python3.11/site-packages/flask/typing.py
new file mode 100644
index 0000000..6b70c40
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/typing.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+import collections.abc as cabc
+import typing as t
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from _typeshed.wsgi import WSGIApplication # noqa: F401
+ from werkzeug.datastructures import Headers # noqa: F401
+ from werkzeug.sansio.response import Response # noqa: F401
+
+# The possible types that are directly convertible or are a Response object.
+ResponseValue = t.Union[
+ "Response",
+ str,
+ bytes,
+ list[t.Any],
+ # Only dict is actually accepted, but Mapping allows for TypedDict.
+ t.Mapping[str, t.Any],
+ t.Iterator[str],
+ t.Iterator[bytes],
+ cabc.AsyncIterable[str], # for Quart, until App is generic.
+ cabc.AsyncIterable[bytes],
+]
+
+# the possible types for an individual HTTP header
+HeaderValue = t.Union[str, list[str], tuple[str, ...]]
+
+# the possible types for HTTP headers
+HeadersValue = t.Union[
+ "Headers",
+ t.Mapping[str, HeaderValue],
+ t.Sequence[tuple[str, HeaderValue]],
+]
+
+# The possible types returned by a route function.
+ResponseReturnValue = t.Union[
+ ResponseValue,
+ tuple[ResponseValue, HeadersValue],
+ tuple[ResponseValue, int],
+ tuple[ResponseValue, int, HeadersValue],
+ "WSGIApplication",
+]
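+
+# For example, a view returning ("created", 201, {"Location": "/items/1"})
+# matches tuple[ResponseValue, int, HeadersValue].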
+
+# Allow any subclass of werkzeug.Response, such as the one from Flask,
+# as a callback argument. Using werkzeug.Response directly makes a
+# callback annotated with flask.Response fail type checking.
+ResponseClass = t.TypeVar("ResponseClass", bound="Response")
+
+AppOrBlueprintKey = t.Optional[str] # The App key is None, whereas blueprints are named
+AfterRequestCallable = t.Union[
+ t.Callable[[ResponseClass], ResponseClass],
+ t.Callable[[ResponseClass], t.Awaitable[ResponseClass]],
+]
+BeforeFirstRequestCallable = t.Union[
+ t.Callable[[], None], t.Callable[[], t.Awaitable[None]]
+]
+BeforeRequestCallable = t.Union[
+ t.Callable[[], t.Optional[ResponseReturnValue]],
+ t.Callable[[], t.Awaitable[t.Optional[ResponseReturnValue]]],
+]
+ShellContextProcessorCallable = t.Callable[[], dict[str, t.Any]]
+TeardownCallable = t.Union[
+ t.Callable[[t.Optional[BaseException]], None],
+ t.Callable[[t.Optional[BaseException]], t.Awaitable[None]],
+]
+TemplateContextProcessorCallable = t.Union[
+ t.Callable[[], dict[str, t.Any]],
+ t.Callable[[], t.Awaitable[dict[str, t.Any]]],
+]
+TemplateFilterCallable = t.Callable[..., t.Any]
+TemplateGlobalCallable = t.Callable[..., t.Any]
+TemplateTestCallable = t.Callable[..., bool]
+URLDefaultCallable = t.Callable[[str, dict[str, t.Any]], None]
+URLValuePreprocessorCallable = t.Callable[
+ [t.Optional[str], t.Optional[dict[str, t.Any]]], None
+]
+
+# This should take Exception, but that either breaks typing the argument
+# with a specific exception, or decorating multiple times with different
+# exceptions (and using a union type on the argument).
+# https://github.com/pallets/flask/issues/4095
+# https://github.com/pallets/flask/issues/4295
+# https://github.com/pallets/flask/issues/4297
+ErrorHandlerCallable = t.Union[
+ t.Callable[[t.Any], ResponseReturnValue],
+ t.Callable[[t.Any], t.Awaitable[ResponseReturnValue]],
+]
+
+RouteCallable = t.Union[
+ t.Callable[..., ResponseReturnValue],
+ t.Callable[..., t.Awaitable[ResponseReturnValue]],
+]
diff --git a/tapdown/lib/python3.11/site-packages/flask/views.py b/tapdown/lib/python3.11/site-packages/flask/views.py
new file mode 100644
index 0000000..53fe976
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/views.py
@@ -0,0 +1,191 @@
+from __future__ import annotations
+
+import typing as t
+
+from . import typing as ft
+from .globals import current_app
+from .globals import request
+
+F = t.TypeVar("F", bound=t.Callable[..., t.Any])
+
+http_method_funcs = frozenset(
+ ["get", "post", "head", "options", "delete", "put", "trace", "patch"]
+)
+
+
+class View:
+ """Subclass this class and override :meth:`dispatch_request` to
+ create a generic class-based view. Call :meth:`as_view` to create a
+ view function that creates an instance of the class with the given
+ arguments and calls its ``dispatch_request`` method with any URL
+ variables.
+
+ See :doc:`views` for a detailed guide.
+
+ .. code-block:: python
+
+ class Hello(View):
+ init_every_request = False
+
+ def dispatch_request(self, name):
+ return f"Hello, {name}!"
+
+ app.add_url_rule(
+ "/hello/", view_func=Hello.as_view("hello")
+ )
+
+ Set :attr:`methods` on the class to change what methods the view
+ accepts.
+
+ Set :attr:`decorators` on the class to apply a list of decorators to
+ the generated view function. Decorators applied to the class itself
+ will not be applied to the generated view function!
+
+ Set :attr:`init_every_request` to ``False`` for efficiency, unless
+ you need to store request-global data on ``self``.
+ """
+
+ #: The methods this view is registered for. Uses the same default
+ #: (``["GET", "HEAD", "OPTIONS"]``) as ``route`` and
+ #: ``add_url_rule`` by default.
+ methods: t.ClassVar[t.Collection[str] | None] = None
+
+ #: Control whether the ``OPTIONS`` method is handled automatically.
+ #: Uses the same default (``True``) as ``route`` and
+ #: ``add_url_rule`` by default.
+ provide_automatic_options: t.ClassVar[bool | None] = None
+
+ #: A list of decorators to apply, in order, to the generated view
+ #: function. Remember that ``@decorator`` syntax is applied bottom
+ #: to top, so the first decorator in the list would be the bottom
+ #: decorator.
+ #:
+ #: .. versionadded:: 0.8
+ decorators: t.ClassVar[list[t.Callable[..., t.Any]]] = []
+
+ #: Create a new instance of this view class for every request by
+ #: default. If a view subclass sets this to ``False``, the same
+ #: instance is used for every request.
+ #:
+ #: A single instance is more efficient, especially if complex setup
+ #: is done during init. However, storing data on ``self`` is no
+ #: longer safe across requests, and :data:`~flask.g` should be used
+ #: instead.
+ #:
+ #: .. versionadded:: 2.2
+ init_every_request: t.ClassVar[bool] = True
+
+ def dispatch_request(self) -> ft.ResponseReturnValue:
+ """The actual view function behavior. Subclasses must override
+ this and return a valid response. Any variables from the URL
+ rule are passed as keyword arguments.
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def as_view(
+ cls, name: str, *class_args: t.Any, **class_kwargs: t.Any
+ ) -> ft.RouteCallable:
+ """Convert the class into a view function that can be registered
+ for a route.
+
+ By default, the generated view will create a new instance of the
+ view class for every request and call its
+ :meth:`dispatch_request` method. If the view class sets
+ :attr:`init_every_request` to ``False``, the same instance will
+ be used for every request.
+
+ Except for ``name``, all other arguments passed to this method
+ are forwarded to the view class ``__init__`` method.
+
+ .. versionchanged:: 2.2
+ Added the ``init_every_request`` class attribute.
+ """
+ if cls.init_every_request:
+
+ def view(**kwargs: t.Any) -> ft.ResponseReturnValue:
+ self = view.view_class( # type: ignore[attr-defined]
+ *class_args, **class_kwargs
+ )
+ return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return]
+
+ else:
+ self = cls(*class_args, **class_kwargs) # pyright: ignore
+
+ def view(**kwargs: t.Any) -> ft.ResponseReturnValue:
+ return current_app.ensure_sync(self.dispatch_request)(**kwargs) # type: ignore[no-any-return]
+
+ if cls.decorators:
+ view.__name__ = name
+ view.__module__ = cls.__module__
+ for decorator in cls.decorators:
+ view = decorator(view)
+
+ # We attach the view class to the view function for two reasons:
+ # first of all it allows us to easily figure out what class-based
+ # view this thing came from, secondly it's also used for instantiating
+ # the view class so you can actually replace it with something else
+ # for testing purposes and debugging.
+ view.view_class = cls # type: ignore
+ view.__name__ = name
+ view.__doc__ = cls.__doc__
+ view.__module__ = cls.__module__
+ view.methods = cls.methods # type: ignore
+ view.provide_automatic_options = cls.provide_automatic_options # type: ignore
+ return view
+
+
+class MethodView(View):
+ """Dispatches request methods to the corresponding instance methods.
+ For example, if you implement a ``get`` method, it will be used to
+ handle ``GET`` requests.
+
+ This can be useful for defining a REST API.
+
+ :attr:`methods` is automatically set based on the methods defined on
+ the class.
+
+ See :doc:`views` for a detailed guide.
+
+ .. code-block:: python
+
+ class CounterAPI(MethodView):
+ def get(self):
+ return str(session.get("counter", 0))
+
+ def post(self):
+ session["counter"] = session.get("counter", 0) + 1
+ return redirect(url_for("counter"))
+
+ app.add_url_rule(
+ "/counter", view_func=CounterAPI.as_view("counter")
+ )
+ """
+
+ def __init_subclass__(cls, **kwargs: t.Any) -> None:
+ super().__init_subclass__(**kwargs)
+
+ if "methods" not in cls.__dict__:
+ methods = set()
+
+ for base in cls.__bases__:
+ if getattr(base, "methods", None):
+ methods.update(base.methods) # type: ignore[attr-defined]
+
+ for key in http_method_funcs:
+ if hasattr(cls, key):
+ methods.add(key.upper())
+
+ if methods:
+ cls.methods = methods
+
+ def dispatch_request(self, **kwargs: t.Any) -> ft.ResponseReturnValue:
+ meth = getattr(self, request.method.lower(), None)
+
+ # If the request method is HEAD and we don't have a handler for it
+ # retry with GET.
+ if meth is None and request.method == "HEAD":
+ meth = getattr(self, "get", None)
+
+ assert meth is not None, f"Unimplemented method {request.method!r}"
+ return current_app.ensure_sync(meth)(**kwargs) # type: ignore[no-any-return]
diff --git a/tapdown/lib/python3.11/site-packages/flask/wrappers.py b/tapdown/lib/python3.11/site-packages/flask/wrappers.py
new file mode 100644
index 0000000..bab6102
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask/wrappers.py
@@ -0,0 +1,257 @@
+from __future__ import annotations
+
+import typing as t
+
+from werkzeug.exceptions import BadRequest
+from werkzeug.exceptions import HTTPException
+from werkzeug.wrappers import Request as RequestBase
+from werkzeug.wrappers import Response as ResponseBase
+
+from . import json
+from .globals import current_app
+from .helpers import _split_blueprint_path
+
+if t.TYPE_CHECKING: # pragma: no cover
+ from werkzeug.routing import Rule
+
+
+class Request(RequestBase):
+ """The request object used by default in Flask. Remembers the
+ matched endpoint and view arguments.
+
+ It is what ends up as :class:`~flask.request`. If you want to replace
+ the request object used you can subclass this and set
+ :attr:`~flask.Flask.request_class` to your subclass.
+
+ The request object is a :class:`~werkzeug.wrappers.Request` subclass and
+ provides all of the attributes Werkzeug defines plus a few Flask
+ specific ones.
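+
+    A subclassing sketch (``MyRequest`` is an illustrative name)::
+
+        class MyRequest(Request):
+            pass
+
+        # assign before the app handles its first request
+        app.request_class = MyRequest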
+ """
+
+ json_module: t.Any = json
+
+ #: The internal URL rule that matched the request. This can be
+ #: useful to inspect which methods are allowed for the URL from
+ #: a before/after handler (``request.url_rule.methods``) etc.
+ #: Though if the request's method was invalid for the URL rule,
+ #: the valid list is available in ``routing_exception.valid_methods``
+ #: instead (an attribute of the Werkzeug exception
+ #: :exc:`~werkzeug.exceptions.MethodNotAllowed`)
+ #: because the request was never internally bound.
+ #:
+ #: .. versionadded:: 0.6
+ url_rule: Rule | None = None
+
+ #: A dict of view arguments that matched the request. If an exception
+ #: happened when matching, this will be ``None``.
+ view_args: dict[str, t.Any] | None = None
+
+ #: If matching the URL failed, this is the exception that will be
+ #: raised / was raised as part of the request handling. This is
+ #: usually a :exc:`~werkzeug.exceptions.NotFound` exception or
+ #: something similar.
+ routing_exception: HTTPException | None = None
+
+ _max_content_length: int | None = None
+ _max_form_memory_size: int | None = None
+ _max_form_parts: int | None = None
+
+ @property
+ def max_content_length(self) -> int | None:
+ """The maximum number of bytes that will be read during this request. If
+ this limit is exceeded, a 413 :exc:`~werkzeug.exceptions.RequestEntityTooLarge`
+ error is raised. If it is set to ``None``, no limit is enforced at the
+ Flask application level. However, if it is ``None`` and the request has
+ no ``Content-Length`` header and the WSGI server does not indicate that
+ it terminates the stream, then no data is read to avoid an infinite
+ stream.
+
+ Each request defaults to the :data:`MAX_CONTENT_LENGTH` config, which
+ defaults to ``None``. It can be set on a specific ``request`` to apply
+ the limit to that specific view. This should be set appropriately based
+ on an application's or view's specific needs.
+
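+        For example (a sketch; the limit value is illustrative)::
+
+            @app.before_request
+            def limit_uploads():
+                request.max_content_length = 50 * 1024 * 1024  # 50 MB
+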
+ .. versionchanged:: 3.1
+ This can be set per-request.
+
+ .. versionchanged:: 0.6
+ This is configurable through Flask config.
+ """
+ if self._max_content_length is not None:
+ return self._max_content_length
+
+ if not current_app:
+ return super().max_content_length
+
+ return current_app.config["MAX_CONTENT_LENGTH"] # type: ignore[no-any-return]
+
+ @max_content_length.setter
+ def max_content_length(self, value: int | None) -> None:
+ self._max_content_length = value
+
+ @property
+ def max_form_memory_size(self) -> int | None:
+ """The maximum size in bytes any non-file form field may be in a
+ ``multipart/form-data`` body. If this limit is exceeded, a 413
+ :exc:`~werkzeug.exceptions.RequestEntityTooLarge` error is raised. If it
+ is set to ``None``, no limit is enforced at the Flask application level.
+
+ Each request defaults to the :data:`MAX_FORM_MEMORY_SIZE` config, which
+ defaults to ``500_000``. It can be set on a specific ``request`` to
+ apply the limit to that specific view. This should be set appropriately
+ based on an application's or view's specific needs.
+
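+        For example, at application setup (the value is illustrative)::
+
+            app.config["MAX_FORM_MEMORY_SIZE"] = 1_000_000  # bytes per field
+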
+ .. versionchanged:: 3.1
+ This is configurable through Flask config.
+ """
+ if self._max_form_memory_size is not None:
+ return self._max_form_memory_size
+
+ if not current_app:
+ return super().max_form_memory_size
+
+ return current_app.config["MAX_FORM_MEMORY_SIZE"] # type: ignore[no-any-return]
+
+ @max_form_memory_size.setter
+ def max_form_memory_size(self, value: int | None) -> None:
+ self._max_form_memory_size = value
+
+ @property # type: ignore[override]
+ def max_form_parts(self) -> int | None:
+ """The maximum number of fields that may be present in a
+ ``multipart/form-data`` body. If this limit is exceeded, a 413
+ :exc:`~werkzeug.exceptions.RequestEntityTooLarge` error is raised. If it
+ is set to ``None``, no limit is enforced at the Flask application level.
+
+ Each request defaults to the :data:`MAX_FORM_PARTS` config, which
+ defaults to ``1_000``. It can be set on a specific ``request`` to apply
+ the limit to that specific view. This should be set appropriately based
+ on an application's or view's specific needs.
+
+ .. versionchanged:: 3.1
+ This is configurable through Flask config.
+ """
+ if self._max_form_parts is not None:
+ return self._max_form_parts
+
+ if not current_app:
+ return super().max_form_parts
+
+ return current_app.config["MAX_FORM_PARTS"] # type: ignore[no-any-return]
+
+ @max_form_parts.setter
+ def max_form_parts(self, value: int | None) -> None:
+ self._max_form_parts = value
+
+ @property
+ def endpoint(self) -> str | None:
+ """The endpoint that matched the request URL.
+
+ This will be ``None`` if matching failed or has not been
+ performed yet.
+
+ This in combination with :attr:`view_args` can be used to
+ reconstruct the same URL or a modified URL.
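+
+        For example, to rebuild the current URL (a sketch)::
+
+            url_for(request.endpoint, **request.view_args)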
+ """
+ if self.url_rule is not None:
+ return self.url_rule.endpoint # type: ignore[no-any-return]
+
+ return None
+
+ @property
+ def blueprint(self) -> str | None:
+ """The registered name of the current blueprint.
+
+ This will be ``None`` if the endpoint is not part of a
+ blueprint, or if URL matching failed or has not been performed
+ yet.
+
+ This does not necessarily match the name the blueprint was
+ created with. It may have been nested, or registered with a
+ different name.
+ """
+ endpoint = self.endpoint
+
+ if endpoint is not None and "." in endpoint:
+ return endpoint.rpartition(".")[0]
+
+ return None
+
+ @property
+ def blueprints(self) -> list[str]:
+ """The registered names of the current blueprint upwards through
+ parent blueprints.
+
+ This will be an empty list if there is no current blueprint, or
+ if URL matching failed.
+
+ .. versionadded:: 2.0.1
+ """
+ name = self.blueprint
+
+ if name is None:
+ return []
+
+ return _split_blueprint_path(name)
+
+ def _load_form_data(self) -> None:
+ super()._load_form_data()
+
+ # In debug mode we're replacing the files multidict with an ad-hoc
+ # subclass that raises a different error for key errors.
+ if (
+ current_app
+ and current_app.debug
+ and self.mimetype != "multipart/form-data"
+ and not self.files
+ ):
+ from .debughelpers import attach_enctype_error_multidict
+
+ attach_enctype_error_multidict(self)
+
+ def on_json_loading_failed(self, e: ValueError | None) -> t.Any:
+ try:
+ return super().on_json_loading_failed(e)
+ except BadRequest as ebr:
+ if current_app and current_app.debug:
+ raise
+
+ raise BadRequest() from ebr
+
+
+class Response(ResponseBase):
+ """The response object that is used by default in Flask. Works like the
+ response object from Werkzeug but is set to have an HTML mimetype by
+ default. Quite often you don't have to create this object yourself because
+ :meth:`~flask.Flask.make_response` will take care of that for you.
+
+ If you want to replace the response object used you can subclass this and
+ set :attr:`~flask.Flask.response_class` to your subclass.
+
+ .. versionchanged:: 1.0
+ JSON support is added to the response, like the request. This is useful
+ when testing to get the test client response data as JSON.
+
+    .. versionchanged:: 1.0
+        Added :attr:`max_cookie_size`.
+ """
+
+ default_mimetype: str | None = "text/html"
+
+ json_module = json
+
+ autocorrect_location_header = False
+
+ @property
+ def max_cookie_size(self) -> int: # type: ignore
+ """Read-only view of the :data:`MAX_COOKIE_SIZE` config key.
+
+ See :attr:`~werkzeug.wrappers.Response.max_cookie_size` in
+ Werkzeug's docs.
+ """
+ if current_app:
+ return current_app.config["MAX_COOKIE_SIZE"] # type: ignore[no-any-return]
+
+ # return Werkzeug's default when not in an app context
+ return super().max_cookie_size
diff --git a/tapdown/lib/python3.11/site-packages/flask_socketio/__init__.py b/tapdown/lib/python3.11/site-packages/flask_socketio/__init__.py
new file mode 100644
index 0000000..219f30c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_socketio/__init__.py
@@ -0,0 +1,1125 @@
+from functools import wraps
+import os
+import sys
+
+# make sure gevent-socketio is not installed, as it conflicts with
+# python-socketio
+gevent_socketio_found = True
+try:
+ from socketio import socketio_manage # noqa: F401
+except ImportError:
+ gevent_socketio_found = False
+if gevent_socketio_found:
+ print('The gevent-socketio package is incompatible with this version of '
+ 'the Flask-SocketIO extension. Please uninstall it, and then '
+ 'install the latest version of python-socketio in its place.')
+ sys.exit(1)
+
+import flask
+from flask import has_request_context, json as flask_json
+from flask.sessions import SessionMixin
+import socketio
+from socketio.exceptions import ConnectionRefusedError # noqa: F401
+from werkzeug.debug import DebuggedApplication
+from werkzeug._reloader import run_with_reloader
+
+from .namespace import Namespace
+from .test_client import SocketIOTestClient
+
+
+class _SocketIOMiddleware(socketio.WSGIApp):
+ """This WSGI middleware simply exposes the Flask application in the WSGI
+ environment before executing the request.
+ """
+ def __init__(self, socketio_app, flask_app, socketio_path='socket.io'):
+ self.flask_app = flask_app
+ super().__init__(socketio_app, flask_app.wsgi_app,
+ socketio_path=socketio_path)
+
+ def __call__(self, environ, start_response):
+ environ = environ.copy()
+ environ['flask.app'] = self.flask_app
+ return super().__call__(environ, start_response)
+
+
+class _ManagedSession(dict, SessionMixin):
+ """This class is used for user sessions that are managed by
+    Flask-SocketIO. It is a simple dict, extended with the Flask session
+    attributes."""
+ pass
+
+
+class SocketIO:
+ """Create a Flask-SocketIO server.
+
+ :param app: The flask application instance. If the application instance
+ isn't known at the time this class is instantiated, then call
+ ``socketio.init_app(app)`` once the application instance is
+ available.
+ :param manage_session: If set to ``True``, this extension manages the user
+ session for Socket.IO events. If set to ``False``,
+ Flask's own session management is used. When using
+ Flask's cookie based sessions it is recommended that
+ you leave this set to the default of ``True``. When
+ using server-side sessions, a ``False`` setting
+ enables sharing the user session between HTTP routes
+ and Socket.IO events.
+ :param message_queue: A connection URL for a message queue service the
+ server can use for multi-process communication. A
+ message queue is not required when using a single
+ server process.
+ :param channel: The channel name, when using a message queue. If a channel
+ isn't specified, a default channel will be used. If
+ multiple clusters of SocketIO processes need to use the
+ same message queue without interfering with each other,
+ then each cluster should use a different channel.
+ :param path: The path where the Socket.IO server is exposed. Defaults to
+ ``'socket.io'``. Leave this as is unless you know what you are
+ doing.
+ :param resource: Alias to ``path``.
+ :param kwargs: Socket.IO and Engine.IO server options.
+
+ The Socket.IO server options are detailed below:
+
+ :param client_manager: The client manager instance that will manage the
+ client list. When this is omitted, the client list
+ is stored in an in-memory structure, so the use of
+ multiple connected servers is not possible. In most
+ cases, this argument does not need to be set
+ explicitly.
+ :param logger: To enable logging set to ``True`` or pass a logger object to
+ use. To disable logging set to ``False``. The default is
+ ``False``. Note that fatal errors will be logged even when
+ ``logger`` is ``False``.
+ :param json: An alternative json module to use for encoding and decoding
+ packets. Custom json modules must have ``dumps`` and ``loads``
+ functions that are compatible with the standard library
+ versions. To use the same json encoder and decoder as a Flask
+ application, use ``flask.json``.
+ :param async_handlers: If set to ``True``, event handlers for a client are
+ executed in separate threads. To run handlers for a
+ client synchronously, set to ``False``. The default
+ is ``True``.
+ :param always_connect: When set to ``False``, new connections are
+                           provisional until the connect handler returns
+ something other than ``False``, at which point they
+ are accepted. When set to ``True``, connections are
+ immediately accepted, and then if the connect
+ handler returns ``False`` a disconnect is issued.
+ Set to ``True`` if you need to emit events from the
+ connect handler and your client is confused when it
+ receives events before the connection acceptance.
+ In any other case use the default of ``False``.
+
+ The Engine.IO server configuration supports the following settings:
+
+ :param async_mode: The asynchronous model to use. See the Deployment
+ section in the documentation for a description of the
+ available options. Valid async modes are ``threading``,
+ ``eventlet``, ``gevent`` and ``gevent_uwsgi``. If this
+ argument is not given, ``eventlet`` is tried first, then
+ ``gevent_uwsgi``, then ``gevent``, and finally
+ ``threading``. The first async mode that has all its
+                       dependencies installed is the one that is chosen.
+ :param ping_interval: The interval in seconds at which the server pings
+ the client. The default is 25 seconds. For advanced
+ control, a two element tuple can be given, where
+ the first number is the ping interval and the second
+ is a grace period added by the server.
+ :param ping_timeout: The time in seconds that the client waits for the
+ server to respond before disconnecting. The default
+ is 5 seconds.
+ :param max_http_buffer_size: The maximum size of a message when using the
+ polling transport. The default is 1,000,000
+ bytes.
+ :param allow_upgrades: Whether to allow transport upgrades or not. The
+ default is ``True``.
+ :param http_compression: Whether to compress packages when using the
+ polling transport. The default is ``True``.
+ :param compression_threshold: Only compress messages when their byte size
+ is greater than this value. The default is
+ 1024 bytes.
+ :param cookie: If set to a string, it is the name of the HTTP cookie the
+ server sends back to the client containing the client
+ session id. If set to a dictionary, the ``'name'`` key
+ contains the cookie name and other keys define cookie
+ attributes, where the value of each attribute can be a
+ string, a callable with no arguments, or a boolean. If set
+ to ``None`` (the default), a cookie is not sent to the
+ client.
+ :param cors_allowed_origins: Origin or list of origins that are allowed to
+ connect to this server. Only the same origin
+ is allowed by default. Set this argument to
+ ``'*'`` to allow all origins, or to ``[]`` to
+ disable CORS handling.
+ :param cors_credentials: Whether credentials (cookies, authentication) are
+ allowed in requests to this server. The default is
+ ``True``.
+ :param monitor_clients: If set to ``True``, a background task will ensure
+ inactive clients are closed. Set to ``False`` to
+ disable the monitoring task (not recommended). The
+ default is ``True``.
+ :param engineio_logger: To enable Engine.IO logging set to ``True`` or pass
+ a logger object to use. To disable logging set to
+ ``False``. The default is ``False``. Note that
+ fatal errors are logged even when
+ ``engineio_logger`` is ``False``.
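+
+    A minimal setup sketch (``app`` is an illustrative Flask instance)::
+
+        app = Flask(__name__)
+        socketio = SocketIO(app)
+
+        # or, with an application factory:
+        # socketio = SocketIO()
+        # socketio.init_app(app)
+
+        # or, with a message queue for multi-process deployments:
+        # socketio = SocketIO(app, message_queue='redis://')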
+ """
+ reason = socketio.Server.reason
+
+ def __init__(self, app=None, **kwargs):
+ self.server = None
+ self.server_options = {}
+ self.wsgi_server = None
+ self.handlers = []
+ self.namespace_handlers = []
+ self.exception_handlers = {}
+ self.default_exception_handler = None
+ self.manage_session = True
+ # We can call init_app when:
+ # - we were given the Flask app instance (standard initialization)
+ # - we were not given the app, but we were given a message_queue
+ # (standard initialization for auxiliary process)
+ # In all other cases we collect the arguments and assume the client
+ # will call init_app from an app factory function.
+ if app is not None or 'message_queue' in kwargs:
+ self.init_app(app, **kwargs)
+ else:
+ self.server_options.update(kwargs)
+
+ def init_app(self, app, **kwargs):
+ if app is not None:
+ if not hasattr(app, 'extensions'):
+ app.extensions = {} # pragma: no cover
+ app.extensions['socketio'] = self
+ self.server_options.update(kwargs)
+ self.manage_session = self.server_options.pop('manage_session',
+ self.manage_session)
+
+ if 'client_manager' not in kwargs:
+ url = self.server_options.get('message_queue', None)
+ channel = self.server_options.pop('channel', 'flask-socketio')
+ write_only = app is None
+ if url:
+                if url.startswith(('redis://', 'rediss://')):
+ queue_class = socketio.RedisManager
+ elif url.startswith('kafka://'):
+ queue_class = socketio.KafkaManager
+ elif url.startswith('zmq'):
+ queue_class = socketio.ZmqManager
+ else:
+ queue_class = socketio.KombuManager
+ queue = queue_class(url, channel=channel,
+ write_only=write_only)
+ self.server_options['client_manager'] = queue
+
+ if 'json' in self.server_options and \
+ self.server_options['json'] == flask_json:
+            # Flask's json module is tricky to use because its output
+            # changes depending on whether it is invoked inside or outside
+            # the app context, so to prevent any ambiguity we replace it
+            # with wrappers that ensure the app context is always present.
+ class FlaskSafeJSON:
+ @staticmethod
+ def dumps(*args, **kwargs):
+ with app.app_context():
+ return flask_json.dumps(*args, **kwargs)
+
+ @staticmethod
+ def loads(*args, **kwargs):
+ with app.app_context():
+ return flask_json.loads(*args, **kwargs)
+
+ self.server_options['json'] = FlaskSafeJSON
+
+ resource = self.server_options.pop('path', None) or \
+ self.server_options.pop('resource', None) or 'socket.io'
+ if resource.startswith('/'):
+ resource = resource[1:]
+ if os.environ.get('FLASK_RUN_FROM_CLI'):
+ if self.server_options.get('async_mode') is None:
+ self.server_options['async_mode'] = 'threading'
+ self.server = socketio.Server(**self.server_options)
+ self.async_mode = self.server.async_mode
+ for handler in self.handlers:
+ self.server.on(handler[0], handler[1], namespace=handler[2])
+ for namespace_handler in self.namespace_handlers:
+ self.server.register_namespace(namespace_handler)
+
+ if app is not None:
+ # here we attach the SocketIO middleware to the SocketIO object so
+ # it can be referenced later if debug middleware needs to be
+ # inserted
+ self.sockio_mw = _SocketIOMiddleware(self.server, app,
+ socketio_path=resource)
+ app.wsgi_app = self.sockio_mw
+
+ def on(self, message, namespace=None):
+ """Decorator to register a SocketIO event handler.
+
+ This decorator must be applied to SocketIO event handlers. Example::
+
+ @socketio.on('my event', namespace='/chat')
+ def handle_my_custom_event(json):
+ print('received json: ' + str(json))
+
+ :param message: The name of the event. This is normally a user defined
+ string, but a few event names are already defined. Use
+ ``'message'`` to define a handler that takes a string
+ payload, ``'json'`` to define a handler that takes a
+ JSON blob payload, ``'connect'`` or ``'disconnect'``
+ to create handlers for connection and disconnection
+ events.
+ :param namespace: The namespace on which the handler is to be
+ registered. Defaults to the global namespace.
+ """
+ namespace = namespace or '/'
+
+ def decorator(handler):
+ @wraps(handler)
+ def _handler(sid, *args):
+ nonlocal namespace
+ real_ns = namespace
+ if namespace == '*':
+ real_ns = sid
+ sid = args[0]
+ args = args[1:]
+ real_msg = message
+ if message == '*':
+ real_msg = sid
+ sid = args[0]
+ args = [real_msg] + list(args[1:])
+ return self._handle_event(handler, message, real_ns, sid,
+ *args)
+
+ if self.server:
+ self.server.on(message, _handler, namespace=namespace)
+ else:
+ self.handlers.append((message, _handler, namespace))
+ return handler
+ return decorator
+
+ def on_error(self, namespace=None):
+ """Decorator to define a custom error handler for SocketIO events.
+
+ This decorator can be applied to a function that acts as an error
+ handler for a namespace. This handler will be invoked when a SocketIO
+ event handler raises an exception. The handler function must accept one
+ argument, which is the exception raised. Example::
+
+ @socketio.on_error(namespace='/chat')
+ def chat_error_handler(e):
+ print('An error has occurred: ' + str(e))
+
+ :param namespace: The namespace for which to register the error
+ handler. Defaults to the global namespace.
+ """
+ namespace = namespace or '/'
+
+ def decorator(exception_handler):
+ if not callable(exception_handler):
+ raise ValueError('exception_handler must be callable')
+ self.exception_handlers[namespace] = exception_handler
+ return exception_handler
+ return decorator
+
+ def on_error_default(self, exception_handler):
+ """Decorator to define a default error handler for SocketIO events.
+
+ This decorator can be applied to a function that acts as a default
+ error handler for any namespaces that do not have a specific handler.
+ Example::
+
+ @socketio.on_error_default
+ def error_handler(e):
+ print('An error has occurred: ' + str(e))
+ """
+ if not callable(exception_handler):
+ raise ValueError('exception_handler must be callable')
+ self.default_exception_handler = exception_handler
+ return exception_handler
+
+ def on_event(self, message, handler, namespace=None):
+ """Register a SocketIO event handler.
+
+ ``on_event`` is the non-decorator version of ``'on'``.
+
+ Example::
+
+ def on_foo_event(json):
+ print('received json: ' + str(json))
+
+ socketio.on_event('my event', on_foo_event, namespace='/chat')
+
+ :param message: The name of the event. This is normally a user defined
+ string, but a few event names are already defined. Use
+ ``'message'`` to define a handler that takes a string
+ payload, ``'json'`` to define a handler that takes a
+ JSON blob payload, ``'connect'`` or ``'disconnect'``
+ to create handlers for connection and disconnection
+ events.
+ :param handler: The function that handles the event.
+ :param namespace: The namespace on which the handler is to be
+ registered. Defaults to the global namespace.
+ """
+ self.on(message, namespace=namespace)(handler)
+
+ def event(self, *args, **kwargs):
+ """Decorator to register an event handler.
+
+ This is a simplified version of the ``on()`` method that takes the
+ event name from the decorated function.
+
+ Example usage::
+
+ @socketio.event
+ def my_event(data):
+ print('Received data: ', data)
+
+ The above example is equivalent to::
+
+ @socketio.on('my_event')
+ def my_event(data):
+ print('Received data: ', data)
+
+ A custom namespace can be given as an argument to the decorator::
+
+ @socketio.event(namespace='/test')
+ def my_event(data):
+ print('Received data: ', data)
+ """
+ if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+ # the decorator was invoked without arguments
+ # args[0] is the decorated function
+ return self.on(args[0].__name__)(args[0])
+ else:
+ # the decorator was invoked with arguments
+ def set_handler(handler):
+ return self.on(handler.__name__, *args, **kwargs)(handler)
+
+ return set_handler
+
+ def on_namespace(self, namespace_handler):
+ if not isinstance(namespace_handler, Namespace):
+ raise ValueError('Not a namespace instance.')
+ namespace_handler._set_socketio(self)
+ if self.server:
+ self.server.register_namespace(namespace_handler)
+ else:
+ self.namespace_handlers.append(namespace_handler)
+
+ def emit(self, event, *args, **kwargs):
+ """Emit a server generated SocketIO event.
+
+ This function emits a SocketIO event to one or more connected clients.
+ A JSON blob can be attached to the event as payload. This function can
+ be used outside of a SocketIO event context, so it is appropriate to
+ use when the server is the originator of an event, outside of any
+ client context, such as in a regular HTTP request handler or a
+ background task. Example::
+
+ @app.route('/ping')
+ def ping():
+ socketio.emit('ping event', {'data': 42}, namespace='/chat')
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the global namespace.
+ :param to: Send the message to all the users in the given room, or to
+ the user with the given session ID. If this parameter is not
+ included, the event is sent to all connected users.
+ :param include_self: ``True`` to include the sender when broadcasting
+ or addressing a room, or ``False`` to send to
+ everyone but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+                         that client receives the message. To skip multiple sids,
+ pass a list.
+ :param callback: If given, this function will be called to acknowledge
+ that the client has received the message. The
+ arguments that will be passed to the function are
+ those provided by the client. Callback functions can
+ only be used when addressing an individual client.
+ """
+ namespace = kwargs.pop('namespace', '/')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ include_self = kwargs.pop('include_self', True)
+ skip_sid = kwargs.pop('skip_sid', None)
+ if not include_self and not skip_sid:
+ skip_sid = flask.request.sid
+ callback = kwargs.pop('callback', None)
+ if callback:
+            # wrap the callback so that it sets the app and request contexts
+ sid = None
+ original_callback = callback
+ original_namespace = namespace
+ if has_request_context():
+ sid = getattr(flask.request, 'sid', None)
+ original_namespace = getattr(flask.request, 'namespace', None)
+
+ def _callback_wrapper(*args):
+ return self._handle_event(original_callback, None,
+ original_namespace, sid, *args)
+
+ if sid:
+ # the callback wrapper above will install a request context
+ # before invoking the original callback
+ # we only use it if the emit was issued from a Socket.IO
+ # populated request context (i.e. request.sid is defined)
+ callback = _callback_wrapper
+ self.server.emit(event, *args, namespace=namespace, to=to,
+ skip_sid=skip_sid, callback=callback, **kwargs)
+
+ def call(self, event, *args, **kwargs): # pragma: no cover
+ """Emit a SocketIO event and wait for the response.
+
+ This method issues an emit with a callback and waits for the callback
+        to be invoked by the client before returning. If the callback isn't
+ invoked before the timeout, then a TimeoutError exception is raised. If
+ the Socket.IO connection drops during the wait, this method still waits
+ until the specified timeout. Example::
+
+ def get_status(client, data):
+ status = call('status', {'data': data}, to=client)
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the global namespace.
+ :param to: The session ID of the recipient client.
+ :param timeout: The waiting timeout. If the timeout is reached before
+ the client acknowledges the event, then a
+ ``TimeoutError`` exception is raised. The default is 60
+ seconds.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ client directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always
+ leave this parameter with its default value of
+ ``False``.
+ """
+ namespace = kwargs.pop('namespace', '/')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ return self.server.call(event, *args, namespace=namespace, to=to,
+ **kwargs)
+
+ def send(self, data, json=False, namespace=None, to=None,
+ callback=None, include_self=True, skip_sid=None, **kwargs):
+ """Send a server-generated SocketIO message.
+
+ This function sends a simple SocketIO message to one or more connected
+        clients. The message can be a string or a JSON blob. This is a
+        simpler version of ``emit()``; in most cases ``emit()`` should be
+        preferred. This function can be
+ used outside of a SocketIO event context, so it is appropriate to use
+ when the server is the originator of an event.
+
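+        Example (a sketch; the room name is illustrative)::
+
+            socketio.send('Server maintenance in 5 minutes.', to='lobby')
+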
+ :param data: The message to send, either a string or a JSON blob.
+ :param json: ``True`` if ``message`` is a JSON blob, ``False``
+ otherwise.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the global namespace.
+ :param to: Send the message to all the users in the given room, or to
+ the user with the given session ID. If this parameter is not
+ included, the event is sent to all connected users.
+ :param include_self: ``True`` to include the sender when broadcasting
+ or addressing a room, or ``False`` to send to
+ everyone but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+                         that client receives the message. To skip multiple sids,
+ pass a list.
+ :param callback: If given, this function will be called to acknowledge
+ that the client has received the message. The
+ arguments that will be passed to the function are
+ those provided by the client. Callback functions can
+ only be used when addressing an individual client.
+ """
+ skip_sid = flask.request.sid if not include_self else skip_sid
+ if json:
+ self.emit('json', data, namespace=namespace, to=to,
+ skip_sid=skip_sid, callback=callback, **kwargs)
+ else:
+ self.emit('message', data, namespace=namespace, to=to,
+ skip_sid=skip_sid, callback=callback, **kwargs)
+
+ def close_room(self, room, namespace=None):
+ """Close a room.
+
+ This function removes any users that are in the given room and then
+ deletes the room from the server. This function can be used outside
+ of a SocketIO event context.
+
+ :param room: The name of the room to close.
+ :param namespace: The namespace under which the room exists. Defaults
+ to the global namespace.
+ """
+ self.server.close_room(room, namespace)
+
+ def run(self, app, host=None, port=None, **kwargs): # pragma: no cover
+ """Run the SocketIO web server.
+
+ :param app: The Flask application instance.
+ :param host: The hostname or IP address for the server to listen on.
+ Defaults to 127.0.0.1.
+ :param port: The port number for the server to listen on. Defaults to
+ 5000.
+ :param debug: ``True`` to start the server in debug mode, ``False`` to
+ start in normal mode.
+ :param use_reloader: ``True`` to enable the Flask reloader, ``False``
+ to disable it.
+ :param reloader_options: A dictionary with options that are passed to
+ the Flask reloader, such as ``extra_files``,
+ ``reloader_type``, etc.
+ :param extra_files: A list of additional files that the Flask
+ reloader should watch. Defaults to ``None``.
+ Deprecated, use ``reloader_options`` instead.
+ :param log_output: If ``True``, the server logs all incoming
+                           connections. If ``False``, logging is disabled.
+ Defaults to ``True`` in debug mode, ``False``
+ in normal mode. Unused when the threading async
+ mode is used.
+ :param allow_unsafe_werkzeug: Set to ``True`` to allow the use of the
+ Werkzeug web server in a production
+ setting. Default is ``False``. Set to
+ ``True`` at your own risk.
+ :param kwargs: Additional web server options. The web server options
+ are specific to the server used in each of the supported
+ async modes. Note that options provided here will
+ not be seen when using an external web server such
+ as gunicorn, since this method is not called in that
+ case.
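+
+        Example (a sketch)::
+
+            if __name__ == '__main__':
+                socketio.run(app, debug=True)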
+ """
+ if host is None:
+ host = '127.0.0.1'
+ if port is None:
+ server_name = app.config['SERVER_NAME']
+ if server_name and ':' in server_name:
+ port = int(server_name.rsplit(':', 1)[1])
+ else:
+ port = 5000
+
+ debug = kwargs.pop('debug', app.debug)
+ log_output = kwargs.pop('log_output', debug)
+ use_reloader = kwargs.pop('use_reloader', debug)
+ extra_files = kwargs.pop('extra_files', None)
+ reloader_options = kwargs.pop('reloader_options', {})
+ if extra_files:
+ reloader_options['extra_files'] = extra_files
+
+ app.debug = debug
+ if app.debug and self.server.eio.async_mode != 'threading':
+ # put the debug middleware between the SocketIO middleware
+ # and the Flask application instance
+ #
+ # mw1 mw2 mw3 Flask app
+ # o ---- o ---- o ---- o
+ # /
+ # o Flask-SocketIO
+ # \ middleware
+ # o
+ # Flask-SocketIO WebSocket handler
+ #
+ # BECOMES
+ #
+ # dbg-mw mw1 mw2 mw3 Flask app
+ # o ---- o ---- o ---- o ---- o
+ # /
+ # o Flask-SocketIO
+ # \ middleware
+ # o
+ # Flask-SocketIO WebSocket handler
+ #
+ self.sockio_mw.wsgi_app = DebuggedApplication(
+ self.sockio_mw.wsgi_app, evalex=True)
+
+ allow_unsafe_werkzeug = kwargs.pop('allow_unsafe_werkzeug', False)
+ if self.server.eio.async_mode == 'threading':
+ try:
+ import simple_websocket # noqa: F401
+ except ImportError:
+ from werkzeug._internal import _log
+ _log('warning', 'WebSocket transport not available. Install '
+ 'simple-websocket for improved performance.')
+ if not sys.stdin or not sys.stdin.isatty(): # pragma: no cover
+ if not allow_unsafe_werkzeug:
+ raise RuntimeError('The Werkzeug web server is not '
+ 'designed to run in production. Pass '
+ 'allow_unsafe_werkzeug=True to the '
+ 'run() method to disable this error.')
+ else:
+ from werkzeug._internal import _log
+ _log('warning', ('Werkzeug appears to be used in a '
+ 'production deployment. Consider '
+ 'switching to a production web server '
+ 'instead.'))
+ app.run(host=host, port=port, threaded=True,
+ use_reloader=use_reloader, **reloader_options, **kwargs)
+ elif self.server.eio.async_mode == 'eventlet':
+ def run_server():
+ import eventlet
+ import eventlet.wsgi
+ import eventlet.green
+ addresses = eventlet.green.socket.getaddrinfo(host, port)
+ if not addresses:
+ raise RuntimeError(
+ 'Could not resolve host to a valid address')
+ eventlet_socket = eventlet.listen(addresses[0][4],
+ addresses[0][0])
+
+ # If provided an SSL argument, use an SSL socket
+ ssl_args = ['keyfile', 'certfile', 'server_side', 'cert_reqs',
+ 'ssl_version', 'ca_certs',
+ 'do_handshake_on_connect', 'suppress_ragged_eofs',
+ 'ciphers']
+ ssl_params = {k: kwargs[k] for k in kwargs
+ if k in ssl_args and kwargs[k] is not None}
+ for k in ssl_args:
+ kwargs.pop(k, None)
+ if len(ssl_params) > 0:
+                    ssl_params['server_side'] = True  # listening sockets must be server side
+ eventlet_socket = eventlet.wrap_ssl(eventlet_socket,
+ **ssl_params)
+
+ eventlet.wsgi.server(eventlet_socket, app,
+ log_output=log_output, **kwargs)
+
+ if use_reloader:
+ run_with_reloader(run_server, **reloader_options)
+ else:
+ run_server()
+ elif self.server.eio.async_mode == 'gevent':
+ from gevent import pywsgi
+ try:
+ from geventwebsocket.handler import WebSocketHandler
+ gevent_websocket = True
+ except ImportError:
+ # WebSocket support will come from the simple-websocket package
+ gevent_websocket = False
+
+ log = 'default'
+ if not log_output:
+ log = None
+ if gevent_websocket:
+ self.wsgi_server = pywsgi.WSGIServer(
+ (host, port), app, handler_class=WebSocketHandler,
+ log=log, **kwargs)
+ else:
+ self.wsgi_server = pywsgi.WSGIServer((host, port), app,
+ log=log, **kwargs)
+
+ if use_reloader:
+ # monkey patching is required by the reloader
+ from gevent import monkey
+ monkey.patch_thread()
+ monkey.patch_time()
+
+ def run_server():
+ self.wsgi_server.serve_forever()
+
+ run_with_reloader(run_server, **reloader_options)
+ else:
+ self.wsgi_server.serve_forever()
+
+ def stop(self):
+ """Stop a running SocketIO web server.
+
+        This method must be called from an HTTP or SocketIO handler function.
+ """
+ if self.server.eio.async_mode == 'threading':
+ func = flask.request.environ.get('werkzeug.server.shutdown')
+ if func:
+ func()
+ else:
+ raise RuntimeError('Cannot stop unknown web server')
+ elif self.server.eio.async_mode == 'eventlet':
+ raise SystemExit
+ elif self.server.eio.async_mode == 'gevent':
+ self.wsgi_server.stop()
+
+ def start_background_task(self, target, *args, **kwargs):
+ """Start a background task using the appropriate async model.
+
+ This is a utility function that applications can use to start a
+ background task using the method that is compatible with the
+ selected async mode.
+
+ :param target: the target function to execute.
+ :param args: arguments to pass to the function.
+ :param kwargs: keyword arguments to pass to the function.
+
+ This function returns an object that represents the background task,
+ on which the ``join()`` method can be invoked to wait for the task to
+ complete.
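+
+        Example (a sketch)::
+
+            def background_job():
+                while True:
+                    socketio.sleep(10)
+                    socketio.emit('heartbeat', {'beat': 1})
+
+            task = socketio.start_background_task(background_job)
+            # task.join() would block until the task finishes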
+ """
+ return self.server.start_background_task(target, *args, **kwargs)
+
+ def sleep(self, seconds=0):
+ """Sleep for the requested amount of time using the appropriate async
+ model.
+
+ This is a utility function that applications can use to put a task to
+ sleep without having to worry about using the correct call for the
+ selected async mode.
+ """
+ return self.server.sleep(seconds)
+
+ def test_client(self, app, namespace=None, query_string=None,
+ headers=None, auth=None, flask_test_client=None):
+ """The Socket.IO test client is useful for testing a Flask-SocketIO
+ server. It works in a similar way to the Flask Test Client, but
+ adapted to the Socket.IO server.
+
+ :param app: The Flask application instance.
+ :param namespace: The namespace for the client. If not provided, the
+ client connects to the server on the global
+ namespace.
+ :param query_string: A string with custom query string arguments.
+ :param headers: A dictionary with custom HTTP headers.
+ :param auth: Optional authentication data, given as a dictionary.
+ :param flask_test_client: The instance of the Flask test client
+ currently in use. Passing the Flask test
+ client is optional, but is necessary if you
+ want the Flask user session and any other
+ cookies set in HTTP routes accessible from
+ Socket.IO events.
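+
+        Example (a sketch)::
+
+            client = socketio.test_client(app)
+            client.emit('my event', {'data': 1})
+            received = client.get_received()
+            client.disconnect()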
+ """
+ return SocketIOTestClient(app, self, namespace=namespace,
+ query_string=query_string, headers=headers,
+ auth=auth,
+ flask_test_client=flask_test_client)
+
+ def _handle_event(self, handler, message, namespace, sid, *args):
+ environ = self.server.get_environ(sid, namespace=namespace)
+ if not environ:
+            # we don't have a record of this client, so ignore this event
+ return '', 400
+ app = environ['flask.app']
+ with app.request_context(environ):
+ if self.manage_session:
+ # manage a separate session for this client's Socket.IO events
+ # created as a copy of the regular user session
+ if 'saved_session' not in environ:
+ environ['saved_session'] = _ManagedSession(flask.session)
+ session_obj = environ['saved_session']
+ if hasattr(flask, 'globals') and \
+ hasattr(flask.globals, 'request_ctx'):
+ # update session for Flask >= 2.2
+ ctx = flask.globals.request_ctx._get_current_object()
+ else: # pragma: no cover
+ # update session for Flask < 2.2
+ ctx = flask._request_ctx_stack.top
+ ctx.session = session_obj
+ else:
+ # let Flask handle the user session
+ # for cookie based sessions, this effectively freezes the
+ # session to its state at connection time
+ # for server-side sessions, this allows HTTP and Socket.IO to
+ # share the session, with both having read/write access to it
+ session_obj = flask.session._get_current_object()
+ flask.request.sid = sid
+ flask.request.namespace = namespace
+ flask.request.event = {'message': message, 'args': args}
+ try:
+ if message == 'connect':
+ auth = args[1] if len(args) > 1 else None
+ try:
+ ret = handler(auth)
+ except TypeError:
+ ret = handler()
+ else:
+ ret = handler(*args)
+ except ConnectionRefusedError:
+ raise # let this error bubble up to python-socketio
+ except:
+ err_handler = self.exception_handlers.get(
+ namespace, self.default_exception_handler)
+ if err_handler is None:
+ raise
+ type, value, traceback = sys.exc_info()
+ return err_handler(value)
+ if not self.manage_session:
+ # when Flask is managing the user session, it needs to save it
+ if not hasattr(session_obj, 'modified') or \
+ session_obj.modified:
+ resp = app.response_class()
+ app.session_interface.save_session(app, session_obj, resp)
+ return ret
+
+
+def emit(event, *args, **kwargs):
+ """Emit a SocketIO event.
+
+ This function emits a SocketIO event to one or more connected clients. A
+ JSON blob can be attached to the event as payload. This is a function that
+    can only be called from a SocketIO event handler, as it obtains some
+ information from the current client context. Example::
+
+ @socketio.on('my event')
+ def handle_my_custom_event(json):
+ emit('my response', {'data': 42})
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the namespace used by the originating event.
+ A ``'/'`` can be used to explicitly specify the global
+ namespace.
+ :param callback: Callback function to invoke with the client's
+ acknowledgement.
+ :param broadcast: ``True`` to send the message to all clients, or ``False``
+ to only reply to the sender of the originating event.
+ :param to: Send the message to all the users in the given room, or to the
+ user with the given session ID. If this argument is not set and
+ ``broadcast`` is ``False``, then the message is sent only to the
+ originating user.
+ :param include_self: ``True`` to include the sender when broadcasting or
+ addressing a room, or ``False`` to send to everyone
+ but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+                     that client receives the message. To skip multiple sids,
+ pass a list.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always leave
+ this parameter with its default value of ``False``.
+ """
+ if 'namespace' in kwargs:
+ namespace = kwargs['namespace']
+ else:
+ namespace = flask.request.namespace
+ callback = kwargs.get('callback')
+ broadcast = kwargs.get('broadcast')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ if to is None and not broadcast:
+ to = flask.request.sid
+ include_self = kwargs.get('include_self', True)
+ skip_sid = kwargs.get('skip_sid')
+ ignore_queue = kwargs.get('ignore_queue', False)
+
+ socketio = flask.current_app.extensions['socketio']
+ return socketio.emit(event, *args, namespace=namespace, to=to,
+ include_self=include_self, skip_sid=skip_sid,
+ callback=callback, ignore_queue=ignore_queue)
+
+
+def call(event, *args, **kwargs): # pragma: no cover
+ """Emit a SocketIO event and wait for the response.
+
+ This function issues an emit with a callback and waits for the callback to
+    be invoked by the client before returning. If the callback isn't invoked
+ before the timeout, then a TimeoutError exception is raised. If the
+ Socket.IO connection drops during the wait, this method still waits until
+ the specified timeout. Example::
+
+ def get_status(client, data):
+ status = call('status', {'data': data}, to=client)
+
+ :param event: The name of the user event to emit.
+ :param args: A dictionary with the JSON data to send as payload.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the namespace used by the originating event.
+ A ``'/'`` can be used to explicitly specify the global
+ namespace.
+ :param to: The session ID of the recipient client. If this argument is not
+ given, the event is sent to the originating client.
+ :param timeout: The waiting timeout. If the timeout is reached before the
+ client acknowledges the event, then a ``TimeoutError``
+ exception is raised. The default is 60 seconds.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ client directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always leave
+ this parameter with its default value of ``False``.
+ """
+ if 'namespace' in kwargs:
+ namespace = kwargs['namespace']
+ else:
+ namespace = flask.request.namespace
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ if to is None:
+ to = flask.request.sid
+ timeout = kwargs.get('timeout', 60)
+ ignore_queue = kwargs.get('ignore_queue', False)
+
+ socketio = flask.current_app.extensions['socketio']
+ return socketio.call(event, *args, namespace=namespace, to=to,
+ ignore_queue=ignore_queue, timeout=timeout)
+
+
+def send(message, **kwargs):
+ """Send a SocketIO message.
+
+ This function sends a simple SocketIO message to one or more connected
+    clients. The message can be a string or a JSON blob. This is a simpler
+    version of ``emit()``; in most cases ``emit()`` should be preferred. This
+    is a function that
+ can only be called from a SocketIO event handler.
+
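+    Example (a sketch)::
+
+        @socketio.on('message')
+        def handle_message(msg):
+            send('echo: ' + msg)
+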
+ :param message: The message to send, either a string or a JSON blob.
+ :param json: ``True`` if ``message`` is a JSON blob, ``False``
+ otherwise.
+ :param namespace: The namespace under which the message is to be sent.
+ Defaults to the namespace used by the originating event.
+ An empty string can be used to use the global namespace.
+ :param callback: Callback function to invoke with the client's
+ acknowledgement.
+ :param broadcast: ``True`` to send the message to all connected clients, or
+ ``False`` to only reply to the sender of the originating
+ event.
+ :param to: Send the message to all the users in the given room, or to the
+ user with the given session ID. If this argument is not set and
+ ``broadcast`` is ``False``, then the message is sent only to the
+ originating user.
+ :param include_self: ``True`` to include the sender when broadcasting or
+ addressing a room, or ``False`` to send to everyone
+ but the sender.
+ :param skip_sid: The session id of a client to ignore when broadcasting
+ or addressing a room. This is typically set to the
+ originator of the message, so that everyone except
+                     that client receives the message. To skip multiple sids,
+ pass a list.
+ :param ignore_queue: Only used when a message queue is configured. If
+ set to ``True``, the event is emitted to the
+ clients directly, without going through the queue.
+ This is more efficient, but only works when a
+ single server process is used, or when there is a
+ single addressee. It is recommended to always leave
+ this parameter with its default value of ``False``.
+ """
+ json = kwargs.get('json', False)
+ if 'namespace' in kwargs:
+ namespace = kwargs['namespace']
+ else:
+ namespace = flask.request.namespace
+ callback = kwargs.get('callback')
+ broadcast = kwargs.get('broadcast')
+ to = kwargs.pop('to', None) or kwargs.pop('room', None)
+ if to is None and not broadcast:
+ to = flask.request.sid
+ include_self = kwargs.get('include_self', True)
+ skip_sid = kwargs.get('skip_sid')
+ ignore_queue = kwargs.get('ignore_queue', False)
+
+ socketio = flask.current_app.extensions['socketio']
+ return socketio.send(message, json=json, namespace=namespace, to=to,
+ include_self=include_self, skip_sid=skip_sid,
+ callback=callback, ignore_queue=ignore_queue)
+
+
+def join_room(room, sid=None, namespace=None):
+ """Join a room.
+
+ This function puts the user in a room, under the current namespace. The
+ user and the namespace are obtained from the event context. This is a
+ function that can only be called from a SocketIO event handler. Example::
+
+ @socketio.on('join')
+ def on_join(data):
+ username = session['username']
+ room = data['room']
+ join_room(room)
+ send(username + ' has entered the room.', to=room)
+
+ :param room: The name of the room to join.
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ socketio.server.enter_room(sid, room, namespace=namespace)
+
+
+def leave_room(room, sid=None, namespace=None):
+ """Leave a room.
+
+ This function removes the user from a room, under the current namespace.
+ The user and the namespace are obtained from the event context. Example::
+
+ @socketio.on('leave')
+ def on_leave(data):
+ username = session['username']
+ room = data['room']
+ leave_room(room)
+ send(username + ' has left the room.', to=room)
+
+ :param room: The name of the room to leave.
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ socketio.server.leave_room(sid, room, namespace=namespace)
+
+
+def close_room(room, namespace=None):
+ """Close a room.
+
+ This function removes any users that are in the given room and then deletes
+ the room from the server.
+
+ :param room: The name of the room to close.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ namespace = namespace or flask.request.namespace
+ socketio.server.close_room(room, namespace=namespace)
+
+
+def rooms(sid=None, namespace=None):
+ """Return a list of the rooms the client is in.
+
+ This function returns all the rooms the client has entered, including its
+ own room, assigned by the Socket.IO server.
+
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ return socketio.server.rooms(sid, namespace=namespace)
+
+
+def disconnect(sid=None, namespace=None, silent=False):
+ """Disconnect the client.
+
+ This function terminates the connection with the client. As a result of
+ this call the client will receive a disconnect event. Example::
+
+ @socketio.on('message')
+ def receive_message(msg):
+ if is_banned(session['username']):
+ disconnect()
+ else:
+ # ...
+
+ :param sid: The session id of the client. If not provided, the client is
+ obtained from the request context.
+ :param namespace: The namespace for the room. If not provided, the
+ namespace is obtained from the request context.
+    :param silent: This option is deprecated and currently ignored.
+ """
+ socketio = flask.current_app.extensions['socketio']
+ sid = sid or flask.request.sid
+ namespace = namespace or flask.request.namespace
+ return socketio.server.disconnect(sid, namespace=namespace)
diff --git a/tapdown/lib/python3.11/site-packages/flask_socketio/namespace.py b/tapdown/lib/python3.11/site-packages/flask_socketio/namespace.py
new file mode 100644
index 0000000..541fa79
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_socketio/namespace.py
@@ -0,0 +1,54 @@
+from socketio import Namespace as _Namespace
+
+
+class Namespace(_Namespace):
+ def __init__(self, namespace=None):
+ super().__init__(namespace)
+ self.socketio = None
+
+ def _set_socketio(self, socketio):
+ self.socketio = socketio
+
+ def trigger_event(self, event, *args):
+ """Dispatch an event to the proper handler method.
+
+        In the most common usage, this method is not overridden by subclasses,
+ as it performs the routing of events to methods. However, this
+ method can be overridden if special dispatching rules are needed, or if
+ having a single method that catches all events is desired.
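+
+        A class-based handler sketch (names are illustrative)::
+
+            class ChatNamespace(Namespace):
+                def on_my_event(self, data):
+                    emit('my response', data)
+
+            socketio.on_namespace(ChatNamespace('/chat'))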
+ """
+ handler_name = 'on_' + (event or '')
+ if not hasattr(self, handler_name):
+ # there is no handler for this event, so we ignore it
+ return
+ handler = getattr(self, handler_name)
+ try:
+ return self.socketio._handle_event(handler, event, self.namespace,
+ *args)
+ except TypeError:
+ if event == 'disconnect':
+ # legacy disconnect events do not have the reason argument
+ return self.socketio._handle_event(
+ handler, event, self.namespace, *args[:-1])
+ else:
+ raise
+
+ def emit(self, event, data=None, room=None, include_self=True,
+ namespace=None, callback=None):
+ """Emit a custom event to one or more connected clients."""
+ return self.socketio.emit(event, data, room=room,
+ include_self=include_self,
+ namespace=namespace or self.namespace,
+ callback=callback)
+
+ def send(self, data, room=None, include_self=True, namespace=None,
+ callback=None):
+ """Send a message to one or more connected clients."""
+ return self.socketio.send(data, room=room, include_self=include_self,
+ namespace=namespace or self.namespace,
+ callback=callback)
+
+ def close_room(self, room, namespace=None):
+ """Close a room."""
+ return self.socketio.close_room(room=room,
+ namespace=namespace or self.namespace)
diff --git a/tapdown/lib/python3.11/site-packages/flask_socketio/test_client.py b/tapdown/lib/python3.11/site-packages/flask_socketio/test_client.py
new file mode 100644
index 0000000..312bac1
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_socketio/test_client.py
@@ -0,0 +1,236 @@
+import uuid
+
+from socketio import packet
+from socketio.pubsub_manager import PubSubManager
+from werkzeug.test import EnvironBuilder
+
+
+class SocketIOTestClient:
+ """
+ This class is useful for testing a Flask-SocketIO server. It works in a
+ similar way to the Flask Test Client, but adapted to the Socket.IO server.
+
+ :param app: The Flask application instance.
+ :param socketio: The application's ``SocketIO`` instance.
+ :param namespace: The namespace for the client. If not provided, the client
+ connects to the server on the global namespace.
+ :param query_string: A string with custom query string arguments.
+ :param headers: A dictionary with custom HTTP headers.
+ :param auth: Optional authentication data, given as a dictionary.
+ :param flask_test_client: The instance of the Flask test client
+ currently in use. Passing the Flask test
+ client is optional, but is necessary if you
+ want the Flask user session and any other
+ cookies set in HTTP routes accessible from
+ Socket.IO events.
+ """
+ clients = {}
+
+ def __init__(self, app, socketio, namespace=None, query_string=None,
+ headers=None, auth=None, flask_test_client=None):
+ def _mock_send_packet(eio_sid, pkt):
+ # make sure the packet can be encoded and decoded
+ epkt = pkt.encode()
+ if not isinstance(epkt, list):
+ pkt = packet.Packet(encoded_packet=epkt)
+ else:
+ pkt = packet.Packet(encoded_packet=epkt[0])
+ for att in epkt[1:]:
+ pkt.add_attachment(att)
+ client = self.clients.get(eio_sid)
+ if not client:
+ return
+ if pkt.packet_type == packet.EVENT or \
+ pkt.packet_type == packet.BINARY_EVENT:
+ if pkt.data[0] == 'message' or pkt.data[0] == 'json':
+ client.queue.append({
+ 'name': pkt.data[0],
+ 'args': pkt.data[1],
+ 'namespace': pkt.namespace or '/'})
+ else:
+ client.queue.append({
+ 'name': pkt.data[0],
+ 'args': pkt.data[1:],
+ 'namespace': pkt.namespace or '/'})
+ elif pkt.packet_type == packet.ACK or \
+ pkt.packet_type == packet.BINARY_ACK:
+ client.acks = {'args': pkt.data,
+ 'namespace': pkt.namespace or '/'}
+ elif pkt.packet_type in [packet.DISCONNECT, packet.CONNECT_ERROR]:
+ client.connected[pkt.namespace or '/'] = False
+
+ _current_packet = None
+
+ def _mock_send_eio_packet(eio_sid, eio_pkt):
+ nonlocal _current_packet
+ if _current_packet is not None:
+ _current_packet.add_attachment(eio_pkt.data)
+ if _current_packet.attachment_count == \
+ len(_current_packet.attachments):
+ _mock_send_packet(eio_sid, _current_packet)
+ _current_packet = None
+ else:
+ pkt = packet.Packet(encoded_packet=eio_pkt.data)
+ if pkt.attachment_count == 0:
+ _mock_send_packet(eio_sid, pkt)
+ else:
+ _current_packet = pkt
+
+ self.app = app
+ self.flask_test_client = flask_test_client
+ self.eio_sid = uuid.uuid4().hex
+ self.clients[self.eio_sid] = self
+ self.callback_counter = 0
+ self.socketio = socketio
+ self.connected = {}
+ self.queue = []
+ self.acks = None
+ socketio.server._send_packet = _mock_send_packet
+ socketio.server._send_eio_packet = _mock_send_eio_packet
+ socketio.server.environ[self.eio_sid] = {}
+ socketio.server.async_handlers = False # easier to test when
+ socketio.server.eio.async_handlers = False # events are sync
+ if isinstance(socketio.server.manager, PubSubManager):
+ raise RuntimeError('Test client cannot be used with a message '
+ 'queue. Disable the queue on your test '
+ 'configuration.')
+ socketio.server.manager.initialize()
+ self.connect(namespace=namespace, query_string=query_string,
+ headers=headers, auth=auth)
+
+ def is_connected(self, namespace=None):
+ """Check if a namespace is connected.
+
+ :param namespace: The namespace to check. The global namespace is
+ assumed if this argument is not provided.
+ """
+ return self.connected.get(namespace or '/', False)
+
+ def connect(self, namespace=None, query_string=None, headers=None,
+ auth=None):
+ """Connect the client.
+
+ :param namespace: The namespace for the client. If not provided, the
+ client connects to the server on the global
+ namespace.
+ :param query_string: A string with custom query string arguments.
+ :param headers: A dictionary with custom HTTP headers.
+ :param auth: Optional authentication data, given as a dictionary.
+
+ Note that it is usually not necessary to explicitly call this method,
+ since a connection is automatically established when an instance of
+ this class is created. An example where this method would be useful
+ is when the application accepts multiple namespace connections.
+ """
+ url = '/socket.io'
+ namespace = namespace or '/'
+ if query_string:
+ if query_string[0] != '?':
+ query_string = '?' + query_string
+ url += query_string
+ environ = EnvironBuilder(url, headers=headers).get_environ()
+ environ['flask.app'] = self.app
+ if self.flask_test_client:
+ # inject cookies from Flask
+ if hasattr(self.flask_test_client, '_add_cookies_to_wsgi'):
+ # flask >= 2.3
+ self.flask_test_client._add_cookies_to_wsgi(environ)
+ else: # pragma: no cover
+ # flask < 2.3
+ self.flask_test_client.cookie_jar.inject_wsgi(environ)
+ self.socketio.server._handle_eio_connect(self.eio_sid, environ)
+ pkt = packet.Packet(packet.CONNECT, auth, namespace=namespace)
+ self.socketio.server._handle_eio_message(self.eio_sid, pkt.encode())
+ sid = self.socketio.server.manager.sid_from_eio_sid(self.eio_sid,
+ namespace)
+ if sid:
+ self.connected[namespace] = True
+
+ def disconnect(self, namespace=None):
+ """Disconnect the client.
+
+ :param namespace: The namespace to disconnect. The global namespace is
+ assumed if this argument is not provided.
+ """
+ if not self.is_connected(namespace):
+ raise RuntimeError('not connected')
+ pkt = packet.Packet(packet.DISCONNECT, namespace=namespace)
+ self.socketio.server._handle_eio_message(self.eio_sid, pkt.encode())
+ del self.connected[namespace or '/']
+
+ def emit(self, event, *args, **kwargs):
+ """Emit an event to the server.
+
+ :param event: The event name.
+ :param *args: The event arguments.
+ :param callback: ``True`` if the client requests a callback, ``False``
+ if not. Note that client-side callbacks are not
+ implemented, a callback request will just tell the
+ server to provide the arguments to invoke the
+ callback, but no callback is invoked. Instead, the
+ arguments that the server provided for the callback
+ are returned by this function.
+ :param namespace: The namespace of the event. The global namespace is
+ assumed if this argument is not provided.
+ """
+ namespace = kwargs.pop('namespace', None)
+ if not self.is_connected(namespace):
+ raise RuntimeError('not connected')
+ callback = kwargs.pop('callback', False)
+ id = None
+ if callback:
+ self.callback_counter += 1
+ id = self.callback_counter
+ pkt = packet.Packet(packet.EVENT, data=[event] + list(args),
+ namespace=namespace, id=id)
+ encoded_pkt = pkt.encode()
+ if isinstance(encoded_pkt, list):
+ for epkt in encoded_pkt:
+ self.socketio.server._handle_eio_message(self.eio_sid, epkt)
+ else:
+ self.socketio.server._handle_eio_message(self.eio_sid, encoded_pkt)
+ if self.acks is not None:
+ ack = self.acks
+ self.acks = None
+ return ack['args'][0] if len(ack['args']) == 1 \
+ else ack['args']
+
+ def send(self, data, json=False, callback=False, namespace=None):
+ """Send a text or JSON message to the server.
+
+ :param data: A string, dictionary or list to send to the server.
+ :param json: ``True`` to send a JSON message, ``False`` to send a text
+ message.
+ :param callback: ``True`` if the client requests a callback, ``False``
+ if not. Note that client-side callbacks are not
+ implemented, a callback request will just tell the
+ server to provide the arguments to invoke the
+ callback, but no callback is invoked. Instead, the
+ arguments that the server provided for the callback
+ are returned by this function.
+ :param namespace: The namespace of the event. The global namespace is
+ assumed if this argument is not provided.
+ """
+ if json:
+ msg = 'json'
+ else:
+ msg = 'message'
+ return self.emit(msg, data, callback=callback, namespace=namespace)
+
+ def get_received(self, namespace=None):
+ """Return the list of messages received from the server.
+
+ Since this is not a real client, any time the server emits an event,
+ the event is simply stored. The test code can invoke this method to
+ obtain the list of events that were received since the last call.
+
+ :param namespace: The namespace to get events from. The global
+ namespace is assumed if this argument is not
+ provided.
+ """
+ if not self.is_connected(namespace):
+ raise RuntimeError('not connected')
+ namespace = namespace or '/'
+ r = [pkt for pkt in self.queue if pkt['namespace'] == namespace]
+ self.queue = [pkt for pkt in self.queue if pkt not in r]
+ return r
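
A minimal sketch of a test driving this client, obtained via `SocketIO.test_client()`; the `echo` handler and event names are illustrative assumptions:

```python
from flask import Flask
from flask_socketio import SocketIO, emit

app = Flask(__name__)
socketio = SocketIO(app)

@socketio.on('echo')
def echo(data):
    emit('echo_reply', data)  # replies to the emitting client

def test_echo():
    client = socketio.test_client(app)  # connects on the global namespace
    client.emit('echo', {'value': 42})
    received = client.get_received()    # events captured since last call
    assert received[0]['name'] == 'echo_reply'
    assert received[0]['args'][0] == {'value': 42}
    client.disconnect()
```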
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/INSTALLER b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/LICENSE.rst b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/LICENSE.rst
new file mode 100644
index 0000000..9d227a0
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/METADATA b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/METADATA
new file mode 100644
index 0000000..92f239c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/METADATA
@@ -0,0 +1,109 @@
+Metadata-Version: 2.1
+Name: Flask-SQLAlchemy
+Version: 3.1.1
+Summary: Add SQLAlchemy support to your Flask application.
+Maintainer-email: Pallets
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Requires-Dist: flask>=2.2.5
+Requires-Dist: sqlalchemy>=2.0.16
+Project-URL: Changes, https://flask-sqlalchemy.palletsprojects.com/changes/
+Project-URL: Chat, https://discord.gg/pallets
+Project-URL: Documentation, https://flask-sqlalchemy.palletsprojects.com
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Issue Tracker, https://github.com/pallets-eco/flask-sqlalchemy/issues/
+Project-URL: Source Code, https://github.com/pallets-eco/flask-sqlalchemy/
+
+Flask-SQLAlchemy
+================
+
+Flask-SQLAlchemy is an extension for `Flask`_ that adds support for
+`SQLAlchemy`_ to your application. It aims to simplify using SQLAlchemy
+with Flask by providing useful defaults and extra helpers that make it
+easier to accomplish common tasks.
+
+.. _Flask: https://palletsprojects.com/p/flask/
+.. _SQLAlchemy: https://www.sqlalchemy.org
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+ $ pip install -U Flask-SQLAlchemy
+
+.. _pip: https://pip.pypa.io/en/stable/getting-started/
+
+
+A Simple Example
+----------------
+
+.. code-block:: python
+
+ from flask import Flask
+ from flask_sqlalchemy import SQLAlchemy
+ from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
+
+ app = Flask(__name__)
+ app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.sqlite"
+
+ class Base(DeclarativeBase):
+ pass
+
+ db = SQLAlchemy(app, model_class=Base)
+
+ class User(db.Model):
+ id: Mapped[int] = mapped_column(db.Integer, primary_key=True)
+ username: Mapped[str] = mapped_column(db.String, unique=True, nullable=False)
+
+ with app.app_context():
+ db.create_all()
+
+ db.session.add(User(username="example"))
+ db.session.commit()
+
+ users = db.session.execute(db.select(User)).scalars()
+
+
+Contributing
+------------
+
+For guidance on setting up a development environment and how to make a
+contribution to Flask-SQLAlchemy, see the `contributing guidelines`_.
+
+.. _contributing guidelines: https://github.com/pallets-eco/flask-sqlalchemy/blob/main/CONTRIBUTING.rst
+
+
+Donate
+------
+
+The Pallets organization develops and supports Flask-SQLAlchemy and
+other popular packages. In order to grow the community of contributors
+and users, and allow the maintainers to devote more time to the
+projects, `please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Documentation: https://flask-sqlalchemy.palletsprojects.com/
+- Changes: https://flask-sqlalchemy.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/Flask-SQLAlchemy/
+- Source Code: https://github.com/pallets-eco/flask-sqlalchemy/
+- Issue Tracker: https://github.com/pallets-eco/flask-sqlalchemy/issues/
+- Website: https://palletsprojects.com/
+- Twitter: https://twitter.com/PalletsTeam
+- Chat: https://discord.gg/pallets
+
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/RECORD b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/RECORD
new file mode 100644
index 0000000..4df6c83
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/RECORD
@@ -0,0 +1,27 @@
+flask_sqlalchemy-3.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+flask_sqlalchemy-3.1.1.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
+flask_sqlalchemy-3.1.1.dist-info/METADATA,sha256=lBxR1akBt7n9XBjIVTL2OV52OhCfFrb-Mqtoe0DCbR8,3432
+flask_sqlalchemy-3.1.1.dist-info/RECORD,,
+flask_sqlalchemy-3.1.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+flask_sqlalchemy-3.1.1.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
+flask_sqlalchemy/__init__.py,sha256=he_w4qQQVS2Z1ms5GCTptDTXNOXBXw0n8zSuWCp8n6Y,653
+flask_sqlalchemy/__pycache__/__init__.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/cli.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/extension.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/model.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/pagination.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/query.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/record_queries.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/session.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/table.cpython-311.pyc,,
+flask_sqlalchemy/__pycache__/track_modifications.cpython-311.pyc,,
+flask_sqlalchemy/cli.py,sha256=pg3QDxP36GW2qnwe_CpPtkRhPchyVSGM6zlBNWuNCFE,484
+flask_sqlalchemy/extension.py,sha256=71tP_kNtb5VgZdafy_OH1sWdZOA6PaT7cJqX7tKgZ-k,38261
+flask_sqlalchemy/model.py,sha256=_mSisC2Eni0TgTyFWeN_O4LIexTeP_sVTdxh03yMK50,11461
+flask_sqlalchemy/pagination.py,sha256=JFpllrqkRkwacb8DAmQWaz9wsvQa0dypfSkhUDSC2ws,11119
+flask_sqlalchemy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+flask_sqlalchemy/query.py,sha256=Uls9qbmnpb9Vba43EDfsRP17eHJ0X4VG7SE22tH5R3g,3748
+flask_sqlalchemy/record_queries.py,sha256=ouS1ayj16h76LJprx13iYdoFZbm6m8OncrOgAVbG1Sk,3520
+flask_sqlalchemy/session.py,sha256=pBbtN8iDc8yuGVt0k18BvZHh2uEI7QPzZXO7eXrRi1g,3426
+flask_sqlalchemy/table.py,sha256=wAPOy8qwyAxpMwOIUJY4iMOultzz2W0D6xvBkQ7U2CE,859
+flask_sqlalchemy/track_modifications.py,sha256=yieyozj7IiVzwnAGZ-ZrgqrzjrUfG0kPrXBfW_hStSU,2755
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/REQUESTED b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/REQUESTED
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/WHEEL b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/WHEEL
new file mode 100644
index 0000000..3b5e64b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy-3.1.1.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.9.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/__init__.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/__init__.py
new file mode 100644
index 0000000..c2fa059
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/__init__.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+import typing as t
+
+from .extension import SQLAlchemy
+
+__all__ = [
+ "SQLAlchemy",
+]
+
+
+def __getattr__(name: str) -> t.Any:
+ if name == "__version__":
+ import importlib.metadata
+ import warnings
+
+ warnings.warn(
+ "The '__version__' attribute is deprecated and will be removed in"
+ " Flask-SQLAlchemy 3.2. Use feature detection or"
+ " 'importlib.metadata.version(\"flask-sqlalchemy\")' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return importlib.metadata.version("flask-sqlalchemy")
+
+ raise AttributeError(name)
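
The deprecation message above points to `importlib.metadata` as the replacement; a one-line sketch:

```python
import importlib.metadata

# Preferred over flask_sqlalchemy.__version__, which warns and will be removed.
print(importlib.metadata.version("flask-sqlalchemy"))  # e.g. "3.1.1"
```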
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/cli.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/cli.py
new file mode 100644
index 0000000..d7d7e4b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/cli.py
@@ -0,0 +1,16 @@
+from __future__ import annotations
+
+import typing as t
+
+from flask import current_app
+
+
+def add_models_to_shell() -> dict[str, t.Any]:
+ """Registered with :meth:`~flask.Flask.shell_context_processor` if
+ ``add_models_to_shell`` is enabled. Adds the ``db`` instance and all model classes
+ to ``flask shell``.
+ """
+ db = current_app.extensions["sqlalchemy"]
+ out = {m.class_.__name__: m.class_ for m in db.Model._sa_registry.mappers}
+ out["db"] = db
+ return out
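
Assuming an app that defines a `User` model and leaves `add_models_to_shell` enabled (the default), a `flask shell` session might look like this (illustrative output):

```
$ flask shell
>>> db
<SQLAlchemy sqlite:///example.sqlite>
>>> User
<class 'app.User'>
>>> db.session.execute(db.select(User)).scalars().all()
[<User 1>]
```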
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/extension.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/extension.py
new file mode 100644
index 0000000..43e1b9a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/extension.py
@@ -0,0 +1,1008 @@
+from __future__ import annotations
+
+import os
+import types
+import typing as t
+import warnings
+from weakref import WeakKeyDictionary
+
+import sqlalchemy as sa
+import sqlalchemy.event as sa_event
+import sqlalchemy.exc as sa_exc
+import sqlalchemy.orm as sa_orm
+from flask import abort
+from flask import current_app
+from flask import Flask
+from flask import has_app_context
+
+from .model import _QueryProperty
+from .model import BindMixin
+from .model import DefaultMeta
+from .model import DefaultMetaNoName
+from .model import Model
+from .model import NameMixin
+from .pagination import Pagination
+from .pagination import SelectPagination
+from .query import Query
+from .session import _app_ctx_id
+from .session import Session
+from .table import _Table
+
+_O = t.TypeVar("_O", bound=object) # Based on sqlalchemy.orm._typing.py
+
+
+# Type accepted for model_class argument
+_FSA_MCT = t.TypeVar(
+ "_FSA_MCT",
+ bound=t.Union[
+ t.Type[Model],
+ sa_orm.DeclarativeMeta,
+ t.Type[sa_orm.DeclarativeBase],
+ t.Type[sa_orm.DeclarativeBaseNoMeta],
+ ],
+)
+
+
+# Type returned by make_declarative_base
+class _FSAModel(Model):
+ metadata: sa.MetaData
+
+
+def _get_2x_declarative_bases(
+ model_class: _FSA_MCT,
+) -> list[t.Type[t.Union[sa_orm.DeclarativeBase, sa_orm.DeclarativeBaseNoMeta]]]:
+ return [
+ b
+ for b in model_class.__bases__
+ if issubclass(b, (sa_orm.DeclarativeBase, sa_orm.DeclarativeBaseNoMeta))
+ ]
+
+
+class SQLAlchemy:
+ """Integrates SQLAlchemy with Flask. This handles setting up one or more engines,
+ associating tables and models with specific engines, and cleaning up connections and
+ sessions after each request.
+
+ Only the engine configuration is specific to each application; other things like
+ the model, table, metadata, and session are shared for all applications using that
+ extension instance. Call :meth:`init_app` to configure the extension on an
+ application.
+
+ After creating the extension, create model classes by subclassing :attr:`Model`, and
+ table classes with :attr:`Table`. These can be accessed before :meth:`init_app` is
+ called, making it possible to define the models separately from the application.
+
+ Accessing :attr:`session` and :attr:`engine` requires an active Flask application
+ context. This includes methods like :meth:`create_all` which use the engine.
+
+ This class also provides access to names in SQLAlchemy's ``sqlalchemy`` and
+ ``sqlalchemy.orm`` modules. For example, you can use ``db.Column`` and
+ ``db.relationship`` instead of importing ``sqlalchemy.Column`` and
+ ``sqlalchemy.orm.relationship``. This can be convenient when defining models.
+
+ :param app: Call :meth:`init_app` on this Flask application now.
+ :param metadata: Use this as the default :class:`sqlalchemy.schema.MetaData`. Useful
+ for setting a naming convention.
+ :param session_options: Arguments used by :attr:`session` to create each session
+ instance. A ``scopefunc`` key will be passed to the scoped session, not the
+ session instance. See :class:`sqlalchemy.orm.sessionmaker` for a list of
+ arguments.
+ :param query_class: Use this as the default query class for models and dynamic
+ relationships. The query interface is considered legacy in SQLAlchemy.
+ :param model_class: Use this as the model base class when creating the declarative
+ model class :attr:`Model`. Can also be a fully created declarative model class
+ for further customization.
+ :param engine_options: Default arguments used when creating every engine. These are
+ lower precedence than application config. See :func:`sqlalchemy.create_engine`
+ for a list of arguments.
+ :param add_models_to_shell: Add the ``db`` instance and all model classes to
+ ``flask shell``.
+
+ .. versionchanged:: 3.1.0
+ The ``metadata`` parameter can still be used with SQLAlchemy 1.x classes,
+ but is ignored when using SQLAlchemy 2.x style of declarative classes.
+ Instead, specify metadata on your Base class.
+
+ .. versionchanged:: 3.1.0
+ Added the ``disable_autonaming`` parameter.
+
+ .. versionchanged:: 3.1.0
+ Changed ``model_class`` parameter to accept a SQLAlchemy 2.x
+ declarative base subclass.
+
+ .. versionchanged:: 3.0
+ An active Flask application context is always required to access ``session`` and
+ ``engine``.
+
+ .. versionchanged:: 3.0
+ Separate ``metadata`` are used for each bind key.
+
+ .. versionchanged:: 3.0
+ The ``engine_options`` parameter is applied as defaults before per-engine
+ configuration.
+
+ .. versionchanged:: 3.0
+ The session class can be customized in ``session_options``.
+
+ .. versionchanged:: 3.0
+ Added the ``add_models_to_shell`` parameter.
+
+ .. versionchanged:: 3.0
+ Engines are created when calling ``init_app`` rather than the first time they
+ are accessed.
+
+ .. versionchanged:: 3.0
+ All parameters except ``app`` are keyword-only.
+
+ .. versionchanged:: 3.0
+ The extension instance is stored directly as ``app.extensions["sqlalchemy"]``.
+
+ .. versionchanged:: 3.0
+ Setup methods are renamed with a leading underscore. They are considered
+ internal interfaces which may change at any time.
+
+ .. versionchanged:: 3.0
+ Removed the ``use_native_unicode`` parameter and config.
+
+ .. versionchanged:: 2.4
+ Added the ``engine_options`` parameter.
+
+ .. versionchanged:: 2.1
+ Added the ``metadata``, ``query_class``, and ``model_class`` parameters.
+
+ .. versionchanged:: 2.1
+ Use the same query class across ``session``, ``Model.query`` and
+ ``Query``.
+
+ .. versionchanged:: 0.16
+ ``scopefunc`` is accepted in ``session_options``.
+
+ .. versionchanged:: 0.10
+ Added the ``session_options`` parameter.
+ """
+
+ def __init__(
+ self,
+ app: Flask | None = None,
+ *,
+ metadata: sa.MetaData | None = None,
+ session_options: dict[str, t.Any] | None = None,
+ query_class: type[Query] = Query,
+ model_class: _FSA_MCT = Model, # type: ignore[assignment]
+ engine_options: dict[str, t.Any] | None = None,
+ add_models_to_shell: bool = True,
+ disable_autonaming: bool = False,
+ ):
+ if session_options is None:
+ session_options = {}
+
+ self.Query = query_class
+ """The default query class used by ``Model.query`` and ``lazy="dynamic"``
+ relationships.
+
+ .. warning::
+ The query interface is considered legacy in SQLAlchemy.
+
+ Customize this by passing the ``query_class`` parameter to the extension.
+ """
+
+ self.session = self._make_scoped_session(session_options)
+ """A :class:`sqlalchemy.orm.scoping.scoped_session` that creates instances of
+ :class:`.Session` scoped to the current Flask application context. The session
+ will be removed, returning the engine connection to the pool, when the
+ application context exits.
+
+ Customize this by passing ``session_options`` to the extension.
+
+ This requires that a Flask application context is active.
+
+ .. versionchanged:: 3.0
+ The session is scoped to the current app context.
+ """
+
+ self.metadatas: dict[str | None, sa.MetaData] = {}
+ """Map of bind keys to :class:`sqlalchemy.schema.MetaData` instances. The
+ ``None`` key refers to the default metadata, and is available as
+ :attr:`metadata`.
+
+ Customize the default metadata by passing the ``metadata`` parameter to the
+ extension. This can be used to set a naming convention. When metadata for
+ another bind key is created, it copies the default's naming convention.
+
+ .. versionadded:: 3.0
+ """
+
+ if metadata is not None:
+ if len(_get_2x_declarative_bases(model_class)) > 0:
+ warnings.warn(
+ "When using SQLAlchemy 2.x style of declarative classes,"
+ " the `metadata` should be an attribute of the base class."
+ "The metadata passed into SQLAlchemy() is ignored.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ else:
+ metadata.info["bind_key"] = None
+ self.metadatas[None] = metadata
+
+ self.Table = self._make_table_class()
+ """A :class:`sqlalchemy.schema.Table` class that chooses a metadata
+ automatically.
+
+ Unlike the base ``Table``, the ``metadata`` argument is not required. If it is
+ not given, it is selected based on the ``bind_key`` argument.
+
+ :param bind_key: Used to select a different metadata.
+ :param args: Arguments passed to the base class. These are typically the table's
+ name, columns, and constraints.
+ :param kwargs: Arguments passed to the base class.
+
+ .. versionchanged:: 3.0
+ This is a subclass of SQLAlchemy's ``Table`` rather than a function.
+ """
+
+ self.Model = self._make_declarative_base(
+ model_class, disable_autonaming=disable_autonaming
+ )
+ """A SQLAlchemy declarative model class. Subclass this to define database
+ models.
+
+ If a model does not set ``__tablename__``, it will be generated by converting
+ the class name from ``CamelCase`` to ``snake_case``. It will not be generated
+ if the model looks like it uses single-table inheritance.
+
+ If a model or parent class sets ``__bind_key__``, it will use that metadata and
+ database engine. Otherwise, it will use the default :attr:`metadata` and
+ :attr:`engine`. This is ignored if the model sets ``metadata`` or ``__table__``.
+
+ For code using the SQLAlchemy 1.x API, customize this model by subclassing
+ :class:`.Model` and passing the ``model_class`` parameter to the extension.
+ A fully created declarative model class can be
+ passed as well, to use a custom metaclass.
+
+ For code using the SQLAlchemy 2.x API, customize this model by subclassing
+ :class:`sqlalchemy.orm.DeclarativeBase` or
+ :class:`sqlalchemy.orm.DeclarativeBaseNoMeta`
+ and passing the ``model_class`` parameter to the extension.
+ """
+
+ if engine_options is None:
+ engine_options = {}
+
+ self._engine_options = engine_options
+ self._app_engines: WeakKeyDictionary[Flask, dict[str | None, sa.engine.Engine]]
+ self._app_engines = WeakKeyDictionary()
+ self._add_models_to_shell = add_models_to_shell
+
+ if app is not None:
+ self.init_app(app)
+
+ def __repr__(self) -> str:
+ if not has_app_context():
+ return f"<{type(self).__name__}>"
+
+ message = f"{type(self).__name__} {self.engine.url}"
+
+ if len(self.engines) > 1:
+ message = f"{message} +{len(self.engines) - 1}"
+
+ return f"<{message}>"
+
+ def init_app(self, app: Flask) -> None:
+ """Initialize a Flask application for use with this extension instance. This
+ must be called before accessing the database engine or session with the app.
+
+ This sets default configuration values, then configures the extension on the
+ application and creates the engines for each bind key. Therefore, this must be
+ called after the application has been configured. Changes to application config
+ after this call will not be reflected.
+
+ The following keys from ``app.config`` are used:
+
+ - :data:`.SQLALCHEMY_DATABASE_URI`
+ - :data:`.SQLALCHEMY_ENGINE_OPTIONS`
+ - :data:`.SQLALCHEMY_ECHO`
+ - :data:`.SQLALCHEMY_BINDS`
+ - :data:`.SQLALCHEMY_RECORD_QUERIES`
+ - :data:`.SQLALCHEMY_TRACK_MODIFICATIONS`
+
+ :param app: The Flask application to initialize.
+ """
+ if "sqlalchemy" in app.extensions:
+ raise RuntimeError(
+ "A 'SQLAlchemy' instance has already been registered on this Flask app."
+ " Import and use that instance instead."
+ )
+
+ app.extensions["sqlalchemy"] = self
+ app.teardown_appcontext(self._teardown_session)
+
+ if self._add_models_to_shell:
+ from .cli import add_models_to_shell
+
+ app.shell_context_processor(add_models_to_shell)
+
+ basic_uri: str | sa.engine.URL | None = app.config.setdefault(
+ "SQLALCHEMY_DATABASE_URI", None
+ )
+ basic_engine_options = self._engine_options.copy()
+ basic_engine_options.update(
+ app.config.setdefault("SQLALCHEMY_ENGINE_OPTIONS", {})
+ )
+ echo: bool = app.config.setdefault("SQLALCHEMY_ECHO", False)
+ config_binds: dict[
+ str | None, str | sa.engine.URL | dict[str, t.Any]
+ ] = app.config.setdefault("SQLALCHEMY_BINDS", {})
+ engine_options: dict[str | None, dict[str, t.Any]] = {}
+
+ # Build the engine config for each bind key.
+ for key, value in config_binds.items():
+ engine_options[key] = self._engine_options.copy()
+
+ if isinstance(value, (str, sa.engine.URL)):
+ engine_options[key]["url"] = value
+ else:
+ engine_options[key].update(value)
+
+ # Build the engine config for the default bind key.
+ if basic_uri is not None:
+ basic_engine_options["url"] = basic_uri
+
+ if "url" in basic_engine_options:
+ engine_options.setdefault(None, {}).update(basic_engine_options)
+
+ if not engine_options:
+ raise RuntimeError(
+ "Either 'SQLALCHEMY_DATABASE_URI' or 'SQLALCHEMY_BINDS' must be set."
+ )
+
+ engines = self._app_engines.setdefault(app, {})
+
+ # Dispose existing engines in case init_app is called again.
+ if engines:
+ for engine in engines.values():
+ engine.dispose()
+
+ engines.clear()
+
+ # Create the metadata and engine for each bind key.
+ for key, options in engine_options.items():
+ self._make_metadata(key)
+ options.setdefault("echo", echo)
+ options.setdefault("echo_pool", echo)
+ self._apply_driver_defaults(options, app)
+ engines[key] = self._make_engine(key, options, app)
+
+ if app.config.setdefault("SQLALCHEMY_RECORD_QUERIES", False):
+ from . import record_queries
+
+ for engine in engines.values():
+ record_queries._listen(engine)
+
+ if app.config.setdefault("SQLALCHEMY_TRACK_MODIFICATIONS", False):
+ from . import track_modifications
+
+ track_modifications._listen(self.session)
+
+ def _make_scoped_session(
+ self, options: dict[str, t.Any]
+ ) -> sa_orm.scoped_session[Session]:
+ """Create a :class:`sqlalchemy.orm.scoping.scoped_session` around the factory
+ from :meth:`_make_session_factory`. The result is available as :attr:`session`.
+
+ The scope function can be customized using the ``scopefunc`` key in the
+ ``session_options`` parameter to the extension. By default it uses the current
+ thread or greenlet id.
+
+ This method is used for internal setup. Its signature may change at any time.
+
+ :meta private:
+
+ :param options: The ``session_options`` parameter from ``__init__``. Keyword
+ arguments passed to the session factory. A ``scopefunc`` key is popped.
+
+ .. versionchanged:: 3.0
+ The session is scoped to the current app context.
+
+ .. versionchanged:: 3.0
+ Renamed from ``create_scoped_session``, this method is internal.
+ """
+ scope = options.pop("scopefunc", _app_ctx_id)
+ factory = self._make_session_factory(options)
+ return sa_orm.scoped_session(factory, scope)
+
+ def _make_session_factory(
+ self, options: dict[str, t.Any]
+ ) -> sa_orm.sessionmaker[Session]:
+ """Create the SQLAlchemy :class:`sqlalchemy.orm.sessionmaker` used by
+ :meth:`_make_scoped_session`.
+
+ To customize, pass the ``session_options`` parameter to :class:`SQLAlchemy`. To
+ customize the session class, subclass :class:`.Session` and pass it as the
+ ``class_`` key.
+
+ This method is used for internal setup. Its signature may change at any time.
+
+ :meta private:
+
+ :param options: The ``session_options`` parameter from ``__init__``. Keyword
+ arguments passed to the session factory.
+
+ .. versionchanged:: 3.0
+ The session class can be customized.
+
+ .. versionchanged:: 3.0
+ Renamed from ``create_session``, this method is internal.
+ """
+ options.setdefault("class_", Session)
+ options.setdefault("query_cls", self.Query)
+ return sa_orm.sessionmaker(db=self, **options)
+
+ def _teardown_session(self, exc: BaseException | None) -> None:
+ """Remove the current session at the end of the request.
+
+ :meta private:
+
+ .. versionadded:: 3.0
+ """
+ self.session.remove()
+
+ def _make_metadata(self, bind_key: str | None) -> sa.MetaData:
+ """Get or create a :class:`sqlalchemy.schema.MetaData` for the given bind key.
+
+ This method is used for internal setup. Its signature may change at any time.
+
+ :meta private:
+
+ :param bind_key: The name of the metadata being created.
+
+ .. versionadded:: 3.0
+ """
+ if bind_key in self.metadatas:
+ return self.metadatas[bind_key]
+
+ if bind_key is not None:
+ # Copy the naming convention from the default metadata.
+ naming_convention = self._make_metadata(None).naming_convention
+ else:
+ naming_convention = None
+
+ # Set the bind key in info to be used by session.get_bind.
+ metadata = sa.MetaData(
+ naming_convention=naming_convention, info={"bind_key": bind_key}
+ )
+ self.metadatas[bind_key] = metadata
+ return metadata
+
+ def _make_table_class(self) -> type[_Table]:
+ """Create a SQLAlchemy :class:`sqlalchemy.schema.Table` class that chooses a
+ metadata automatically based on the ``bind_key``. The result is available as
+ :attr:`Table`.
+
+ This method is used for internal setup. Its signature may change at any time.
+
+ :meta private:
+
+ .. versionadded:: 3.0
+ """
+
+ class Table(_Table):
+ def __new__(
+ cls, *args: t.Any, bind_key: str | None = None, **kwargs: t.Any
+ ) -> Table:
+ # If a metadata arg is passed, go directly to the base Table. Also do
+ # this for no args so the correct error is shown.
+ if not args or (len(args) >= 2 and isinstance(args[1], sa.MetaData)):
+ return super().__new__(cls, *args, **kwargs)
+
+ metadata = self._make_metadata(bind_key)
+ return super().__new__(cls, *[args[0], metadata, *args[1:]], **kwargs)
+
+ return Table
+
+ def _make_declarative_base(
+ self,
+ model_class: _FSA_MCT,
+ disable_autonaming: bool = False,
+ ) -> t.Type[_FSAModel]:
+ """Create a SQLAlchemy declarative model class. The result is available as
+ :attr:`Model`.
+
+ To customize, subclass :class:`.Model` and pass it as ``model_class`` to
+ :class:`SQLAlchemy`. To customize at the metaclass level, pass an already
+ created declarative model class as ``model_class``.
+
+ This method is used for internal setup. Its signature may change at any time.
+
+ :meta private:
+
+ :param model_class: A model base class, or an already created declarative model
+ class.
+
+ :param disable_autonaming: Turns off automatic tablename generation in models.
+
+ .. versionchanged:: 3.1.0
+ Added support for passing SQLAlchemy 2.x base class as model class.
+ Added optional ``disable_autonaming`` parameter.
+
+ .. versionchanged:: 3.0
+ Renamed with a leading underscore, this method is internal.
+
+ .. versionchanged:: 2.3
+ ``model`` can be an already created declarative model class.
+ """
+ model: t.Type[_FSAModel]
+ declarative_bases = _get_2x_declarative_bases(model_class)
+ if len(declarative_bases) > 1:
+ # raise error if more than one declarative base is found
+ raise ValueError(
+ "Only one declarative base can be passed to SQLAlchemy."
+ " Got: {}".format(model_class.__bases__)
+ )
+ elif len(declarative_bases) == 1:
+ body = dict(model_class.__dict__)
+ body["__fsa__"] = self
+ mixin_classes = [BindMixin, NameMixin, Model]
+ if disable_autonaming:
+ mixin_classes.remove(NameMixin)
+ model = types.new_class(
+ "FlaskSQLAlchemyBase",
+ (*mixin_classes, *model_class.__bases__),
+ {"metaclass": type(declarative_bases[0])},
+ lambda ns: ns.update(body),
+ )
+ elif not isinstance(model_class, sa_orm.DeclarativeMeta):
+ metadata = self._make_metadata(None)
+ metaclass = DefaultMetaNoName if disable_autonaming else DefaultMeta
+ model = sa_orm.declarative_base(
+ metadata=metadata, cls=model_class, name="Model", metaclass=metaclass
+ )
+ else:
+ model = model_class # type: ignore[assignment]
+
+ if None not in self.metadatas:
+ # Use the model's metadata as the default metadata.
+ model.metadata.info["bind_key"] = None
+ self.metadatas[None] = model.metadata
+ else:
+ # Use the passed in default metadata as the model's metadata.
+ model.metadata = self.metadatas[None]
+
+ model.query_class = self.Query
+ model.query = _QueryProperty() # type: ignore[assignment]
+ model.__fsa__ = self
+ return model
+
+ def _apply_driver_defaults(self, options: dict[str, t.Any], app: Flask) -> None:
+ """Apply driver-specific configuration to an engine.
+
+ SQLite in-memory databases use ``StaticPool`` and disable ``check_same_thread``.
+ File paths are relative to the app's :attr:`~flask.Flask.instance_path`,
+ which is created if it doesn't exist.
+
+ MySQL sets ``charset="utf8mb4"``, and ``pool_timeout`` defaults to 2 hours.
+
+ This method is used for internal setup. Its signature may change at any time.
+
+ :meta private:
+
+ :param options: Arguments passed to the engine.
+ :param app: The application that the engine configuration belongs to.
+
+ .. versionchanged:: 3.0
+ SQLite paths are relative to ``app.instance_path``. It does not use
+ ``NullPool`` if ``pool_size`` is 0. Driver-level URIs are supported.
+
+ .. versionchanged:: 3.0
+ MySQL sets ``charset="utf8mb4". It does not set ``pool_size`` to 10. It
+ does not set ``pool_recycle`` if not using a queue pool.
+
+ .. versionchanged:: 3.0
+ Renamed from ``apply_driver_hacks``, this method is internal. It does not
+ return anything.
+
+ .. versionchanged:: 2.5
+ Returns ``(sa_url, options)``.
+ """
+ url = sa.engine.make_url(options["url"])
+
+ if url.drivername in {"sqlite", "sqlite+pysqlite"}:
+ if url.database is None or url.database in {"", ":memory:"}:
+ options["poolclass"] = sa.pool.StaticPool
+
+ if "connect_args" not in options:
+ options["connect_args"] = {}
+
+ options["connect_args"]["check_same_thread"] = False
+ else:
+ # the url might look like sqlite:///file:path?uri=true
+ is_uri = url.query.get("uri", False)
+
+ if is_uri:
+ db_str = url.database[5:]
+ else:
+ db_str = url.database
+
+ if not os.path.isabs(db_str):
+ os.makedirs(app.instance_path, exist_ok=True)
+ db_str = os.path.join(app.instance_path, db_str)
+
+ if is_uri:
+ db_str = f"file:{db_str}"
+
+ options["url"] = url.set(database=db_str)
+ elif url.drivername.startswith("mysql"):
+ # set queue defaults only when using queue pool
+ if (
+ "pool_class" not in options
+ or options["pool_class"] is sa.pool.QueuePool
+ ):
+ options.setdefault("pool_recycle", 7200)
+
+ if "charset" not in url.query:
+ options["url"] = url.update_query_dict({"charset": "utf8mb4"})
+
+ def _make_engine(
+ self, bind_key: str | None, options: dict[str, t.Any], app: Flask
+ ) -> sa.engine.Engine:
+ """Create the :class:`sqlalchemy.engine.Engine` for the given bind key and app.
+
+ To customize, use :data:`.SQLALCHEMY_ENGINE_OPTIONS` or
+ :data:`.SQLALCHEMY_BINDS` config. Pass ``engine_options`` to :class:`SQLAlchemy`
+ to set defaults for all engines.
+
+ This method is used for internal setup. Its signature may change at any time.
+
+ :meta private:
+
+ :param bind_key: The name of the engine being created.
+ :param options: Arguments passed to the engine.
+ :param app: The application that the engine configuration belongs to.
+
+ .. versionchanged:: 3.0
+ Renamed from ``create_engine``, this method is internal.
+ """
+ return sa.engine_from_config(options, prefix="")
+
+ @property
+ def metadata(self) -> sa.MetaData:
+ """The default metadata used by :attr:`Model` and :attr:`Table` if no bind key
+ is set.
+ """
+ return self.metadatas[None]
+
+ @property
+ def engines(self) -> t.Mapping[str | None, sa.engine.Engine]:
+ """Map of bind keys to :class:`sqlalchemy.engine.Engine` instances for current
+ application. The ``None`` key refers to the default engine, and is available as
+ :attr:`engine`.
+
+ To customize, set the :data:`.SQLALCHEMY_BINDS` config, and set defaults by
+ passing the ``engine_options`` parameter to the extension.
+
+ This requires that a Flask application context is active.
+
+ .. versionadded:: 3.0
+ """
+ app = current_app._get_current_object() # type: ignore[attr-defined]
+
+ if app not in self._app_engines:
+ raise RuntimeError(
+ "The current Flask app is not registered with this 'SQLAlchemy'"
+ " instance. Did you forget to call 'init_app', or did you create"
+ " multiple 'SQLAlchemy' instances?"
+ )
+
+ return self._app_engines[app]
+
+ @property
+ def engine(self) -> sa.engine.Engine:
+ """The default :class:`~sqlalchemy.engine.Engine` for the current application,
+ used by :attr:`session` if the :attr:`Model` or :attr:`Table` being queried does
+ not set a bind key.
+
+ To customize, set the :data:`.SQLALCHEMY_ENGINE_OPTIONS` config, and set
+ defaults by passing the ``engine_options`` parameter to the extension.
+
+ This requires that a Flask application context is active.
+ """
+ return self.engines[None]
+
+ def get_engine(
+ self, bind_key: str | None = None, **kwargs: t.Any
+ ) -> sa.engine.Engine:
+ """Get the engine for the given bind key for the current application.
+ This requires that a Flask application context is active.
+
+ :param bind_key: The name of the engine.
+
+ .. deprecated:: 3.0
+ Will be removed in Flask-SQLAlchemy 3.2. Use ``engines[key]`` instead.
+
+ .. versionchanged:: 3.0
+ Renamed the ``bind`` parameter to ``bind_key``. Removed the ``app``
+ parameter.
+ """
+ warnings.warn(
+ "'get_engine' is deprecated and will be removed in Flask-SQLAlchemy"
+ " 3.2. Use 'engine' or 'engines[key]' instead. If you're using"
+ " Flask-Migrate or Alembic, you'll need to update your 'env.py' file.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ if "bind" in kwargs:
+ bind_key = kwargs.pop("bind")
+
+ return self.engines[bind_key]
+
+ def get_or_404(
+ self,
+ entity: type[_O],
+ ident: t.Any,
+ *,
+ description: str | None = None,
+ **kwargs: t.Any,
+ ) -> _O:
+ """Like :meth:`session.get() ` but aborts with a
+ ``404 Not Found`` error instead of returning ``None``.
+
+ :param entity: The model class to query.
+ :param ident: The primary key to query.
+ :param description: A custom message to show on the error page.
+ :param kwargs: Extra arguments passed to ``session.get()``.
+
+ .. versionchanged:: 3.1
+ Pass extra keyword arguments to ``session.get()``.
+
+ .. versionadded:: 3.0
+ """
+ value = self.session.get(entity, ident, **kwargs)
+
+ if value is None:
+ abort(404, description=description)
+
+ return value
+
+ def first_or_404(
+ self, statement: sa.sql.Select[t.Any], *, description: str | None = None
+ ) -> t.Any:
+ """Like :meth:`Result.scalar() `, but aborts
+ with a ``404 Not Found`` error instead of returning ``None``.
+
+ :param statement: The ``select`` statement to execute.
+ :param description: A custom message to show on the error page.
+
+ .. versionadded:: 3.0
+ """
+ value = self.session.execute(statement).scalar()
+
+ if value is None:
+ abort(404, description=description)
+
+ return value
+
+ def one_or_404(
+ self, statement: sa.sql.Select[t.Any], *, description: str | None = None
+ ) -> t.Any:
+ """Like :meth:`Result.scalar_one() `,
+ but aborts with a ``404 Not Found`` error instead of raising ``NoResultFound``
+ or ``MultipleResultsFound``.
+
+ :param statement: The ``select`` statement to execute.
+ :param description: A custom message to show on the error page.
+
+ .. versionadded:: 3.0
+ """
+ try:
+ return self.session.execute(statement).scalar_one()
+ except (sa_exc.NoResultFound, sa_exc.MultipleResultsFound):
+ abort(404, description=description)
+
+ def paginate(
+ self,
+ select: sa.sql.Select[t.Any],
+ *,
+ page: int | None = None,
+ per_page: int | None = None,
+ max_per_page: int | None = None,
+ error_out: bool = True,
+ count: bool = True,
+ ) -> Pagination:
+ """Apply an offset and limit to a select statment based on the current page and
+ number of items per page, returning a :class:`.Pagination` object.
+
+ The statement should select a model class, like ``select(User)``. This applies
+ ``unique()`` and ``scalars()`` modifiers to the result, so compound selects will
+ not return the expected results.
+
+ :param select: The ``select`` statement to paginate.
+ :param page: The current page, used to calculate the offset. Defaults to the
+ ``page`` query arg during a request, or 1 otherwise.
+ :param per_page: The maximum number of items on a page, used to calculate the
+ offset and limit. Defaults to the ``per_page`` query arg during a request,
+ or 20 otherwise.
+ :param max_per_page: The maximum allowed value for ``per_page``, to limit a
+ user-provided value. Use ``None`` for no limit. Defaults to 100.
+ :param error_out: Abort with a ``404 Not Found`` error if no items are returned
+ and ``page`` is not 1, or if ``page`` or ``per_page`` is less than 1, or if
+ either is not an int.
+ :param count: Calculate the total number of values by issuing an extra count
+ query. For very complex queries this may be inaccurate or slow, so it can be
+ disabled and set manually if necessary.
+
+ .. versionchanged:: 3.0
+ The ``count`` query is more efficient.
+
+ .. versionadded:: 3.0
+ """
+ return SelectPagination(
+ select=select,
+ session=self.session(),
+ page=page,
+ per_page=per_page,
+ max_per_page=max_per_page,
+ error_out=error_out,
+ count=count,
+ )
+
+ def _call_for_binds(
+ self, bind_key: str | None | list[str | None], op_name: str
+ ) -> None:
+ """Call a method on each metadata.
+
+ :meta private:
+
+ :param bind_key: A bind key or list of keys. Defaults to all binds.
+ :param op_name: The name of the method to call.
+
+ .. versionchanged:: 3.0
+ Renamed from ``_execute_for_all_tables``.
+ """
+ if bind_key == "__all__":
+ keys: list[str | None] = list(self.metadatas)
+ elif bind_key is None or isinstance(bind_key, str):
+ keys = [bind_key]
+ else:
+ keys = bind_key
+
+ for key in keys:
+ try:
+ engine = self.engines[key]
+ except KeyError:
+ message = f"Bind key '{key}' is not in 'SQLALCHEMY_BINDS' config."
+
+ if key is None:
+ message = f"'SQLALCHEMY_DATABASE_URI' config is not set. {message}"
+
+ raise sa_exc.UnboundExecutionError(message) from None
+
+ metadata = self.metadatas[key]
+ getattr(metadata, op_name)(bind=engine)
+
+ def create_all(self, bind_key: str | None | list[str | None] = "__all__") -> None:
+ """Create tables that do not exist in the database by calling
+ ``metadata.create_all()`` for all or some bind keys. This does not
+ update existing tables, use a migration library for that.
+
+ This requires that a Flask application context is active.
+
+ :param bind_key: A bind key or list of keys to create the tables for. Defaults
+ to all binds.
+
+ .. versionchanged:: 3.0
+ Renamed the ``bind`` parameter to ``bind_key``. Removed the ``app``
+ parameter.
+
+ .. versionchanged:: 0.12
+ Added the ``bind`` and ``app`` parameters.
+ """
+ self._call_for_binds(bind_key, "create_all")
+
+ def drop_all(self, bind_key: str | None | list[str | None] = "__all__") -> None:
+ """Drop tables by calling ``metadata.drop_all()`` for all or some bind keys.
+
+ This requires that a Flask application context is active.
+
+ :param bind_key: A bind key or list of keys to drop the tables from. Defaults to
+ all binds.
+
+ .. versionchanged:: 3.0
+ Renamed the ``bind`` parameter to ``bind_key``. Removed the ``app``
+ parameter.
+
+ .. versionchanged:: 0.12
+ Added the ``bind`` and ``app`` parameters.
+ """
+ self._call_for_binds(bind_key, "drop_all")
+
+ def reflect(self, bind_key: str | None | list[str | None] = "__all__") -> None:
+ """Load table definitions from the database by calling ``metadata.reflect()``
+ for all or some bind keys.
+
+ This requires that a Flask application context is active.
+
+ :param bind_key: A bind key or list of keys to reflect the tables from. Defaults
+ to all binds.
+
+ .. versionchanged:: 3.0
+ Renamed the ``bind`` parameter to ``bind_key``. Removed the ``app``
+ parameter.
+
+ .. versionchanged:: 0.12
+ Added the ``bind`` and ``app`` parameters.
+ """
+ self._call_for_binds(bind_key, "reflect")
+
+ def _set_rel_query(self, kwargs: dict[str, t.Any]) -> None:
+ """Apply the extension's :attr:`Query` class as the default for relationships
+ and backrefs.
+
+ :meta private:
+ """
+ kwargs.setdefault("query_class", self.Query)
+
+ if "backref" in kwargs:
+ backref = kwargs["backref"]
+
+ if isinstance(backref, str):
+ backref = (backref, {})
+
+ backref[1].setdefault("query_class", self.Query)
+
+ def relationship(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> sa_orm.RelationshipProperty[t.Any]:
+ """A :func:`sqlalchemy.orm.relationship` that applies this extension's
+ :attr:`Query` class for dynamic relationships and backrefs.
+
+ .. versionchanged:: 3.0
+ The :attr:`Query` class is set on ``backref``.
+ """
+ self._set_rel_query(kwargs)
+ return sa_orm.relationship(*args, **kwargs)
+
+ def dynamic_loader(
+ self, argument: t.Any, **kwargs: t.Any
+ ) -> sa_orm.RelationshipProperty[t.Any]:
+ """A :func:`sqlalchemy.orm.dynamic_loader` that applies this extension's
+ :attr:`Query` class for relationships and backrefs.
+
+ .. versionchanged:: 3.0
+ The :attr:`Query` class is set on ``backref``.
+ """
+ self._set_rel_query(kwargs)
+ return sa_orm.dynamic_loader(argument, **kwargs)
+
+ def _relation(
+ self, *args: t.Any, **kwargs: t.Any
+ ) -> sa_orm.RelationshipProperty[t.Any]:
+ """A :func:`sqlalchemy.orm.relationship` that applies this extension's
+ :attr:`Query` class for dynamic relationships and backrefs.
+
+ SQLAlchemy 2.0 removes this name, use ``relationship`` instead.
+
+ :meta private:
+
+ .. versionchanged:: 3.0
+ The :attr:`Query` class is set on ``backref``.
+ """
+ self._set_rel_query(kwargs)
+ f = sa_orm.relationship
+ return f(*args, **kwargs)
+
+ def __getattr__(self, name: str) -> t.Any:
+ if name == "relation":
+ return self._relation
+
+ if name == "event":
+ return sa_event
+
+ if name.startswith("_"):
+ raise AttributeError(name)
+
+ for mod in (sa, sa_orm):
+ if hasattr(mod, name):
+ return getattr(mod, name)
+
+ raise AttributeError(name)
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/model.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/model.py
new file mode 100644
index 0000000..c6f9e5a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/model.py
@@ -0,0 +1,330 @@
+from __future__ import annotations
+
+import re
+import typing as t
+
+import sqlalchemy as sa
+import sqlalchemy.orm as sa_orm
+
+from .query import Query
+
+if t.TYPE_CHECKING:
+ from .extension import SQLAlchemy
+
+
+class _QueryProperty:
+ """A class property that creates a query object for a model.
+
+ :meta private:
+ """
+
+ def __get__(self, obj: Model | None, cls: type[Model]) -> Query:
+ return cls.query_class(
+ cls, session=cls.__fsa__.session() # type: ignore[arg-type]
+ )
+
+
+class Model:
+ """The base class of the :attr:`.SQLAlchemy.Model` declarative model class.
+
+ To define models, subclass :attr:`db.Model <.SQLAlchemy.Model>`, not this. To
+ customize ``db.Model``, subclass this and pass it as ``model_class`` to
+ :class:`.SQLAlchemy`. To customize ``db.Model`` at the metaclass level, pass an
+ already created declarative model class as ``model_class``.
+ """
+
+ __fsa__: t.ClassVar[SQLAlchemy]
+ """Internal reference to the extension object.
+
+ :meta private:
+ """
+
+ query_class: t.ClassVar[type[Query]] = Query
+ """Query class used by :attr:`query`. Defaults to :attr:`.SQLAlchemy.Query`, which
+ defaults to :class:`.Query`.
+ """
+
+ query: t.ClassVar[Query] = _QueryProperty() # type: ignore[assignment]
+ """A SQLAlchemy query for a model. Equivalent to ``db.session.query(Model)``. Can be
+ customized per-model by overriding :attr:`query_class`.
+
+ .. warning::
+ The query interface is considered legacy in SQLAlchemy. Prefer using
+ ``session.execute(select())`` instead.
+ """
+
+ def __repr__(self) -> str:
+ state = sa.inspect(self)
+ assert state is not None
+
+ if state.transient:
+ pk = f"(transient {id(self)})"
+ elif state.pending:
+ pk = f"(pending {id(self)})"
+ else:
+ pk = ", ".join(map(str, state.identity))
+
+ return f"<{type(self).__name__} {pk}>"
+
+
+class BindMetaMixin(type):
+ """Metaclass mixin that sets a model's ``metadata`` based on its ``__bind_key__``.
+
+ If the model sets ``metadata`` or ``__table__`` directly, ``__bind_key__`` is
+ ignored. If the ``metadata`` is the same as the parent model, it will not be set
+ directly on the child model.
+ """
+
+ __fsa__: SQLAlchemy
+ metadata: sa.MetaData
+
+ def __init__(
+ cls, name: str, bases: tuple[type, ...], d: dict[str, t.Any], **kwargs: t.Any
+ ) -> None:
+ if not ("metadata" in cls.__dict__ or "__table__" in cls.__dict__):
+ bind_key = getattr(cls, "__bind_key__", None)
+ parent_metadata = getattr(cls, "metadata", None)
+ metadata = cls.__fsa__._make_metadata(bind_key)
+
+ if metadata is not parent_metadata:
+ cls.metadata = metadata
+
+ super().__init__(name, bases, d, **kwargs)
+
+
+class BindMixin:
+ """DeclarativeBase mixin to set a model's ``metadata`` based on ``__bind_key__``.
+
+ If no ``__bind_key__`` is specified, the model will use the default metadata
+ provided by ``DeclarativeBase`` or ``DeclarativeBaseNoMeta``.
+ If the model doesn't set ``metadata`` or ``__table__`` directly
+ and does set ``__bind_key__``, the model will use the metadata
+ for the specified bind key.
+ If the ``metadata`` is the same as the parent model, it will not be set
+ directly on the child model.
+
+ .. versionchanged:: 3.1.0
+ """
+
+ __fsa__: SQLAlchemy
+ metadata: sa.MetaData
+
+ @classmethod
+ def __init_subclass__(cls: t.Type[BindMixin], **kwargs: t.Dict[str, t.Any]) -> None:
+ if not ("metadata" in cls.__dict__ or "__table__" in cls.__dict__) and hasattr(
+ cls, "__bind_key__"
+ ):
+ bind_key = getattr(cls, "__bind_key__", None)
+ parent_metadata = getattr(cls, "metadata", None)
+ metadata = cls.__fsa__._make_metadata(bind_key)
+
+ if metadata is not parent_metadata:
+ cls.metadata = metadata
+
+ super().__init_subclass__(**kwargs)
+
+
+class NameMetaMixin(type):
+ """Metaclass mixin that sets a model's ``__tablename__`` by converting the
+ ``CamelCase`` class name to ``snake_case``. A name is set for non-abstract models
+ that do not otherwise define ``__tablename__``. If a model does not define a primary
+ key, it will not generate a name or ``__table__``, for single-table inheritance.
+ """
+
+ metadata: sa.MetaData
+ __tablename__: str
+ __table__: sa.Table
+
+ def __init__(
+ cls, name: str, bases: tuple[type, ...], d: dict[str, t.Any], **kwargs: t.Any
+ ) -> None:
+ if should_set_tablename(cls):
+ cls.__tablename__ = camel_to_snake_case(cls.__name__)
+
+ super().__init__(name, bases, d, **kwargs)
+
+ # __table_cls__ has run. If no table was created, use the parent table.
+ if (
+ "__tablename__" not in cls.__dict__
+ and "__table__" in cls.__dict__
+ and cls.__dict__["__table__"] is None
+ ):
+ del cls.__table__
+
+ def __table_cls__(cls, *args: t.Any, **kwargs: t.Any) -> sa.Table | None:
+ """This is called by SQLAlchemy during mapper setup. It determines the final
+ table object that the model will use.
+
+ If no primary key is found, that indicates single-table inheritance, so no table
+ will be created and ``__tablename__`` will be unset.
+ """
+ schema = kwargs.get("schema")
+
+ if schema is None:
+ key = args[0]
+ else:
+ key = f"{schema}.{args[0]}"
+
+ # Check if a table with this name already exists. Allows reflected tables to be
+ # applied to models by name.
+ if key in cls.metadata.tables:
+ return sa.Table(*args, **kwargs)
+
+ # If a primary key is found, create a table for joined-table inheritance.
+ for arg in args:
+ if (isinstance(arg, sa.Column) and arg.primary_key) or isinstance(
+ arg, sa.PrimaryKeyConstraint
+ ):
+ return sa.Table(*args, **kwargs)
+
+ # If no base classes define a table, return one that's missing a primary key
+ # so SQLAlchemy shows the correct error.
+ for base in cls.__mro__[1:-1]:
+ if "__table__" in base.__dict__:
+ break
+ else:
+ return sa.Table(*args, **kwargs)
+
+ # Single-table inheritance, use the parent table name. __init__ will unset
+ # __table__ based on this.
+ if "__tablename__" in cls.__dict__:
+ del cls.__tablename__
+
+ return None
+
+
+class NameMixin:
+ """DeclarativeBase mixin that sets a model's ``__tablename__`` by converting the
+ ``CamelCase`` class name to ``snake_case``. A name is set for non-abstract models
+ that do not otherwise define ``__tablename__``. If a model does not define a primary
+ key, it will not generate a name or ``__table__``, for single-table inheritance.
+
+ .. versionchanged:: 3.1.0
+ """
+
+ metadata: sa.MetaData
+ __tablename__: str
+ __table__: sa.Table
+
+ @classmethod
+ def __init_subclass__(cls: t.Type[NameMixin], **kwargs: t.Dict[str, t.Any]) -> None:
+ if should_set_tablename(cls):
+ cls.__tablename__ = camel_to_snake_case(cls.__name__)
+
+ super().__init_subclass__(**kwargs)
+
+ # __table_cls__ has run. If no table was created, use the parent table.
+ if (
+ "__tablename__" not in cls.__dict__
+ and "__table__" in cls.__dict__
+ and cls.__dict__["__table__"] is None
+ ):
+ del cls.__table__
+
+ @classmethod
+ def __table_cls__(cls, *args: t.Any, **kwargs: t.Any) -> sa.Table | None:
+ """This is called by SQLAlchemy during mapper setup. It determines the final
+ table object that the model will use.
+
+ If no primary key is found, that indicates single-table inheritance, so no table
+ will be created and ``__tablename__`` will be unset.
+ """
+ schema = kwargs.get("schema")
+
+ if schema is None:
+ key = args[0]
+ else:
+ key = f"{schema}.{args[0]}"
+
+ # Check if a table with this name already exists. Allows reflected tables to be
+ # applied to models by name.
+ if key in cls.metadata.tables:
+ return sa.Table(*args, **kwargs)
+
+ # If a primary key is found, create a table for joined-table inheritance.
+ for arg in args:
+ if (isinstance(arg, sa.Column) and arg.primary_key) or isinstance(
+ arg, sa.PrimaryKeyConstraint
+ ):
+ return sa.Table(*args, **kwargs)
+
+ # If no base classes define a table, return one that's missing a primary key
+ # so SQLAlchemy shows the correct error.
+ for base in cls.__mro__[1:-1]:
+ if "__table__" in base.__dict__:
+ break
+ else:
+ return sa.Table(*args, **kwargs)
+
+ # Single-table inheritance, use the parent table name. __init__ will unset
+ # __table__ based on this.
+ if "__tablename__" in cls.__dict__:
+ del cls.__tablename__
+
+ return None
+
+
+def should_set_tablename(cls: type) -> bool:
+ """Determine whether ``__tablename__`` should be generated for a model.
+
+ - If no class in the MRO sets a name, one should be generated.
+ - If a declared attr is found, it should be used instead.
+ - If a name is found, it should be used if the class is a mixin, otherwise one
+ should be generated.
+ - Abstract models should not have one generated.
+
+ Later, ``__table_cls__`` will determine if the model looks like single or
+ joined-table inheritance. If no primary key is found, the name will be unset.
+ """
+ if (
+ cls.__dict__.get("__abstract__", False)
+ or (
+ not issubclass(cls, (sa_orm.DeclarativeBase, sa_orm.DeclarativeBaseNoMeta))
+ and not any(isinstance(b, sa_orm.DeclarativeMeta) for b in cls.__mro__[1:])
+ )
+ or any(
+ (b is sa_orm.DeclarativeBase or b is sa_orm.DeclarativeBaseNoMeta)
+ for b in cls.__bases__
+ )
+ ):
+ return False
+
+ for base in cls.__mro__:
+ if "__tablename__" not in base.__dict__:
+ continue
+
+ if isinstance(base.__dict__["__tablename__"], sa_orm.declared_attr):
+ return False
+
+ return not (
+ base is cls
+ or base.__dict__.get("__abstract__", False)
+ or not (
+ # SQLAlchemy 1.x
+ isinstance(base, sa_orm.DeclarativeMeta)
+            # 2.x: DeclarativeBase uses this as its metaclass
+ or isinstance(base, sa_orm.decl_api.DeclarativeAttributeIntercept)
+ # 2.x: DeclarativeBaseNoMeta doesn't use a metaclass
+ or issubclass(base, sa_orm.DeclarativeBaseNoMeta)
+ )
+ )
+
+ return True
+
+
+def camel_to_snake_case(name: str) -> str:
+ """Convert a ``CamelCase`` name to ``snake_case``."""
+ name = re.sub(r"((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))", r"_\1", name)
+ return name.lower().lstrip("_")
+
+
+class DefaultMeta(BindMetaMixin, NameMetaMixin, sa_orm.DeclarativeMeta):
+ """SQLAlchemy declarative metaclass that provides ``__bind_key__`` and
+ ``__tablename__`` support.
+ """
+
+
+class DefaultMetaNoName(BindMetaMixin, sa_orm.DeclarativeMeta):
+ """SQLAlchemy declarative metaclass that provides ``__bind_key__`` and
+ ``__tablename__`` support.
+ """
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/pagination.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/pagination.py
new file mode 100644
index 0000000..3d49d6e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/pagination.py
@@ -0,0 +1,364 @@
+from __future__ import annotations
+
+import typing as t
+from math import ceil
+
+import sqlalchemy as sa
+import sqlalchemy.orm as sa_orm
+from flask import abort
+from flask import request
+
+
+class Pagination:
+ """Apply an offset and limit to the query based on the current page and number of
+ items per page.
+
+ Don't create pagination objects manually. They are created by
+ :meth:`.SQLAlchemy.paginate` and :meth:`.Query.paginate`.
+
+ This is a base class, a subclass must implement :meth:`_query_items` and
+ :meth:`_query_count`. Those methods will use arguments passed as ``kwargs`` to
+ perform the queries.
+
+ :param page: The current page, used to calculate the offset. Defaults to the
+ ``page`` query arg during a request, or 1 otherwise.
+ :param per_page: The maximum number of items on a page, used to calculate the
+ offset and limit. Defaults to the ``per_page`` query arg during a request,
+ or 20 otherwise.
+ :param max_per_page: The maximum allowed value for ``per_page``, to limit a
+ user-provided value. Use ``None`` for no limit. Defaults to 100.
+ :param error_out: Abort with a ``404 Not Found`` error if no items are returned
+ and ``page`` is not 1, or if ``page`` or ``per_page`` is less than 1, or if
+        either is not an int.
+ :param count: Calculate the total number of values by issuing an extra count
+ query. For very complex queries this may be inaccurate or slow, so it can be
+ disabled and set manually if necessary.
+ :param kwargs: Information about the query to paginate. Different subclasses will
+ require different arguments.
+
+ .. versionchanged:: 3.0
+ Iterating over a pagination object iterates over its items.
+
+ .. versionchanged:: 3.0
+ Creating instances manually is not a public API.
+ """
+
+ def __init__(
+ self,
+ page: int | None = None,
+ per_page: int | None = None,
+ max_per_page: int | None = 100,
+ error_out: bool = True,
+ count: bool = True,
+ **kwargs: t.Any,
+ ) -> None:
+ self._query_args = kwargs
+ page, per_page = self._prepare_page_args(
+ page=page,
+ per_page=per_page,
+ max_per_page=max_per_page,
+ error_out=error_out,
+ )
+
+ self.page: int = page
+ """The current page."""
+
+ self.per_page: int = per_page
+ """The maximum number of items on a page."""
+
+ self.max_per_page: int | None = max_per_page
+ """The maximum allowed value for ``per_page``."""
+
+ items = self._query_items()
+
+ if not items and page != 1 and error_out:
+ abort(404)
+
+ self.items: list[t.Any] = items
+ """The items on the current page. Iterating over the pagination object is
+ equivalent to iterating over the items.
+ """
+
+ if count:
+ total = self._query_count()
+ else:
+ total = None
+
+ self.total: int | None = total
+ """The total number of items across all pages."""
+
+ @staticmethod
+ def _prepare_page_args(
+ *,
+ page: int | None = None,
+ per_page: int | None = None,
+ max_per_page: int | None = None,
+ error_out: bool = True,
+ ) -> tuple[int, int]:
+ if request:
+ if page is None:
+ try:
+ page = int(request.args.get("page", 1))
+ except (TypeError, ValueError):
+ if error_out:
+ abort(404)
+
+ page = 1
+
+ if per_page is None:
+ try:
+ per_page = int(request.args.get("per_page", 20))
+ except (TypeError, ValueError):
+ if error_out:
+ abort(404)
+
+ per_page = 20
+ else:
+ if page is None:
+ page = 1
+
+ if per_page is None:
+ per_page = 20
+
+ if max_per_page is not None:
+ per_page = min(per_page, max_per_page)
+
+ if page < 1:
+ if error_out:
+ abort(404)
+ else:
+ page = 1
+
+ if per_page < 1:
+ if error_out:
+ abort(404)
+ else:
+ per_page = 20
+
+ return page, per_page
+
+ @property
+ def _query_offset(self) -> int:
+ """The index of the first item to query, passed to ``offset()``.
+
+ :meta private:
+
+ .. versionadded:: 3.0
+ """
+ return (self.page - 1) * self.per_page
+
+ def _query_items(self) -> list[t.Any]:
+ """Execute the query to get the items on the current page.
+
+ Uses init arguments stored in :attr:`_query_args`.
+
+ :meta private:
+
+ .. versionadded:: 3.0
+ """
+ raise NotImplementedError
+
+ def _query_count(self) -> int:
+ """Execute the query to get the total number of items.
+
+ Uses init arguments stored in :attr:`_query_args`.
+
+ :meta private:
+
+ .. versionadded:: 3.0
+ """
+ raise NotImplementedError
+
+ @property
+ def first(self) -> int:
+ """The number of the first item on the page, starting from 1, or 0 if there are
+ no items.
+
+ .. versionadded:: 3.0
+ """
+ if len(self.items) == 0:
+ return 0
+
+ return (self.page - 1) * self.per_page + 1
+
+ @property
+ def last(self) -> int:
+ """The number of the last item on the page, starting from 1, inclusive, or 0 if
+ there are no items.
+
+ .. versionadded:: 3.0
+ """
+ first = self.first
+ return max(first, first + len(self.items) - 1)
+
+ @property
+ def pages(self) -> int:
+ """The total number of pages."""
+ if self.total == 0 or self.total is None:
+ return 0
+
+ return ceil(self.total / self.per_page)
+
+ @property
+ def has_prev(self) -> bool:
+ """``True`` if this is not the first page."""
+ return self.page > 1
+
+ @property
+ def prev_num(self) -> int | None:
+ """The previous page number, or ``None`` if this is the first page."""
+ if not self.has_prev:
+ return None
+
+ return self.page - 1
+
+ def prev(self, *, error_out: bool = False) -> Pagination:
+ """Query the :class:`Pagination` object for the previous page.
+
+ :param error_out: Abort with a ``404 Not Found`` error if no items are returned
+ and ``page`` is not 1, or if ``page`` or ``per_page`` is less than 1, or if
+            either is not an int.
+ """
+ p = type(self)(
+ page=self.page - 1,
+ per_page=self.per_page,
+ error_out=error_out,
+ count=False,
+ **self._query_args,
+ )
+ p.total = self.total
+ return p
+
+ @property
+ def has_next(self) -> bool:
+ """``True`` if this is not the last page."""
+ return self.page < self.pages
+
+ @property
+ def next_num(self) -> int | None:
+ """The next page number, or ``None`` if this is the last page."""
+ if not self.has_next:
+ return None
+
+ return self.page + 1
+
+ def next(self, *, error_out: bool = False) -> Pagination:
+ """Query the :class:`Pagination` object for the next page.
+
+ :param error_out: Abort with a ``404 Not Found`` error if no items are returned
+ and ``page`` is not 1, or if ``page`` or ``per_page`` is less than 1, or if
+            either is not an int.
+ """
+ p = type(self)(
+ page=self.page + 1,
+ per_page=self.per_page,
+ max_per_page=self.max_per_page,
+ error_out=error_out,
+ count=False,
+ **self._query_args,
+ )
+ p.total = self.total
+ return p
+
+ def iter_pages(
+ self,
+ *,
+ left_edge: int = 2,
+ left_current: int = 2,
+ right_current: int = 4,
+ right_edge: int = 2,
+ ) -> t.Iterator[int | None]:
+ """Yield page numbers for a pagination widget. Skipped pages between the edges
+ and middle are represented by a ``None``.
+
+ For example, if there are 20 pages and the current page is 7, the following
+ values are yielded.
+
+ .. code-block:: python
+
+ 1, 2, None, 5, 6, 7, 8, 9, 10, 11, None, 19, 20
+
+ :param left_edge: How many pages to show from the first page.
+ :param left_current: How many pages to show left of the current page.
+ :param right_current: How many pages to show right of the current page.
+ :param right_edge: How many pages to show from the last page.
+
+ .. versionchanged:: 3.0
+ Improved efficiency of calculating what to yield.
+
+ .. versionchanged:: 3.0
+ ``right_current`` boundary is inclusive.
+
+ .. versionchanged:: 3.0
+ All parameters are keyword-only.
+ """
+ pages_end = self.pages + 1
+
+ if pages_end == 1:
+ return
+
+ left_end = min(1 + left_edge, pages_end)
+ yield from range(1, left_end)
+
+ if left_end == pages_end:
+ return
+
+ mid_start = max(left_end, self.page - left_current)
+ mid_end = min(self.page + right_current + 1, pages_end)
+
+ if mid_start - left_end > 0:
+ yield None
+
+ yield from range(mid_start, mid_end)
+
+ if mid_end == pages_end:
+ return
+
+ right_start = max(mid_end, pages_end - right_edge)
+
+ if right_start - mid_end > 0:
+ yield None
+
+ yield from range(right_start, pages_end)
+
+ def __iter__(self) -> t.Iterator[t.Any]:
+ yield from self.items
+
+
+class SelectPagination(Pagination):
+ """Returned by :meth:`.SQLAlchemy.paginate`. Takes ``select`` and ``session``
+ arguments in addition to the :class:`Pagination` arguments.
+
+ .. versionadded:: 3.0
+ """
+
+ def _query_items(self) -> list[t.Any]:
+ select = self._query_args["select"]
+ select = select.limit(self.per_page).offset(self._query_offset)
+ session = self._query_args["session"]
+ return list(session.execute(select).unique().scalars())
+
+ def _query_count(self) -> int:
+ select = self._query_args["select"]
+ sub = select.options(sa_orm.lazyload("*")).order_by(None).subquery()
+ session = self._query_args["session"]
+ out = session.execute(sa.select(sa.func.count()).select_from(sub)).scalar()
+ return out # type: ignore[no-any-return]
+
+
+class QueryPagination(Pagination):
+ """Returned by :meth:`.Query.paginate`. Takes a ``query`` argument in addition to
+ the :class:`Pagination` arguments.
+
+ .. versionadded:: 3.0
+ """
+
+ def _query_items(self) -> list[t.Any]:
+ query = self._query_args["query"]
+ out = query.limit(self.per_page).offset(self._query_offset).all()
+ return out # type: ignore[no-any-return]
+
+ def _query_count(self) -> int:
+ # Query.count automatically disables eager loads
+ out = self._query_args["query"].order_by(None).count()
+ return out # type: ignore[no-any-return]
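
In practice the two subclasses above are reached through ``db.paginate`` and
``Query.paginate``. A sketch of the resulting object, assuming the public
API; the ``User`` model and in-memory database are illustrative:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)

with app.app_context():
    db.create_all()
    db.session.add_all([User(name=f"user{i}") for i in range(45)])
    db.session.commit()

    # SelectPagination: page/per_page would normally come from the request
    # query string; they are passed explicitly here.
    page = db.paginate(db.select(User).order_by(User.id), page=2, per_page=20)
    assert (page.total, page.pages) == (45, 3)
    assert (page.first, page.last) == (21, 40)
    assert page.has_prev and page.has_next
    assert list(page.iter_pages()) == [1, 2, 3]
    names = [user.name for user in page]  # iterating yields page.items
```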
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/py.typed b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/py.typed
new file mode 100644
index 0000000..e69de29
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/query.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/query.py
new file mode 100644
index 0000000..35f927d
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/query.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import typing as t
+
+import sqlalchemy.exc as sa_exc
+import sqlalchemy.orm as sa_orm
+from flask import abort
+
+from .pagination import Pagination
+from .pagination import QueryPagination
+
+
+class Query(sa_orm.Query): # type: ignore[type-arg]
+ """SQLAlchemy :class:`~sqlalchemy.orm.query.Query` subclass with some extra methods
+ useful for querying in a web application.
+
+ This is the default query class for :attr:`.Model.query`.
+
+ .. versionchanged:: 3.0
+ Renamed to ``Query`` from ``BaseQuery``.
+ """
+
+ def get_or_404(self, ident: t.Any, description: str | None = None) -> t.Any:
+ """Like :meth:`~sqlalchemy.orm.Query.get` but aborts with a ``404 Not Found``
+ error instead of returning ``None``.
+
+ :param ident: The primary key to query.
+ :param description: A custom message to show on the error page.
+ """
+ rv = self.get(ident)
+
+ if rv is None:
+ abort(404, description=description)
+
+ return rv
+
+ def first_or_404(self, description: str | None = None) -> t.Any:
+ """Like :meth:`~sqlalchemy.orm.Query.first` but aborts with a ``404 Not Found``
+ error instead of returning ``None``.
+
+ :param description: A custom message to show on the error page.
+ """
+ rv = self.first()
+
+ if rv is None:
+ abort(404, description=description)
+
+ return rv
+
+ def one_or_404(self, description: str | None = None) -> t.Any:
+ """Like :meth:`~sqlalchemy.orm.Query.one` but aborts with a ``404 Not Found``
+ error instead of raising ``NoResultFound`` or ``MultipleResultsFound``.
+
+ :param description: A custom message to show on the error page.
+
+ .. versionadded:: 3.0
+ """
+ try:
+ return self.one()
+ except (sa_exc.NoResultFound, sa_exc.MultipleResultsFound):
+ abort(404, description=description)
+
+ def paginate(
+ self,
+ *,
+ page: int | None = None,
+ per_page: int | None = None,
+ max_per_page: int | None = None,
+ error_out: bool = True,
+ count: bool = True,
+ ) -> Pagination:
+ """Apply an offset and limit to the query based on the current page and number
+ of items per page, returning a :class:`.Pagination` object.
+
+ :param page: The current page, used to calculate the offset. Defaults to the
+ ``page`` query arg during a request, or 1 otherwise.
+ :param per_page: The maximum number of items on a page, used to calculate the
+ offset and limit. Defaults to the ``per_page`` query arg during a request,
+ or 20 otherwise.
+ :param max_per_page: The maximum allowed value for ``per_page``, to limit a
+ user-provided value. Use ``None`` for no limit. Defaults to 100.
+ :param error_out: Abort with a ``404 Not Found`` error if no items are returned
+ and ``page`` is not 1, or if ``page`` or ``per_page`` is less than 1, or if
+            either is not an int.
+ :param count: Calculate the total number of values by issuing an extra count
+ query. For very complex queries this may be inaccurate or slow, so it can be
+ disabled and set manually if necessary.
+
+ .. versionchanged:: 3.0
+ All parameters are keyword-only.
+
+ .. versionchanged:: 3.0
+ The ``count`` query is more efficient.
+
+ .. versionchanged:: 3.0
+ ``max_per_page`` defaults to 100.
+ """
+ return QueryPagination(
+ query=self,
+ page=page,
+ per_page=per_page,
+ max_per_page=max_per_page,
+ error_out=error_out,
+ count=count,
+ )
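
A sketch of these helpers in view functions, assuming the legacy
``Model.query`` interface; the routes and messages are illustrative:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
db = SQLAlchemy(app)

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True)

@app.route("/user/<int:user_id>")
def show_user(user_id):
    # Aborts with 404 instead of returning None for an unknown id.
    user = User.query.get_or_404(user_id, description="No such user.")
    return user.name

@app.route("/users")
def list_users():
    # QueryPagination applies LIMIT/OFFSET from ?page= and ?per_page=.
    pagination = User.query.order_by(User.id).paginate(max_per_page=50)
    return {"items": [u.name for u in pagination], "total": pagination.total}
```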
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/record_queries.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/record_queries.py
new file mode 100644
index 0000000..e8273be
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/record_queries.py
@@ -0,0 +1,117 @@
+from __future__ import annotations
+
+import dataclasses
+import inspect
+import typing as t
+from time import perf_counter
+
+import sqlalchemy as sa
+import sqlalchemy.event as sa_event
+from flask import current_app
+from flask import g
+from flask import has_app_context
+
+
+def get_recorded_queries() -> list[_QueryInfo]:
+ """Get the list of recorded query information for the current session. Queries are
+ recorded if the config :data:`.SQLALCHEMY_RECORD_QUERIES` is enabled.
+
+ Each query info object has the following attributes:
+
+ ``statement``
+ The string of SQL generated by SQLAlchemy with parameter placeholders.
+ ``parameters``
+ The parameters sent with the SQL statement.
+ ``start_time`` / ``end_time``
+        Timing info about when the query started execution and when the results were
+        returned. Accuracy and value depend on the operating system.
+ ``duration``
+ The time the query took in seconds.
+ ``location``
+ A string description of where in your application code the query was executed.
+ This may not be possible to calculate, and the format is not stable.
+
+ .. versionchanged:: 3.0
+ Renamed from ``get_debug_queries``.
+
+ .. versionchanged:: 3.0
+ The info object is a dataclass instead of a tuple.
+
+ .. versionchanged:: 3.0
+ The info object attribute ``context`` is renamed to ``location``.
+
+ .. versionchanged:: 3.0
+ Not enabled automatically in debug or testing mode.
+ """
+ return g.get("_sqlalchemy_queries", []) # type: ignore[no-any-return]
+
+
+@dataclasses.dataclass
+class _QueryInfo:
+ """Information about an executed query. Returned by :func:`get_recorded_queries`.
+
+ .. versionchanged:: 3.0
+ Renamed from ``_DebugQueryTuple``.
+
+ .. versionchanged:: 3.0
+ Changed to a dataclass instead of a tuple.
+
+ .. versionchanged:: 3.0
+ ``context`` is renamed to ``location``.
+ """
+
+ statement: str | None
+ parameters: t.Any
+ start_time: float
+ end_time: float
+ location: str
+
+ @property
+ def duration(self) -> float:
+ return self.end_time - self.start_time
+
+
+def _listen(engine: sa.engine.Engine) -> None:
+ sa_event.listen(engine, "before_cursor_execute", _record_start, named=True)
+ sa_event.listen(engine, "after_cursor_execute", _record_end, named=True)
+
+
+def _record_start(context: sa.engine.ExecutionContext, **kwargs: t.Any) -> None:
+ if not has_app_context():
+ return
+
+ context._fsa_start_time = perf_counter() # type: ignore[attr-defined]
+
+
+def _record_end(context: sa.engine.ExecutionContext, **kwargs: t.Any) -> None:
+ if not has_app_context():
+ return
+
+ if "_sqlalchemy_queries" not in g:
+ g._sqlalchemy_queries = []
+
+ import_top = current_app.import_name.partition(".")[0]
+ import_dot = f"{import_top}."
+ frame = inspect.currentframe()
+
+ while frame:
+ name = frame.f_globals.get("__name__")
+
+ if name and (name == import_top or name.startswith(import_dot)):
+ code = frame.f_code
+ location = f"{code.co_filename}:{frame.f_lineno} ({code.co_name})"
+ break
+
+ frame = frame.f_back
+ else:
+ location = ""
+
+ g._sqlalchemy_queries.append(
+ _QueryInfo(
+ statement=context.statement,
+ parameters=context.parameters,
+ start_time=context._fsa_start_time, # type: ignore[attr-defined]
+ end_time=perf_counter(),
+ location=location,
+ )
+ )
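
A sketch of how this module is typically consumed, assuming the public
config key and accessor; the threshold and logging are illustrative:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy.record_queries import get_recorded_queries

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
app.config["SQLALCHEMY_RECORD_QUERIES"] = True
db = SQLAlchemy(app)

@app.after_request
def log_slow_queries(response):
    for info in get_recorded_queries():
        if info.duration > 0.5:  # seconds; the threshold is illustrative
            app.logger.warning(
                "slow query (%.3fs) at %s: %s",
                info.duration, info.location, info.statement,
            )
    return response
```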
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/session.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/session.py
new file mode 100644
index 0000000..631fffa
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/session.py
@@ -0,0 +1,111 @@
+from __future__ import annotations
+
+import typing as t
+
+import sqlalchemy as sa
+import sqlalchemy.exc as sa_exc
+import sqlalchemy.orm as sa_orm
+from flask.globals import app_ctx
+
+if t.TYPE_CHECKING:
+ from .extension import SQLAlchemy
+
+
+class Session(sa_orm.Session):
+ """A SQLAlchemy :class:`~sqlalchemy.orm.Session` class that chooses what engine to
+ use based on the bind key associated with the metadata associated with the thing
+ being queried.
+
+ To customize ``db.session``, subclass this and pass it as the ``class_`` key in the
+ ``session_options`` to :class:`.SQLAlchemy`.
+
+ .. versionchanged:: 3.0
+ Renamed from ``SignallingSession``.
+ """
+
+ def __init__(self, db: SQLAlchemy, **kwargs: t.Any) -> None:
+ super().__init__(**kwargs)
+ self._db = db
+ self._model_changes: dict[object, tuple[t.Any, str]] = {}
+
+ def get_bind(
+ self,
+ mapper: t.Any | None = None,
+ clause: t.Any | None = None,
+ bind: sa.engine.Engine | sa.engine.Connection | None = None,
+ **kwargs: t.Any,
+ ) -> sa.engine.Engine | sa.engine.Connection:
+ """Select an engine based on the ``bind_key`` of the metadata associated with
+ the model or table being queried. If no bind key is set, uses the default bind.
+
+ .. versionchanged:: 3.0.3
+ Fix finding the bind for a joined inheritance model.
+
+ .. versionchanged:: 3.0
+ The implementation more closely matches the base SQLAlchemy implementation.
+
+ .. versionchanged:: 2.1
+ Support joining an external transaction.
+ """
+ if bind is not None:
+ return bind
+
+ engines = self._db.engines
+
+ if mapper is not None:
+ try:
+ mapper = sa.inspect(mapper)
+ except sa_exc.NoInspectionAvailable as e:
+ if isinstance(mapper, type):
+ raise sa_orm.exc.UnmappedClassError(mapper) from e
+
+ raise
+
+ engine = _clause_to_engine(mapper.local_table, engines)
+
+ if engine is not None:
+ return engine
+
+ if clause is not None:
+ engine = _clause_to_engine(clause, engines)
+
+ if engine is not None:
+ return engine
+
+ if None in engines:
+ return engines[None]
+
+ return super().get_bind(mapper=mapper, clause=clause, bind=bind, **kwargs)
+
+
+def _clause_to_engine(
+ clause: sa.ClauseElement | None,
+ engines: t.Mapping[str | None, sa.engine.Engine],
+) -> sa.engine.Engine | None:
+ """If the clause is a table, return the engine associated with the table's
+ metadata's bind key.
+ """
+ table = None
+
+ if clause is not None:
+ if isinstance(clause, sa.Table):
+ table = clause
+ elif isinstance(clause, sa.UpdateBase) and isinstance(clause.table, sa.Table):
+ table = clause.table
+
+ if table is not None and "bind_key" in table.metadata.info:
+ key = table.metadata.info["bind_key"]
+
+ if key not in engines:
+ raise sa_exc.UnboundExecutionError(
+ f"Bind key '{key}' is not in 'SQLALCHEMY_BINDS' config."
+ )
+
+ return engines[key]
+
+ return None
+
+
+def _app_ctx_id() -> int:
+ """Get the id of the current Flask application context for the session scope."""
+ return id(app_ctx._get_current_object()) # type: ignore[attr-defined]
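
A minimal sketch of the routing ``get_bind`` performs, assuming the public
API; the bind key and database URLs are illustrative:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///main.db"
app.config["SQLALCHEMY_BINDS"] = {"auth": "sqlite:///auth.db"}
db = SQLAlchemy(app)

class Event(db.Model):
    __bind_key__ = "auth"
    id = db.Column(db.Integer, primary_key=True)

with app.app_context():
    # _clause_to_engine finds "auth" in Event's table metadata info and
    # returns that engine; models without a bind key use engines[None].
    assert db.session.get_bind(mapper=Event) is db.engines["auth"]
```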
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/table.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/table.py
new file mode 100644
index 0000000..ab08a69
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/table.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+import typing as t
+
+import sqlalchemy as sa
+import sqlalchemy.sql.schema as sa_sql_schema
+
+
+class _Table(sa.Table):
+ @t.overload
+ def __init__(
+ self,
+ name: str,
+ *args: sa_sql_schema.SchemaItem,
+ bind_key: str | None = None,
+ **kwargs: t.Any,
+ ) -> None:
+ ...
+
+ @t.overload
+ def __init__(
+ self,
+ name: str,
+ metadata: sa.MetaData,
+ *args: sa_sql_schema.SchemaItem,
+ **kwargs: t.Any,
+ ) -> None:
+ ...
+
+ @t.overload
+ def __init__(
+ self, name: str, *args: sa_sql_schema.SchemaItem, **kwargs: t.Any
+ ) -> None:
+ ...
+
+ def __init__(
+ self, name: str, *args: sa_sql_schema.SchemaItem, **kwargs: t.Any
+ ) -> None:
+ super().__init__(name, *args, **kwargs) # type: ignore[arg-type]
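
The overloads above exist so that ``db.Table`` (built on ``_Table``) can
accept an optional ``bind_key`` in place of an explicit ``MetaData``. A
sketch, with illustrative table and bind names:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///main.db"
app.config["SQLALCHEMY_BINDS"] = {"auth": "sqlite:///auth.db"}
db = SQLAlchemy(app)

# Association table on the default bind; no MetaData argument is needed
# because the extension supplies it.
user_tag = db.Table(
    "user_tag",
    db.Column("user_id", db.Integer, primary_key=True),
    db.Column("tag_id", db.Integer, primary_key=True),
)

# The same call with bind_key= stores the table in the "auth" metadata.
audit_note = db.Table(
    "audit_note",
    db.Column("id", db.Integer, primary_key=True),
    bind_key="auth",
)

assert audit_note.metadata.info["bind_key"] == "auth"
```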
diff --git a/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/track_modifications.py b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/track_modifications.py
new file mode 100644
index 0000000..7028b65
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/flask_sqlalchemy/track_modifications.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import typing as t
+
+import sqlalchemy as sa
+import sqlalchemy.event as sa_event
+import sqlalchemy.orm as sa_orm
+from flask import current_app
+from flask import has_app_context
+from flask.signals import Namespace # type: ignore[attr-defined]
+
+if t.TYPE_CHECKING:
+ from .session import Session
+
+_signals = Namespace()
+
+models_committed = _signals.signal("models-committed")
+"""This Blinker signal is sent after the session is committed if there were changed
+models in the session.
+
+The sender is the application that emitted the changes. The receiver is passed the
+``changes`` argument with a list of tuples in the form ``(instance, operation)``.
+The operations are ``"insert"``, ``"update"``, and ``"delete"``.
+"""
+
+before_models_committed = _signals.signal("before-models-committed")
+"""This signal works exactly like :data:`models_committed` but is emitted before the
+commit takes place.
+"""
+
+
+def _listen(session: sa_orm.scoped_session[Session]) -> None:
+ sa_event.listen(session, "before_flush", _record_ops, named=True)
+ sa_event.listen(session, "before_commit", _record_ops, named=True)
+ sa_event.listen(session, "before_commit", _before_commit)
+ sa_event.listen(session, "after_commit", _after_commit)
+ sa_event.listen(session, "after_rollback", _after_rollback)
+
+
+def _record_ops(session: Session, **kwargs: t.Any) -> None:
+ if not has_app_context():
+ return
+
+ if not current_app.config["SQLALCHEMY_TRACK_MODIFICATIONS"]:
+ return
+
+ for targets, operation in (
+ (session.new, "insert"),
+ (session.dirty, "update"),
+ (session.deleted, "delete"),
+ ):
+ for target in targets:
+ state = sa.inspect(target)
+ key = state.identity_key if state.has_identity else id(target)
+ session._model_changes[key] = (target, operation)
+
+
+def _before_commit(session: Session) -> None:
+ if not has_app_context():
+ return
+
+ app = current_app._get_current_object() # type: ignore[attr-defined]
+
+ if not app.config["SQLALCHEMY_TRACK_MODIFICATIONS"]:
+ return
+
+ if session._model_changes:
+ changes = list(session._model_changes.values())
+ before_models_committed.send(app, changes=changes)
+
+
+def _after_commit(session: Session) -> None:
+ if not has_app_context():
+ return
+
+ app = current_app._get_current_object() # type: ignore[attr-defined]
+
+ if not app.config["SQLALCHEMY_TRACK_MODIFICATIONS"]:
+ return
+
+ if session._model_changes:
+ changes = list(session._model_changes.values())
+ models_committed.send(app, changes=changes)
+ session._model_changes.clear()
+
+
+def _after_rollback(session: Session) -> None:
+ session._model_changes.clear()
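
A sketch of subscribing to the signals above, assuming Blinker's
``connect_via``; the model and logging are illustrative:

```python
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy.track_modifications import models_committed

app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = True
db = SQLAlchemy(app)

class Note(db.Model):
    id = db.Column(db.Integer, primary_key=True)

@models_committed.connect_via(app)
def on_models_committed(sender, changes):
    # changes is a list of (instance, operation) tuples, with operation
    # one of "insert", "update", or "delete".
    for instance, operation in changes:
        sender.logger.info("%s %r", operation, instance)

with app.app_context():
    db.create_all()
    db.session.add(Note())
    db.session.commit()  # emits before_models_committed, then models_committed
```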
diff --git a/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/INSTALLER b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/METADATA b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/METADATA
new file mode 100644
index 0000000..0e3a649
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/METADATA
@@ -0,0 +1,117 @@
+Metadata-Version: 2.4
+Name: greenlet
+Version: 3.2.4
+Summary: Lightweight in-process concurrent programming
+Home-page: https://greenlet.readthedocs.io/
+Author: Alexey Borzenkov
+Author-email: snaury@gmail.com
+Maintainer: Jason Madden
+Maintainer-email: jason@seecoresoftware.com
+License: MIT AND Python-2.0
+Project-URL: Bug Tracker, https://github.com/python-greenlet/greenlet/issues
+Project-URL: Source Code, https://github.com/python-greenlet/greenlet/
+Project-URL: Documentation, https://greenlet.readthedocs.io/
+Project-URL: Changes, https://greenlet.readthedocs.io/en/latest/changes.html
+Keywords: greenlet coroutine concurrency threads cooperative
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Programming Language :: C
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+License-File: LICENSE.PSF
+Provides-Extra: docs
+Requires-Dist: Sphinx; extra == "docs"
+Requires-Dist: furo; extra == "docs"
+Provides-Extra: test
+Requires-Dist: objgraph; extra == "test"
+Requires-Dist: psutil; extra == "test"
+Requires-Dist: setuptools; extra == "test"
+Dynamic: author
+Dynamic: author-email
+Dynamic: classifier
+Dynamic: description
+Dynamic: description-content-type
+Dynamic: home-page
+Dynamic: keywords
+Dynamic: license
+Dynamic: license-file
+Dynamic: maintainer
+Dynamic: maintainer-email
+Dynamic: platform
+Dynamic: project-url
+Dynamic: provides-extra
+Dynamic: requires-python
+Dynamic: summary
+
+.. This file is included into docs/history.rst
+
+
+Greenlets are lightweight coroutines for in-process concurrent
+programming.
+
+The "greenlet" package is a spin-off of `Stackless`_, a version of
+CPython that supports micro-threads called "tasklets". Tasklets run
+pseudo-concurrently (typically in a single or a few OS-level threads)
+and are synchronized with data exchanges on "channels".
+
+A "greenlet", on the other hand, is a still more primitive notion of
+micro-thread with no implicit scheduling; coroutines, in other words.
+This is useful when you want to control exactly when your code runs.
+You can build custom scheduled micro-threads on top of greenlet;
+however, it seems that greenlets are useful on their own as a way to
+make advanced control flow structures. For example, we can recreate
+generators; the difference with Python's own generators is that our
+generators can call nested functions and the nested functions can
+yield values too. (Additionally, you don't need a "yield" keyword. See
+the example in `test_generator.py
+<https://github.com/python-greenlet/greenlet/blob/master/src/greenlet/tests/test_generator.py>`_).
+
+Greenlets are provided as a C extension module for the regular unmodified
+interpreter.
+
+.. _`Stackless`: http://www.stackless.com
+
+
+Who is using Greenlet?
+======================
+
+There are several libraries that use Greenlet as a more flexible
+alternative to Python's built-in coroutine support:
+
+ - `Concurrence`_
+ - `Eventlet`_
+ - `Gevent`_
+
+.. _Concurrence: http://opensource.hyves.org/concurrence/
+.. _Eventlet: http://eventlet.net/
+.. _Gevent: http://www.gevent.org/
+
+Getting Greenlet
+================
+
+The easiest way to get Greenlet is to install it with pip::
+
+ pip install greenlet
+
+
+Source code archives and binary distributions are available on the
+Python Package Index at https://pypi.org/project/greenlet
+
+The source code repository is hosted on GitHub:
+https://github.com/python-greenlet/greenlet
+
+Documentation is available on readthedocs.org:
+https://greenlet.readthedocs.io
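
A minimal sketch of the generator idea described above, using only the
public ``greenlet`` API; ``produce``, ``nested``, and ``numbers`` are
illustrative names, not part of the package:

```python
from greenlet import greenlet, getcurrent

def produce(value):
    # Switch back to the consumer (our parent), handing it the next value.
    getcurrent().parent.switch(value)

def nested(start):
    for i in range(start, start + 2):
        produce(i)  # a plain nested call can still "yield"

def numbers():
    nested(0)
    nested(10)

g = greenlet(numbers)
out = []
value = g.switch()  # run until the first produce()
while not g.dead:
    out.append(value)
    value = g.switch()  # resume inside produce()
print(out)  # [0, 1, 10, 11]
```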
diff --git a/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/RECORD b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/RECORD
new file mode 100644
index 0000000..2f1fe6e
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/RECORD
@@ -0,0 +1,121 @@
+../../../include/site/python3.11/greenlet/greenlet.h,sha256=sz5pYRSQqedgOt2AMgxLZdTjO-qcr_JMvgiEJR9IAJ8,4755
+greenlet-3.2.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+greenlet-3.2.4.dist-info/METADATA,sha256=ZwuiD2PER_KIrBSuuQdUPtK-VCLKtfY5RueYGQheX6o,4120
+greenlet-3.2.4.dist-info/RECORD,,
+greenlet-3.2.4.dist-info/WHEEL,sha256=N6PyfvHGx46Sh1ny6KlB0rtGwHkXZAwlLCEEPBiTPn8,152
+greenlet-3.2.4.dist-info/licenses/LICENSE,sha256=dpgx1uXfrywggC-sz_H6-0wgJd2PYlPfpH_K1Z1NCXk,1434
+greenlet-3.2.4.dist-info/licenses/LICENSE.PSF,sha256=5f88I8EQ5JTNfXNsEP2W1GJFe6_soxCEDbZScpjH1Gs,2424
+greenlet-3.2.4.dist-info/top_level.txt,sha256=YSnRsCRoO61JGlP57o8iKL6rdLWDWuiyKD8ekpWUsDc,9
+greenlet/CObjects.cpp,sha256=OPej1bWBgc4sRrTRQ2aFFML9pzDYKlKhlJSjsI0X_eU,3508
+greenlet/PyGreenlet.cpp,sha256=dGal9uux_E0d6yMaZfVYpdD9x1XFVOrp4s_or_D_UEM,24199
+greenlet/PyGreenlet.hpp,sha256=2ZQlOxYNoy7QwD7mppFoOXe_At56NIsJ0eNsE_hoSsw,1463
+greenlet/PyGreenletUnswitchable.cpp,sha256=PQE0fSZa_IOyUM44IESHkJoD2KtGW3dkhkmZSYY3WHs,4375
+greenlet/PyModule.cpp,sha256=J2TH06dGcNEarioS6NbWXkdME8hJY05XVbdqLrfO5w4,8587
+greenlet/TBrokenGreenlet.cpp,sha256=smN26uC7ahAbNYiS10rtWPjCeTG4jevM8siA2sjJiXg,1021
+greenlet/TExceptionState.cpp,sha256=U7Ctw9fBdNraS0d174MoQW7bN-ae209Ta0JuiKpcpVI,1359
+greenlet/TGreenlet.cpp,sha256=IM4cHsv1drEl35d7n8YOA_wR-R7oRvx5XhOJOK2PBB8,25732
+greenlet/TGreenlet.hpp,sha256=DoN795i3vofgll-20GA-ylg3qCNw-nKprLA6r7CK5HY,28522
+greenlet/TGreenletGlobals.cpp,sha256=YyEmDjKf1g32bsL-unIUScFLnnA1fzLWf2gOMd-D0Zw,3264
+greenlet/TMainGreenlet.cpp,sha256=fvgb8HHB-FVTPEKjR1s_ifCZSpp5D5YQByik0CnIABg,3276
+greenlet/TPythonState.cpp,sha256=b12U09sNjQvKG0_agROFHuJkDDa7HDccWaFW55XViQA,15975
+greenlet/TStackState.cpp,sha256=V444I8Jj9DhQz-9leVW_9dtiSRjaE1NMlgDG02Xxq-Y,7381
+greenlet/TThreadState.hpp,sha256=2Jgg7DtGggMYR_x3CLAvAFf1mIdIDtQvSSItcdmX4ZQ,19131
+greenlet/TThreadStateCreator.hpp,sha256=uYTexDWooXSSgUc5uh-Mhm5BQi3-kR6CqpizvNynBFQ,2610
+greenlet/TThreadStateDestroy.cpp,sha256=36yBCAMq3beXTZd-XnFA7DwaHVSOx2vc28-nf0spysU,8169
+greenlet/TUserGreenlet.cpp,sha256=uemg0lwKXtYB0yzmvyYdIIAsKnNkifXM1OJ2OlrFP1A,23553
+greenlet/__init__.py,sha256=vSR8EU6Bi32-0MkAlx--fzCL-Eheh6EqJWa-7B9LTOk,1723
+greenlet/__pycache__/__init__.cpython-311.pyc,,
+greenlet/_greenlet.cpython-311-x86_64-linux-gnu.so,sha256=TkjvWEnGAXpCQgzzry0_iDHyP40sVXMVuRhT4lj8xTM,1365232
+greenlet/greenlet.cpp,sha256=WdItb1yWL9WNsTqJNf0Iw8ZwDHD49pkDP0rIRGBg2pw,10996
+greenlet/greenlet.h,sha256=sz5pYRSQqedgOt2AMgxLZdTjO-qcr_JMvgiEJR9IAJ8,4755
+greenlet/greenlet_allocator.hpp,sha256=eC0S1AQuep1vnVRsag-r83xgfAtbpn0qQZ-oXzQXaso,2607
+greenlet/greenlet_compiler_compat.hpp,sha256=nRxpLN9iNbnLVyFDeVmOwyeeNm6scQrOed1l7JQYMCM,4346
+greenlet/greenlet_cpython_compat.hpp,sha256=kJG6d_yDwwl3bSZOOFqM3ks1UzVIGcwbsTM2s8C6VYE,4149
+greenlet/greenlet_exceptions.hpp,sha256=06Bx81DtVaJTa6RtiMcV141b-XHv4ppEgVItkblcLWY,4503
+greenlet/greenlet_internal.hpp,sha256=Ajc-_09W4xWzm9XfyXHAeQAFUgKGKsnJwYsTCoNy3ns,2709
+greenlet/greenlet_msvc_compat.hpp,sha256=0MyaiyoCE_A6UROXZlMQRxRS17gfyh0d7NUppU3EVFc,2978
+greenlet/greenlet_refs.hpp,sha256=OnbA91yZf3QHH6-eJccvoNDAaN-pQBMMrclFU1Ot3J4,34436
+greenlet/greenlet_slp_switch.hpp,sha256=kM1QHA2iV-gH4cFyN6lfIagHQxvJZjWOVJdIxRE3TlQ,3198
+greenlet/greenlet_thread_support.hpp,sha256=XUJ6ljWjf9OYyuOILiz8e_yHvT3fbaUiHdhiPNQUV4s,867
+greenlet/platform/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+greenlet/platform/__pycache__/__init__.cpython-311.pyc,,
+greenlet/platform/setup_switch_x64_masm.cmd,sha256=ZpClUJeU0ujEPSTWNSepP0W2f9XiYQKA8QKSoVou8EU,143
+greenlet/platform/switch_aarch64_gcc.h,sha256=GKC0yWNXnbK2X--X6aguRCMj2Tg7hDU1Zkl3RljDvC8,4307
+greenlet/platform/switch_alpha_unix.h,sha256=Z-SvF8JQV3oxWT8JRbL9RFu4gRFxPdJ7cviM8YayMmw,671
+greenlet/platform/switch_amd64_unix.h,sha256=EcSFCBlodEBhqhKjcJqY_5Dn_jn7pKpkJlOvp7gFXLI,2748
+greenlet/platform/switch_arm32_gcc.h,sha256=Z3KkHszdgq6uU4YN3BxvKMG2AdDnovwCCNrqGWZ1Lyo,2479
+greenlet/platform/switch_arm32_ios.h,sha256=mm5_R9aXB92hyxzFRwB71M60H6AlvHjrpTrc72Pz3l8,1892
+greenlet/platform/switch_arm64_masm.asm,sha256=4kpTtfy7rfcr8j1CpJLAK21EtZpGDAJXWRU68HEy5A8,1245
+greenlet/platform/switch_arm64_masm.obj,sha256=DmLnIB_icoEHAz1naue_pJPTZgR9ElM7-Nmztr-o9_U,746
+greenlet/platform/switch_arm64_msvc.h,sha256=RqK5MHLmXI3Q-FQ7tm32KWnbDNZKnkJdq8CR89cz640,398
+greenlet/platform/switch_csky_gcc.h,sha256=kDikyiPpewP71KoBZQO_MukDTXTXBiC7x-hF0_2DL0w,1331
+greenlet/platform/switch_loongarch64_linux.h,sha256=7M-Dhc4Q8tRbJCJhalDLwU6S9Mx8MjmN1RbTDgIvQTM,779
+greenlet/platform/switch_m68k_gcc.h,sha256=VSa6NpZhvyyvF-Q58CTIWSpEDo4FKygOyTz00whctlw,928
+greenlet/platform/switch_mips_unix.h,sha256=E0tYsqc5anDY1BhenU1l8DW-nVHC_BElzLgJw3TGtPk,1426
+greenlet/platform/switch_ppc64_aix.h,sha256=_BL0iyRr3ZA5iPlr3uk9SJ5sNRWGYLrXcZ5z-CE9anE,3860
+greenlet/platform/switch_ppc64_linux.h,sha256=0rriT5XyxPb0GqsSSn_bP9iQsnjsPbBmu0yqo5goSyQ,3815
+greenlet/platform/switch_ppc_aix.h,sha256=pHA4slEjUFP3J3SYm1TAlNPhgb2G_PAtax5cO8BEe1A,2941
+greenlet/platform/switch_ppc_linux.h,sha256=YwrlKUzxlXuiKMQqr6MFAV1bPzWnmvk6X1AqJZEpOWU,2759
+greenlet/platform/switch_ppc_macosx.h,sha256=Z6KN_ud0n6nC3ltJrNz2qtvER6vnRAVRNH9mdIDpMxY,2624
+greenlet/platform/switch_ppc_unix.h,sha256=-ZG7MSSPEA5N4qO9PQChtyEJ-Fm6qInhyZm_ZBHTtMg,2652
+greenlet/platform/switch_riscv_unix.h,sha256=606V6ACDf79Fz_WGItnkgbjIJ0pGg_sHmPyDxQYKK58,949
+greenlet/platform/switch_s390_unix.h,sha256=RRlGu957ybmq95qNNY4Qw1mcaoT3eBnW5KbVwu48KX8,2763
+greenlet/platform/switch_sh_gcc.h,sha256=mcRJBTu-2UBf4kZtX601qofwuDuy-Y-hnxJtrcaB7do,901
+greenlet/platform/switch_sparc_sun_gcc.h,sha256=xZish9GsMHBienUbUMsX1-ZZ-as7hs36sVhYIE3ew8Y,2797
+greenlet/platform/switch_x32_unix.h,sha256=nM98PKtzTWc1lcM7TRMUZJzskVdR1C69U1UqZRWX0GE,1509
+greenlet/platform/switch_x64_masm.asm,sha256=nu6n2sWyXuXfpPx40d9YmLfHXUc1sHgeTvX1kUzuvEM,1841
+greenlet/platform/switch_x64_masm.obj,sha256=GNtTNxYdo7idFUYsQv-mrXWgyT5EJ93-9q90lN6svtQ,1078
+greenlet/platform/switch_x64_msvc.h,sha256=LIeasyKo_vHzspdMzMHbosRhrBfKI4BkQOh4qcTHyJw,1805
+greenlet/platform/switch_x86_msvc.h,sha256=TtGOwinbFfnn6clxMNkCz8i6OmgB6kVRrShoF5iT9to,12838
+greenlet/platform/switch_x86_unix.h,sha256=VplW9H0FF0cZHw1DhJdIUs5q6YLS4cwb2nYwjF83R1s,3059
+greenlet/slp_platformselect.h,sha256=hTb3GFdcPUYJTuu1MY93js7MZEax1_e5E-gflpi0RzI,3959
+greenlet/tests/__init__.py,sha256=EtTtQfpRDde0MhsdAM5Cm7LYIfS_HKUIFwquiH4Q7ac,9736
+greenlet/tests/__pycache__/__init__.cpython-311.pyc,,
+greenlet/tests/__pycache__/fail_clearing_run_switches.cpython-311.pyc,,
+greenlet/tests/__pycache__/fail_cpp_exception.cpython-311.pyc,,
+greenlet/tests/__pycache__/fail_initialstub_already_started.cpython-311.pyc,,
+greenlet/tests/__pycache__/fail_slp_switch.cpython-311.pyc,,
+greenlet/tests/__pycache__/fail_switch_three_greenlets.cpython-311.pyc,,
+greenlet/tests/__pycache__/fail_switch_three_greenlets2.cpython-311.pyc,,
+greenlet/tests/__pycache__/fail_switch_two_greenlets.cpython-311.pyc,,
+greenlet/tests/__pycache__/leakcheck.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_contextvars.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_cpp.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_extension_interface.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_gc.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_generator.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_generator_nested.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_greenlet.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_greenlet_trash.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_leaks.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_stack_saved.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_throw.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_tracing.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_version.cpython-311.pyc,,
+greenlet/tests/__pycache__/test_weakref.cpython-311.pyc,,
+greenlet/tests/_test_extension.c,sha256=vkeGA-6oeJcGILsD7oIrT1qZop2GaTOHXiNT7mcSl-0,5773
+greenlet/tests/_test_extension.cpython-311-x86_64-linux-gnu.so,sha256=p118NJ4hObhSNcvKLduspwQExvXHPDAbWVVMU6o3dqs,17256
+greenlet/tests/_test_extension_cpp.cpp,sha256=e0kVnaB8CCaEhE9yHtNyfqTjevsPDKKx-zgxk7PPK48,6565
+greenlet/tests/_test_extension_cpp.cpython-311-x86_64-linux-gnu.so,sha256=oY-c-ycRV67QTFu7qSj83Uf-XU91QUPv7oqQ4Yd3YF0,57920
+greenlet/tests/fail_clearing_run_switches.py,sha256=o433oA_nUCtOPaMEGc8VEhZIKa71imVHXFw7TsXaP8M,1263
+greenlet/tests/fail_cpp_exception.py,sha256=o_ZbipWikok8Bjc-vjiQvcb5FHh2nVW-McGKMLcMzh0,985
+greenlet/tests/fail_initialstub_already_started.py,sha256=txENn5IyzGx2p-XR1XB7qXmC8JX_4mKDEA8kYBXUQKc,1961
+greenlet/tests/fail_slp_switch.py,sha256=rJBZcZfTWR3e2ERQtPAud6YKShiDsP84PmwOJbp4ey0,524
+greenlet/tests/fail_switch_three_greenlets.py,sha256=zSitV7rkNnaoHYVzAGGLnxz-yPtohXJJzaE8ehFDQ0M,956
+greenlet/tests/fail_switch_three_greenlets2.py,sha256=FPJensn2EJxoropl03JSTVP3kgP33k04h6aDWWozrOk,1285
+greenlet/tests/fail_switch_two_greenlets.py,sha256=1CaI8s3504VbbF1vj1uBYuy-zxBHVzHPIAd1LIc8ONg,817
+greenlet/tests/leakcheck.py,sha256=JHgc45bnTyVtn9MiprIlz2ygSXMFtcaCSp2eB9XIhQE,12612
+greenlet/tests/test_contextvars.py,sha256=xutO-qZgKTwKsA9lAqTjIcTBEiQV4RpNKM-vO2_YCVU,10541
+greenlet/tests/test_cpp.py,sha256=hpxhFAdKJTpAVZP8CBGs1ZcrKdscI9BaDZk4btkI5d4,2736
+greenlet/tests/test_extension_interface.py,sha256=eJ3cwLacdK2WbsrC-4DgeyHdwLRcG4zx7rrkRtqSzC4,3829
+greenlet/tests/test_gc.py,sha256=PCOaRpIyjNnNlDogGL3FZU_lrdXuM-pv1rxeE5TP5mc,2923
+greenlet/tests/test_generator.py,sha256=tONXiTf98VGm347o1b-810daPiwdla5cbpFg6QI1R1g,1240
+greenlet/tests/test_generator_nested.py,sha256=7v4HOYrf1XZP39dk5IUMubdZ8yc3ynwZcqj9GUJyMSA,3718
+greenlet/tests/test_greenlet.py,sha256=gSG6hOjKYyRRe5ZzNUpskrUcMnBT3WU4yITTzaZfLH4,47995
+greenlet/tests/test_greenlet_trash.py,sha256=n2dBlQfOoEO1ODatFi8QdhboH3fB86YtqzcYMYOXxbw,7947
+greenlet/tests/test_leaks.py,sha256=OFSE870Zyql85HukfC_XYa2c4gDQBU889RV1AlLum74,18076
+greenlet/tests/test_stack_saved.py,sha256=eyzqNY2VCGuGlxhT_In6TvZ6Okb0AXFZVyBEnK1jDwA,446
+greenlet/tests/test_throw.py,sha256=u2TQ_WvvCd6N6JdXWIxVEcXkKu5fepDlz9dktYdmtng,3712
+greenlet/tests/test_tracing.py,sha256=NFD6Vcww8grBnFQFhCNdswwGetjLeLQ7vL2Qqw3LWBM,8591
+greenlet/tests/test_version.py,sha256=O9DpAITsOFgiRcjd4odQ7ejmwx_N9Q1zQENVcbtFHIc,1339
+greenlet/tests/test_weakref.py,sha256=F8M23btEF87bIbpptLNBORosbQqNZGiYeKMqYjWrsak,883
diff --git a/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/WHEEL b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/WHEEL
new file mode 100644
index 0000000..283ae68
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: setuptools (80.9.0)
+Root-Is-Purelib: false
+Tag: cp311-cp311-manylinux_2_24_x86_64
+Tag: cp311-cp311-manylinux_2_28_x86_64
+
diff --git a/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/licenses/LICENSE b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/licenses/LICENSE
new file mode 100644
index 0000000..b73a4a1
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/licenses/LICENSE
@@ -0,0 +1,30 @@
+The following files are derived from Stackless Python and are subject to the
+same license as Stackless Python:
+
+ src/greenlet/slp_platformselect.h
+ files in src/greenlet/platform/ directory
+
+See LICENSE.PSF and http://www.stackless.com/ for details.
+
+Unless otherwise noted, the files in greenlet have been released under the
+following MIT license:
+
+Copyright (c) Armin Rigo, Christian Tismer and contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/licenses/LICENSE.PSF b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/licenses/LICENSE.PSF
new file mode 100644
index 0000000..d3b509a
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/licenses/LICENSE.PSF
@@ -0,0 +1,47 @@
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011 Python Software Foundation; All Rights Reserved" are retained in Python
+alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
diff --git a/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/top_level.txt b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/top_level.txt
new file mode 100644
index 0000000..46725be
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet-3.2.4.dist-info/top_level.txt
@@ -0,0 +1 @@
+greenlet
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/CObjects.cpp b/tapdown/lib/python3.11/site-packages/greenlet/CObjects.cpp
new file mode 100644
index 0000000..c135995
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/CObjects.cpp
@@ -0,0 +1,157 @@
+#ifndef COBJECTS_CPP
+#define COBJECTS_CPP
+/*****************************************************************************
+ * C interface
+ *
+ * These are exported using the CObject API
+ */
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunused-function"
+#endif
+
+#include "greenlet_exceptions.hpp"
+
+#include "greenlet_internal.hpp"
+#include "greenlet_refs.hpp"
+
+
+#include "TThreadStateDestroy.cpp"
+
+#include "PyGreenlet.hpp"
+
+using greenlet::PyErrOccurred;
+using greenlet::Require;
+
+
+
+extern "C" {
+static PyGreenlet*
+PyGreenlet_GetCurrent(void)
+{
+ return GET_THREAD_STATE().state().get_current().relinquish_ownership();
+}
+
+static int
+PyGreenlet_SetParent(PyGreenlet* g, PyGreenlet* nparent)
+{
+ return green_setparent((PyGreenlet*)g, (PyObject*)nparent, NULL);
+}
+
+static PyGreenlet*
+PyGreenlet_New(PyObject* run, PyGreenlet* parent)
+{
+ using greenlet::refs::NewDictReference;
+ // In the past, we didn't use green_new and green_init, but that
+ // was a maintenance issue because we duplicated code. This way is
+ // much safer, but slightly slower. If that's a problem, we could
+ // refactor green_init to separate argument parsing from initialization.
+ OwnedGreenlet g = OwnedGreenlet::consuming(green_new(&PyGreenlet_Type, nullptr, nullptr));
+ if (!g) {
+ return NULL;
+ }
+
+ try {
+ NewDictReference kwargs;
+ if (run) {
+ kwargs.SetItem(mod_globs->str_run, run);
+ }
+ if (parent) {
+ kwargs.SetItem("parent", (PyObject*)parent);
+ }
+
+ Require(green_init(g.borrow(), mod_globs->empty_tuple, kwargs.borrow()));
+ }
+ catch (const PyErrOccurred&) {
+ return nullptr;
+ }
+
+ return g.relinquish_ownership();
+}
+
+static PyObject*
+PyGreenlet_Switch(PyGreenlet* self, PyObject* args, PyObject* kwargs)
+{
+ if (!PyGreenlet_Check(self)) {
+ PyErr_BadArgument();
+ return NULL;
+ }
+
+ if (args == NULL) {
+ args = mod_globs->empty_tuple;
+ }
+
+ if (kwargs == NULL || !PyDict_Check(kwargs)) {
+ kwargs = NULL;
+ }
+
+ return green_switch(self, args, kwargs);
+}
+
+static PyObject*
+PyGreenlet_Throw(PyGreenlet* self, PyObject* typ, PyObject* val, PyObject* tb)
+{
+ if (!PyGreenlet_Check(self)) {
+ PyErr_BadArgument();
+ return nullptr;
+ }
+ try {
+ PyErrPieces err_pieces(typ, val, tb);
+ return internal_green_throw(self, err_pieces).relinquish_ownership();
+ }
+ catch (const PyErrOccurred&) {
+ return nullptr;
+ }
+}
+
+
+
+static int
+Extern_PyGreenlet_MAIN(PyGreenlet* self)
+{
+ if (!PyGreenlet_Check(self)) {
+ PyErr_BadArgument();
+ return -1;
+ }
+ return self->pimpl->main();
+}
+
+static int
+Extern_PyGreenlet_ACTIVE(PyGreenlet* self)
+{
+ if (!PyGreenlet_Check(self)) {
+ PyErr_BadArgument();
+ return -1;
+ }
+ return self->pimpl->active();
+}
+
+static int
+Extern_PyGreenlet_STARTED(PyGreenlet* self)
+{
+ if (!PyGreenlet_Check(self)) {
+ PyErr_BadArgument();
+ return -1;
+ }
+ return self->pimpl->started();
+}
+
+static PyGreenlet*
+Extern_PyGreenlet_GET_PARENT(PyGreenlet* self)
+{
+ if (!PyGreenlet_Check(self)) {
+ PyErr_BadArgument();
+ return NULL;
+ }
+ // This can return NULL even if there is no exception
+ return self->pimpl->parent().acquire();
+}
+} // extern C.
+
+/** End C API ****************************************************************/
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
+
+
+#endif
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/PyGreenlet.cpp b/tapdown/lib/python3.11/site-packages/greenlet/PyGreenlet.cpp
new file mode 100644
index 0000000..6b118a5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/PyGreenlet.cpp
@@ -0,0 +1,751 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+#ifndef PYGREENLET_CPP
+#define PYGREENLET_CPP
+/*****************
+The Python slot functions for TGreenlet.
+ */
+
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include "structmember.h" // PyMemberDef
+
+#include "greenlet_internal.hpp"
+#include "TThreadStateDestroy.cpp"
+#include "TGreenlet.hpp"
+// #include "TUserGreenlet.cpp"
+// #include "TMainGreenlet.cpp"
+// #include "TBrokenGreenlet.cpp"
+
+
+#include "greenlet_refs.hpp"
+#include "greenlet_slp_switch.hpp"
+
+#include "greenlet_thread_support.hpp"
+#include "TGreenlet.hpp"
+
+#include "TGreenletGlobals.cpp"
+#include "TThreadStateDestroy.cpp"
+#include "PyGreenlet.hpp"
+// #include "TGreenlet.cpp"
+
+// #include "TExceptionState.cpp"
+// #include "TPythonState.cpp"
+// #include "TStackState.cpp"
+
+using greenlet::LockGuard;
+using greenlet::LockInitError;
+using greenlet::PyErrOccurred;
+using greenlet::Require;
+
+using greenlet::g_handle_exit;
+using greenlet::single_result;
+
+using greenlet::Greenlet;
+using greenlet::UserGreenlet;
+using greenlet::MainGreenlet;
+using greenlet::BrokenGreenlet;
+using greenlet::ThreadState;
+using greenlet::PythonState;
+
+
+
+static PyGreenlet*
+green_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds))
+{
+ PyGreenlet* o =
+ (PyGreenlet*)PyBaseObject_Type.tp_new(type, mod_globs->empty_tuple, mod_globs->empty_dict);
+ if (o) {
+ // Recall: borrowing or getting the current greenlet
+ // causes the "deleteme list" to get cleared. So constructing a greenlet
+ // can do things like cause other greenlets to get finalized.
+ UserGreenlet* c = new UserGreenlet(o, GET_THREAD_STATE().state().borrow_current());
+ assert(Py_REFCNT(o) == 1);
+ // Also: This looks like a memory leak, but isn't. Constructing the
+ // C++ object assigns it to the pimpl pointer of the Python object (o);
+ // we'll need that later.
+ assert(c == o->pimpl);
+ }
+ return o;
+}
+
+
+// green_init is used in the tp_init slot. So it's important that
+// it can be called directly from CPython. Thus, we don't use
+// BorrowedGreenlet and BorrowedObject --- although in theory
+// these should be binary layout compatible, that may not be
+// guaranteed to be the case (32-bit linux ppc possibly).
+static int
+green_init(PyGreenlet* self, PyObject* args, PyObject* kwargs)
+{
+ PyArgParseParam run;
+ PyArgParseParam nparent;
+ static const char* kwlist[] = {
+ "run",
+ "parent",
+ NULL
+ };
+
+ // recall: The O specifier does NOT increase the reference count.
+ if (!PyArg_ParseTupleAndKeywords(
+ args, kwargs, "|OO:green", (char**)kwlist, &run, &nparent)) {
+ return -1;
+ }
+
+ if (run) {
+ if (green_setrun(self, run, NULL)) {
+ return -1;
+ }
+ }
+ if (nparent && !nparent.is_None()) {
+ return green_setparent(self, nparent, NULL);
+ }
+ return 0;
+}
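+
+// Note the ordering above: ``run`` is assigned before ``parent``, so if
+// setting the parent fails, the greenlet is left with its run attribute
+// already in place. Both values may also be passed by keyword, per
+// kwlist: ``greenlet(run=fn, parent=other)`` and ``greenlet(fn, other)``
+// are equivalent spellings.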
+
+
+
+static int
+green_traverse(PyGreenlet* self, visitproc visit, void* arg)
+{
+ // We must only visit referenced objects, i.e. only objects
+ // Py_INCREF'ed by this greenlet (directly or indirectly):
+ //
+ // - stack_prev is not visited: holds previous stack pointer, but it's not
+ // referenced
+ // - frames are not visited as we don't strongly reference them;
+ // alive greenlets are not garbage collected
+ // anyway. This can be a problem, however, if this greenlet is
+ // never allowed to finish, and is referenced from the frame: we
+ // have an uncollectible cycle in that case. Note that the
+ // frame object itself is also frequently not even tracked by the GC
+ // starting with Python 3.7 (frames are allocated by the
+ // interpreter untracked, and only become tracked when their
+ // evaluation is finished if they have a refcount > 1). All of
+ // this is to say that we should probably strongly reference
+ // the frame object. Doing so, while always allowing GC on a
+ // greenlet, solves several leaks for us.
+
+ Py_VISIT(self->dict);
+ if (!self->pimpl) {
+ // Hmm. I have seen this at interpreter shutdown time,
+ // I think. That's very odd, because this doesn't go away until
+ // we're in ``green_dealloc()``, at which point we shouldn't be
+ // traversed anymore.
+ return 0;
+ }
+
+ return self->pimpl->tp_traverse(visit, arg);
+}
+
+static int
+green_is_gc(PyObject* _self)
+{
+ BorrowedGreenlet self(_self);
+ int result = 0;
+ /* Main greenlet can be garbage collected since it can only
+ become unreachable if the underlying thread exited.
+ Active greenlets --- including those that are suspended ---
+ cannot be garbage collected, however.
+ */
+ if (self->main() || !self->active()) {
+ result = 1;
+ }
+ // The main greenlet pointer will eventually go away after the thread dies.
+ if (self->was_running_in_dead_thread()) {
+ // Our thread is dead! We can never run again. Might as well
+ // GC us. Note that if a tuple containing only us and other
+ // immutable objects had been scanned before this, when we
+ // would have returned 0, the tuple will take itself out of GC
+ // tracking and never be investigated again. So that could
+ // result in both us and the tuple leaking due to an
+ // unreachable/uncollectible reference. The same goes for
+ // dictionaries.
+ //
+ // It's not a great idea to be changing our GC state on the
+ // fly.
+ result = 1;
+ }
+ return result;
+}
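+
+// To make the hazard above concrete: if a suspended greenlet is held
+// only by a tuple, the collector may scan that tuple while this
+// function returns 0, untrack it, and never look at it again. When the
+// owning thread later dies and we would start returning 1, nobody
+// asks, so both the tuple and the greenlet can leak. (A sketch of the
+// shape of the problem, not an exhaustive analysis.)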
+
+
+static int
+green_clear(PyGreenlet* self)
+{
+ /* Greenlet is only cleared if it is about to be collected.
+ Since active greenlets are not garbage collectable, we can
+ be sure that, even if they are deallocated during clear,
+ nothing they reference is in unreachable or finalizers,
+ so even if it switches we are relatively safe. */
+ // XXX: Are we responsible for clearing weakrefs here?
+ Py_CLEAR(self->dict);
+ return self->pimpl->tp_clear();
+}
+
+/**
+ * Returns 0 on failure (the object was resurrected) or 1 on success.
+ **/
+static int
+_green_dealloc_kill_started_non_main_greenlet(BorrowedGreenlet self)
+{
+ /* Hacks hacks hacks copied from instance_dealloc() */
+ /* Temporarily resurrect the greenlet. */
+ assert(self.REFCNT() == 0);
+ Py_SET_REFCNT(self.borrow(), 1);
+ /* Save the current exception, if any. */
+ PyErrPieces saved_err;
+ try {
+ // BY THE TIME WE GET HERE, the state may actually be going
+ // away
+ // if we're shutting down the interpreter and freeing thread
+ // entries,
+ // this could result in freeing greenlets that were leaked. So
+ // we can't try to read the state.
+ self->deallocing_greenlet_in_thread(
+ self->thread_state()
+ ? static_cast<ThreadState*>(GET_THREAD_STATE())
+ : nullptr);
+ }
+ catch (const PyErrOccurred&) {
+ PyErr_WriteUnraisable(self.borrow_o());
+ /* XXX what else should we do? */
+ }
+ /* Check for no resurrection must be done while we keep
+ * our internal reference, otherwise PyFile_WriteObject
+ * causes recursion if using Py_INCREF/Py_DECREF
+ */
+ if (self.REFCNT() == 1 && self->active()) {
+ /* Not resurrected, but still not dead!
+ XXX what else should we do? we complain. */
+ PyObject* f = PySys_GetObject("stderr");
+ Py_INCREF(self.borrow_o()); /* leak! */
+ if (f != NULL) {
+ PyFile_WriteString("GreenletExit did not kill ", f);
+ PyFile_WriteObject(self.borrow_o(), f, 0);
+ PyFile_WriteString("\n", f);
+ }
+ }
+ /* Restore the saved exception. */
+ saved_err.PyErrRestore();
+ /* Undo the temporary resurrection; can't use DECREF here,
+ * it would cause a recursive call.
+ */
+ assert(self.REFCNT() > 0);
+
+ Py_ssize_t refcnt = self.REFCNT() - 1;
+ Py_SET_REFCNT(self.borrow_o(), refcnt);
+ if (refcnt != 0) {
+ /* Resurrected! */
+ _Py_NewReference(self.borrow_o());
+ Py_SET_REFCNT(self.borrow_o(), refcnt);
+ /* Better to use tp_finalizer slot (PEP 442)
+ * and call ``PyObject_CallFinalizerFromDealloc``,
+ * but that's only supported in Python 3.4+; see
+ * Modules/_io/iobase.c for an example.
+ * TODO: We no longer run on anything that old, switch to finalizers.
+ *
+ * The following approach is copied from iobase.c in CPython 2.7.
+ * (along with much of this function in general). Here's their
+ * comment:
+ *
+ * When called from a heap type's dealloc, the type will be
+ * decref'ed on return (see e.g. subtype_dealloc in typeobject.c).
+ *
+ * On free-threaded builds of CPython, the type is meant to be immortal
+ * so we probably shouldn't mess with this? See
+ * test_issue_245_reference_counting_subclass_no_threads
+ */
+ if (PyType_HasFeature(self.TYPE(), Py_TPFLAGS_HEAPTYPE)) {
+ Py_INCREF(self.TYPE());
+ }
+
+ PyObject_GC_Track((PyObject*)self);
+
+ GREENLET_Py_DEC_REFTOTAL;
+#ifdef COUNT_ALLOCS
+ --Py_TYPE(self)->tp_frees;
+ --Py_TYPE(self)->tp_allocs;
+#endif /* COUNT_ALLOCS */
+ return 0;
+ }
+ return 1;
+}
+
+
+static void
+green_dealloc(PyGreenlet* self)
+{
+ PyObject_GC_UnTrack(self);
+ BorrowedGreenlet me(self);
+ if (me->active()
+ && me->started()
+ && !me->main()) {
+ if (!_green_dealloc_kill_started_non_main_greenlet(me)) {
+ return;
+ }
+ }
+
+ if (self->weakreflist != NULL) {
+ PyObject_ClearWeakRefs((PyObject*)self);
+ }
+ Py_CLEAR(self->dict);
+
+ if (self->pimpl) {
+ // Null out pimpl first, in case deleting it (which frees some
+ // memory) somehow winds up calling back into us. That would
+ // usually be a bug in our code.
+ Greenlet* p = self->pimpl;
+ self->pimpl = nullptr;
+ delete p;
+ }
+ // and finally we're done. self is now invalid.
+ Py_TYPE(self)->tp_free((PyObject*)self);
+}
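+
+// A summary of the teardown order above, since it is easy to lose in
+// the details: (1) untrack from GC; (2) give a started, non-main
+// greenlet the chance to die (which may resurrect it and abort the
+// dealloc); (3) clear weakrefs; (4) drop __dict__; (5) delete the C++
+// pimpl; (6) free the Python object.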
+
+
+
+static OwnedObject
+internal_green_throw(BorrowedGreenlet self, PyErrPieces& err_pieces)
+{
+ PyObject* result = nullptr;
+ err_pieces.PyErrRestore();
+ assert(PyErr_Occurred());
+ if (self->started() && !self->active()) {
+ /* dead greenlet: turn GreenletExit into a regular return */
+ result = g_handle_exit(OwnedObject()).relinquish_ownership();
+ }
+ self->args() <<= result;
+
+ return single_result(self->g_switch());
+}
+
+
+
+PyDoc_STRVAR(
+ green_switch_doc,
+ "switch(*args, **kwargs)\n"
+ "\n"
+ "Switch execution to this greenlet.\n"
+ "\n"
+ "If this greenlet has never been run, then this greenlet\n"
+ "will be switched to using the body of ``self.run(*args, **kwargs)``.\n"
+ "\n"
+ "If the greenlet is active (has been run, but was switch()'ed\n"
+ "out before leaving its run function), then this greenlet will\n"
+ "be resumed and the return value to its switch call will be\n"
+ "None if no arguments are given, the given argument if one\n"
+ "argument is given, or the args tuple and keyword args dict if\n"
+ "multiple arguments are given.\n"
+ "\n"
+ "If the greenlet is dead, or is the current greenlet then this\n"
+ "function will simply return the arguments using the same rules as\n"
+ "above.\n");
+
+static PyObject*
+green_switch(PyGreenlet* self, PyObject* args, PyObject* kwargs)
+{
+ using greenlet::SwitchingArgs;
+ SwitchingArgs switch_args(OwnedObject::owning(args), OwnedObject::owning(kwargs));
+ self->pimpl->may_switch_away();
+ self->pimpl->args() <<= switch_args;
+
+ // If we're switching out of a greenlet, and that switch is the
+ // last thing the greenlet does, the greenlet ought to be able to
+ // go ahead and die at that point. Currently, someone else must
+ // manually switch back to the greenlet so that we "fall off the
+ // end" and can perform cleanup. You'd think we'd be able to
+ // figure out that this is happening using the frame's ``f_lasti``
+ // member, which is supposed to be an index into
+ // ``frame->f_code->co_code``, the bytecode string. However, in
+ // recent interpreters, ``f_lasti`` tends not to be updated thanks
+ // to things like the PREDICT() macros in ceval.c. So it doesn't
+ // really work to do that in many cases. For example, the Python
+ // code:
+ // def run():
+ // greenlet.getcurrent().parent.switch()
+ // produces bytecode of len 16, with the actual call to switch()
+ // being at index 10 (in Python 3.10). However, the reported
+ // ``f_lasti`` we actually see is...5! (Which happens to be the
+ // second byte of the CALL_METHOD op for ``getcurrent()``).
+
+ try {
+ //OwnedObject result = single_result(self->pimpl->g_switch());
+ OwnedObject result(single_result(self->pimpl->g_switch()));
+#ifndef NDEBUG
+ // Note that the current greenlet isn't necessarily self. If self
+ // finished, we went to one of its parents.
+ assert(!self->pimpl->args());
+
+ const BorrowedGreenlet& current = GET_THREAD_STATE().state().borrow_current();
+ // It's possible it's never been switched to.
+ assert(!current->args());
+#endif
+ PyObject* p = result.relinquish_ownership();
+
+ if (!p && !PyErr_Occurred()) {
+ // This shouldn't be happening anymore, so the asserts
+ // are there for debug builds. Non-debug builds
+ // crash "gracefully" in this case, although there is an
+ // argument to be made for killing the process in all
+ // cases --- for this to be the case, our switches
+ // probably nested in an incorrect way, so the state is
+ // suspicious. Nothing should be corrupt though, just
+ // confused at the Python level. Letting this propagate is
+ // probably good enough.
+ assert(p || PyErr_Occurred());
+ throw PyErrOccurred(
+ mod_globs->PyExc_GreenletError,
+ "Greenlet.switch() returned NULL without an exception set."
+ );
+ }
+ return p;
+ }
+ catch(const PyErrOccurred&) {
+ return nullptr;
+ }
+}
+
+PyDoc_STRVAR(
+ green_throw_doc,
+ "Switches execution to this greenlet, but immediately raises the\n"
+ "given exception in this greenlet. If no argument is provided, the "
+ "exception\n"
+ "defaults to `greenlet.GreenletExit`. The normal exception\n"
+ "propagation rules apply, as described for `switch`. Note that calling "
+ "this\n"
+ "method is almost equivalent to the following::\n"
+ "\n"
+ " def raiser():\n"
+ " raise typ, val, tb\n"
+ " g_raiser = greenlet(raiser, parent=g)\n"
+ " g_raiser.switch()\n"
+ "\n"
+ "except that this trick does not work for the\n"
+ "`greenlet.GreenletExit` exception, which would not propagate\n"
+ "from ``g_raiser`` to ``g``.\n");
+
+static PyObject*
+green_throw(PyGreenlet* self, PyObject* args)
+{
+ PyArgParseParam typ(mod_globs->PyExc_GreenletExit);
+ PyArgParseParam val;
+ PyArgParseParam tb;
+
+ if (!PyArg_ParseTuple(args, "|OOO:throw", &typ, &val, &tb)) {
+ return nullptr;
+ }
+
+ assert(typ.borrow() || val.borrow());
+
+ self->pimpl->may_switch_away();
+ try {
+ // Both normalizing the error and the actual throw_greenlet
+ // could throw PyErrOccurred.
+ PyErrPieces err_pieces(typ.borrow(), val.borrow(), tb.borrow());
+
+ return internal_green_throw(self, err_pieces).relinquish_ownership();
+ }
+ catch (const PyErrOccurred&) {
+ return nullptr;
+ }
+}
+
+static int
+green_bool(PyGreenlet* self)
+{
+ return self->pimpl->active();
+}
+
+/**
+ * CAUTION: Allocates memory, may run GC and arbitrary Python code.
+ */
+static PyObject*
+green_getdict(PyGreenlet* self, void* UNUSED(context))
+{
+ if (self->dict == NULL) {
+ self->dict = PyDict_New();
+ if (self->dict == NULL) {
+ return NULL;
+ }
+ }
+ Py_INCREF(self->dict);
+ return self->dict;
+}
+
+static int
+green_setdict(PyGreenlet* self, PyObject* val, void* UNUSED(context))
+{
+ PyObject* tmp;
+
+ if (val == NULL) {
+ PyErr_SetString(PyExc_TypeError, "__dict__ may not be deleted");
+ return -1;
+ }
+ if (!PyDict_Check(val)) {
+ PyErr_SetString(PyExc_TypeError, "__dict__ must be a dictionary");
+ return -1;
+ }
+ tmp = self->dict;
+ Py_INCREF(val);
+ self->dict = val;
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static bool
+_green_not_dead(BorrowedGreenlet self)
+{
+ // XXX: Where else should we do this?
+ // Probably on entry to most Python-facing functions?
+ if (self->was_running_in_dead_thread()) {
+ self->deactivate_and_free();
+ return false;
+ }
+ return self->active() || !self->started();
+}
+
+
+static PyObject*
+green_getdead(PyGreenlet* self, void* UNUSED(context))
+{
+ if (_green_not_dead(self)) {
+ Py_RETURN_FALSE;
+ }
+ else {
+ Py_RETURN_TRUE;
+ }
+}
+
+static PyObject*
+green_get_stack_saved(PyGreenlet* self, void* UNUSED(context))
+{
+ return PyLong_FromSsize_t(self->pimpl->stack_saved());
+}
+
+
+static PyObject*
+green_getrun(PyGreenlet* self, void* UNUSED(context))
+{
+ try {
+ OwnedObject result(BorrowedGreenlet(self)->run());
+ return result.relinquish_ownership();
+ }
+ catch(const PyErrOccurred&) {
+ return nullptr;
+ }
+}
+
+
+static int
+green_setrun(PyGreenlet* self, PyObject* nrun, void* UNUSED(context))
+{
+ try {
+ BorrowedGreenlet(self)->run(nrun);
+ return 0;
+ }
+ catch(const PyErrOccurred&) {
+ return -1;
+ }
+}
+
+static PyObject*
+green_getparent(PyGreenlet* self, void* UNUSED(context))
+{
+ return BorrowedGreenlet(self)->parent().acquire_or_None();
+}
+
+
+static int
+green_setparent(PyGreenlet* self, PyObject* nparent, void* UNUSED(context))
+{
+ try {
+ BorrowedGreenlet(self)->parent(nparent);
+ }
+ catch(const PyErrOccurred&) {
+ return -1;
+ }
+ return 0;
+}
+
+
+static PyObject*
+green_getcontext(const PyGreenlet* self, void* UNUSED(context))
+{
+ const Greenlet *const g = self->pimpl;
+ try {
+ OwnedObject result(g->context());
+ return result.relinquish_ownership();
+ }
+ catch(const PyErrOccurred&) {
+ return nullptr;
+ }
+}
+
+static int
+green_setcontext(PyGreenlet* self, PyObject* nctx, void* UNUSED(context))
+{
+ try {
+ BorrowedGreenlet(self)->context(nctx);
+ return 0;
+ }
+ catch(const PyErrOccurred&) {
+ return -1;
+ }
+}
+
+
+static PyObject*
+green_getframe(PyGreenlet* self, void* UNUSED(context))
+{
+ const PythonState::OwnedFrame& top_frame = BorrowedGreenlet(self)->top_frame();
+ return top_frame.acquire_or_None();
+}
+
+
+static PyObject*
+green_getstate(PyGreenlet* self)
+{
+ PyErr_Format(PyExc_TypeError,
+ "cannot serialize '%s' object",
+ Py_TYPE(self)->tp_name);
+ return nullptr;
+}
+
+static PyObject*
+green_repr(PyGreenlet* _self)
+{
+ BorrowedGreenlet self(_self);
+ /*
+ Return a string like
+ <greenlet.greenlet at 0xdeadbeef [current][active started]|dead main>
+
+ The handling of greenlets across threads is not super good.
+ We mostly use the internal definitions of these terms, but they
+ generally should make sense to users as well.
+ */
+ PyObject* result;
+ int never_started = !self->started() && !self->active();
+
+ const char* const tp_name = Py_TYPE(self)->tp_name;
+
+ if (_green_not_dead(self)) {
+ /* XXX: The otid= is almost useless because you can't correlate it to
+ any thread identifier exposed to Python. We could use
+ PyThreadState_GET()->thread_id, but we'd need to save that in the
+ greenlet, or save the whole PyThreadState object itself.
+
+ As it stands, it's only useful for identifying greenlets from the same thread.
+ */
+ const char* state_in_thread;
+ if (self->was_running_in_dead_thread()) {
+ // The thread it was running in is dead!
+ // This can happen, especially at interpreter shut down.
+ // It complicates debugging output because it may be
+ // impossible to access the current thread state at that
+ // time. Thus, don't access the current thread state.
+ state_in_thread = " (thread exited)";
+ }
+ else {
+ state_in_thread = GET_THREAD_STATE().state().is_current(self)
+ ? " current"
+ : (self->started() ? " suspended" : "");
+ }
+ result = PyUnicode_FromFormat(
+ "<%s object at %p (otid=%p)%s%s%s%s>",
+ tp_name,
+ self.borrow_o(),
+ self->thread_state(),
+ state_in_thread,
+ self->active() ? " active" : "",
+ never_started ? " pending" : " started",
+ self->main() ? " main" : ""
+ );
+ }
+ else {
+ result = PyUnicode_FromFormat(
+ "<%s object at %p (otid=%p) %sdead>",
+ tp_name,
+ self.borrow_o(),
+ self->thread_state(),
+ self->was_running_in_dead_thread()
+ ? "(thread exited) "
+ : ""
+ );
+ }
+
+ return result;
+}
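+
+// For reference, the shapes this produces (addresses elided):
+//
+// <greenlet.greenlet object at 0x... (otid=0x...) current active started main>
+// <greenlet.greenlet object at 0x... (otid=0x...) suspended active started>
+// <greenlet.greenlet object at 0x... (otid=0x...) pending>
+// <greenlet.greenlet object at 0x... (otid=0x...) dead>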
+
+
+static PyMethodDef green_methods[] = {
+ {
+ .ml_name="switch",
+ .ml_meth=reinterpret_cast<PyCFunction>(green_switch),
+ .ml_flags=METH_VARARGS | METH_KEYWORDS,
+ .ml_doc=green_switch_doc
+ },
+ {.ml_name="throw", .ml_meth=(PyCFunction)green_throw, .ml_flags=METH_VARARGS, .ml_doc=green_throw_doc},
+ {.ml_name="__getstate__", .ml_meth=(PyCFunction)green_getstate, .ml_flags=METH_NOARGS, .ml_doc=NULL},
+ {.ml_name=NULL, .ml_meth=NULL} /* sentinel */
+};
+
+static PyGetSetDef green_getsets[] = {
+ /* name, getter, setter, doc, context pointer */
+ {.name="__dict__", .get=(getter)green_getdict, .set=(setter)green_setdict},
+ {.name="run", .get=(getter)green_getrun, .set=(setter)green_setrun},
+ {.name="parent", .get=(getter)green_getparent, .set=(setter)green_setparent},
+ {.name="gr_frame", .get=(getter)green_getframe },
+ {
+ .name="gr_context",
+ .get=(getter)green_getcontext,
+ .set=(setter)green_setcontext
+ },
+ {.name="dead", .get=(getter)green_getdead},
+ {.name="_stack_saved", .get=(getter)green_get_stack_saved},
+ {.name=NULL}
+};
+
+static PyMemberDef green_members[] = {
+ {.name=NULL}
+};
+
+static PyNumberMethods green_as_number = {
+ .nb_bool=(inquiry)green_bool,
+};
+
+
+PyTypeObject PyGreenlet_Type = {
+ .ob_base=PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name="greenlet.greenlet", /* tp_name */
+ .tp_basicsize=sizeof(PyGreenlet), /* tp_basicsize */
+ /* methods */
+ .tp_dealloc=(destructor)green_dealloc, /* tp_dealloc */
+ .tp_repr=(reprfunc)green_repr, /* tp_repr */
+ .tp_as_number=&green_as_number, /* tp_as_number */
+ .tp_flags=G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+ .tp_doc="greenlet(run=None, parent=None) -> greenlet\n\n"
+ "Creates a new greenlet object (without running it).\n\n"
+ " - *run* -- The callable to invoke.\n"
+ " - *parent* -- The parent greenlet. The default is the current "
+ "greenlet.", /* tp_doc */
+ .tp_traverse=(traverseproc)green_traverse, /* tp_traverse */
+ .tp_clear=(inquiry)green_clear, /* tp_clear */
+ .tp_weaklistoffset=offsetof(PyGreenlet, weakreflist), /* tp_weaklistoffset */
+
+ .tp_methods=green_methods, /* tp_methods */
+ .tp_members=green_members, /* tp_members */
+ .tp_getset=green_getsets, /* tp_getset */
+ .tp_dictoffset=offsetof(PyGreenlet, dict), /* tp_dictoffset */
+ .tp_init=(initproc)green_init, /* tp_init */
+ .tp_alloc=PyType_GenericAlloc, /* tp_alloc */
+ .tp_new=(newfunc)green_new, /* tp_new */
+ .tp_free=PyObject_GC_Del, /* tp_free */
+ .tp_is_gc=(inquiry)green_is_gc, /* tp_is_gc */
+};
+
+#endif
+
+// Local Variables:
+// flycheck-clang-include-path: ("/opt/local/Library/Frameworks/Python.framework/Versions/3.8/include/python3.8")
+// End:
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/PyGreenlet.hpp b/tapdown/lib/python3.11/site-packages/greenlet/PyGreenlet.hpp
new file mode 100644
index 0000000..df6cd80
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/PyGreenlet.hpp
@@ -0,0 +1,35 @@
+#ifndef PYGREENLET_HPP
+#define PYGREENLET_HPP
+
+
+#include "greenlet.h"
+#include "greenlet_compiler_compat.hpp"
+#include "greenlet_refs.hpp"
+
+
+using greenlet::refs::OwnedGreenlet;
+using greenlet::refs::BorrowedGreenlet;
+using greenlet::refs::BorrowedObject;
+using greenlet::refs::OwnedObject;
+using greenlet::refs::PyErrPieces;
+
+
+// XXX: This doesn't really belong here; it's not a Python slot.
+static OwnedObject internal_green_throw(BorrowedGreenlet self, PyErrPieces& err_pieces);
+
+static PyGreenlet* green_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds));
+static int green_clear(PyGreenlet* self);
+static int green_init(PyGreenlet* self, PyObject* args, PyObject* kwargs);
+static int green_setparent(PyGreenlet* self, PyObject* nparent, void* UNUSED(context));
+static int green_setrun(PyGreenlet* self, PyObject* nrun, void* UNUSED(context));
+static int green_traverse(PyGreenlet* self, visitproc visit, void* arg);
+static void green_dealloc(PyGreenlet* self);
+static PyObject* green_getparent(PyGreenlet* self, void* UNUSED(context));
+
+static int green_is_gc(PyObject* self);
+static PyObject* green_getdead(PyGreenlet* self, void* UNUSED(context));
+static PyObject* green_getrun(PyGreenlet* self, void* UNUSED(context));
+static int green_setcontext(PyGreenlet* self, PyObject* nctx, void* UNUSED(context));
+static PyObject* green_getframe(PyGreenlet* self, void* UNUSED(context));
+static PyObject* green_repr(PyGreenlet* self);
+#endif
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/PyGreenletUnswitchable.cpp b/tapdown/lib/python3.11/site-packages/greenlet/PyGreenletUnswitchable.cpp
new file mode 100644
index 0000000..1b768ee
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/PyGreenletUnswitchable.cpp
@@ -0,0 +1,147 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ Implementation of the Python slots for PyGreenletUnswitchable_Type
+*/
+#ifndef PY_GREENLET_UNSWITCHABLE_CPP
+#define PY_GREENLET_UNSWITCHABLE_CPP
+
+
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include "structmember.h" // PyMemberDef
+
+#include "greenlet_internal.hpp"
+// Code after this point can assume access to things declared in stdint.h,
+// including the fixed-width types. This goes for the platform-specific switch functions
+// as well.
+#include "greenlet_refs.hpp"
+#include "greenlet_slp_switch.hpp"
+
+#include "greenlet_thread_support.hpp"
+#include "TGreenlet.hpp"
+
+#include "TGreenlet.cpp"
+#include "TGreenletGlobals.cpp"
+#include "TThreadStateDestroy.cpp"
+
+
+using greenlet::LockGuard;
+using greenlet::LockInitError;
+using greenlet::PyErrOccurred;
+using greenlet::Require;
+
+using greenlet::g_handle_exit;
+using greenlet::single_result;
+
+using greenlet::Greenlet;
+using greenlet::UserGreenlet;
+using greenlet::MainGreenlet;
+using greenlet::BrokenGreenlet;
+using greenlet::ThreadState;
+using greenlet::PythonState;
+
+
+#include "PyGreenlet.hpp"
+
+static PyGreenlet*
+green_unswitchable_new(PyTypeObject* type, PyObject* UNUSED(args), PyObject* UNUSED(kwds))
+{
+ PyGreenlet* o =
+ (PyGreenlet*)PyBaseObject_Type.tp_new(type, mod_globs->empty_tuple, mod_globs->empty_dict);
+ if (o) {
+ new BrokenGreenlet(o, GET_THREAD_STATE().state().borrow_current());
+ assert(Py_REFCNT(o) == 1);
+ }
+ return o;
+}
+
+static PyObject*
+green_unswitchable_getforce(PyGreenlet* self, void* UNUSED(context))
+{
+ BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
+ return PyBool_FromLong(broken->_force_switch_error);
+}
+
+static int
+green_unswitchable_setforce(PyGreenlet* self, PyObject* nforce, void* UNUSED(context))
+{
+ if (!nforce) {
+ PyErr_SetString(
+ PyExc_AttributeError,
+ "Cannot delete force_switch_error"
+ );
+ return -1;
+ }
+ BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
+ int is_true = PyObject_IsTrue(nforce);
+ if (is_true == -1) {
+ return -1;
+ }
+ broken->_force_switch_error = is_true;
+ return 0;
+}
+
+static PyObject*
+green_unswitchable_getforceslp(PyGreenlet* self, void* UNUSED(context))
+{
+ BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
+ return PyBool_FromLong(broken->_force_slp_switch_error);
+}
+
+static int
+green_unswitchable_setforceslp(PyGreenlet* self, PyObject* nforce, void* UNUSED(context))
+{
+ if (!nforce) {
+ PyErr_SetString(
+ PyExc_AttributeError,
+ "Cannot delete force_slp_switch_error"
+ );
+ return -1;
+ }
+ BrokenGreenlet* broken = dynamic_cast<BrokenGreenlet*>(self->pimpl);
+ int is_true = PyObject_IsTrue(nforce);
+ if (is_true == -1) {
+ return -1;
+ }
+ broken->_force_slp_switch_error = is_true;
+ return 0;
+}
+
+static PyGetSetDef green_unswitchable_getsets[] = {
+ /* name, getter, setter, doc, closure (context pointer) */
+ {
+ .name="force_switch_error",
+ .get=(getter)green_unswitchable_getforce,
+ .set=(setter)green_unswitchable_setforce,
+ .doc=NULL
+ },
+ {
+ .name="force_slp_switch_error",
+ .get=(getter)green_unswitchable_getforceslp,
+ .set=(setter)green_unswitchable_setforceslp,
+ .doc=nullptr
+ },
+ {.name=nullptr}
+};
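+
+// These two writable flags exist for the test suite: setting
+// force_switch_error makes BrokenGreenlet::g_switchstack() report
+// failure before attempting the switch (surfacing as a Python
+// exception), while force_slp_switch_error pretends the low-level
+// slp_switch() itself failed, which the engine treats as fatal (see
+// Greenlet::g_switchstack in TGreenlet.cpp).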
+
+PyTypeObject PyGreenletUnswitchable_Type = {
+ .ob_base=PyVarObject_HEAD_INIT(NULL, 0)
+ .tp_name="greenlet._greenlet.UnswitchableGreenlet",
+ .tp_dealloc= (destructor)green_dealloc, /* tp_dealloc */
+ .tp_flags=G_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+ .tp_doc="Undocumented internal class", /* tp_doc */
+ .tp_traverse=(traverseproc)green_traverse, /* tp_traverse */
+ .tp_clear=(inquiry)green_clear, /* tp_clear */
+
+ .tp_getset=green_unswitchable_getsets, /* tp_getset */
+ .tp_base=&PyGreenlet_Type, /* tp_base */
+ .tp_init=(initproc)green_init, /* tp_init */
+ .tp_alloc=PyType_GenericAlloc, /* tp_alloc */
+ .tp_new=(newfunc)green_unswitchable_new, /* tp_new */
+ .tp_free=PyObject_GC_Del, /* tp_free */
+ .tp_is_gc=(inquiry)green_is_gc, /* tp_is_gc */
+};
+
+
+#endif
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/PyModule.cpp b/tapdown/lib/python3.11/site-packages/greenlet/PyModule.cpp
new file mode 100644
index 0000000..6adcb5c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/PyModule.cpp
@@ -0,0 +1,292 @@
+#ifndef PY_MODULE_CPP
+#define PY_MODULE_CPP
+
+#include "greenlet_internal.hpp"
+
+
+#include "TGreenletGlobals.cpp"
+#include "TMainGreenlet.cpp"
+#include "TThreadStateDestroy.cpp"
+
+using greenlet::LockGuard;
+using greenlet::ThreadState;
+
+#ifdef __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunused-function"
+# pragma clang diagnostic ignored "-Wunused-variable"
+#endif
+
+PyDoc_STRVAR(mod_getcurrent_doc,
+ "getcurrent() -> greenlet\n"
+ "\n"
+ "Returns the current greenlet (i.e. the one which called this "
+ "function).\n");
+
+static PyObject*
+mod_getcurrent(PyObject* UNUSED(module))
+{
+ return GET_THREAD_STATE().state().get_current().relinquish_ownership_o();
+}
+
+PyDoc_STRVAR(mod_settrace_doc,
+ "settrace(callback) -> object\n"
+ "\n"
+ "Sets a new tracing function and returns the previous one.\n");
+static PyObject*
+mod_settrace(PyObject* UNUSED(module), PyObject* args)
+{
+ PyArgParseParam tracefunc;
+ if (!PyArg_ParseTuple(args, "O", &tracefunc)) {
+ return NULL;
+ }
+ ThreadState& state = GET_THREAD_STATE();
+ OwnedObject previous = state.get_tracefunc();
+ if (!previous) {
+ previous = Py_None;
+ }
+
+ state.set_tracefunc(tracefunc);
+
+ return previous.relinquish_ownership();
+}
+
+PyDoc_STRVAR(mod_gettrace_doc,
+ "gettrace() -> object\n"
+ "\n"
+ "Returns the currently set tracing function, or None.\n");
+
+static PyObject*
+mod_gettrace(PyObject* UNUSED(module))
+{
+ OwnedObject tracefunc = GET_THREAD_STATE().state().get_tracefunc();
+ if (!tracefunc) {
+ tracefunc = Py_None;
+ }
+ return tracefunc.relinquish_ownership();
+}
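+
+// The trace callback installed via settrace() above is invoked as
+// ``callback(event, (origin, target))``, where event is "switch" or
+// "throw"; see Greenlet::g_calltrace in TGreenlet.cpp. Raising from
+// the callback uninstalls it.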
+
+
+
+PyDoc_STRVAR(mod_set_thread_local_doc,
+ "set_thread_local(key, value) -> None\n"
+ "\n"
+ "Set a value in the current thread-local dictionary. Debugging only.\n");
+
+static PyObject*
+mod_set_thread_local(PyObject* UNUSED(module), PyObject* args)
+{
+ PyArgParseParam key;
+ PyArgParseParam value;
+ PyObject* result = NULL;
+
+ if (PyArg_UnpackTuple(args, "set_thread_local", 2, 2, &key, &value)) {
+ if(PyDict_SetItem(
+ PyThreadState_GetDict(), // borrow
+ key,
+ value) == 0 ) {
+ // success
+ Py_INCREF(Py_None);
+ result = Py_None;
+ }
+ }
+ return result;
+}
+
+PyDoc_STRVAR(mod_get_pending_cleanup_count_doc,
+ "get_pending_cleanup_count() -> Integer\n"
+ "\n"
+ "Get the number of greenlet cleanup operations pending. Testing only.\n");
+
+
+static PyObject*
+mod_get_pending_cleanup_count(PyObject* UNUSED(module))
+{
+ LockGuard cleanup_lock(*mod_globs->thread_states_to_destroy_lock);
+ return PyLong_FromSize_t(mod_globs->thread_states_to_destroy.size());
+}
+
+PyDoc_STRVAR(mod_get_total_main_greenlets_doc,
+ "get_total_main_greenlets() -> Integer\n"
+ "\n"
+ "Quickly return the number of main greenlets that exist. Testing only.\n");
+
+static PyObject*
+mod_get_total_main_greenlets(PyObject* UNUSED(module))
+{
+ return PyLong_FromSize_t(G_TOTAL_MAIN_GREENLETS);
+}
+
+
+
+PyDoc_STRVAR(mod_get_clocks_used_doing_optional_cleanup_doc,
+ "get_clocks_used_doing_optional_cleanup() -> Integer\n"
+ "\n"
+ "Get the number of clock ticks the program has used doing optional "
+ "greenlet cleanup.\n"
+ "Beginning in greenlet 2.0, greenlet tries to find and dispose of greenlets\n"
+ "that leaked after a thread exited. This requires invoking Python's garbage collector,\n"
+ "which may have a performance cost proportional to the number of live objects.\n"
+ "This function returns the amount of processor time\n"
+ "greenlet has used to do this. In programs that run with very large amounts of live\n"
+ "objects, this metric can be used to decide whether the cost of doing this cleanup\n"
+ "is worth the memory leak being corrected. If not, you can disable the cleanup\n"
+ "using ``enable_optional_cleanup(False)``.\n"
+ "The units are arbitrary and can only be compared to themselves (similarly to ``time.clock()``);\n"
+ "for example, to see how it scales with your heap. You can attempt to convert them into seconds\n"
+ "by dividing by the value of CLOCKS_PER_SEC."
+ "If cleanup has been disabled, returns None."
+ "\n"
+ "This is an implementation specific, provisional API. It may be changed or removed\n"
+ "in the future.\n"
+ ".. versionadded:: 2.0"
+ );
+static PyObject*
+mod_get_clocks_used_doing_optional_cleanup(PyObject* UNUSED(module))
+{
+ std::clock_t& clocks = ThreadState::clocks_used_doing_gc();
+
+ if (clocks == std::clock_t(-1)) {
+ Py_RETURN_NONE;
+ }
+ // This might not actually work on some implementations; clock_t
+ // is an opaque type.
+ return PyLong_FromSsize_t(clocks);
+}
+
+PyDoc_STRVAR(mod_enable_optional_cleanup_doc,
+ "mod_enable_optional_cleanup(bool) -> None\n"
+ "\n"
+ "Enable or disable optional cleanup operations.\n"
+ "See ``get_clocks_used_doing_optional_cleanup()`` for details.\n"
+ );
+static PyObject*
+mod_enable_optional_cleanup(PyObject* UNUSED(module), PyObject* flag)
+{
+ int is_true = PyObject_IsTrue(flag);
+ if (is_true == -1) {
+ return nullptr;
+ }
+
+ std::clock_t& clocks = ThreadState::clocks_used_doing_gc();
+ if (is_true) {
+ // If we already have a value, we don't want to lose it.
+ if (clocks == std::clock_t(-1)) {
+ clocks = 0;
+ }
+ }
+ else {
+ clocks = std::clock_t(-1);
+ }
+ Py_RETURN_NONE;
+}
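+
+// Sketch of the intended usage pattern from Python (names as exported
+// by this module): call get_clocks_used_doing_optional_cleanup(),
+// divide by the platform's CLOCKS_PER_SEC (from C's <ctime>) to
+// approximate seconds, and call enable_optional_cleanup(False) if the
+// overhead outweighs the leak being fixed.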
+
+
+
+
+#if !GREENLET_PY313
+PyDoc_STRVAR(mod_get_tstate_trash_delete_nesting_doc,
+ "get_tstate_trash_delete_nesting() -> Integer\n"
+ "\n"
+ "Return the 'trash can' nesting level. Testing only.\n");
+static PyObject*
+mod_get_tstate_trash_delete_nesting(PyObject* UNUSED(module))
+{
+ PyThreadState* tstate = PyThreadState_GET();
+
+#if GREENLET_PY312
+ return PyLong_FromLong(tstate->trash.delete_nesting);
+#else
+ return PyLong_FromLong(tstate->trash_delete_nesting);
+#endif
+}
+#endif
+
+
+
+
+static PyMethodDef GreenMethods[] = {
+ {
+ .ml_name="getcurrent",
+ .ml_meth=(PyCFunction)mod_getcurrent,
+ .ml_flags=METH_NOARGS,
+ .ml_doc=mod_getcurrent_doc
+ },
+ {
+ .ml_name="settrace",
+ .ml_meth=(PyCFunction)mod_settrace,
+ .ml_flags=METH_VARARGS,
+ .ml_doc=mod_settrace_doc
+ },
+ {
+ .ml_name="gettrace",
+ .ml_meth=(PyCFunction)mod_gettrace,
+ .ml_flags=METH_NOARGS,
+ .ml_doc=mod_gettrace_doc
+ },
+ {
+ .ml_name="set_thread_local",
+ .ml_meth=(PyCFunction)mod_set_thread_local,
+ .ml_flags=METH_VARARGS,
+ .ml_doc=mod_set_thread_local_doc
+ },
+ {
+ .ml_name="get_pending_cleanup_count",
+ .ml_meth=(PyCFunction)mod_get_pending_cleanup_count,
+ .ml_flags=METH_NOARGS,
+ .ml_doc=mod_get_pending_cleanup_count_doc
+ },
+ {
+ .ml_name="get_total_main_greenlets",
+ .ml_meth=(PyCFunction)mod_get_total_main_greenlets,
+ .ml_flags=METH_NOARGS,
+ .ml_doc=mod_get_total_main_greenlets_doc
+ },
+ {
+ .ml_name="get_clocks_used_doing_optional_cleanup",
+ .ml_meth=(PyCFunction)mod_get_clocks_used_doing_optional_cleanup,
+ .ml_flags=METH_NOARGS,
+ .ml_doc=mod_get_clocks_used_doing_optional_cleanup_doc
+ },
+ {
+ .ml_name="enable_optional_cleanup",
+ .ml_meth=(PyCFunction)mod_enable_optional_cleanup,
+ .ml_flags=METH_O,
+ .ml_doc=mod_enable_optional_cleanup_doc
+ },
+#if !GREENLET_PY313
+ {
+ .ml_name="get_tstate_trash_delete_nesting",
+ .ml_meth=(PyCFunction)mod_get_tstate_trash_delete_nesting,
+ .ml_flags=METH_NOARGS,
+ .ml_doc=mod_get_tstate_trash_delete_nesting_doc
+ },
+#endif
+ {.ml_name=NULL, .ml_meth=NULL} /* Sentinel */
+};
+
+static const char* const copy_on_greentype[] = {
+ "getcurrent",
+ "error",
+ "GreenletExit",
+ "settrace",
+ "gettrace",
+ NULL
+};
+
+static struct PyModuleDef greenlet_module_def = {
+ .m_base=PyModuleDef_HEAD_INIT,
+ .m_name="greenlet._greenlet",
+ .m_doc=NULL,
+ .m_size=-1,
+ .m_methods=GreenMethods,
+};
+
+
+#endif
+
+#ifdef __clang__
+# pragma clang diagnostic pop
+#endif
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TBrokenGreenlet.cpp b/tapdown/lib/python3.11/site-packages/greenlet/TBrokenGreenlet.cpp
new file mode 100644
index 0000000..7e9ab5b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TBrokenGreenlet.cpp
@@ -0,0 +1,45 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ * Implementation of greenlet::BrokenGreenlet.
+ *
+ * Format with:
+ * clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+void* BrokenGreenlet::operator new(size_t UNUSED(count))
+{
+ return allocator.allocate(1);
+}
+
+
+void BrokenGreenlet::operator delete(void* ptr)
+{
+ return allocator.deallocate(static_cast<BrokenGreenlet*>(ptr),
+ 1);
+}
+
+greenlet::PythonAllocator<greenlet::BrokenGreenlet> greenlet::BrokenGreenlet::allocator;
+
+bool
+BrokenGreenlet::force_slp_switch_error() const noexcept
+{
+ return this->_force_slp_switch_error;
+}
+
+UserGreenlet::switchstack_result_t BrokenGreenlet::g_switchstack(void)
+{
+ if (this->_force_switch_error) {
+ return switchstack_result_t(-1);
+ }
+ return UserGreenlet::g_switchstack();
+}
+
+}; // namespace greenlet
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TExceptionState.cpp b/tapdown/lib/python3.11/site-packages/greenlet/TExceptionState.cpp
new file mode 100644
index 0000000..08a94ae
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TExceptionState.cpp
@@ -0,0 +1,62 @@
+#ifndef GREENLET_EXCEPTION_STATE_CPP
+#define GREENLET_EXCEPTION_STATE_CPP
+
+#include <Python.h>
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+
+ExceptionState::ExceptionState()
+{
+ this->clear();
+}
+
+void ExceptionState::operator<<(const PyThreadState *const tstate) noexcept
+{
+ this->exc_info = tstate->exc_info;
+ this->exc_state = tstate->exc_state;
+}
+
+void ExceptionState::operator>>(PyThreadState *const tstate) noexcept
+{
+ tstate->exc_state = this->exc_state;
+ tstate->exc_info =
+ this->exc_info ? this->exc_info : &tstate->exc_state;
+ this->clear();
+}
+
+void ExceptionState::clear() noexcept
+{
+ this->exc_info = nullptr;
+ this->exc_state.exc_value = nullptr;
+#if !GREENLET_PY311
+ this->exc_state.exc_type = nullptr;
+ this->exc_state.exc_traceback = nullptr;
+#endif
+ this->exc_state.previous_item = nullptr;
+}
+
+int ExceptionState::tp_traverse(visitproc visit, void* arg) noexcept
+{
+ Py_VISIT(this->exc_state.exc_value);
+#if !GREENLET_PY311
+ Py_VISIT(this->exc_state.exc_type);
+ Py_VISIT(this->exc_state.exc_traceback);
+#endif
+ return 0;
+}
+
+void ExceptionState::tp_clear() noexcept
+{
+ Py_CLEAR(this->exc_state.exc_value);
+#if !GREENLET_PY311
+ Py_CLEAR(this->exc_state.exc_type);
+ Py_CLEAR(this->exc_state.exc_traceback);
+#endif
+}
+
+
+}; // namespace greenlet
+
+#endif // GREENLET_EXCEPTION_STATE_CPP
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TGreenlet.cpp b/tapdown/lib/python3.11/site-packages/greenlet/TGreenlet.cpp
new file mode 100644
index 0000000..d12722b
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TGreenlet.cpp
@@ -0,0 +1,719 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ * Implementation of greenlet::Greenlet.
+ *
+ * Format with:
+ * clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+#ifndef TGREENLET_CPP
+#define TGREENLET_CPP
+#include "greenlet_internal.hpp"
+#include "TGreenlet.hpp"
+
+
+#include "TGreenletGlobals.cpp"
+#include "TThreadStateDestroy.cpp"
+
+namespace greenlet {
+
+Greenlet::Greenlet(PyGreenlet* p)
+ : Greenlet(p, StackState())
+{
+}
+
+Greenlet::Greenlet(PyGreenlet* p, const StackState& initial_stack)
+ : _self(p), stack_state(initial_stack)
+{
+ assert(p->pimpl == nullptr);
+ p->pimpl = this;
+}
+
+Greenlet::~Greenlet()
+{
+ // XXX: Can't do this. tp_clear is a virtual function, and by the
+ // time we're here, we've sliced off our child classes.
+ //this->tp_clear();
+ this->_self->pimpl = nullptr;
+}
+
+bool
+Greenlet::force_slp_switch_error() const noexcept
+{
+ return false;
+}
+
+void
+Greenlet::release_args()
+{
+ this->switch_args.CLEAR();
+}
+
+/**
+ * CAUTION: This will allocate memory and may trigger garbage
+ * collection and arbitrary Python code.
+ */
+OwnedObject
+Greenlet::throw_GreenletExit_during_dealloc(const ThreadState& UNUSED(current_thread_state))
+{
+ // If we're killed because we lost all references in the
+ // middle of a switch, that's ok. Don't reset the args/kwargs,
+ // we still want to pass them to the parent.
+ PyErr_SetString(mod_globs->PyExc_GreenletExit,
+ "Killing the greenlet because all references have vanished.");
+ // To get here it had to have run before
+ return this->g_switch();
+}
+
+inline void
+Greenlet::slp_restore_state() noexcept
+{
+#ifdef SLP_BEFORE_RESTORE_STATE
+ SLP_BEFORE_RESTORE_STATE();
+#endif
+ this->stack_state.copy_heap_to_stack(
+ this->thread_state()->borrow_current()->stack_state);
+}
+
+
+inline int
+Greenlet::slp_save_state(char *const stackref) noexcept
+{
+ // XXX: This used to happen in the middle, before saving, but
+ // after finding the next owner. Does that matter? This is
+ // only defined for Sparc/GCC where it flushes register
+ // windows to the stack (I think)
+#ifdef SLP_BEFORE_SAVE_STATE
+ SLP_BEFORE_SAVE_STATE();
+#endif
+ return this->stack_state.copy_stack_to_heap(stackref,
+ this->thread_state()->borrow_current()->stack_state);
+}
+
+/**
+ * CAUTION: This will allocate memory and may trigger garbage
+ * collection and arbitrary Python code.
+ */
+OwnedObject
+Greenlet::on_switchstack_or_initialstub_failure(
+ Greenlet* target,
+ const Greenlet::switchstack_result_t& err,
+ const bool target_was_me,
+ const bool was_initial_stub)
+{
+ // If we get here, either g_initialstub()
+ // failed, or g_switchstack() failed. Either one of those
+ // cases SHOULD leave us in the original greenlet with a valid stack.
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ was_initial_stub
+ ? "Failed to switch stacks into a greenlet for the first time."
+ : "Failed to switch stacks into a running greenlet.");
+ }
+ this->release_args();
+
+ if (target && !target_was_me) {
+ target->murder_in_place();
+ }
+
+ assert(!err.the_new_current_greenlet);
+ assert(!err.origin_greenlet);
+ return OwnedObject();
+
+}
+
+OwnedGreenlet
+Greenlet::g_switchstack_success() noexcept
+{
+ PyThreadState* tstate = PyThreadState_GET();
+ // restore the saved state
+ this->python_state >> tstate;
+ this->exception_state >> tstate;
+
+ // The thread state hasn't been changed yet.
+ ThreadState* thread_state = this->thread_state();
+ OwnedGreenlet result(thread_state->get_current());
+ thread_state->set_current(this->self());
+ //assert(thread_state->borrow_current().borrow() == this->_self);
+ return result;
+}
+
+Greenlet::switchstack_result_t
+Greenlet::g_switchstack(void)
+{
+ // if any of these assertions fail, it's likely because we
+ // switched away and tried to switch back to us. Early stages of
+ // switching are not reentrant because we re-use ``this->args()``.
+ // Switching away would happen if we trigger a garbage collection
+ // (by just using some Python APIs that happen to allocate Python
+ // objects) and some garbage had weakref callbacks or __del__ that
+ // switches (people don't write code like that by hand, but with
+ // gevent it's possible without realizing it)
+ assert(this->args() || PyErr_Occurred());
+ { /* save state */
+ if (this->thread_state()->is_current(this->self())) {
+ // Hmm, nothing to do.
+ // TODO: Does this bypass trace events that are
+ // important?
+ return switchstack_result_t(0,
+ this, this->thread_state()->borrow_current());
+ }
+ BorrowedGreenlet current = this->thread_state()->borrow_current();
+ PyThreadState* tstate = PyThreadState_GET();
+
+ current->python_state << tstate;
+ current->exception_state << tstate;
+ this->python_state.will_switch_from(tstate);
+ switching_thread_state = this;
+ current->expose_frames();
+ }
+ assert(this->args() || PyErr_Occurred());
+ // If this is the first switch into a greenlet, this will
+ // return twice, once with 1 in the new greenlet, once with 0
+ // in the origin.
+ int err;
+ if (this->force_slp_switch_error()) {
+ err = -1;
+ }
+ else {
+ err = slp_switch();
+ }
+
+ if (err < 0) { /* error */
+ // Tested by
+ // test_greenlet.TestBrokenGreenlets.test_failed_to_slp_switch_into_running
+ //
+ // It's not clear if it's worth trying to clean up and
+ // continue here. Failing to switch stacks is a big deal which
+ // may not be recoverable (who knows what state the stack is in).
+ // Also, we've stolen references in preparation for calling
+ // ``g_switchstack_success()`` and we don't have a clean
+ // mechanism for backing that all out.
+ Py_FatalError("greenlet: Failed low-level slp_switch(). The stack is probably corrupt.");
+ }
+
+ // No stack-based variables are valid anymore.
+
+ // But the global is volatile so we can reload it without the
+ // compiler caching it from earlier.
+ Greenlet* greenlet_that_switched_in = switching_thread_state; // aka this
+ switching_thread_state = nullptr;
+ // If stack variables were still valid, we would assert:
+ // assert(this == greenlet_that_switched_in);
+
+ // g_switchstack_success() is where we restore the exception state,
+ // etc. It returns the origin greenlet because it's convenient.
+
+ OwnedGreenlet origin = greenlet_that_switched_in->g_switchstack_success();
+ assert(greenlet_that_switched_in->args() || PyErr_Occurred());
+ return switchstack_result_t(err, greenlet_that_switched_in, origin);
+}
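+
+// A mental model for the above: think setjmp()/longjmp() plus stack
+// copying. slp_switch() effectively "returns twice" on the first entry
+// into a greenlet -- with 1 in the newly entered greenlet and 0 in the
+// origin -- which is why nothing after the call may trust stack-based
+// variables, and why the volatile global is used to rediscover "this".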
+
+
+inline void
+Greenlet::check_switch_allowed() const
+{
+ // TODO: Make this take a parameter of the current greenlet,
+ // or current main greenlet, to make the check for
+ // cross-thread switching cheaper. Surely somewhere up the
+ // call stack we've already accessed the thread local variable.
+
+ // We expect to always have a main greenlet now; accessing the thread state
+ // created it. However, if we get here and cleanup has already
+ // begun because we're a greenlet that was running in a
+ // (now dead) thread, these invariants will not hold true. In
+ // fact, accessing `this->thread_state` may not even be possible.
+
+ // If the thread this greenlet was running in is dead,
+ // we'll still have a reference to a main greenlet, but the
+ // thread state pointer we have is bogus.
+ // TODO: Give the objects an API to determine if they belong
+ // to a dead thread.
+
+ const BorrowedMainGreenlet main_greenlet = this->find_main_greenlet_in_lineage();
+
+ if (!main_greenlet) {
+ throw PyErrOccurred(mod_globs->PyExc_GreenletError,
+ "cannot switch to a garbage collected greenlet");
+ }
+
+ if (!main_greenlet->thread_state()) {
+ throw PyErrOccurred(mod_globs->PyExc_GreenletError,
+ "cannot switch to a different thread (which happens to have exited)");
+ }
+
+ // The main greenlet we found was from the .parent lineage.
+ // That may or may not have any relationship to the main
+ // greenlet of the running thread. We can't actually access
+ // our this->thread_state members to try to check that,
+ // because it could be in the process of getting destroyed,
+ // but setting the main_greenlet->thread_state member to NULL
+ // may not be visible yet. So we need to check against the
+ // current thread state (once the cheaper checks are out of
+ // the way)
+ const BorrowedMainGreenlet current_main_greenlet = GET_THREAD_STATE().state().borrow_main_greenlet();
+ if (
+ // lineage main greenlet is not this thread's greenlet
+ current_main_greenlet != main_greenlet
+ || (
+ // attached to some thread
+ this->main_greenlet()
+ // XXX: Same condition as above. Was this supposed to be
+ // this->main_greenlet()?
+ && current_main_greenlet != main_greenlet)
+ // switching into a known dead thread (XXX: which, if we get here,
+ // is bad, because we just accessed the thread state, which is
+ // gone!)
+ || (!current_main_greenlet->thread_state())) {
+ // CAUTION: This may trigger memory allocations, gc, and
+ // arbitrary Python code.
+ throw PyErrOccurred(
+ mod_globs->PyExc_GreenletError,
+ "Cannot switch to a different thread\n\tCurrent: %R\n\tExpected: %R",
+ current_main_greenlet, main_greenlet);
+ }
+}
+
+const OwnedObject
+Greenlet::context() const
+{
+ using greenlet::PythonStateContext;
+ OwnedObject result;
+
+ if (this->is_currently_running_in_some_thread()) {
+ /* Currently running greenlet: context is stored in the thread state,
+ not the greenlet object. */
+ if (GET_THREAD_STATE().state().is_current(this->self())) {
+ result = PythonStateContext::context(PyThreadState_GET());
+ }
+ else {
+ throw ValueError(
+ "cannot get context of a "
+ "greenlet that is running in a different thread");
+ }
+ }
+ else {
+ /* Greenlet is not running: just return context. */
+ result = this->python_state.context();
+ }
+ if (!result) {
+ result = OwnedObject::None();
+ }
+ return result;
+}
+
+
+void
+Greenlet::context(BorrowedObject given)
+{
+ using greenlet::PythonStateContext;
+ if (!given) {
+ throw AttributeError("can't delete context attribute");
+ }
+ if (given.is_None()) {
+ /* "Empty context" is stored as NULL, not None. */
+ given = nullptr;
+ }
+
+ //checks type, incrs refcnt
+ greenlet::refs::OwnedContext context(given);
+ PyThreadState* tstate = PyThreadState_GET();
+
+ if (this->is_currently_running_in_some_thread()) {
+ if (!GET_THREAD_STATE().state().is_current(this->self())) {
+ throw ValueError("cannot set context of a greenlet"
+ " that is running in a different thread");
+ }
+
+ /* Currently running greenlet: context is stored in the thread state,
+ not the greenlet object. */
+ OwnedObject octx = OwnedObject::consuming(PythonStateContext::context(tstate));
+ PythonStateContext::context(tstate, context.relinquish_ownership());
+ }
+ else {
+ /* Greenlet is not running: just set context. Note that the
+ greenlet may be dead.*/
+ this->python_state.context() = context;
+ }
+}
+
+/**
+ * CAUTION: May invoke arbitrary Python code.
+ *
+ * Figure out what the result of ``greenlet.switch(arg, kwargs)``
+ * should be and transfers ownership of it to the left-hand-side.
+ *
+ * If switch() was just passed an arg tuple, then we'll just return that.
+ * If only keyword arguments were passed, then we'll pass the keyword
+ * argument dict. Otherwise, we'll create a tuple of (args, kwargs) and
+ * return both.
+ *
+ * CAUTION: This may allocate a new tuple object, which may
+ * cause the Python garbage collector to run, which in turn may
+ * run arbitrary Python code that switches.
+ */
+OwnedObject& operator<<=(OwnedObject& lhs, greenlet::SwitchingArgs& rhs) noexcept
+{
+ // Because this may invoke arbitrary Python code, which could
+ // result in switching back to us, we need to get the
+ // arguments locally on the stack.
+ assert(rhs);
+ OwnedObject args = rhs.args();
+ OwnedObject kwargs = rhs.kwargs();
+ rhs.CLEAR();
+ // We shouldn't be called twice for the same switch.
+ assert(args || kwargs);
+ assert(!rhs);
+
+ if (!kwargs) {
+ lhs = args;
+ }
+ else if (!PyDict_Size(kwargs.borrow())) {
+ lhs = args;
+ }
+ else if (!PySequence_Length(args.borrow())) {
+ lhs = kwargs;
+ }
+ else {
+ // PyTuple_Pack allocates memory, may GC, may run arbitrary
+ // Python code.
+ lhs = OwnedObject::consuming(PyTuple_Pack(2, args.borrow(), kwargs.borrow()));
+ }
+ return lhs;
+}
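+
+// Spelled out, the common cases of the protocol this implements,
+// (args, kwargs) -> result:
+//
+// (a,) , {} or NULL -> (a,)
+// (a, b), {} or NULL -> (a, b)
+// () , {k: v} -> {k: v}
+// (a,) , {k: v} -> ((a,), {k: v})
+//
+// A one-element result is later unwrapped by single_result(), which is
+// how a bare value round-trips through switch().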
+
+static OwnedObject
+g_handle_exit(const OwnedObject& greenlet_result)
+{
+ if (!greenlet_result && mod_globs->PyExc_GreenletExit.PyExceptionMatches()) {
+ /* catch and ignore GreenletExit */
+ PyErrFetchParam val;
+ PyErr_Fetch(PyErrFetchParam(), val, PyErrFetchParam());
+ if (!val) {
+ return OwnedObject::None();
+ }
+ return OwnedObject(val);
+ }
+
+ if (greenlet_result) {
+ // package the result into a 1-tuple
+ // PyTuple_Pack increments the reference of its arguments,
+ // so we always need to decref the greenlet result;
+ // the owner will do that.
+ return OwnedObject::consuming(PyTuple_Pack(1, greenlet_result.borrow()));
+ }
+
+ return OwnedObject();
+}
+
+
+
+/**
+ * May run arbitrary Python code.
+ */
+OwnedObject
+Greenlet::g_switch_finish(const switchstack_result_t& err)
+{
+ assert(err.the_new_current_greenlet == this);
+
+ ThreadState& state = *this->thread_state();
+ // Because calling the trace function could do arbitrary things,
+ // including switching away from this greenlet and then maybe
+ // switching back, we need to capture the arguments now so that
+ // they don't change.
+ OwnedObject result;
+ if (this->args()) {
+ result <<= this->args();
+ }
+ else {
+ assert(PyErr_Occurred());
+ }
+ assert(!this->args());
+ try {
+ // Our only caller handles the bad error case
+ assert(err.status >= 0);
+ assert(state.borrow_current() == this->self());
+ if (OwnedObject tracefunc = state.get_tracefunc()) {
+ assert(result || PyErr_Occurred());
+ g_calltrace(tracefunc,
+ result ? mod_globs->event_switch : mod_globs->event_throw,
+ err.origin_greenlet,
+ this->self());
+ }
+ // The above could have invoked arbitrary Python code, but
+ // it couldn't switch back to this object and *also*
+ // throw an exception, so the args won't have changed.
+
+ if (PyErr_Occurred()) {
+ // We get here if we fell off the end of the run() function
+ // raising an exception. The switch itself was
+ // successful, but the function raised.
+ // valgrind reports that memory allocated here can still
+ // be reached after a test run.
+ throw PyErrOccurred::from_current();
+ }
+ return result;
+ }
+ catch (const PyErrOccurred&) {
+ /* Turn switch errors into switch throws */
+ /* Turn trace errors into switch throws */
+ this->release_args();
+ throw;
+ }
+}
+
+void
+Greenlet::g_calltrace(const OwnedObject& tracefunc,
+ const greenlet::refs::ImmortalEventName& event,
+ const BorrowedGreenlet& origin,
+ const BorrowedGreenlet& target)
+{
+ PyErrPieces saved_exc;
+ try {
+ TracingGuard tracing_guard;
+ // TODO: We have saved the active exception (if any) that's
+ // about to be raised. In the 'throw' case, we could provide
+ // the exception to the tracefunction, which seems very helpful.
+ tracing_guard.CallTraceFunction(tracefunc, event, origin, target);
+ }
+ catch (const PyErrOccurred&) {
+ // In case of exceptions, the trace function is removed,
+ // and any existing exception is replaced with the tracing
+ // exception.
+ GET_THREAD_STATE().state().set_tracefunc(Py_None);
+ throw;
+ }
+
+ saved_exc.PyErrRestore();
+ assert(
+ (event == mod_globs->event_throw && PyErr_Occurred())
+ || (event == mod_globs->event_switch && !PyErr_Occurred())
+ );
+}
+
+void
+Greenlet::murder_in_place()
+{
+ if (this->active()) {
+ assert(!this->is_currently_running_in_some_thread());
+ this->deactivate_and_free();
+ }
+}
+
+inline void
+Greenlet::deactivate_and_free()
+{
+ if (!this->active()) {
+ return;
+ }
+ // Throw away any saved stack.
+ this->stack_state = StackState();
+ assert(!this->stack_state.active());
+ // Throw away any Python references.
+ // We're holding a borrowed reference to the last
+ // frame we executed. Since we borrowed it, the
+ // normal traversal, clear, and dealloc functions
+ // ignore it, meaning it leaks. (The thread state
+ // object can't find it to clear it when that's
+ // deallocated either, because by definition if we
+ // got an object on this list, it wasn't
+ // running and the thread state doesn't have
+ // this frame.)
+ // So here, we *do* clear it.
+ this->python_state.tp_clear(true);
+}
+
+bool
+Greenlet::belongs_to_thread(const ThreadState* thread_state) const
+{
+ if (!this->thread_state() // not running anywhere, or thread
+ // exited
+ || !thread_state) { // same, or there is no thread state.
+ return false;
+ }
+ return true;
+}
+
+
+void
+Greenlet::deallocing_greenlet_in_thread(const ThreadState* current_thread_state)
+{
+ /* Cannot raise an exception to kill the greenlet if
+ it is not running in the same thread! */
+ if (this->belongs_to_thread(current_thread_state)) {
+ assert(current_thread_state);
+ // To get here it had to have run before
+ /* Send the greenlet a GreenletExit exception. */
+
+ // We don't care about the return value, only whether an
+ // exception happened.
+ this->throw_GreenletExit_during_dealloc(*current_thread_state);
+ return;
+ }
+
+ // Not the same thread! Temporarily save the greenlet
+ // into its thread's deleteme list, *if* it exists.
+ // If that thread has already exited, and processed its pending
+ // cleanup, we'll never be able to clean everything up: we won't
+ // be able to raise an exception.
+ // That's mostly OK! Since we can't add it to a list, our refcount
+ // won't increase, and we'll go ahead with the DECREFs later.
+
+ ThreadState *const thread_state = this->thread_state();
+ if (thread_state) {
+ thread_state->delete_when_thread_running(this->self());
+ }
+ else {
+ // The thread is dead, we can't raise an exception.
+ // We need to make it look non-active, though, so that dealloc
+ // finishes killing it.
+ this->deactivate_and_free();
+ }
+ return;
+}
+
+
+int
+Greenlet::tp_traverse(visitproc visit, void* arg)
+{
+
+ int result;
+ if ((result = this->exception_state.tp_traverse(visit, arg)) != 0) {
+ return result;
+ }
+ //XXX: This is ugly. But so is handling everything having to do
+ //with the top frame.
+ bool visit_top_frame = this->was_running_in_dead_thread();
+ // When true, the thread is dead. Our implicit weak reference to the
+ // frame is now all that's left; we consider ourselves to
+ // strongly own it now.
+ if ((result = this->python_state.tp_traverse(visit, arg, visit_top_frame)) != 0) {
+ return result;
+ }
+ return 0;
+}
+
+int
+Greenlet::tp_clear()
+{
+ bool own_top_frame = this->was_running_in_dead_thread();
+ this->exception_state.tp_clear();
+ this->python_state.tp_clear(own_top_frame);
+ return 0;
+}
+
+bool Greenlet::is_currently_running_in_some_thread() const
+{
+ return this->stack_state.active() && !this->python_state.top_frame();
+}
+
+#if GREENLET_PY312
+void GREENLET_NOINLINE(Greenlet::expose_frames)()
+{
+ if (!this->python_state.top_frame()) {
+ return;
+ }
+
+ _PyInterpreterFrame* last_complete_iframe = nullptr;
+ _PyInterpreterFrame* iframe = this->python_state.top_frame()->f_frame;
+ while (iframe) {
+ // We must make a copy before looking at the iframe contents,
+ // since iframe might point to a portion of the greenlet's C stack
+ // that was spilled when switching greenlets.
+ _PyInterpreterFrame iframe_copy;
+ this->stack_state.copy_from_stack(&iframe_copy, iframe, sizeof(*iframe));
+ if (!_PyFrame_IsIncomplete(&iframe_copy)) {
+ // If the iframe were OWNED_BY_CSTACK then it would always be
+ // incomplete. Since it's not incomplete, it's not on the C stack
+ // and we can access it through the original `iframe` pointer
+ // directly. This is important since GetFrameObject might
+ // lazily _create_ the frame object and we don't want the
+ // interpreter to lose track of it.
+ assert(iframe_copy.owner != FRAME_OWNED_BY_CSTACK);
+
+ // We really want to just write:
+ // PyFrameObject* frame = _PyFrame_GetFrameObject(iframe);
+ // but _PyFrame_GetFrameObject calls _PyFrame_MakeAndSetFrameObject
+ // which is not a visible symbol in libpython. The easiest
+ // way to get a public function to call it is using
+ // PyFrame_GetBack, which is defined as follows:
+ // assert(frame != NULL);
+ // assert(!_PyFrame_IsIncomplete(frame->f_frame));
+ // PyFrameObject *back = frame->f_back;
+ // if (back == NULL) {
+ // _PyInterpreterFrame *prev = frame->f_frame->previous;
+ // prev = _PyFrame_GetFirstComplete(prev);
+ // if (prev) {
+ // back = _PyFrame_GetFrameObject(prev);
+ // }
+ // }
+ // return (PyFrameObject*)Py_XNewRef(back);
+ if (!iframe->frame_obj) {
+ PyFrameObject dummy_frame;
+ _PyInterpreterFrame dummy_iframe;
+ dummy_frame.f_back = nullptr;
+ dummy_frame.f_frame = &dummy_iframe;
+ // force the iframe to be considered complete without
+ // needing to check its code object:
+ dummy_iframe.owner = FRAME_OWNED_BY_GENERATOR;
+ dummy_iframe.previous = iframe;
+ assert(!_PyFrame_IsIncomplete(&dummy_iframe));
+ // Drop the returned reference immediately; the iframe
+ // continues to hold a strong reference
+ Py_XDECREF(PyFrame_GetBack(&dummy_frame));
+ assert(iframe->frame_obj);
+ }
+
+ // This is a complete frame, so make the last one of those we saw
+ // point at it, bypassing any incomplete frames (which may have
+ // been on the C stack) in between the two. We're overwriting
+ // last_complete_iframe->previous and need that to be reversible,
+ // so we store the original previous ptr in the frame object
+ // (which we must have created on a previous iteration through
+ // this loop). The frame object has a bunch of storage that is
+ // only used when its iframe is OWNED_BY_FRAME_OBJECT, which only
+ // occurs when the frame object outlives the frame's execution,
+ // which can't have happened yet because the frame is currently
+ // executing as far as the interpreter is concerned. So, we can
+ // reuse it for our own purposes.
+ assert(iframe->owner == FRAME_OWNED_BY_THREAD
+ || iframe->owner == FRAME_OWNED_BY_GENERATOR);
+ if (last_complete_iframe) {
+ assert(last_complete_iframe->frame_obj);
+ memcpy(&last_complete_iframe->frame_obj->_f_frame_data[0],
+ &last_complete_iframe->previous, sizeof(void *));
+ last_complete_iframe->previous = iframe;
+ }
+ last_complete_iframe = iframe;
+ }
+ // Frames that are OWNED_BY_FRAME_OBJECT are linked via the
+ // frame's f_back while all others are linked via the iframe's
+ // previous ptr. Since all the frames we traverse are running
+ // as far as the interpreter is concerned, we don't have to
+ // worry about the OWNED_BY_FRAME_OBJECT case.
+ iframe = iframe_copy.previous;
+ }
+
+ // Give the outermost complete iframe a null previous pointer to
+ // account for any potential incomplete/C-stack iframes between it
+ // and the actual top-of-stack
+ if (last_complete_iframe) {
+ assert(last_complete_iframe->frame_obj);
+ memcpy(&last_complete_iframe->frame_obj->_f_frame_data[0],
+ &last_complete_iframe->previous, sizeof(void *));
+ last_complete_iframe->previous = nullptr;
+ }
+}
+#else
+void Greenlet::expose_frames()
+{
+
+}
+#endif
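+
+// Rough picture of expose_frames() on 3.12+ (illustrative). Before, the
+// iframe chain may thread through entry frames living on the C stack:
+//
+//   top -> [complete A] -> [C-stack entry] -> [complete B] -> ...
+//
+// After, complete frames link directly to one another, and each rewritten
+// ``previous`` pointer is stashed in the frame object's _f_frame_data so
+// unexpose_frames() can restore the original chain before resuming:
+//
+//   top -> [complete A] -----------------> [complete B] -> ...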
+
+}; // namespace greenlet
+#endif
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TGreenlet.hpp b/tapdown/lib/python3.11/site-packages/greenlet/TGreenlet.hpp
new file mode 100644
index 0000000..e152353
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TGreenlet.hpp
@@ -0,0 +1,830 @@
+#ifndef GREENLET_GREENLET_HPP
+#define GREENLET_GREENLET_HPP
+/*
+ * Declarations of the core data structures.
+*/
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#include "greenlet_compiler_compat.hpp"
+#include "greenlet_refs.hpp"
+#include "greenlet_cpython_compat.hpp"
+#include "greenlet_allocator.hpp"
+
+using greenlet::refs::OwnedObject;
+using greenlet::refs::OwnedGreenlet;
+using greenlet::refs::OwnedMainGreenlet;
+using greenlet::refs::BorrowedGreenlet;
+
+#if PY_VERSION_HEX < 0x30B00A6
+# define _PyCFrame CFrame
+# define _PyInterpreterFrame _interpreter_frame
+#endif
+
+#if GREENLET_PY312
+# define Py_BUILD_CORE
+# include "internal/pycore_frame.h"
+#endif
+
+#if GREENLET_PY314
+# include "internal/pycore_interpframe_structs.h"
+#if defined(_MSC_VER) || defined(__MINGW64__)
+# include "greenlet_msvc_compat.hpp"
+#else
+# include "internal/pycore_interpframe.h"
+#endif
+#endif
+
+// XXX: TODO: Work to remove all virtual functions
+// for speed of calling and size of objects (no vtable).
+// One pattern is the Curiously Recurring Template
+namespace greenlet
+{
+ class ExceptionState
+ {
+ private:
+ G_NO_COPIES_OF_CLS(ExceptionState);
+
+ // Even though these are borrowed objects, we actually own
+ // them, when they're not null.
+ // XXX: Express that in the API.
+ private:
+ _PyErr_StackItem* exc_info;
+ _PyErr_StackItem exc_state;
+ public:
+ ExceptionState();
+ void operator<<(const PyThreadState *const tstate) noexcept;
+ void operator>>(PyThreadState* tstate) noexcept;
+ void clear() noexcept;
+
+ int tp_traverse(visitproc visit, void* arg) noexcept;
+ void tp_clear() noexcept;
+ };
+
+    template <typename T>
+ void operator<<(const PyThreadState *const tstate, T& exc);
+
+ class PythonStateContext
+ {
+ protected:
+ greenlet::refs::OwnedContext _context;
+ public:
+ inline const greenlet::refs::OwnedContext& context() const
+ {
+ return this->_context;
+ }
+ inline greenlet::refs::OwnedContext& context()
+ {
+ return this->_context;
+ }
+
+ inline void tp_clear()
+ {
+ this->_context.CLEAR();
+ }
+
+        template <typename T>
+ inline static PyObject* context(T* tstate)
+ {
+ return tstate->context;
+ }
+
+        template <typename T>
+ inline static void context(T* tstate, PyObject* new_context)
+ {
+ tstate->context = new_context;
+ tstate->context_ver++;
+ }
+ };
+ class SwitchingArgs;
+ class PythonState : public PythonStateContext
+ {
+ public:
+        typedef greenlet::refs::OwnedReference<PyFrameObject> OwnedFrame;
+ private:
+ G_NO_COPIES_OF_CLS(PythonState);
+ // We own this if we're suspended (although currently we don't
+ // tp_traverse into it; that's a TODO). If we're running, it's
+ // empty. If we get deallocated and *still* have a frame, it
+ // won't be reachable from the place that normally decref's
+ // it, so we need to do it (hence owning it).
+ OwnedFrame _top_frame;
+#if GREENLET_USE_CFRAME
+ _PyCFrame* cframe;
+ int use_tracing;
+#endif
+#if GREENLET_PY314
+ int py_recursion_depth;
+        // I think this is only used by the JIT. At least, we only got
+        // errors from failing to switch it when the JIT was enabled.
+ // Python/generated_cases.c.h:12469: _PyEval_EvalFrameDefault:
+ // Assertion `tstate->current_executor == NULL' failed.
+ // see https://github.com/python-greenlet/greenlet/issues/460
+ PyObject* current_executor;
+#elif GREENLET_PY312
+ int py_recursion_depth;
+ int c_recursion_depth;
+#else
+ int recursion_depth;
+#endif
+#if GREENLET_PY313
+ PyObject *delete_later;
+#else
+ int trash_delete_nesting;
+#endif
+#if GREENLET_PY311
+ _PyInterpreterFrame* current_frame;
+ _PyStackChunk* datastack_chunk;
+ PyObject** datastack_top;
+ PyObject** datastack_limit;
+#endif
+ // The PyInterpreterFrame list on 3.12+ contains some entries that are
+ // on the C stack, which can't be directly accessed while a greenlet is
+ // suspended. In order to keep greenlet gr_frame introspection working,
+ // we adjust stack switching to rewrite the interpreter frame list
+ // to skip these C-stack frames; we call this "exposing" the greenlet's
+ // frames because it makes them valid to work with in Python. Then when
+ // the greenlet is resumed we need to remember to reverse the operation
+ // we did. The C-stack frames are "entry frames" which are a low-level
+ // interpreter detail; they're not needed for introspection, but do
+ // need to be present for the eval loop to work.
+ void unexpose_frames();
+
+ public:
+
+ PythonState();
+ // You can use this for testing whether we have a frame
+ // or not. It returns const so they can't modify it.
+ const OwnedFrame& top_frame() const noexcept;
+
+ inline void operator<<(const PyThreadState *const tstate) noexcept;
+ inline void operator>>(PyThreadState* tstate) noexcept;
+ void clear() noexcept;
+
+ int tp_traverse(visitproc visit, void* arg, bool visit_top_frame) noexcept;
+ void tp_clear(bool own_top_frame) noexcept;
+ void set_initial_state(const PyThreadState* const tstate) noexcept;
+#if GREENLET_USE_CFRAME
+ void set_new_cframe(_PyCFrame& frame) noexcept;
+#endif
+
+ void may_switch_away() noexcept;
+ inline void will_switch_from(PyThreadState *const origin_tstate) noexcept;
+ void did_finish(PyThreadState* tstate) noexcept;
+ };
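+
+    // The save/restore protocol in brief: when switching away from the
+    // greenlet owning this state on thread state ``tstate``,
+    //
+    //   python_state << tstate;   // capture the eval-loop fields
+    //   ... stack switch ...
+    //   python_state >> tstate;   // install the captured fields back
+    //
+    // operator<< and operator>> above are those two directions; the
+    // definitions live in TPythonState.cpp.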
+
+ class StackState
+ {
+ // By having only plain C (POD) members, no virtual functions
+ // or bases, we get a trivial assignment operator generated
+ // for us. However, that's not safe since we do manage memory.
+ // So we declare an assignment operator that only works if we
+ // don't have any memory allocated. (We don't use
+ // std::shared_ptr for reference counting just to keep this
+ // object small)
+ private:
+ char* _stack_start;
+ char* stack_stop;
+ char* stack_copy;
+ intptr_t _stack_saved;
+ StackState* stack_prev;
+ inline int copy_stack_to_heap_up_to(const char* const stop) noexcept;
+ inline void free_stack_copy() noexcept;
+
+ public:
+ /**
+ * Creates a started, but inactive, state, using *current*
+ * as the previous.
+ */
+ StackState(void* mark, StackState& current);
+ /**
+ * Creates an inactive, unstarted, state.
+ */
+ StackState();
+ ~StackState();
+ StackState(const StackState& other);
+ StackState& operator=(const StackState& other);
+ inline void copy_heap_to_stack(const StackState& current) noexcept;
+ inline int copy_stack_to_heap(char* const stackref, const StackState& current) noexcept;
+ inline bool started() const noexcept;
+ inline bool main() const noexcept;
+ inline bool active() const noexcept;
+ inline void set_active() noexcept;
+ inline void set_inactive() noexcept;
+ inline intptr_t stack_saved() const noexcept;
+ inline char* stack_start() const noexcept;
+ static inline StackState make_main() noexcept;
+#ifdef GREENLET_USE_STDIO
+ friend std::ostream& operator<<(std::ostream& os, const StackState& s);
+#endif
+
+ // Fill in [dest, dest + n) with the values that would be at
+ // [src, src + n) while this greenlet is running. This is like memcpy
+ // except that if the greenlet is suspended it accounts for the portion
+ // of the greenlet's stack that was spilled to the heap. `src` may
+ // be on this greenlet's stack, or on the heap, but not on a different
+ // greenlet's stack.
+ void copy_from_stack(void* dest, const void* src, size_t n) const;
+ };
+#ifdef GREENLET_USE_STDIO
+ std::ostream& operator<<(std::ostream& os, const StackState& s);
+#endif
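+
+    // A usage sketch of copy_from_stack() (see expose_frames() in
+    // TGreenlet.cpp for the real call site): reading a suspended
+    // greenlet's interpreter frame without touching C-stack memory that
+    // may currently belong to a different greenlet:
+    //
+    //   _PyInterpreterFrame copy;
+    //   stack_state.copy_from_stack(&copy, iframe, sizeof(copy));
+    //   // ``copy`` is safe to inspect even if ``iframe`` pointed into
+    //   // the spilled region.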
+
+ class SwitchingArgs
+ {
+ private:
+ G_NO_ASSIGNMENT_OF_CLS(SwitchingArgs);
+ // If args and kwargs are both false (NULL), this is a *throw*, not a
+ // switch. PyErr_... must have been called already.
+ OwnedObject _args;
+ OwnedObject _kwargs;
+ public:
+
+ SwitchingArgs()
+ {}
+
+ SwitchingArgs(const OwnedObject& args, const OwnedObject& kwargs)
+ : _args(args),
+ _kwargs(kwargs)
+ {}
+
+ SwitchingArgs(const SwitchingArgs& other)
+ : _args(other._args),
+ _kwargs(other._kwargs)
+ {}
+
+ const OwnedObject& args()
+ {
+ return this->_args;
+ }
+
+ const OwnedObject& kwargs()
+ {
+ return this->_kwargs;
+ }
+
+ /**
+ * Moves ownership from the argument to this object.
+ */
+ SwitchingArgs& operator<<=(SwitchingArgs& other)
+ {
+ if (this != &other) {
+ this->_args = other._args;
+ this->_kwargs = other._kwargs;
+ other.CLEAR();
+ }
+ return *this;
+ }
+
+ /**
+ * Acquires ownership of the argument (consumes the reference).
+ */
+ SwitchingArgs& operator<<=(PyObject* args)
+ {
+ this->_args = OwnedObject::consuming(args);
+ this->_kwargs.CLEAR();
+ return *this;
+ }
+
+ /**
+ * Acquires ownership of the argument.
+ *
+ * Sets the args to be the given value; clears the kwargs.
+ */
+ SwitchingArgs& operator<<=(OwnedObject& args)
+ {
+ assert(&args != &this->_args);
+ this->_args = args;
+ this->_kwargs.CLEAR();
+ args.CLEAR();
+
+ return *this;
+ }
+
+ explicit operator bool() const noexcept
+ {
+ return this->_args || this->_kwargs;
+ }
+
+ inline void CLEAR()
+ {
+ this->_args.CLEAR();
+ this->_kwargs.CLEAR();
+ }
+
+ const std::string as_str() const noexcept
+ {
+ return PyUnicode_AsUTF8(
+ OwnedObject::consuming(
+ PyUnicode_FromFormat(
+ "SwitchingArgs(args=%R, kwargs=%R)",
+ this->_args.borrow(),
+ this->_kwargs.borrow()
+ )
+ ).borrow()
+ );
+ }
+ };
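+
+    // Move semantics in a nutshell (illustrative):
+    //
+    //   SwitchingArgs a(args, kwargs);
+    //   SwitchingArgs b;
+    //   b <<= a;   // b now owns the references; a has been CLEARed
+    //
+    // so each reference is owned exactly once across a switch.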
+
+ class ThreadState;
+
+ class UserGreenlet;
+ class MainGreenlet;
+
+ class Greenlet
+ {
+ private:
+ G_NO_COPIES_OF_CLS(Greenlet);
+ PyGreenlet* const _self;
+ private:
+ // XXX: Work to remove these.
+ friend class ThreadState;
+ friend class UserGreenlet;
+ friend class MainGreenlet;
+ protected:
+ ExceptionState exception_state;
+ SwitchingArgs switch_args;
+ StackState stack_state;
+ PythonState python_state;
+ Greenlet(PyGreenlet* p, const StackState& initial_state);
+ public:
+ // This constructor takes ownership of the PyGreenlet, by
+ // setting ``p->pimpl = this;``.
+ Greenlet(PyGreenlet* p);
+ virtual ~Greenlet();
+
+ const OwnedObject context() const;
+
+ // You MUST call this _very_ early in the switching process to
+        // prepare anything that may need to be prepared. This might perform
+ // garbage collections or otherwise run arbitrary Python code.
+ //
+ // One specific use of it is for Python 3.11+, preventing
+ // running arbitrary code at unsafe times. See
+ // PythonState::may_switch_away().
+ inline void may_switch_away()
+ {
+ this->python_state.may_switch_away();
+ }
+
+ inline void context(refs::BorrowedObject new_context);
+
+ inline SwitchingArgs& args()
+ {
+ return this->switch_args;
+ }
+
+ virtual const refs::BorrowedMainGreenlet main_greenlet() const = 0;
+
+ inline intptr_t stack_saved() const noexcept
+ {
+ return this->stack_state.stack_saved();
+ }
+
+ // This is used by the macro SLP_SAVE_STATE to compute the
+ // difference in stack sizes. It might be nice to handle the
+        // computation ourselves, but the type of the result
+ // varies by platform, so doing it in the macro is the
+ // simplest way.
+ inline const char* stack_start() const noexcept
+ {
+ return this->stack_state.stack_start();
+ }
+
+ virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state);
+ virtual OwnedObject g_switch() = 0;
+ /**
+ * Force the greenlet to appear dead. Used when it's not
+ * possible to throw an exception into a greenlet anymore.
+ *
+         * This loses access to the thread state and the main greenlet.
+ */
+ virtual void murder_in_place();
+
+ /**
+ * Called when somebody notices we were running in a dead
+ * thread to allow cleaning up resources (because we can't
+ * raise GreenletExit into it anymore).
+ * This is very similar to ``murder_in_place()``, except that
+ * it DOES NOT lose the main greenlet or thread state.
+ */
+ inline void deactivate_and_free();
+
+
+ // Called when some thread wants to deallocate a greenlet
+ // object.
+ // The thread may or may not be the same thread the greenlet
+ // was running in.
+ // The thread state will be null if the thread the greenlet
+ // was running in was known to have exited.
+ void deallocing_greenlet_in_thread(const ThreadState* current_state);
+
+ // Must be called on 3.12+ before exposing a suspended greenlet's
+ // frames to user code. This rewrites the linked list of interpreter
+ // frames to skip the ones that are being stored on the C stack (which
+ // can't be safely accessed while the greenlet is suspended because
+ // that stack space might be hosting a different greenlet), and
+ // sets PythonState::frames_were_exposed so we remember to restore
+ // the original list before resuming the greenlet. The C-stack frames
+ // are a low-level interpreter implementation detail; while they're
+ // important to the bytecode eval loop, they're superfluous for
+ // introspection purposes.
+ void expose_frames();
+
+
+ // TODO: Figure out how to make these non-public.
+ inline void slp_restore_state() noexcept;
+ inline int slp_save_state(char *const stackref) noexcept;
+
+ inline bool is_currently_running_in_some_thread() const;
+ virtual bool belongs_to_thread(const ThreadState* state) const;
+
+ inline bool started() const
+ {
+ return this->stack_state.started();
+ }
+ inline bool active() const
+ {
+ return this->stack_state.active();
+ }
+ inline bool main() const
+ {
+ return this->stack_state.main();
+ }
+ virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const = 0;
+
+ virtual const OwnedGreenlet parent() const = 0;
+ virtual void parent(const refs::BorrowedObject new_parent) = 0;
+
+ inline const PythonState::OwnedFrame& top_frame()
+ {
+ return this->python_state.top_frame();
+ }
+
+ virtual const OwnedObject& run() const = 0;
+ virtual void run(const refs::BorrowedObject nrun) = 0;
+
+
+ virtual int tp_traverse(visitproc visit, void* arg);
+ virtual int tp_clear();
+
+
+ // Return the thread state that the greenlet is running in, or
+ // null if the greenlet is not running or the thread is known
+ // to have exited.
+ virtual ThreadState* thread_state() const noexcept = 0;
+
+ // Return true if the greenlet is known to have been running
+ // (active) in a thread that has now exited.
+ virtual bool was_running_in_dead_thread() const noexcept = 0;
+
+ // Return a borrowed greenlet that is the Python object
+ // this object represents.
+ inline BorrowedGreenlet self() const noexcept
+ {
+ return BorrowedGreenlet(this->_self);
+ }
+
+ // For testing. If this returns true, we should pretend that
+ // slp_switch() failed.
+ virtual bool force_slp_switch_error() const noexcept;
+
+ protected:
+ inline void release_args();
+
+ // The functions that must not be inlined are declared virtual.
+ // We also mark them as protected, not private, so that the
+ // compiler is forced to call them through a function pointer.
+ // (A sufficiently smart compiler could directly call a private
+ // virtual function since it can never be overridden in a
+ // subclass).
+
+ // Also TODO: Switch away from integer error codes and to enums,
+ // or throw exceptions when possible.
+ struct switchstack_result_t
+ {
+ int status;
+ Greenlet* the_new_current_greenlet;
+ OwnedGreenlet origin_greenlet;
+
+ switchstack_result_t()
+ : status(0),
+ the_new_current_greenlet(nullptr)
+ {}
+
+ switchstack_result_t(int err)
+ : status(err),
+ the_new_current_greenlet(nullptr)
+ {}
+
+ switchstack_result_t(int err, Greenlet* state, OwnedGreenlet& origin)
+ : status(err),
+ the_new_current_greenlet(state),
+ origin_greenlet(origin)
+ {
+ }
+
+ switchstack_result_t(int err, Greenlet* state, const BorrowedGreenlet& origin)
+ : status(err),
+ the_new_current_greenlet(state),
+ origin_greenlet(origin)
+ {
+ }
+
+ switchstack_result_t(const switchstack_result_t& other)
+ : status(other.status),
+ the_new_current_greenlet(other.the_new_current_greenlet),
+ origin_greenlet(other.origin_greenlet)
+ {}
+
+ switchstack_result_t& operator=(const switchstack_result_t& other)
+ {
+ this->status = other.status;
+ this->the_new_current_greenlet = other.the_new_current_greenlet;
+ this->origin_greenlet = other.origin_greenlet;
+ return *this;
+ }
+ };
+
+ OwnedObject on_switchstack_or_initialstub_failure(
+ Greenlet* target,
+ const switchstack_result_t& err,
+ const bool target_was_me=false,
+ const bool was_initial_stub=false);
+
+ // Returns the previous greenlet we just switched away from.
+ virtual OwnedGreenlet g_switchstack_success() noexcept;
+
+
+ // Check the preconditions for switching to this greenlet; if they
+ // aren't met, throws PyErrOccurred. Most callers will want to
+ // catch this and clear the arguments
+ inline void check_switch_allowed() const;
+ class GreenletStartedWhileInPython : public std::runtime_error
+ {
+ public:
+ GreenletStartedWhileInPython() : std::runtime_error("")
+ {}
+ };
+
+ protected:
+
+
+ /**
+ Perform a stack switch into this greenlet.
+
+ This temporarily sets the global variable
+ ``switching_thread_state`` to this greenlet; as soon as the
+ call to ``slp_switch`` completes, this is reset to NULL.
+ Consequently, this depends on the GIL.
+
+ TODO: Adopt the stackman model and pass ``slp_switch`` a
+ callback function and context pointer; this eliminates the
+ need for global variables altogether.
+
+ Because the stack switch happens in this function, this
+ function can't use its own stack (local) variables, set
+ before the switch, and then accessed after the switch.
+
+           Further, you can't even access ``g_thread_state_global``
+ before and after the switch from the global variable.
+ Because it is thread local some compilers cache it in a
+ register/on the stack, notably new versions of MSVC; this
+ breaks with strange crashes sometime later, because writing
+ to anything in ``g_thread_state_global`` after the switch
+ is actually writing to random memory. For this reason, we
+ call a non-inlined function to finish the operation. (XXX:
+ The ``/GT`` MSVC compiler argument probably fixes that.)
+
+ It is very important that stack switch is 'atomic', i.e. no
+ calls into other Python code allowed (except very few that
+ are safe), because global variables are very fragile. (This
+ should no longer be the case with thread-local variables.)
+
+ */
+ // Made virtual to facilitate subclassing UserGreenlet for testing.
+ virtual switchstack_result_t g_switchstack(void);
+
+class TracingGuard
+{
+private:
+ PyThreadState* tstate;
+public:
+ TracingGuard()
+ : tstate(PyThreadState_GET())
+ {
+ PyThreadState_EnterTracing(this->tstate);
+ }
+
+ ~TracingGuard()
+ {
+ PyThreadState_LeaveTracing(this->tstate);
+ this->tstate = nullptr;
+ }
+
+ inline void CallTraceFunction(const OwnedObject& tracefunc,
+ const greenlet::refs::ImmortalEventName& event,
+ const BorrowedGreenlet& origin,
+ const BorrowedGreenlet& target)
+ {
+ // TODO: This calls tracefunc(event, (origin, target)). Add a shortcut
+        // function for that, specialized to avoid the Py_BuildValue
+ // string parsing, or start with just using "ON" format with PyTuple_Pack(2,
+ // origin, target). That seems like what the N format is meant
+ // for.
+ // XXX: Why does event not automatically cast back to a PyObject?
+ // It tries to call the "deleted constructor ImmortalEventName
+ // const" instead.
+ assert(tracefunc);
+ assert(event);
+ assert(origin);
+ assert(target);
+ greenlet::refs::NewReference retval(
+ PyObject_CallFunction(
+ tracefunc.borrow(),
+ "O(OO)",
+ event.borrow(),
+ origin.borrow(),
+ target.borrow()
+ ));
+ if (!retval) {
+ throw PyErrOccurred::from_current();
+ }
+ }
+};
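+
+// Illustrative use of TracingGuard (see g_calltrace, declared below and
+// defined in TGreenlet.cpp): tracing stays suppressed for exactly the
+// guard's scope, so the trace function cannot recursively trigger tracing:
+//
+//   {
+//       TracingGuard guard;
+//       guard.CallTraceFunction(tracefunc, event, origin, target);
+//   }   // ~TracingGuard calls PyThreadState_LeaveTracing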
+
+ static void
+ g_calltrace(const OwnedObject& tracefunc,
+ const greenlet::refs::ImmortalEventName& event,
+ const greenlet::refs::BorrowedGreenlet& origin,
+ const BorrowedGreenlet& target);
+ private:
+ OwnedObject g_switch_finish(const switchstack_result_t& err);
+
+ };
+
+ class UserGreenlet : public Greenlet
+ {
+ private:
+        static greenlet::PythonAllocator<UserGreenlet> allocator;
+ OwnedMainGreenlet _main_greenlet;
+ OwnedObject _run_callable;
+ OwnedGreenlet _parent;
+ public:
+ static void* operator new(size_t UNUSED(count));
+ static void operator delete(void* ptr);
+
+ UserGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent);
+ virtual ~UserGreenlet();
+
+ virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const;
+ virtual bool was_running_in_dead_thread() const noexcept;
+ virtual ThreadState* thread_state() const noexcept;
+ virtual OwnedObject g_switch();
+ virtual const OwnedObject& run() const
+ {
+ if (this->started() || !this->_run_callable) {
+ throw AttributeError("run");
+ }
+ return this->_run_callable;
+ }
+ virtual void run(const refs::BorrowedObject nrun);
+
+ virtual const OwnedGreenlet parent() const;
+ virtual void parent(const refs::BorrowedObject new_parent);
+
+ virtual const refs::BorrowedMainGreenlet main_greenlet() const;
+
+ virtual void murder_in_place();
+ virtual bool belongs_to_thread(const ThreadState* state) const;
+ virtual int tp_traverse(visitproc visit, void* arg);
+ virtual int tp_clear();
+ class ParentIsCurrentGuard
+ {
+ private:
+ OwnedGreenlet oldparent;
+ UserGreenlet* greenlet;
+ G_NO_COPIES_OF_CLS(ParentIsCurrentGuard);
+ public:
+ ParentIsCurrentGuard(UserGreenlet* p, const ThreadState& thread_state);
+ ~ParentIsCurrentGuard();
+ };
+ virtual OwnedObject throw_GreenletExit_during_dealloc(const ThreadState& current_thread_state);
+ protected:
+ virtual switchstack_result_t g_initialstub(void* mark);
+ private:
+ // This function isn't meant to return.
+ // This accepts raw pointers and the ownership of them at the
+ // same time. The caller should use ``inner_bootstrap(origin.relinquish_ownership())``.
+ void inner_bootstrap(PyGreenlet* origin_greenlet, PyObject* run);
+ };
+
+ class BrokenGreenlet : public UserGreenlet
+ {
+ private:
+        static greenlet::PythonAllocator<BrokenGreenlet> allocator;
+ public:
+ bool _force_switch_error = false;
+ bool _force_slp_switch_error = false;
+
+ static void* operator new(size_t UNUSED(count));
+ static void operator delete(void* ptr);
+ BrokenGreenlet(PyGreenlet* p, BorrowedGreenlet the_parent)
+ : UserGreenlet(p, the_parent)
+ {}
+ virtual ~BrokenGreenlet()
+ {}
+
+ virtual switchstack_result_t g_switchstack(void);
+ virtual bool force_slp_switch_error() const noexcept;
+
+ };
+
+ class MainGreenlet : public Greenlet
+ {
+ private:
+        static greenlet::PythonAllocator<MainGreenlet> allocator;
+ refs::BorrowedMainGreenlet _self;
+ ThreadState* _thread_state;
+ G_NO_COPIES_OF_CLS(MainGreenlet);
+ public:
+ static void* operator new(size_t UNUSED(count));
+ static void operator delete(void* ptr);
+
+ MainGreenlet(refs::BorrowedMainGreenlet::PyType*, ThreadState*);
+ virtual ~MainGreenlet();
+
+
+ virtual const OwnedObject& run() const;
+ virtual void run(const refs::BorrowedObject nrun);
+
+ virtual const OwnedGreenlet parent() const;
+ virtual void parent(const refs::BorrowedObject new_parent);
+
+ virtual const refs::BorrowedMainGreenlet main_greenlet() const;
+
+ virtual refs::BorrowedMainGreenlet find_main_greenlet_in_lineage() const;
+ virtual bool was_running_in_dead_thread() const noexcept;
+ virtual ThreadState* thread_state() const noexcept;
+ void thread_state(ThreadState*) noexcept;
+ virtual OwnedObject g_switch();
+ virtual int tp_traverse(visitproc visit, void* arg);
+ };
+
+ // Instantiate one on the stack to save the GC state,
+ // and then disable GC. When it goes out of scope, GC will be
+ // restored to its original state. Sadly, these APIs are only
+ // available on 3.10+; luckily, we only need them on 3.11+.
+#if GREENLET_PY310
+ class GCDisabledGuard
+ {
+ private:
+ int was_enabled = 0;
+ public:
+ GCDisabledGuard()
+ : was_enabled(PyGC_IsEnabled())
+ {
+ PyGC_Disable();
+ }
+
+ ~GCDisabledGuard()
+ {
+ if (this->was_enabled) {
+ PyGC_Enable();
+ }
+ }
+ };
+#endif
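+
+    // Typical RAII use (see PythonState::may_switch_away() for the real
+    // call site); GC stays disabled for exactly the guard's scope:
+    //
+    //   {
+    //       GCDisabledGuard no_gc;
+    //       ...  // code that must not trigger a collection
+    //   }        // destructor re-enables GC iff it was enabled before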
+
+ OwnedObject& operator<<=(OwnedObject& lhs, greenlet::SwitchingArgs& rhs) noexcept;
+
+ //TODO: Greenlet::g_switch() should call this automatically on its
+ //return value. As it is, the module code is calling it.
+ static inline OwnedObject
+ single_result(const OwnedObject& results)
+ {
+ if (results
+ && PyTuple_Check(results.borrow())
+ && PyTuple_GET_SIZE(results.borrow()) == 1) {
+ PyObject* result = PyTuple_GET_ITEM(results.borrow(), 0);
+ assert(result);
+ return OwnedObject::owning(result);
+ }
+ return results;
+ }
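+
+    // Illustrative behavior, mirroring the Python-level switch() API:
+    //
+    //   ("hi",)       -> "hi"        (a 1-tuple is unwrapped)
+    //   ("hi", "bye") -> unchanged   (longer tuples pass through)
+    //   non-tuple     -> unchanged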
+
+
+ static OwnedObject
+ g_handle_exit(const OwnedObject& greenlet_result);
+
+
+    template <typename T>
+ void operator<<(const PyThreadState *const lhs, T& rhs)
+ {
+ rhs.operator<<(lhs);
+ }
+
+} // namespace greenlet ;
+
+#endif
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TGreenletGlobals.cpp b/tapdown/lib/python3.11/site-packages/greenlet/TGreenletGlobals.cpp
new file mode 100644
index 0000000..0087d2f
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TGreenletGlobals.cpp
@@ -0,0 +1,94 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ * Implementation of GreenletGlobals.
+ *
+ * Format with:
+ * clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+#ifndef T_GREENLET_GLOBALS
+#define T_GREENLET_GLOBALS
+
+#include "greenlet_refs.hpp"
+#include "greenlet_exceptions.hpp"
+#include "greenlet_thread_support.hpp"
+#include "greenlet_internal.hpp"
+
+namespace greenlet {
+
+// This encapsulates what were previously module global "constants"
+// established at init time.
+// This is a step towards Python3 style module state that allows
+// reloading.
+//
+// In an earlier iteration of this code, we used placement new to be
+// able to allocate this object statically still, so that references
+// to its members don't incur an extra pointer indirection.
+// But under some scenarios, that could result in crashes at
+// shutdown because apparently the destructor was getting run twice?
+class GreenletGlobals
+{
+
+public:
+ const greenlet::refs::ImmortalEventName event_switch;
+ const greenlet::refs::ImmortalEventName event_throw;
+ const greenlet::refs::ImmortalException PyExc_GreenletError;
+ const greenlet::refs::ImmortalException PyExc_GreenletExit;
+ const greenlet::refs::ImmortalObject empty_tuple;
+ const greenlet::refs::ImmortalObject empty_dict;
+ const greenlet::refs::ImmortalString str_run;
+ Mutex* const thread_states_to_destroy_lock;
+ greenlet::cleanup_queue_t thread_states_to_destroy;
+
+ GreenletGlobals() :
+ event_switch("switch"),
+ event_throw("throw"),
+ PyExc_GreenletError("greenlet.error"),
+ PyExc_GreenletExit("greenlet.GreenletExit", PyExc_BaseException),
+ empty_tuple(Require(PyTuple_New(0))),
+ empty_dict(Require(PyDict_New())),
+ str_run("run"),
+ thread_states_to_destroy_lock(new Mutex())
+ {}
+
+ ~GreenletGlobals()
+ {
+ // This object is (currently) effectively immortal, and not
+ // just because of those placement new tricks; if we try to
+ // deallocate the static object we allocated, and overwrote,
+ // we would be doing so at C++ teardown time, which is after
+ // the final Python GIL is released, and we can't use the API
+ // then.
+ // (The members will still be destructed, but they also don't
+ // do any deallocation.)
+ }
+
+ void queue_to_destroy(ThreadState* ts) const
+ {
+ // we're currently accessed through a static const object,
+ // implicitly marking our members as const, so code can't just
+ // call push_back (or pop_back) without casting away the
+ // const.
+ //
+ // Do that for callers.
+        greenlet::cleanup_queue_t& q = const_cast<greenlet::cleanup_queue_t&>(this->thread_states_to_destroy);
+ q.push_back(ts);
+ }
+
+ ThreadState* take_next_to_destroy() const
+ {
+        greenlet::cleanup_queue_t& q = const_cast<greenlet::cleanup_queue_t&>(this->thread_states_to_destroy);
+ ThreadState* result = q.back();
+ q.pop_back();
+ return result;
+ }
+};
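+
+// Access pattern (illustrative): module code holds the single static
+// ``mod_globs`` pointer declared below and calls, e.g.,
+//
+//   mod_globs->queue_to_destroy(ts);                      // dying thread
+//   ThreadState* ts = mod_globs->take_next_to_destroy();  // cleanup side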
+
+}; // namespace greenlet
+
+static const greenlet::GreenletGlobals* mod_globs;
+
+#endif // T_GREENLET_GLOBALS
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TMainGreenlet.cpp b/tapdown/lib/python3.11/site-packages/greenlet/TMainGreenlet.cpp
new file mode 100644
index 0000000..a2a9cfe
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TMainGreenlet.cpp
@@ -0,0 +1,153 @@
+/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */
+/**
+ * Implementation of greenlet::MainGreenlet.
+ *
+ * Format with:
+ * clang-format -i --style=file src/greenlet/greenlet.c
+ *
+ *
+ * Fix missing braces with:
+ * clang-tidy src/greenlet/greenlet.c -fix -checks="readability-braces-around-statements"
+*/
+#ifndef T_MAIN_GREENLET_CPP
+#define T_MAIN_GREENLET_CPP
+
+#include "TGreenlet.hpp"
+
+
+
+// Protected by the GIL. Incremented when we create a main greenlet
+// in a new thread; decremented when it is destroyed.
+static Py_ssize_t G_TOTAL_MAIN_GREENLETS;
+
+namespace greenlet {
+greenlet::PythonAllocator<MainGreenlet> MainGreenlet::allocator;
+
+void* MainGreenlet::operator new(size_t UNUSED(count))
+{
+ return allocator.allocate(1);
+}
+
+
+void MainGreenlet::operator delete(void* ptr)
+{
+    return allocator.deallocate(static_cast<MainGreenlet*>(ptr),
+ 1);
+}
+
+
+MainGreenlet::MainGreenlet(PyGreenlet* p, ThreadState* state)
+ : Greenlet(p, StackState::make_main()),
+ _self(p),
+ _thread_state(state)
+{
+ G_TOTAL_MAIN_GREENLETS++;
+}
+
+MainGreenlet::~MainGreenlet()
+{
+ G_TOTAL_MAIN_GREENLETS--;
+ this->tp_clear();
+}
+
+ThreadState*
+MainGreenlet::thread_state() const noexcept
+{
+ return this->_thread_state;
+}
+
+void
+MainGreenlet::thread_state(ThreadState* t) noexcept
+{
+ assert(!t);
+ this->_thread_state = t;
+}
+
+
+const BorrowedMainGreenlet
+MainGreenlet::main_greenlet() const
+{
+ return this->_self;
+}
+
+BorrowedMainGreenlet
+MainGreenlet::find_main_greenlet_in_lineage() const
+{
+ return BorrowedMainGreenlet(this->_self);
+}
+
+bool
+MainGreenlet::was_running_in_dead_thread() const noexcept
+{
+ return !this->_thread_state;
+}
+
+OwnedObject
+MainGreenlet::g_switch()
+{
+ try {
+ this->check_switch_allowed();
+ }
+ catch (const PyErrOccurred&) {
+ this->release_args();
+ throw;
+ }
+
+ switchstack_result_t err = this->g_switchstack();
+ if (err.status < 0) {
+ // XXX: This code path is untested, but it is shared
+ // with the UserGreenlet path that is tested.
+ return this->on_switchstack_or_initialstub_failure(
+ this,
+ err,
+ true, // target was me
+ false // was initial stub
+ );
+ }
+
+ return err.the_new_current_greenlet->g_switch_finish(err);
+}
+
+int
+MainGreenlet::tp_traverse(visitproc visit, void* arg)
+{
+ if (this->_thread_state) {
+        // we've already traversed main (self); don't do it again.
+ int result = this->_thread_state->tp_traverse(visit, arg, false);
+ if (result) {
+ return result;
+ }
+ }
+ return Greenlet::tp_traverse(visit, arg);
+}
+
+const OwnedObject&
+MainGreenlet::run() const
+{
+ throw AttributeError("Main greenlets do not have a run attribute.");
+}
+
+void
+MainGreenlet::run(const BorrowedObject UNUSED(nrun))
+{
+ throw AttributeError("Main greenlets do not have a run attribute.");
+}
+
+void
+MainGreenlet::parent(const BorrowedObject raw_new_parent)
+{
+ if (!raw_new_parent) {
+ throw AttributeError("can't delete attribute");
+ }
+ throw AttributeError("cannot set the parent of a main greenlet");
+}
+
+const OwnedGreenlet
+MainGreenlet::parent() const
+{
+ return OwnedGreenlet(); // null becomes None
+}
+
+}; // namespace greenlet
+
+#endif
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TPythonState.cpp b/tapdown/lib/python3.11/site-packages/greenlet/TPythonState.cpp
new file mode 100644
index 0000000..8833a80
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TPythonState.cpp
@@ -0,0 +1,406 @@
+#ifndef GREENLET_PYTHON_STATE_CPP
+#define GREENLET_PYTHON_STATE_CPP
+
+#include <Python.h>
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+PythonState::PythonState()
+ : _top_frame()
+#if GREENLET_USE_CFRAME
+ ,cframe(nullptr)
+ ,use_tracing(0)
+#endif
+#if GREENLET_PY314
+ ,py_recursion_depth(0)
+ ,current_executor(nullptr)
+#elif GREENLET_PY312
+ ,py_recursion_depth(0)
+ ,c_recursion_depth(0)
+#else
+ ,recursion_depth(0)
+#endif
+#if GREENLET_PY313
+ ,delete_later(nullptr)
+#else
+ ,trash_delete_nesting(0)
+#endif
+#if GREENLET_PY311
+ ,current_frame(nullptr)
+ ,datastack_chunk(nullptr)
+ ,datastack_top(nullptr)
+ ,datastack_limit(nullptr)
+#endif
+{
+#if GREENLET_USE_CFRAME
+ /*
+ The PyThreadState->cframe pointer usually points to memory on
+    the stack, allocated in a call into PyEval_EvalFrameDefault.
+
+ Initially, before any evaluation begins, it points to the
+ initial PyThreadState object's ``root_cframe`` object, which is
+ statically allocated for the lifetime of the thread.
+
+ A greenlet can last for longer than a call to
+ PyEval_EvalFrameDefault, so we can't set its ``cframe`` pointer
+ to be the current ``PyThreadState->cframe``; nor could we use
+ one from the greenlet parent for the same reason. Yet a further
+ no: we can't allocate one scoped to the greenlet and then
+ destroy it when the greenlet is deallocated, because inside the
+ interpreter the _PyCFrame objects form a linked list, and that too
+ can result in accessing memory beyond its dynamic lifetime (if
+ the greenlet doesn't actually finish before it dies, its entry
+ could still be in the list).
+
+ Using the ``root_cframe`` is problematic, though, because its
+ members are never modified by the interpreter and are set to 0,
+ meaning that its ``use_tracing`` flag is never updated. We don't
+ want to modify that value in the ``root_cframe`` ourself: it
+ *shouldn't* matter much because we should probably never get
+ back to the point where that's the only cframe on the stack;
+ even if it did matter, the major consequence of an incorrect
+    value for ``use_tracing`` is that if it's true the interpreter
+ does some extra work --- however, it's just good code hygiene.
+
+ Our solution: before a greenlet runs, after its initial
+ creation, it uses the ``root_cframe`` just to have something to
+ put there. However, once the greenlet is actually switched to
+ for the first time, ``g_initialstub`` (which doesn't actually
+ "return" while the greenlet is running) stores a new _PyCFrame on
+ its local stack, and copies the appropriate values from the
+ currently running _PyCFrame; this is then made the _PyCFrame for the
+ newly-minted greenlet. ``g_initialstub`` then proceeds to call
+ ``glet.run()``, which results in ``PyEval_...`` adding the
+ _PyCFrame to the list. Switches continue as normal. Finally, when
+ the greenlet finishes, the call to ``glet.run()`` returns and
+ the _PyCFrame is taken out of the linked list and the stack value
+ is now unused and free to expire.
+
+ XXX: I think we can do better. If we're deallocing in the same
+ thread, can't we traverse the list and unlink our frame?
+ Can we just keep a reference to the thread state in case we
+ dealloc in another thread? (Is that even possible if we're still
+ running and haven't returned from g_initialstub?)
+ */
+ this->cframe = &PyThreadState_GET()->root_cframe;
+#endif
+}
+
+
+inline void PythonState::may_switch_away() noexcept
+{
+#if GREENLET_PY311
+ // PyThreadState_GetFrame is probably going to have to allocate a
+ // new frame object. That may trigger garbage collection. Because
+ // we call this during the early phases of a switch (it doesn't
+ // matter to which greenlet, as this has a global effect), if a GC
+ // triggers a switch away, two things can happen, both bad:
+ // - We might not get switched back to, halting forward progress.
+    //   This is pathological, but possible.
+ // - We might get switched back to with a different set of
+ // arguments or a throw instead of a switch. That would corrupt
+ // our state (specifically, PyErr_Occurred() and this->args()
+ // would no longer agree).
+ //
+ // Thus, when we call this API, we need to have GC disabled.
+ // This method serves as a bottleneck we call when maybe beginning
+ // a switch. In this way, it is always safe -- no risk of GC -- to
+ // use ``_GetFrame()`` whenever we need to, just as it was in
+ // <=3.10 (because subsequent calls will be cached and not
+ // allocate memory).
+
+ GCDisabledGuard no_gc;
+ Py_XDECREF(PyThreadState_GetFrame(PyThreadState_GET()));
+#endif
+}
+
+void PythonState::operator<<(const PyThreadState *const tstate) noexcept
+{
+ this->_context.steal(tstate->context);
+#if GREENLET_USE_CFRAME
+ /*
+ IMPORTANT: ``cframe`` is a pointer into the STACK. Thus, because
+ the call to ``slp_switch()`` changes the contents of the stack,
+ you cannot read from ``ts_current->cframe`` after that call and
+ necessarily get the same values you get from reading it here.
+ Anything you need to restore from now to then must be saved in a
+ global/threadlocal variable (because we can't use stack
+ variables here either). For things that need to persist across
+ the switch, use `will_switch_from`.
+ */
+ this->cframe = tstate->cframe;
+ #if !GREENLET_PY312
+ this->use_tracing = tstate->cframe->use_tracing;
+ #endif
+#endif // GREENLET_USE_CFRAME
+#if GREENLET_PY311
+ #if GREENLET_PY314
+ this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
+ this->current_executor = tstate->current_executor;
+ #elif GREENLET_PY312
+ this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
+ this->c_recursion_depth = Py_C_RECURSION_LIMIT - tstate->c_recursion_remaining;
+ #else // not 312
+ this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining;
+ #endif // GREENLET_PY312
+ #if GREENLET_PY313
+ this->current_frame = tstate->current_frame;
+ #elif GREENLET_USE_CFRAME
+ this->current_frame = tstate->cframe->current_frame;
+ #endif
+ this->datastack_chunk = tstate->datastack_chunk;
+ this->datastack_top = tstate->datastack_top;
+ this->datastack_limit = tstate->datastack_limit;
+
+ PyFrameObject *frame = PyThreadState_GetFrame((PyThreadState *)tstate);
+ Py_XDECREF(frame); // PyThreadState_GetFrame gives us a new
+ // reference.
+ this->_top_frame.steal(frame);
+ #if GREENLET_PY313
+ this->delete_later = Py_XNewRef(tstate->delete_later);
+ #elif GREENLET_PY312
+ this->trash_delete_nesting = tstate->trash.delete_nesting;
+ #else // not 312
+ this->trash_delete_nesting = tstate->trash_delete_nesting;
+ #endif // GREENLET_PY312
+#else // Not 311
+ this->recursion_depth = tstate->recursion_depth;
+ this->_top_frame.steal(tstate->frame);
+ this->trash_delete_nesting = tstate->trash_delete_nesting;
+#endif // GREENLET_PY311
+}
+
+#if GREENLET_PY312
+void GREENLET_NOINLINE(PythonState::unexpose_frames)()
+{
+ if (!this->top_frame()) {
+ return;
+ }
+
+ // See GreenletState::expose_frames() and the comment on frames_were_exposed
+ // for more information about this logic.
+ _PyInterpreterFrame *iframe = this->_top_frame->f_frame;
+ while (iframe != nullptr) {
+ _PyInterpreterFrame *prev_exposed = iframe->previous;
+ assert(iframe->frame_obj);
+ memcpy(&iframe->previous, &iframe->frame_obj->_f_frame_data[0],
+ sizeof(void *));
+ iframe = prev_exposed;
+ }
+}
+#else
+void PythonState::unexpose_frames()
+{}
+#endif
+
+void PythonState::operator>>(PyThreadState *const tstate) noexcept
+{
+ tstate->context = this->_context.relinquish_ownership();
+ /* Incrementing this value invalidates the contextvars cache,
+ which would otherwise remain valid across switches */
+ tstate->context_ver++;
+#if GREENLET_USE_CFRAME
+ tstate->cframe = this->cframe;
+ /*
+ If we were tracing, we need to keep tracing.
+ There should never be the possibility of hitting the
+ root_cframe here. See note above about why we can't
+ just copy this from ``origin->cframe->use_tracing``.
+ */
+ #if !GREENLET_PY312
+ tstate->cframe->use_tracing = this->use_tracing;
+ #endif
+#endif // GREENLET_USE_CFRAME
+#if GREENLET_PY311
+ #if GREENLET_PY314
+ tstate->py_recursion_remaining = tstate->py_recursion_limit - this->py_recursion_depth;
+ tstate->current_executor = this->current_executor;
+ this->unexpose_frames();
+ #elif GREENLET_PY312
+ tstate->py_recursion_remaining = tstate->py_recursion_limit - this->py_recursion_depth;
+ tstate->c_recursion_remaining = Py_C_RECURSION_LIMIT - this->c_recursion_depth;
+ this->unexpose_frames();
+ #else // \/ 3.11
+ tstate->recursion_remaining = tstate->recursion_limit - this->recursion_depth;
+ #endif // GREENLET_PY312
+ #if GREENLET_PY313
+ tstate->current_frame = this->current_frame;
+ #elif GREENLET_USE_CFRAME
+ tstate->cframe->current_frame = this->current_frame;
+ #endif
+ tstate->datastack_chunk = this->datastack_chunk;
+ tstate->datastack_top = this->datastack_top;
+ tstate->datastack_limit = this->datastack_limit;
+ this->_top_frame.relinquish_ownership();
+ #if GREENLET_PY313
+ Py_XDECREF(tstate->delete_later);
+ tstate->delete_later = this->delete_later;
+ Py_CLEAR(this->delete_later);
+ #elif GREENLET_PY312
+ tstate->trash.delete_nesting = this->trash_delete_nesting;
+ #else // not 3.12
+ tstate->trash_delete_nesting = this->trash_delete_nesting;
+ #endif // GREENLET_PY312
+#else // not 3.11
+ tstate->frame = this->_top_frame.relinquish_ownership();
+ tstate->recursion_depth = this->recursion_depth;
+ tstate->trash_delete_nesting = this->trash_delete_nesting;
+#endif // GREENLET_PY311
+}
+
+inline void PythonState::will_switch_from(PyThreadState *const origin_tstate) noexcept
+{
+#if GREENLET_USE_CFRAME && !GREENLET_PY312
+ // The weird thing is, we don't actually save this for an
+ // effect on the current greenlet, it's saved for an
+ // effect on the target greenlet. That is, we want
+ // continuity of this setting across the greenlet switch.
+ this->use_tracing = origin_tstate->cframe->use_tracing;
+#endif
+}
+
+void PythonState::set_initial_state(const PyThreadState* const tstate) noexcept
+{
+ this->_top_frame = nullptr;
+#if GREENLET_PY314
+ this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
+ this->current_executor = tstate->current_executor;
+#elif GREENLET_PY312
+ this->py_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
+ // XXX: TODO: Comment from a reviewer:
+ // Should this be ``Py_C_RECURSION_LIMIT - tstate->c_recursion_remaining``?
+ // But to me it looks more like that might not be the right
+ // initialization either?
+ this->c_recursion_depth = tstate->py_recursion_limit - tstate->py_recursion_remaining;
+#elif GREENLET_PY311
+ this->recursion_depth = tstate->recursion_limit - tstate->recursion_remaining;
+#else
+ this->recursion_depth = tstate->recursion_depth;
+#endif
+}
+// TODO: Better state management about when we own the top frame.
+int PythonState::tp_traverse(visitproc visit, void* arg, bool own_top_frame) noexcept
+{
+ Py_VISIT(this->_context.borrow());
+ if (own_top_frame) {
+ Py_VISIT(this->_top_frame.borrow());
+ }
+ return 0;
+}
+
+void PythonState::tp_clear(bool own_top_frame) noexcept
+{
+ PythonStateContext::tp_clear();
+ // If we get here owning a frame,
+ // we got dealloc'd without being finished. We may or may not be
+ // in the same thread.
+ if (own_top_frame) {
+ this->_top_frame.CLEAR();
+ }
+}
+
+#if GREENLET_USE_CFRAME
+void PythonState::set_new_cframe(_PyCFrame& frame) noexcept
+{
+ frame = *PyThreadState_GET()->cframe;
+ /* Make the target greenlet refer to the stack value. */
+ this->cframe = &frame;
+ /*
+ And restore the link to the previous frame so this one gets
+      unlinked appropriately.
+ */
+ this->cframe->previous = &PyThreadState_GET()->root_cframe;
+}
+#endif
+
+const PythonState::OwnedFrame& PythonState::top_frame() const noexcept
+{
+ return this->_top_frame;
+}
+
+void PythonState::did_finish(PyThreadState* tstate) noexcept
+{
+#if GREENLET_PY311
+ // See https://github.com/gevent/gevent/issues/1924 and
+ // https://github.com/python-greenlet/greenlet/issues/328. In
+ // short, Python 3.11 allocates memory for frames as a sort of
+ // linked list that's kept as part of PyThreadState in the
+ // ``datastack_chunk`` member and friends. These are saved and
+ // restored as part of switching greenlets.
+ //
+ // When we initially switch to a greenlet, we set those to NULL.
+ // That causes the frame management code to treat this like a
+ // brand new thread and start a fresh list of chunks, beginning
+ // with a new "root" chunk. As we make calls in this greenlet,
+ // those chunks get added, and as calls return, they get popped.
+ // But the frame code (pystate.c) is careful to make sure that the
+ // root chunk never gets popped.
+ //
+ // Thus, when a greenlet exits for the last time, there will be at
+ // least a single root chunk that we must be responsible for
+ // deallocating.
+ //
+ // The complex part is that these chunks are allocated and freed
+ // using ``_PyObject_VirtualAlloc``/``Free``. Those aren't public
+ // functions, and they aren't exported for linking. It so happens
+ // that we know they are just thin wrappers around the Arena
+ // allocator, so we can use that directly to deallocate in a
+ // compatible way.
+ //
+ // CAUTION: Check this implementation detail on every major version.
+ //
+ // It might be nice to be able to do this in our destructor, but
+ // can we be sure that no one else is using that memory? Plus, as
+ // described below, our pointers may not even be valid anymore. As
+ // a special case, there is one time that we know we can do this,
+ // and that's from the destructor of the associated UserGreenlet
+ // (NOT main greenlet)
+ PyObjectArenaAllocator alloc;
+ _PyStackChunk* chunk = nullptr;
+ if (tstate) {
+ // We really did finish, we can never be switched to again.
+ chunk = tstate->datastack_chunk;
+ // Unfortunately, we can't do much sanity checking. Our
+ // this->datastack_chunk pointer is out of date (evaluation may
+ // have popped down through it already) so we can't verify that
+ // we deallocate it. I don't think we can even check datastack_top
+ // for the same reason.
+
+ PyObject_GetArenaAllocator(&alloc);
+ tstate->datastack_chunk = nullptr;
+ tstate->datastack_limit = nullptr;
+ tstate->datastack_top = nullptr;
+
+ }
+ else if (this->datastack_chunk) {
+ // The UserGreenlet (NOT the main greenlet!) is being deallocated. If we're
+ // still holding a stack chunk, it's garbage because we know
+ // we can never switch back to let cPython clean it up.
+ // Because the last time we got switched away from, and we
+ // haven't run since then, we know our chain is valid and can
+ // be dealloced.
+ chunk = this->datastack_chunk;
+ PyObject_GetArenaAllocator(&alloc);
+ }
+
+ if (alloc.free && chunk) {
+ // In case the arena mechanism has been torn down already.
+ while (chunk) {
+ _PyStackChunk *prev = chunk->previous;
+ chunk->previous = nullptr;
+ alloc.free(alloc.ctx, chunk, chunk->size);
+ chunk = prev;
+ }
+ }
+
+ this->datastack_chunk = nullptr;
+ this->datastack_limit = nullptr;
+ this->datastack_top = nullptr;
+#endif
+}
+
+
+}; // namespace greenlet
+
+#endif // GREENLET_PYTHON_STATE_CPP
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TStackState.cpp b/tapdown/lib/python3.11/site-packages/greenlet/TStackState.cpp
new file mode 100644
index 0000000..9743ab5
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TStackState.cpp
@@ -0,0 +1,265 @@
+#ifndef GREENLET_STACK_STATE_CPP
+#define GREENLET_STACK_STATE_CPP
+
+#include "TGreenlet.hpp"
+
+namespace greenlet {
+
+#ifdef GREENLET_USE_STDIO
+#include <iostream>
+using std::cerr;
+using std::endl;
+
+std::ostream& operator<<(std::ostream& os, const StackState& s)
+{
+ os << "StackState(stack_start=" << (void*)s._stack_start
+ << ", stack_stop=" << (void*)s.stack_stop
+ << ", stack_copy=" << (void*)s.stack_copy
+ << ", stack_saved=" << s._stack_saved
+ << ", stack_prev=" << s.stack_prev
+ << ", addr=" << &s
+ << ")";
+ return os;
+}
+#endif
+
+StackState::StackState(void* mark, StackState& current)
+ : _stack_start(nullptr),
+ stack_stop((char*)mark),
+ stack_copy(nullptr),
+ _stack_saved(0),
+ /* Skip a dying greenlet */
+ stack_prev(current._stack_start
+                 ? &current
+ : current.stack_prev)
+{
+}
+
+StackState::StackState()
+ : _stack_start(nullptr),
+ stack_stop(nullptr),
+ stack_copy(nullptr),
+ _stack_saved(0),
+ stack_prev(nullptr)
+{
+}
+
+StackState::StackState(const StackState& other)
+// can't use a delegating constructor because of
+// MSVC for Python 2.7
+ : _stack_start(nullptr),
+ stack_stop(nullptr),
+ stack_copy(nullptr),
+ _stack_saved(0),
+ stack_prev(nullptr)
+{
+ this->operator=(other);
+}
+
+StackState& StackState::operator=(const StackState& other)
+{
+ if (&other == this) {
+ return *this;
+ }
+ if (other._stack_saved) {
+ throw std::runtime_error("Refusing to steal memory.");
+ }
+
+ //If we have memory allocated, dispose of it
+ this->free_stack_copy();
+
+ this->_stack_start = other._stack_start;
+ this->stack_stop = other.stack_stop;
+ this->stack_copy = other.stack_copy;
+ this->_stack_saved = other._stack_saved;
+ this->stack_prev = other.stack_prev;
+ return *this;
+}
+
+inline void StackState::free_stack_copy() noexcept
+{
+ PyMem_Free(this->stack_copy);
+ this->stack_copy = nullptr;
+ this->_stack_saved = 0;
+}
+
+inline void StackState::copy_heap_to_stack(const StackState& current) noexcept
+{
+
+ /* Restore the heap copy back into the C stack */
+ if (this->_stack_saved != 0) {
+ memcpy(this->_stack_start, this->stack_copy, this->_stack_saved);
+ this->free_stack_copy();
+ }
+    StackState* owner = const_cast<StackState*>(&current);
+ if (!owner->_stack_start) {
+ owner = owner->stack_prev; /* greenlet is dying, skip it */
+ }
+ while (owner && owner->stack_stop <= this->stack_stop) {
+ // cerr << "\tOwner: " << owner << endl;
+ owner = owner->stack_prev; /* find greenlet with more stack */
+ }
+ this->stack_prev = owner;
+ // cerr << "\tFinished with: " << *this << endl;
+}
+
+inline int StackState::copy_stack_to_heap_up_to(const char* const stop) noexcept
+{
+ /* Save more of g's stack into the heap -- at least up to 'stop'
+ g->stack_stop |________|
+ | |
+ | __ stop . . . . .
+ | | ==> . .
+ |________| _______
+ | | | |
+ | | | |
+ g->stack_start | | |_______| g->stack_copy
+ */
+ intptr_t sz1 = this->_stack_saved;
+ intptr_t sz2 = stop - this->_stack_start;
+ assert(this->_stack_start);
+ if (sz2 > sz1) {
+ char* c = (char*)PyMem_Realloc(this->stack_copy, sz2);
+ if (!c) {
+ PyErr_NoMemory();
+ return -1;
+ }
+ memcpy(c + sz1, this->_stack_start + sz1, sz2 - sz1);
+ this->stack_copy = c;
+ this->_stack_saved = sz2;
+ }
+ return 0;
+}
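+
+// Worked example (hypothetical sizes): with 4096 bytes already saved
+// (sz1 == 4096) and ``stop`` 6144 bytes above _stack_start (sz2 == 6144),
+// the copy buffer is realloc'd to 6144 bytes and only the new 2048-byte
+// tail [_stack_start + 4096, _stack_start + 6144) is copied; bytes that
+// were already saved are never copied twice.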
+
+inline int StackState::copy_stack_to_heap(char* const stackref,
+ const StackState& current) noexcept
+{
+ /* must free all the C stack up to target_stop */
+ const char* const target_stop = this->stack_stop;
+
+    StackState* owner = const_cast<StackState*>(&current);
+ assert(owner->_stack_saved == 0); // everything is present on the stack
+ if (!owner->_stack_start) {
+ owner = owner->stack_prev; /* not saved if dying */
+ }
+ else {
+ owner->_stack_start = stackref;
+ }
+
+ while (owner->stack_stop < target_stop) {
+        /* ts_current is entirely within the area to free */
+ if (owner->copy_stack_to_heap_up_to(owner->stack_stop)) {
+ return -1; /* XXX */
+ }
+ owner = owner->stack_prev;
+ }
+ if (owner != this) {
+ if (owner->copy_stack_to_heap_up_to(target_stop)) {
+ return -1; /* XXX */
+ }
+ }
+ return 0;
+}
+
+inline bool StackState::started() const noexcept
+{
+ return this->stack_stop != nullptr;
+}
+
+inline bool StackState::main() const noexcept
+{
+ return this->stack_stop == (char*)-1;
+}
+
+inline bool StackState::active() const noexcept
+{
+ return this->_stack_start != nullptr;
+}
+
+inline void StackState::set_active() noexcept
+{
+ assert(this->_stack_start == nullptr);
+ this->_stack_start = (char*)1;
+}
+
+inline void StackState::set_inactive() noexcept
+{
+ this->_stack_start = nullptr;
+ // XXX: What if we still have memory out there?
+ // That case is actually triggered by
+ // test_issue251_issue252_explicit_reference_not_collectable (greenlet.tests.test_leaks.TestLeaks)
+ // and
+ // test_issue251_issue252_need_to_collect_in_background
+ // (greenlet.tests.test_leaks.TestLeaks)
+ //
+ // Those objects never get deallocated, so the destructor never
+ // runs.
+ // It *seems* safe to clean up the memory here?
+ if (this->_stack_saved) {
+ this->free_stack_copy();
+ }
+}
+
+inline intptr_t StackState::stack_saved() const noexcept
+{
+ return this->_stack_saved;
+}
+
+inline char* StackState::stack_start() const noexcept
+{
+ return this->_stack_start;
+}
+
+
+inline StackState StackState::make_main() noexcept
+{
+ StackState s;
+ s._stack_start = (char*)1;
+ s.stack_stop = (char*)-1;
+ return s;
+}
+
+StackState::~StackState()
+{
+ if (this->_stack_saved != 0) {
+ this->free_stack_copy();
+ }
+}
+
+void StackState::copy_from_stack(void* vdest, const void* vsrc, size_t n) const
+{
+    char* dest = static_cast<char*>(vdest);
+    const char* src = static_cast<const char*>(vsrc);
+ if (src + n <= this->_stack_start
+ || src >= this->_stack_start + this->_stack_saved
+ || this->_stack_saved == 0) {
+ // Nothing we're copying was spilled from the stack
+ memcpy(dest, src, n);
+ return;
+ }
+
+ if (src < this->_stack_start) {
+ // Copy the part before the saved stack.
+ // We know src + n > _stack_start due to the test above.
+ const size_t nbefore = this->_stack_start - src;
+ memcpy(dest, src, nbefore);
+ dest += nbefore;
+ src += nbefore;
+ n -= nbefore;
+ }
+ // We know src >= _stack_start after the before-copy, and
+ // src < _stack_start + _stack_saved due to the first if condition
+ size_t nspilled = std::min(n, this->_stack_start + this->_stack_saved - src);
+ memcpy(dest, this->stack_copy + (src - this->_stack_start), nspilled);
+ dest += nspilled;
+ src += nspilled;
+ n -= nspilled;
+ if (n > 0) {
+ // Copy the part after the saved stack
+ memcpy(dest, src, n);
+ }
+}
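+
+// Concrete trace (hypothetical addresses): with _stack_start == 0x1000
+// and _stack_saved == 0x100, a request for [0x0ff0, 0x1010) is served in
+// two pieces: [0x0ff0, 0x1000) straight from live memory, then
+// [0x1000, 0x1010) from stack_copy[0x00..0x10).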
+
+}; // namespace greenlet
+
+#endif // GREENLET_STACK_STATE_CPP
diff --git a/tapdown/lib/python3.11/site-packages/greenlet/TThreadState.hpp b/tapdown/lib/python3.11/site-packages/greenlet/TThreadState.hpp
new file mode 100644
index 0000000..e4e6f6c
--- /dev/null
+++ b/tapdown/lib/python3.11/site-packages/greenlet/TThreadState.hpp
@@ -0,0 +1,497 @@
+#ifndef GREENLET_THREAD_STATE_HPP
+#define GREENLET_THREAD_STATE_HPP
+
+#include <ctime>