author    cyfraeviolae <cyfraeviolae>    2024-04-03 03:17:55 -0400
committer cyfraeviolae <cyfraeviolae>    2024-04-03 03:17:55 -0400
commit    12cf076118570eebbff08c6b3090e0d4798447a1 (patch)
tree      3ba25e17e3c3a5e82316558ba3864b955919ff72 /venv/lib/python3.11/site-packages/setuptools/_vendor
parent    c45662ff3923b34614ddcc8feb9195541166dcc5 (diff)
no venv
Diffstat (limited to 'venv/lib/python3.11/site-packages/setuptools/_vendor')
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/__init__.py  0
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-311.pyc  bin 202 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/ordered_set.cpython-311.pyc  bin 21788 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-311.pyc  bin 107619 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/zipp.cpython-311.pyc  bin 15995 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__init__.py  1047
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/__init__.cpython-311.pyc  bin 58241 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_adapters.cpython-311.pyc  bin 3854 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_collections.cpython-311.pyc  bin 2201 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_compat.cpython-311.pyc  bin 2723 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_functools.cpython-311.pyc  bin 3641 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_itertools.cpython-311.pyc  bin 2604 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_meta.cpython-311.pyc  bin 3008 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_text.cpython-311.pyc  bin 4399 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py  68
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_collections.py  30
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_compat.py  71
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py  104
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_itertools.py  73
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_meta.py  48
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_text.py  99
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__init__.py  36
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/__init__.cpython-311.pyc  bin 836 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_adapters.cpython-311.pyc  bin 10753 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_common.cpython-311.pyc  bin 4280 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_compat.cpython-311.pyc  bin 5565 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_itertools.cpython-311.pyc  bin 1398 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_legacy.cpython-311.pyc  bin 6496 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/abc.cpython-311.pyc  bin 7497 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/readers.cpython-311.pyc  bin 8371 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/simple.cpython-311.pyc  bin 6393 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_adapters.py  170
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_common.py  104
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_compat.py  98
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_itertools.py  35
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_legacy.py  121
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/abc.py  137
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/readers.py  122
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/simple.py  116
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__init__.py  0
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/__init__.cpython-311.pyc  bin 209 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/context.cpython-311.pyc  bin 9432 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/functools.cpython-311.pyc  bin 20289 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/context.py  213
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/functools.py  525
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__init__.py  599
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__pycache__/__init__.cpython-311.pyc  bin 26603 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py  4
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/__init__.cpython-311.pyc  bin 302 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-311.pyc  bin 149189 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/recipes.cpython-311.pyc  bin 23771 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/more.py  3824
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py  620
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/ordered_set.py  488
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__about__.py  26
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__init__.py  25
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-311.pyc  bin 653 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-311.pyc  bin 574 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_manylinux.cpython-311.pyc  bin 13240 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_musllinux.cpython-311.pyc  bin 8008 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-311.pyc  bin 3696 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-311.pyc  bin 16542 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-311.pyc  bin 7657 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-311.pyc  bin 34374 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/tags.cpython-311.pyc  bin 21359 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-311.pyc  bin 6694 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-311.pyc  bin 21886 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_manylinux.py  301
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_musllinux.py  136
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_structures.py  61
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/markers.py  304
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/requirements.py  146
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/specifiers.py  802
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/tags.py  487
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/utils.py  136
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/version.py  504
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__init__.py  331
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/__init__.cpython-311.pyc  bin 8347 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/actions.cpython-311.pyc  bin 8473 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/common.cpython-311.pyc  bin 14795 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/core.cpython-311.pyc  bin 277647 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/exceptions.cpython-311.pyc  bin 12937 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/helpers.cpython-311.pyc  bin 53638 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/results.cpython-311.pyc  bin 36321 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/testing.cpython-311.pyc  bin 19517 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/unicode.cpython-311.pyc  bin 15375 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/util.cpython-311.pyc  bin 14274 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py  207
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/common.py  424
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/core.py  5814
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py  642
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-311.pyc  bin 28010 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/exceptions.py  267
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/helpers.py  1088
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/results.py  760
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py  331
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/unicode.py  352
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/util.py  235
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__init__.py  11
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/__init__.cpython-311.pyc  bin 424 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_parser.cpython-311.pyc  bin 30863 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_re.cpython-311.pyc  bin 4503 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_types.cpython-311.pyc  bin 416 -> 0 bytes
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_parser.py  691
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_re.py  107
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_types.py  10
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/typing_extensions.py  2296
-rw-r--r--  venv/lib/python3.11/site-packages/setuptools/_vendor/zipp.py  329
108 files changed, 0 insertions(+), 25575 deletions(-)
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/__init__.py
+++ /dev/null
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 5daf15d..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/ordered_set.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/ordered_set.cpython-311.pyc
deleted file mode 100644
index c472157..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/ordered_set.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-311.pyc
deleted file mode 100644
index c3a355e..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/typing_extensions.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/zipp.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/zipp.cpython-311.pyc
deleted file mode 100644
index a1c81e9..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/__pycache__/zipp.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__init__.py
deleted file mode 100644
index 292e0c6..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__init__.py
+++ /dev/null
@@ -1,1047 +0,0 @@
-import os
-import re
-import abc
-import csv
-import sys
-from .. import zipp
-import email
-import pathlib
-import operator
-import textwrap
-import warnings
-import functools
-import itertools
-import posixpath
-import collections
-
-from . import _adapters, _meta
-from ._collections import FreezableDefaultDict, Pair
-from ._compat import (
- NullFinder,
- install,
- pypy_partial,
-)
-from ._functools import method_cache, pass_none
-from ._itertools import always_iterable, unique_everseen
-from ._meta import PackageMetadata, SimplePath
-
-from contextlib import suppress
-from importlib import import_module
-from importlib.abc import MetaPathFinder
-from itertools import starmap
-from typing import List, Mapping, Optional, Union
-
-
-__all__ = [
- 'Distribution',
- 'DistributionFinder',
- 'PackageMetadata',
- 'PackageNotFoundError',
- 'distribution',
- 'distributions',
- 'entry_points',
- 'files',
- 'metadata',
- 'packages_distributions',
- 'requires',
- 'version',
-]
-
-
-class PackageNotFoundError(ModuleNotFoundError):
- """The package was not found."""
-
- def __str__(self):
- return f"No package metadata was found for {self.name}"
-
- @property
- def name(self):
- (name,) = self.args
- return name
-
-
-class Sectioned:
- """
- A simple entry point config parser for performance
-
- >>> for item in Sectioned.read(Sectioned._sample):
- ... print(item)
- Pair(name='sec1', value='# comments ignored')
- Pair(name='sec1', value='a = 1')
- Pair(name='sec1', value='b = 2')
- Pair(name='sec2', value='a = 2')
-
- >>> res = Sectioned.section_pairs(Sectioned._sample)
- >>> item = next(res)
- >>> item.name
- 'sec1'
- >>> item.value
- Pair(name='a', value='1')
- >>> item = next(res)
- >>> item.value
- Pair(name='b', value='2')
- >>> item = next(res)
- >>> item.name
- 'sec2'
- >>> item.value
- Pair(name='a', value='2')
- >>> list(res)
- []
- """
-
- _sample = textwrap.dedent(
- """
- [sec1]
- # comments ignored
- a = 1
- b = 2
-
- [sec2]
- a = 2
- """
- ).lstrip()
-
- @classmethod
- def section_pairs(cls, text):
- return (
- section._replace(value=Pair.parse(section.value))
- for section in cls.read(text, filter_=cls.valid)
- if section.name is not None
- )
-
- @staticmethod
- def read(text, filter_=None):
- lines = filter(filter_, map(str.strip, text.splitlines()))
- name = None
- for value in lines:
- section_match = value.startswith('[') and value.endswith(']')
- if section_match:
- name = value.strip('[]')
- continue
- yield Pair(name, value)
-
- @staticmethod
- def valid(line):
- return line and not line.startswith('#')
-
-
-class DeprecatedTuple:
- """
- Provide subscript item access for backward compatibility.
-
- >>> recwarn = getfixture('recwarn')
- >>> ep = EntryPoint(name='name', value='value', group='group')
- >>> ep[:]
- ('name', 'value', 'group')
- >>> ep[0]
- 'name'
- >>> len(recwarn)
- 1
- """
-
- _warn = functools.partial(
- warnings.warn,
- "EntryPoint tuple interface is deprecated. Access members by name.",
- DeprecationWarning,
- stacklevel=pypy_partial(2),
- )
-
- def __getitem__(self, item):
- self._warn()
- return self._key()[item]
-
-
-class EntryPoint(DeprecatedTuple):
- """An entry point as defined by Python packaging conventions.
-
- See `the packaging docs on entry points
- <https://packaging.python.org/specifications/entry-points/>`_
- for more information.
- """
-
- pattern = re.compile(
- r'(?P<module>[\w.]+)\s*'
- r'(:\s*(?P<attr>[\w.]+)\s*)?'
- r'((?P<extras>\[.*\])\s*)?$'
- )
- """
- A regular expression describing the syntax for an entry point,
- which might look like:
-
- - module
- - package.module
- - package.module:attribute
- - package.module:object.attribute
- - package.module:attr [extra1, extra2]
-
- Other combinations are possible as well.
-
- The expression is lenient about whitespace around the ':',
- following the attr, and following any extras.
- """
-
- dist: Optional['Distribution'] = None
-
- def __init__(self, name, value, group):
- vars(self).update(name=name, value=value, group=group)
-
- def load(self):
- """Load the entry point from its definition. If only a module
- is indicated by the value, return that module. Otherwise,
- return the named object.
- """
- match = self.pattern.match(self.value)
- module = import_module(match.group('module'))
- attrs = filter(None, (match.group('attr') or '').split('.'))
- return functools.reduce(getattr, attrs, module)
-
- @property
- def module(self):
- match = self.pattern.match(self.value)
- return match.group('module')
-
- @property
- def attr(self):
- match = self.pattern.match(self.value)
- return match.group('attr')
-
- @property
- def extras(self):
- match = self.pattern.match(self.value)
- return list(re.finditer(r'\w+', match.group('extras') or ''))
-
- def _for(self, dist):
- vars(self).update(dist=dist)
- return self
-
- def __iter__(self):
- """
- Supply iter so one may construct dicts of EntryPoints by name.
- """
- msg = (
- "Construction of dict of EntryPoints is deprecated in "
- "favor of EntryPoints."
- )
- warnings.warn(msg, DeprecationWarning)
- return iter((self.name, self))
-
- def matches(self, **params):
- attrs = (getattr(self, param) for param in params)
- return all(map(operator.eq, params.values(), attrs))
-
- def _key(self):
- return self.name, self.value, self.group
-
- def __lt__(self, other):
- return self._key() < other._key()
-
- def __eq__(self, other):
- return self._key() == other._key()
-
- def __setattr__(self, name, value):
- raise AttributeError("EntryPoint objects are immutable.")
-
- def __repr__(self):
- return (
- f'EntryPoint(name={self.name!r}, value={self.value!r}, '
- f'group={self.group!r})'
- )
-
- def __hash__(self):
- return hash(self._key())
-
-
-class DeprecatedList(list):
- """
- Allow an otherwise immutable object to implement mutability
- for compatibility.
-
- >>> recwarn = getfixture('recwarn')
- >>> dl = DeprecatedList(range(3))
- >>> dl[0] = 1
- >>> dl.append(3)
- >>> del dl[3]
- >>> dl.reverse()
- >>> dl.sort()
- >>> dl.extend([4])
- >>> dl.pop(-1)
- 4
- >>> dl.remove(1)
- >>> dl += [5]
- >>> dl + [6]
- [1, 2, 5, 6]
- >>> dl + (6,)
- [1, 2, 5, 6]
- >>> dl.insert(0, 0)
- >>> dl
- [0, 1, 2, 5]
- >>> dl == [0, 1, 2, 5]
- True
- >>> dl == (0, 1, 2, 5)
- True
- >>> len(recwarn)
- 1
- """
-
- __slots__ = ()
-
- _warn = functools.partial(
- warnings.warn,
- "EntryPoints list interface is deprecated. Cast to list if needed.",
- DeprecationWarning,
- stacklevel=pypy_partial(2),
- )
-
- def _wrap_deprecated_method(method_name: str): # type: ignore
- def wrapped(self, *args, **kwargs):
- self._warn()
- return getattr(super(), method_name)(*args, **kwargs)
-
- return method_name, wrapped
-
- locals().update(
- map(
- _wrap_deprecated_method,
- '__setitem__ __delitem__ append reverse extend pop remove '
- '__iadd__ insert sort'.split(),
- )
- )
-
- def __add__(self, other):
- if not isinstance(other, tuple):
- self._warn()
- other = tuple(other)
- return self.__class__(tuple(self) + other)
-
- def __eq__(self, other):
- if not isinstance(other, tuple):
- self._warn()
- other = tuple(other)
-
- return tuple(self).__eq__(other)
-
-
-class EntryPoints(DeprecatedList):
- """
- An immutable collection of selectable EntryPoint objects.
- """
-
- __slots__ = ()
-
- def __getitem__(self, name): # -> EntryPoint:
- """
- Get the EntryPoint in self matching name.
- """
- if isinstance(name, int):
- warnings.warn(
- "Accessing entry points by index is deprecated. "
- "Cast to tuple if needed.",
- DeprecationWarning,
- stacklevel=2,
- )
- return super().__getitem__(name)
- try:
- return next(iter(self.select(name=name)))
- except StopIteration:
- raise KeyError(name)
-
- def select(self, **params):
- """
- Select entry points from self that match the
- given parameters (typically group and/or name).
- """
- return EntryPoints(ep for ep in self if ep.matches(**params))
-
- @property
- def names(self):
- """
- Return the set of all names of all entry points.
- """
- return {ep.name for ep in self}
-
- @property
- def groups(self):
- """
- Return the set of all groups of all entry points.
-
- For coverage while SelectableGroups is present.
- >>> EntryPoints().groups
- set()
- """
- return {ep.group for ep in self}
-
- @classmethod
- def _from_text_for(cls, text, dist):
- return cls(ep._for(dist) for ep in cls._from_text(text))
-
- @staticmethod
- def _from_text(text):
- return (
- EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
- for item in Sectioned.section_pairs(text or '')
- )
-
-
-class Deprecated:
- """
- Compatibility add-in for mapping to indicate that
- mapping behavior is deprecated.
-
- >>> recwarn = getfixture('recwarn')
- >>> class DeprecatedDict(Deprecated, dict): pass
- >>> dd = DeprecatedDict(foo='bar')
- >>> dd.get('baz', None)
- >>> dd['foo']
- 'bar'
- >>> list(dd)
- ['foo']
- >>> list(dd.keys())
- ['foo']
- >>> 'foo' in dd
- True
- >>> list(dd.values())
- ['bar']
- >>> len(recwarn)
- 1
- """
-
- _warn = functools.partial(
- warnings.warn,
- "SelectableGroups dict interface is deprecated. Use select.",
- DeprecationWarning,
- stacklevel=pypy_partial(2),
- )
-
- def __getitem__(self, name):
- self._warn()
- return super().__getitem__(name)
-
- def get(self, name, default=None):
- self._warn()
- return super().get(name, default)
-
- def __iter__(self):
- self._warn()
- return super().__iter__()
-
- def __contains__(self, *args):
- self._warn()
- return super().__contains__(*args)
-
- def keys(self):
- self._warn()
- return super().keys()
-
- def values(self):
- self._warn()
- return super().values()
-
-
-class SelectableGroups(Deprecated, dict):
- """
- A backward- and forward-compatible result from
- entry_points that fully implements the dict interface.
- """
-
- @classmethod
- def load(cls, eps):
- by_group = operator.attrgetter('group')
- ordered = sorted(eps, key=by_group)
- grouped = itertools.groupby(ordered, by_group)
- return cls((group, EntryPoints(eps)) for group, eps in grouped)
-
- @property
- def _all(self):
- """
- Reconstruct a list of all entrypoints from the groups.
- """
- groups = super(Deprecated, self).values()
- return EntryPoints(itertools.chain.from_iterable(groups))
-
- @property
- def groups(self):
- return self._all.groups
-
- @property
- def names(self):
- """
- for coverage:
- >>> SelectableGroups().names
- set()
- """
- return self._all.names
-
- def select(self, **params):
- if not params:
- return self
- return self._all.select(**params)
-
-
-class PackagePath(pathlib.PurePosixPath):
- """A reference to a path in a package"""
-
- def read_text(self, encoding='utf-8'):
- with self.locate().open(encoding=encoding) as stream:
- return stream.read()
-
- def read_binary(self):
- with self.locate().open('rb') as stream:
- return stream.read()
-
- def locate(self):
- """Return a path-like object for this path"""
- return self.dist.locate_file(self)
-
-
-class FileHash:
- def __init__(self, spec):
- self.mode, _, self.value = spec.partition('=')
-
- def __repr__(self):
- return f'<FileHash mode: {self.mode} value: {self.value}>'
-
-
-class Distribution:
- """A Python distribution package."""
-
- @abc.abstractmethod
- def read_text(self, filename):
- """Attempt to load metadata file given by the name.
-
- :param filename: The name of the file in the distribution info.
- :return: The text if found, otherwise None.
- """
-
- @abc.abstractmethod
- def locate_file(self, path):
- """
- Given a path to a file in this distribution, return a path
- to it.
- """
-
- @classmethod
- def from_name(cls, name):
- """Return the Distribution for the given package name.
-
- :param name: The name of the distribution package to search for.
- :return: The Distribution instance (or subclass thereof) for the named
- package, if found.
- :raises PackageNotFoundError: When the named package's distribution
- metadata cannot be found.
- """
- for resolver in cls._discover_resolvers():
- dists = resolver(DistributionFinder.Context(name=name))
- dist = next(iter(dists), None)
- if dist is not None:
- return dist
- else:
- raise PackageNotFoundError(name)
-
- @classmethod
- def discover(cls, **kwargs):
- """Return an iterable of Distribution objects for all packages.
-
- Pass a ``context`` or pass keyword arguments for constructing
- a context.
-
- :context: A ``DistributionFinder.Context`` object.
- :return: Iterable of Distribution objects for all packages.
- """
- context = kwargs.pop('context', None)
- if context and kwargs:
- raise ValueError("cannot accept context and kwargs")
- context = context or DistributionFinder.Context(**kwargs)
- return itertools.chain.from_iterable(
- resolver(context) for resolver in cls._discover_resolvers()
- )
-
- @staticmethod
- def at(path):
- """Return a Distribution for the indicated metadata path
-
- :param path: a string or path-like object
- :return: a concrete Distribution instance for the path
- """
- return PathDistribution(pathlib.Path(path))
-
- @staticmethod
- def _discover_resolvers():
- """Search the meta_path for resolvers."""
- declared = (
- getattr(finder, 'find_distributions', None) for finder in sys.meta_path
- )
- return filter(None, declared)
-
- @property
- def metadata(self) -> _meta.PackageMetadata:
- """Return the parsed metadata for this Distribution.
-
- The returned object will have keys that name the various bits of
- metadata. See PEP 566 for details.
- """
- text = (
- self.read_text('METADATA')
- or self.read_text('PKG-INFO')
- # This last clause is here to support old egg-info files. Its
- # effect is to just end up using the PathDistribution's self._path
- # (which points to the egg-info file) attribute unchanged.
- or self.read_text('')
- )
- return _adapters.Message(email.message_from_string(text))
-
- @property
- def name(self):
- """Return the 'Name' metadata for the distribution package."""
- return self.metadata['Name']
-
- @property
- def _normalized_name(self):
- """Return a normalized version of the name."""
- return Prepared.normalize(self.name)
-
- @property
- def version(self):
- """Return the 'Version' metadata for the distribution package."""
- return self.metadata['Version']
-
- @property
- def entry_points(self):
- return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
-
- @property
- def files(self):
- """Files in this distribution.
-
- :return: List of PackagePath for this distribution or None
-
- Result is `None` if the metadata file that enumerates files
- (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
- missing.
- Result may be empty if the metadata exists but is empty.
- """
-
- def make_file(name, hash=None, size_str=None):
- result = PackagePath(name)
- result.hash = FileHash(hash) if hash else None
- result.size = int(size_str) if size_str else None
- result.dist = self
- return result
-
- @pass_none
- def make_files(lines):
- return list(starmap(make_file, csv.reader(lines)))
-
- return make_files(self._read_files_distinfo() or self._read_files_egginfo())
-
- def _read_files_distinfo(self):
- """
- Read the lines of RECORD
- """
- text = self.read_text('RECORD')
- return text and text.splitlines()
-
- def _read_files_egginfo(self):
- """
- SOURCES.txt might contain literal commas, so wrap each line
- in quotes.
- """
- text = self.read_text('SOURCES.txt')
- return text and map('"{}"'.format, text.splitlines())
-
- @property
- def requires(self):
- """Generated requirements specified for this Distribution"""
- reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
- return reqs and list(reqs)
-
- def _read_dist_info_reqs(self):
- return self.metadata.get_all('Requires-Dist')
-
- def _read_egg_info_reqs(self):
- source = self.read_text('requires.txt')
- return pass_none(self._deps_from_requires_text)(source)
-
- @classmethod
- def _deps_from_requires_text(cls, source):
- return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
-
- @staticmethod
- def _convert_egg_info_reqs_to_simple_reqs(sections):
- """
- Historically, setuptools would solicit and store 'extra'
- requirements, including those with environment markers,
- in separate sections. More modern tools expect each
- dependency to be defined separately, with any relevant
- extras and environment markers attached directly to that
- requirement. This method converts the former to the
- latter. See _test_deps_from_requires_text for an example.
- """
-
- def make_condition(name):
- return name and f'extra == "{name}"'
-
- def quoted_marker(section):
- section = section or ''
- extra, sep, markers = section.partition(':')
- if extra and markers:
- markers = f'({markers})'
- conditions = list(filter(None, [markers, make_condition(extra)]))
- return '; ' + ' and '.join(conditions) if conditions else ''
-
- def url_req_space(req):
- """
- PEP 508 requires a space between the url_spec and the quoted_marker.
- Ref python/importlib_metadata#357.
- """
- # '@' is uniquely indicative of a url_req.
- return ' ' * ('@' in req)
-
- for section in sections:
- space = url_req_space(section.value)
- yield section.value + space + quoted_marker(section.name)
-
-
-class DistributionFinder(MetaPathFinder):
- """
- A MetaPathFinder capable of discovering installed distributions.
- """
-
- class Context:
- """
- Keyword arguments presented by the caller to
- ``distributions()`` or ``Distribution.discover()``
- to narrow the scope of a search for distributions
- in all DistributionFinders.
-
- Each DistributionFinder may expect any parameters
- and should attempt to honor the canonical
- parameters defined below when appropriate.
- """
-
- name = None
- """
- Specific name for which a distribution finder should match.
- A name of ``None`` matches all distributions.
- """
-
- def __init__(self, **kwargs):
- vars(self).update(kwargs)
-
- @property
- def path(self):
- """
- The sequence of directory path that a distribution finder
- should search.
-
- Typically refers to Python installed package paths such as
- "site-packages" directories and defaults to ``sys.path``.
- """
- return vars(self).get('path', sys.path)
-
- @abc.abstractmethod
- def find_distributions(self, context=Context()):
- """
- Find distributions.
-
- Return an iterable of all Distribution instances capable of
- loading the metadata for packages matching the ``context``,
- a DistributionFinder.Context instance.
- """
-
-
-class FastPath:
- """
- Micro-optimized class for searching a path for
- children.
-
- >>> FastPath('').children()
- ['...']
- """
-
- @functools.lru_cache() # type: ignore
- def __new__(cls, root):
- return super().__new__(cls)
-
- def __init__(self, root):
- self.root = str(root)
-
- def joinpath(self, child):
- return pathlib.Path(self.root, child)
-
- def children(self):
- with suppress(Exception):
- return os.listdir(self.root or '.')
- with suppress(Exception):
- return self.zip_children()
- return []
-
- def zip_children(self):
- zip_path = zipp.Path(self.root)
- names = zip_path.root.namelist()
- self.joinpath = zip_path.joinpath
-
- return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
-
- def search(self, name):
- return self.lookup(self.mtime).search(name)
-
- @property
- def mtime(self):
- with suppress(OSError):
- return os.stat(self.root).st_mtime
- self.lookup.cache_clear()
-
- @method_cache
- def lookup(self, mtime):
- return Lookup(self)
-
-
-class Lookup:
- def __init__(self, path: FastPath):
- base = os.path.basename(path.root).lower()
- base_is_egg = base.endswith(".egg")
- self.infos = FreezableDefaultDict(list)
- self.eggs = FreezableDefaultDict(list)
-
- for child in path.children():
- low = child.lower()
- if low.endswith((".dist-info", ".egg-info")):
- # rpartition is faster than splitext and suitable for this purpose.
- name = low.rpartition(".")[0].partition("-")[0]
- normalized = Prepared.normalize(name)
- self.infos[normalized].append(path.joinpath(child))
- elif base_is_egg and low == "egg-info":
- name = base.rpartition(".")[0].partition("-")[0]
- legacy_normalized = Prepared.legacy_normalize(name)
- self.eggs[legacy_normalized].append(path.joinpath(child))
-
- self.infos.freeze()
- self.eggs.freeze()
-
- def search(self, prepared):
- infos = (
- self.infos[prepared.normalized]
- if prepared
- else itertools.chain.from_iterable(self.infos.values())
- )
- eggs = (
- self.eggs[prepared.legacy_normalized]
- if prepared
- else itertools.chain.from_iterable(self.eggs.values())
- )
- return itertools.chain(infos, eggs)
-
-
-class Prepared:
- """
- A prepared search for metadata on a possibly-named package.
- """
-
- normalized = None
- legacy_normalized = None
-
- def __init__(self, name):
- self.name = name
- if name is None:
- return
- self.normalized = self.normalize(name)
- self.legacy_normalized = self.legacy_normalize(name)
-
- @staticmethod
- def normalize(name):
- """
- PEP 503 normalization plus dashes as underscores.
- """
- return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
-
- @staticmethod
- def legacy_normalize(name):
- """
- Normalize the package name as found in the convention in
- older packaging tools versions and specs.
- """
- return name.lower().replace('-', '_')
-
- def __bool__(self):
- return bool(self.name)
-
-
-@install
-class MetadataPathFinder(NullFinder, DistributionFinder):
- """A degenerate finder for distribution packages on the file system.
-
- This finder supplies only a find_distributions() method for versions
- of Python that do not have a PathFinder find_distributions().
- """
-
- def find_distributions(self, context=DistributionFinder.Context()):
- """
- Find distributions.
-
- Return an iterable of all Distribution instances capable of
- loading the metadata for packages matching ``context.name``
- (or all names if ``None`` indicated) along the paths in the list
- of directories ``context.path``.
- """
- found = self._search_paths(context.name, context.path)
- return map(PathDistribution, found)
-
- @classmethod
- def _search_paths(cls, name, paths):
- """Find metadata directories in paths heuristically."""
- prepared = Prepared(name)
- return itertools.chain.from_iterable(
- path.search(prepared) for path in map(FastPath, paths)
- )
-
- def invalidate_caches(cls):
- FastPath.__new__.cache_clear()
-
-
-class PathDistribution(Distribution):
- def __init__(self, path: SimplePath):
- """Construct a distribution.
-
- :param path: SimplePath indicating the metadata directory.
- """
- self._path = path
-
- def read_text(self, filename):
- with suppress(
- FileNotFoundError,
- IsADirectoryError,
- KeyError,
- NotADirectoryError,
- PermissionError,
- ):
- return self._path.joinpath(filename).read_text(encoding='utf-8')
-
- read_text.__doc__ = Distribution.read_text.__doc__
-
- def locate_file(self, path):
- return self._path.parent / path
-
- @property
- def _normalized_name(self):
- """
- Performance optimization: where possible, resolve the
- normalized name from the file system path.
- """
- stem = os.path.basename(str(self._path))
- return self._name_from_stem(stem) or super()._normalized_name
-
- def _name_from_stem(self, stem):
- name, ext = os.path.splitext(stem)
- if ext not in ('.dist-info', '.egg-info'):
- return
- name, sep, rest = stem.partition('-')
- return name
-
-
-def distribution(distribution_name):
- """Get the ``Distribution`` instance for the named package.
-
- :param distribution_name: The name of the distribution package as a string.
- :return: A ``Distribution`` instance (or subclass thereof).
- """
- return Distribution.from_name(distribution_name)
-
-
-def distributions(**kwargs):
- """Get all ``Distribution`` instances in the current environment.
-
- :return: An iterable of ``Distribution`` instances.
- """
- return Distribution.discover(**kwargs)
-
-
-def metadata(distribution_name) -> _meta.PackageMetadata:
- """Get the metadata for the named package.
-
- :param distribution_name: The name of the distribution package to query.
- :return: A PackageMetadata containing the parsed metadata.
- """
- return Distribution.from_name(distribution_name).metadata
-
-
-def version(distribution_name):
- """Get the version string for the named package.
-
- :param distribution_name: The name of the distribution package to query.
- :return: The version string for the package as defined in the package's
- "Version" metadata key.
- """
- return distribution(distribution_name).version
-
-
-def entry_points(**params) -> Union[EntryPoints, SelectableGroups]:
- """Return EntryPoint objects for all installed packages.
-
- Pass selection parameters (group or name) to filter the
- result to entry points matching those properties (see
- EntryPoints.select()).
-
- For compatibility, returns ``SelectableGroups`` object unless
- selection parameters are supplied. In the future, this function
- will return ``EntryPoints`` instead of ``SelectableGroups``
- even when no selection parameters are supplied.
-
- For maximum future compatibility, pass selection parameters
- or invoke ``.select`` with parameters on the result.
-
- :return: EntryPoints or SelectableGroups for all installed packages.
- """
- norm_name = operator.attrgetter('_normalized_name')
- unique = functools.partial(unique_everseen, key=norm_name)
- eps = itertools.chain.from_iterable(
- dist.entry_points for dist in unique(distributions())
- )
- return SelectableGroups.load(eps).select(**params)
-
-
-def files(distribution_name):
- """Return a list of files for the named package.
-
- :param distribution_name: The name of the distribution package to query.
- :return: List of files composing the distribution.
- """
- return distribution(distribution_name).files
-
-
-def requires(distribution_name):
- """
- Return a list of requirements for the named package.
-
- :return: An iterator of requirements, suitable for
- packaging.requirement.Requirement.
- """
- return distribution(distribution_name).requires
-
-
-def packages_distributions() -> Mapping[str, List[str]]:
- """
- Return a mapping of top-level packages to their
- distributions.
-
- >>> import collections.abc
- >>> pkgs = packages_distributions()
- >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
- True
- """
- pkg_to_dist = collections.defaultdict(list)
- for dist in distributions():
- for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
- pkg_to_dist[pkg].append(dist.metadata['Name'])
- return dict(pkg_to_dist)
-
-
-def _top_level_declared(dist):
- return (dist.read_text('top_level.txt') or '').split()
-
-
-def _top_level_inferred(dist):
- return {
- f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
- for f in always_iterable(dist.files)
- if f.suffix == ".py"
- }
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 2a8cc3f..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_adapters.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_adapters.cpython-311.pyc
deleted file mode 100644
index 8c3fe8d..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_adapters.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_collections.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_collections.cpython-311.pyc
deleted file mode 100644
index e59495f..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_collections.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_compat.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_compat.cpython-311.pyc
deleted file mode 100644
index eb7d7d9..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_compat.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_functools.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_functools.cpython-311.pyc
deleted file mode 100644
index bff85b7..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_functools.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_itertools.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_itertools.cpython-311.pyc
deleted file mode 100644
index d0c57ee..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_itertools.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_meta.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_meta.cpython-311.pyc
deleted file mode 100644
index b07e41c..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_meta.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_text.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_text.cpython-311.pyc
deleted file mode 100644
index 32f6749..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/__pycache__/_text.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py
deleted file mode 100644
index aa460d3..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_adapters.py
+++ /dev/null
@@ -1,68 +0,0 @@
-import re
-import textwrap
-import email.message
-
-from ._text import FoldedCase
-
-
-class Message(email.message.Message):
- multiple_use_keys = set(
- map(
- FoldedCase,
- [
- 'Classifier',
- 'Obsoletes-Dist',
- 'Platform',
- 'Project-URL',
- 'Provides-Dist',
- 'Provides-Extra',
- 'Requires-Dist',
- 'Requires-External',
- 'Supported-Platform',
- 'Dynamic',
- ],
- )
- )
- """
- Keys that may be indicated multiple times per PEP 566.
- """
-
- def __new__(cls, orig: email.message.Message):
- res = super().__new__(cls)
- vars(res).update(vars(orig))
- return res
-
- def __init__(self, *args, **kwargs):
- self._headers = self._repair_headers()
-
- # suppress spurious error from mypy
- def __iter__(self):
- return super().__iter__()
-
- def _repair_headers(self):
- def redent(value):
- "Correct for RFC822 indentation"
- if not value or '\n' not in value:
- return value
- return textwrap.dedent(' ' * 8 + value)
-
- headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
- if self._payload:
- headers.append(('Description', self.get_payload()))
- return headers
-
- @property
- def json(self):
- """
- Convert PackageMetadata to a JSON-compatible format
- per PEP 0566.
- """
-
- def transform(key):
- value = self.get_all(key) if key in self.multiple_use_keys else self[key]
- if key == 'Keywords':
- value = re.split(r'\s+', value)
- tk = key.lower().replace('-', '_')
- return tk, value
-
- return dict(map(transform, map(FoldedCase, self)))
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_collections.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_collections.py
deleted file mode 100644
index cf0954e..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_collections.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import collections
-
-
-# from jaraco.collections 3.3
-class FreezableDefaultDict(collections.defaultdict):
- """
- Often it is desirable to prevent the mutation of
- a default dict after its initial construction, such
- as to prevent mutation during iteration.
-
- >>> dd = FreezableDefaultDict(list)
- >>> dd[0].append('1')
- >>> dd.freeze()
- >>> dd[1]
- []
- >>> len(dd)
- 1
- """
-
- def __missing__(self, key):
- return getattr(self, '_frozen', super().__missing__)(key)
-
- def freeze(self):
- self._frozen = lambda key: self.default_factory()
-
-
-class Pair(collections.namedtuple('Pair', 'name value')):
- @classmethod
- def parse(cls, text):
- return cls(*map(str.strip, text.split("=", 1)))
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_compat.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_compat.py
deleted file mode 100644
index ef3136f..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_compat.py
+++ /dev/null
@@ -1,71 +0,0 @@
-import sys
-import platform
-
-
-__all__ = ['install', 'NullFinder', 'Protocol']
-
-
-try:
- from typing import Protocol
-except ImportError: # pragma: no cover
- from ..typing_extensions import Protocol # type: ignore
-
-
-def install(cls):
- """
- Class decorator for installation on sys.meta_path.
-
- Adds the backport DistributionFinder to sys.meta_path and
- attempts to disable the finder functionality of the stdlib
- DistributionFinder.
- """
- sys.meta_path.append(cls())
- disable_stdlib_finder()
- return cls
-
-
-def disable_stdlib_finder():
- """
- Give the backport primacy for discovering path-based distributions
- by monkey-patching the stdlib O_O.
-
- See #91 for more background for rationale on this sketchy
- behavior.
- """
-
- def matches(finder):
- return getattr(
- finder, '__module__', None
- ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')
-
- for finder in filter(matches, sys.meta_path): # pragma: nocover
- del finder.find_distributions
-
-
-class NullFinder:
- """
- A "Finder" (aka "MetaClassFinder") that never finds any modules,
- but may find distributions.
- """
-
- @staticmethod
- def find_spec(*args, **kwargs):
- return None
-
- # In Python 2, the import system requires finders
- # to have a find_module() method, but this usage
- # is deprecated in Python 3 in favor of find_spec().
- # For the purposes of this finder (i.e. being present
- # on sys.meta_path but having no other import
- # system functionality), the two methods are identical.
- find_module = find_spec
-
-
-def pypy_partial(val):
- """
- Adjust for variable stacklevel on partial under PyPy.
-
- Workaround for #327.
- """
- is_pypy = platform.python_implementation() == 'PyPy'
- return val + is_pypy
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py
deleted file mode 100644
index 71f66bd..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_functools.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import types
-import functools
-
-
-# from jaraco.functools 3.3
-def method_cache(method, cache_wrapper=None):
- """
- Wrap lru_cache to support storing the cache data in the object instances.
-
- Abstracts the common paradigm where the method explicitly saves an
- underscore-prefixed protected property on first call and returns that
- subsequently.
-
- >>> class MyClass:
- ... calls = 0
- ...
- ... @method_cache
- ... def method(self, value):
- ... self.calls += 1
- ... return value
-
- >>> a = MyClass()
- >>> a.method(3)
- 3
- >>> for x in range(75):
- ... res = a.method(x)
- >>> a.calls
- 75
-
- Note that the apparent behavior will be exactly like that of lru_cache
- except that the cache is stored on each instance, so values in one
- instance will not flush values from another, and when an instance is
- deleted, so are the cached values for that instance.
-
- >>> b = MyClass()
- >>> for x in range(35):
- ... res = b.method(x)
- >>> b.calls
- 35
- >>> a.method(0)
- 0
- >>> a.calls
- 75
-
- Note that if method had been decorated with ``functools.lru_cache()``,
- a.calls would have been 76 (due to the cached value of 0 having been
- flushed by the 'b' instance).
-
- Clear the cache with ``.cache_clear()``
-
- >>> a.method.cache_clear()
-
- Same for a method that hasn't yet been called.
-
- >>> c = MyClass()
- >>> c.method.cache_clear()
-
- Another cache wrapper may be supplied:
-
- >>> cache = functools.lru_cache(maxsize=2)
- >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
- >>> a = MyClass()
- >>> a.method2()
- 3
-
- Caution - do not subsequently wrap the method with another decorator, such
- as ``@property``, which changes the semantics of the function.
-
- See also
- http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
- for another implementation and additional justification.
- """
- cache_wrapper = cache_wrapper or functools.lru_cache()
-
- def wrapper(self, *args, **kwargs):
- # it's the first call, replace the method with a cached, bound method
- bound_method = types.MethodType(method, self)
- cached_method = cache_wrapper(bound_method)
- setattr(self, method.__name__, cached_method)
- return cached_method(*args, **kwargs)
-
- # Support cache clear even before cache has been created.
- wrapper.cache_clear = lambda: None
-
- return wrapper
-
-
-# From jaraco.functools 3.3
-def pass_none(func):
- """
- Wrap func so it's not called if its first param is None
-
- >>> print_text = pass_none(print)
- >>> print_text('text')
- text
- >>> print_text(None)
- """
-
- @functools.wraps(func)
- def wrapper(param, *args, **kwargs):
- if param is not None:
- return func(param, *args, **kwargs)
-
- return wrapper
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_itertools.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_itertools.py
deleted file mode 100644
index d4ca9b9..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_itertools.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from itertools import filterfalse
-
-
-def unique_everseen(iterable, key=None):
- "List unique elements, preserving order. Remember all elements ever seen."
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D
- # unique_everseen('ABBCcAD', str.lower) --> A B C D
- seen = set()
- seen_add = seen.add
- if key is None:
- for element in filterfalse(seen.__contains__, iterable):
- seen_add(element)
- yield element
- else:
- for element in iterable:
- k = key(element)
- if k not in seen:
- seen_add(k)
- yield element
-
-
-# copied from more_itertools 8.8
-def always_iterable(obj, base_type=(str, bytes)):
- """If *obj* is iterable, return an iterator over its items::
-
- >>> obj = (1, 2, 3)
- >>> list(always_iterable(obj))
- [1, 2, 3]
-
- If *obj* is not iterable, return a one-item iterable containing *obj*::
-
- >>> obj = 1
- >>> list(always_iterable(obj))
- [1]
-
- If *obj* is ``None``, return an empty iterable:
-
- >>> obj = None
- >>> list(always_iterable(None))
- []
-
- By default, binary and text strings are not considered iterable::
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj))
- ['foo']
-
- If *base_type* is set, objects for which ``isinstance(obj, base_type)``
- returns ``True`` won't be considered iterable.
-
- >>> obj = {'a': 1}
- >>> list(always_iterable(obj)) # Iterate over the dict's keys
- ['a']
- >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
- [{'a': 1}]
-
- Set *base_type* to ``None`` to avoid any special handling and treat objects
- Python considers iterable as iterable:
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj, base_type=None))
- ['f', 'o', 'o']
- """
- if obj is None:
- return iter(())
-
- if (base_type is not None) and isinstance(obj, base_type):
- return iter((obj,))
-
- try:
- return iter(obj)
- except TypeError:
- return iter((obj,))
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_meta.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_meta.py
deleted file mode 100644
index 37ee43e..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_meta.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from ._compat import Protocol
-from typing import Any, Dict, Iterator, List, TypeVar, Union
-
-
-_T = TypeVar("_T")
-
-
-class PackageMetadata(Protocol):
- def __len__(self) -> int:
- ... # pragma: no cover
-
- def __contains__(self, item: str) -> bool:
- ... # pragma: no cover
-
- def __getitem__(self, key: str) -> str:
- ... # pragma: no cover
-
- def __iter__(self) -> Iterator[str]:
- ... # pragma: no cover
-
- def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
- """
- Return all values associated with a possibly multi-valued key.
- """
-
- @property
- def json(self) -> Dict[str, Union[str, List[str]]]:
- """
- A JSON-compatible form of the metadata.
- """
-
-
-class SimplePath(Protocol):
- """
- A minimal subset of pathlib.Path required by PathDistribution.
- """
-
- def joinpath(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def __truediv__(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def parent(self) -> 'SimplePath':
- ... # pragma: no cover
-
- def read_text(self) -> str:
- ... # pragma: no cover
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_text.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_text.py
deleted file mode 100644
index c88cfbb..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_metadata/_text.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import re
-
-from ._functools import method_cache
-
-
-# from jaraco.text 3.5
-class FoldedCase(str):
- """
- A case insensitive string class; behaves just like str
- except compares equal when the only variation is case.
-
- >>> s = FoldedCase('hello world')
-
- >>> s == 'Hello World'
- True
-
- >>> 'Hello World' == s
- True
-
- >>> s != 'Hello World'
- False
-
- >>> s.index('O')
- 4
-
- >>> s.split('O')
- ['hell', ' w', 'rld']
-
- >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
- ['alpha', 'Beta', 'GAMMA']
-
- Sequence membership is straightforward.
-
- >>> "Hello World" in [s]
- True
- >>> s in ["Hello World"]
- True
-
- You may test for set inclusion, but candidate and elements
- must both be folded.
-
- >>> FoldedCase("Hello World") in {s}
- True
- >>> s in {FoldedCase("Hello World")}
- True
-
- String inclusion works as long as the FoldedCase object
- is on the right.
-
- >>> "hello" in FoldedCase("Hello World")
- True
-
- But not if the FoldedCase object is on the left:
-
- >>> FoldedCase('hello') in 'Hello World'
- False
-
-    In that case, use ``in_``:
-
- >>> FoldedCase('hello').in_('Hello World')
- True
-
- >>> FoldedCase('hello') > FoldedCase('Hello')
- False
- """
-
- def __lt__(self, other):
- return self.lower() < other.lower()
-
- def __gt__(self, other):
- return self.lower() > other.lower()
-
- def __eq__(self, other):
- return self.lower() == other.lower()
-
- def __ne__(self, other):
- return self.lower() != other.lower()
-
- def __hash__(self):
- return hash(self.lower())
-
- def __contains__(self, other):
- return super().lower().__contains__(other.lower())
-
- def in_(self, other):
- "Does self appear in other?"
- return self in FoldedCase(other)
-
- # cache lower since it's likely to be called frequently.
- @method_cache
- def lower(self):
- return super().lower()
-
- def index(self, sub):
- return self.lower().index(sub.lower())
-
- def split(self, splitter=' ', maxsplit=0):
- pattern = re.compile(re.escape(splitter), re.I)
- return pattern.split(self, maxsplit)
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__init__.py
deleted file mode 100644
index 34e3a99..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""Read resources contained within a package."""
-
-from ._common import (
- as_file,
- files,
- Package,
-)
-
-from ._legacy import (
- contents,
- open_binary,
- read_binary,
- open_text,
- read_text,
- is_resource,
- path,
- Resource,
-)
-
-from .abc import ResourceReader
-
-
-__all__ = [
- 'Package',
- 'Resource',
- 'ResourceReader',
- 'as_file',
- 'contents',
- 'files',
- 'is_resource',
- 'open_binary',
- 'open_text',
- 'path',
- 'read_binary',
- 'read_text',
-]
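
The two entry points that matter for new code in the listing above are files() and as_file(); the rest are deprecated shims. A usage sketch where 'mypkg' and 'data.txt' are placeholders:

import importlib_resources as resources

# files() returns a Traversable rooted at the package; reads work even
# when the package lives inside a zip, with no extraction step.
text = resources.files('mypkg').joinpath('data.txt').read_text(encoding='utf-8')

# as_file() yields a concrete filesystem path (a temporary copy when
# necessary) for APIs that insist on a real file.
with resources.as_file(resources.files('mypkg') / 'data.txt') as path:
    print(path)
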
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index c079896..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_adapters.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_adapters.cpython-311.pyc
deleted file mode 100644
index a0a601a..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_adapters.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_common.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_common.cpython-311.pyc
deleted file mode 100644
index 7655825..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_common.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_compat.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_compat.cpython-311.pyc
deleted file mode 100644
index 2c240c2..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_compat.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_itertools.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_itertools.cpython-311.pyc
deleted file mode 100644
index 973d41b..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_itertools.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_legacy.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_legacy.cpython-311.pyc
deleted file mode 100644
index 16a93cc..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/_legacy.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/abc.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/abc.cpython-311.pyc
deleted file mode 100644
index e68e197..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/abc.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/readers.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/readers.cpython-311.pyc
deleted file mode 100644
index 2403f51..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/readers.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/simple.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/simple.cpython-311.pyc
deleted file mode 100644
index 56b77c3..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/__pycache__/simple.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_adapters.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_adapters.py
deleted file mode 100644
index ea363d8..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_adapters.py
+++ /dev/null
@@ -1,170 +0,0 @@
-from contextlib import suppress
-from io import TextIOWrapper
-
-from . import abc
-
-
-class SpecLoaderAdapter:
- """
- Adapt a package spec to adapt the underlying loader.
- """
-
- def __init__(self, spec, adapter=lambda spec: spec.loader):
- self.spec = spec
- self.loader = adapter(spec)
-
- def __getattr__(self, name):
- return getattr(self.spec, name)
-
-
-class TraversableResourcesLoader:
- """
- Adapt a loader to provide TraversableResources.
- """
-
- def __init__(self, spec):
- self.spec = spec
-
- def get_resource_reader(self, name):
- return CompatibilityFiles(self.spec)._native()
-
-
-def _io_wrapper(file, mode='r', *args, **kwargs):
- if mode == 'r':
- return TextIOWrapper(file, *args, **kwargs)
- elif mode == 'rb':
- return file
- raise ValueError(
- "Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
- )
-
-
-class CompatibilityFiles:
- """
- Adapter for an existing or non-existent resource reader
- to provide a compatibility .files().
- """
-
- class SpecPath(abc.Traversable):
- """
- Path tied to a module spec.
- Can be read and exposes the resource reader children.
- """
-
- def __init__(self, spec, reader):
- self._spec = spec
- self._reader = reader
-
- def iterdir(self):
- if not self._reader:
- return iter(())
- return iter(
- CompatibilityFiles.ChildPath(self._reader, path)
- for path in self._reader.contents()
- )
-
- def is_file(self):
- return False
-
- is_dir = is_file
-
- def joinpath(self, other):
- if not self._reader:
- return CompatibilityFiles.OrphanPath(other)
- return CompatibilityFiles.ChildPath(self._reader, other)
-
- @property
- def name(self):
- return self._spec.name
-
- def open(self, mode='r', *args, **kwargs):
- return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
-
- class ChildPath(abc.Traversable):
- """
- Path tied to a resource reader child.
- Can be read but doesn't expose any meaningful children.
- """
-
- def __init__(self, reader, name):
- self._reader = reader
- self._name = name
-
- def iterdir(self):
- return iter(())
-
- def is_file(self):
- return self._reader.is_resource(self.name)
-
- def is_dir(self):
- return not self.is_file()
-
- def joinpath(self, other):
- return CompatibilityFiles.OrphanPath(self.name, other)
-
- @property
- def name(self):
- return self._name
-
- def open(self, mode='r', *args, **kwargs):
- return _io_wrapper(
- self._reader.open_resource(self.name), mode, *args, **kwargs
- )
-
- class OrphanPath(abc.Traversable):
- """
- Orphan path, not tied to a module spec or resource reader.
- Can't be read and doesn't expose any meaningful children.
- """
-
- def __init__(self, *path_parts):
- if len(path_parts) < 1:
- raise ValueError('Need at least one path part to construct a path')
- self._path = path_parts
-
- def iterdir(self):
- return iter(())
-
- def is_file(self):
- return False
-
- is_dir = is_file
-
- def joinpath(self, other):
- return CompatibilityFiles.OrphanPath(*self._path, other)
-
- @property
- def name(self):
- return self._path[-1]
-
- def open(self, mode='r', *args, **kwargs):
- raise FileNotFoundError("Can't open orphan path")
-
- def __init__(self, spec):
- self.spec = spec
-
- @property
- def _reader(self):
- with suppress(AttributeError):
- return self.spec.loader.get_resource_reader(self.spec.name)
-
- def _native(self):
- """
- Return the native reader if it supports files().
- """
- reader = self._reader
- return reader if hasattr(reader, 'files') else self
-
- def __getattr__(self, attr):
- return getattr(self._reader, attr)
-
- def files(self):
- return CompatibilityFiles.SpecPath(self.spec, self._reader)
-
-
-def wrap_spec(package):
- """
- Construct a package spec with traversable compatibility
- on the spec/loader/reader.
- """
- return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
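
A sketch of how these adapters compose, using the module's own wrap_spec(); the stdlib email package stands in for any importable package:

import email

adapted = wrap_spec(email)  # SpecLoaderAdapter over email.__spec__
reader = adapted.loader.get_resource_reader(adapted.name)
# Whether the native reader or CompatibilityFiles comes back,
# .files() is guaranteed to exist:
root = reader.files()
print(sorted(item.name for item in root.iterdir())[:3])
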
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_common.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_common.py
deleted file mode 100644
index a12e2c7..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_common.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import os
-import pathlib
-import tempfile
-import functools
-import contextlib
-import types
-import importlib
-
-from typing import Union, Optional
-from .abc import ResourceReader, Traversable
-
-from ._compat import wrap_spec
-
-Package = Union[types.ModuleType, str]
-
-
-def files(package):
- # type: (Package) -> Traversable
- """
- Get a Traversable resource from a package
- """
- return from_package(get_package(package))
-
-
-def get_resource_reader(package):
- # type: (types.ModuleType) -> Optional[ResourceReader]
- """
- Return the package's loader if it's a ResourceReader.
- """
-    # We can't use an issubclass() check here because abc's
-    # __subclasscheck__() hook wants to create a weak reference to the
-    # object, but zipimport.zipimporter does not support weak references,
-    # resulting in a TypeError. That seems terrible.
- spec = package.__spec__
- reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
- if reader is None:
- return None
- return reader(spec.name) # type: ignore
-
-
-def resolve(cand):
- # type: (Package) -> types.ModuleType
- return cand if isinstance(cand, types.ModuleType) else importlib.import_module(cand)
-
-
-def get_package(package):
- # type: (Package) -> types.ModuleType
- """Take a package name or module object and return the module.
-
- Raise an exception if the resolved module is not a package.
- """
- resolved = resolve(package)
- if wrap_spec(resolved).submodule_search_locations is None:
- raise TypeError(f'{package!r} is not a package')
- return resolved
-
-
-def from_package(package):
- """
- Return a Traversable object for the given package.
-
- """
- spec = wrap_spec(package)
- reader = spec.loader.get_resource_reader(spec.name)
- return reader.files()
-
-
-@contextlib.contextmanager
-def _tempfile(reader, suffix=''):
- # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
- # blocks due to the need to close the temporary file to work on Windows
- # properly.
- fd, raw_path = tempfile.mkstemp(suffix=suffix)
- try:
- try:
- os.write(fd, reader())
- finally:
- os.close(fd)
- del reader
- yield pathlib.Path(raw_path)
- finally:
- try:
- os.remove(raw_path)
- except FileNotFoundError:
- pass
-
-
-@functools.singledispatch
-def as_file(path):
- """
- Given a Traversable object, return that object as a
- path on the local file system in a context manager.
- """
- return _tempfile(path.read_bytes, suffix=path.name)
-
-
-@as_file.register(pathlib.Path)
-@contextlib.contextmanager
-def _(path):
- """
- Degenerate behavior for pathlib.Path objects.
- """
- yield path
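
The two as_file() branches above behave quite differently; a sketch contrasting them, using this module's own files() and a placeholder local path:

import pathlib

# A pathlib.Path takes the registered overload: it is yielded as-is;
# nothing is copied and nothing is deleted on exit.
with as_file(pathlib.Path('setup.cfg')) as p:
    print(p)

# A non-Path Traversable (e.g. a zipfile.Path into a wheel) takes the
# generic overload: _tempfile() writes read_bytes() to a mkstemp file,
# yields that temporary path, and removes it when the context exits.
with as_file(files('email') / '__init__.py') as p:
    print(p.read_text()[:30])  # works either way
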
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_compat.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_compat.py
deleted file mode 100644
index cb9fc82..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_compat.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# flake8: noqa
-
-import abc
-import sys
-import pathlib
-from contextlib import suppress
-
-if sys.version_info >= (3, 10):
- from zipfile import Path as ZipPath # type: ignore
-else:
- from ..zipp import Path as ZipPath # type: ignore
-
-
-try:
- from typing import runtime_checkable # type: ignore
-except ImportError:
-
- def runtime_checkable(cls): # type: ignore
- return cls
-
-
-try:
- from typing import Protocol # type: ignore
-except ImportError:
- Protocol = abc.ABC # type: ignore
-
-
-class TraversableResourcesLoader:
- """
- Adapt loaders to provide TraversableResources and other
- compatibility.
-
- Used primarily for Python 3.9 and earlier where the native
- loaders do not yet implement TraversableResources.
- """
-
- def __init__(self, spec):
- self.spec = spec
-
- @property
- def path(self):
- return self.spec.origin
-
- def get_resource_reader(self, name):
- from . import readers, _adapters
-
- def _zip_reader(spec):
- with suppress(AttributeError):
- return readers.ZipReader(spec.loader, spec.name)
-
- def _namespace_reader(spec):
- with suppress(AttributeError, ValueError):
- return readers.NamespaceReader(spec.submodule_search_locations)
-
- def _available_reader(spec):
- with suppress(AttributeError):
- return spec.loader.get_resource_reader(spec.name)
-
- def _native_reader(spec):
- reader = _available_reader(spec)
- return reader if hasattr(reader, 'files') else None
-
- def _file_reader(spec):
- try:
- path = pathlib.Path(self.path)
- except TypeError:
- return None
- if path.exists():
- return readers.FileReader(self)
-
- return (
- # native reader if it supplies 'files'
- _native_reader(self.spec)
- or
- # local ZipReader if a zip module
- _zip_reader(self.spec)
- or
- # local NamespaceReader if a namespace module
- _namespace_reader(self.spec)
- or
- # local FileReader
- _file_reader(self.spec)
- # fallback - adapt the spec ResourceReader to TraversableReader
- or _adapters.CompatibilityFiles(self.spec)
- )
-
-
-def wrap_spec(package):
- """
- Construct a package spec with traversable compatibility
- on the spec/loader/reader.
-
- Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
- from above for older Python compatibility (<3.10).
- """
- from . import _adapters
-
- return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_itertools.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_itertools.py
deleted file mode 100644
index cce0558..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_itertools.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from itertools import filterfalse
-
-from typing import (
- Callable,
- Iterable,
- Iterator,
- Optional,
- Set,
- TypeVar,
- Union,
-)
-
-# Type and type variable definitions
-_T = TypeVar('_T')
-_U = TypeVar('_U')
-
-
-def unique_everseen(
- iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
-) -> Iterator[_T]:
- "List unique elements, preserving order. Remember all elements ever seen."
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D
- # unique_everseen('ABBCcAD', str.lower) --> A B C D
- seen: Set[Union[_T, _U]] = set()
- seen_add = seen.add
- if key is None:
- for element in filterfalse(seen.__contains__, iterable):
- seen_add(element)
- yield element
- else:
- for element in iterable:
- k = key(element)
- if k not in seen:
- seen_add(k)
- yield element
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_legacy.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_legacy.py
deleted file mode 100644
index 1d5d3f1..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/_legacy.py
+++ /dev/null
@@ -1,121 +0,0 @@
-import functools
-import os
-import pathlib
-import types
-import warnings
-
-from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
-
-from . import _common
-
-Package = Union[types.ModuleType, str]
-Resource = str
-
-
-def deprecated(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- warnings.warn(
- f"{func.__name__} is deprecated. Use files() instead. "
- "Refer to https://importlib-resources.readthedocs.io"
- "/en/latest/using.html#migrating-from-legacy for migration advice.",
- DeprecationWarning,
- stacklevel=2,
- )
- return func(*args, **kwargs)
-
- return wrapper
-
-
-def normalize_path(path):
- # type: (Any) -> str
- """Normalize a path by ensuring it is a string.
-
- If the resulting string contains path separators, an exception is raised.
- """
- str_path = str(path)
- parent, file_name = os.path.split(str_path)
- if parent:
- raise ValueError(f'{path!r} must be only a file name')
- return file_name
-
-
-@deprecated
-def open_binary(package: Package, resource: Resource) -> BinaryIO:
- """Return a file-like object opened for binary reading of the resource."""
- return (_common.files(package) / normalize_path(resource)).open('rb')
-
-
-@deprecated
-def read_binary(package: Package, resource: Resource) -> bytes:
- """Return the binary contents of the resource."""
- return (_common.files(package) / normalize_path(resource)).read_bytes()
-
-
-@deprecated
-def open_text(
- package: Package,
- resource: Resource,
- encoding: str = 'utf-8',
- errors: str = 'strict',
-) -> TextIO:
- """Return a file-like object opened for text reading of the resource."""
- return (_common.files(package) / normalize_path(resource)).open(
- 'r', encoding=encoding, errors=errors
- )
-
-
-@deprecated
-def read_text(
- package: Package,
- resource: Resource,
- encoding: str = 'utf-8',
- errors: str = 'strict',
-) -> str:
- """Return the decoded string of the resource.
-
- The decoding-related arguments have the same semantics as those of
- bytes.decode().
- """
- with open_text(package, resource, encoding, errors) as fp:
- return fp.read()
-
-
-@deprecated
-def contents(package: Package) -> Iterable[str]:
- """Return an iterable of entries in `package`.
-
- Note that not all entries are resources. Specifically, directories are
- not considered resources. Use `is_resource()` on each entry returned here
- to check if it is a resource or not.
- """
- return [path.name for path in _common.files(package).iterdir()]
-
-
-@deprecated
-def is_resource(package: Package, name: str) -> bool:
- """True if `name` is a resource inside `package`.
-
- Directories are *not* resources.
- """
- resource = normalize_path(name)
- return any(
- traversable.name == resource and traversable.is_file()
- for traversable in _common.files(package).iterdir()
- )
-
-
-@deprecated
-def path(
- package: Package,
- resource: Resource,
-) -> ContextManager[pathlib.Path]:
- """A context manager providing a file path object to the resource.
-
- If the resource does not already exist on its own on the file system,
- a temporary file will be created. If the file was created, the file
- will be deleted upon exiting the context manager (no exception is
- raised if the file was deleted prior to the context manager
- exiting).
- """
- return _common.as_file(_common.files(package) / normalize_path(resource))
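
Each deprecated helper above reduces to a one-liner against files(), which is what the DeprecationWarning's migration guide points to. A sketch with placeholder package and resource names:

# Before (warns):  data = read_text('mypkg', 'config.ini')
# After:
data = _common.files('mypkg').joinpath('config.ini').read_text(encoding='utf-8')

# open_binary('mypkg', 'blob.bin')  ->  files('mypkg').joinpath('blob.bin').open('rb')
# contents('mypkg')                 ->  (p.name for p in files('mypkg').iterdir())
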
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/abc.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/abc.py
deleted file mode 100644
index d39dc1a..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/abc.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import abc
-from typing import BinaryIO, Iterable, Text
-
-from ._compat import runtime_checkable, Protocol
-
-
-class ResourceReader(metaclass=abc.ABCMeta):
- """Abstract base class for loaders to provide resource reading support."""
-
- @abc.abstractmethod
- def open_resource(self, resource: Text) -> BinaryIO:
- """Return an opened, file-like object for binary reading.
-
- The 'resource' argument is expected to represent only a file name.
- If the resource cannot be found, FileNotFoundError is raised.
- """
- # This deliberately raises FileNotFoundError instead of
- # NotImplementedError so that if this method is accidentally called,
- # it'll still do the right thing.
- raise FileNotFoundError
-
- @abc.abstractmethod
- def resource_path(self, resource: Text) -> Text:
- """Return the file system path to the specified resource.
-
- The 'resource' argument is expected to represent only a file name.
- If the resource does not exist on the file system, raise
- FileNotFoundError.
- """
- # This deliberately raises FileNotFoundError instead of
- # NotImplementedError so that if this method is accidentally called,
- # it'll still do the right thing.
- raise FileNotFoundError
-
- @abc.abstractmethod
- def is_resource(self, path: Text) -> bool:
- """Return True if the named 'path' is a resource.
-
- Files are resources, directories are not.
- """
- raise FileNotFoundError
-
- @abc.abstractmethod
- def contents(self) -> Iterable[str]:
- """Return an iterable of entries in `package`."""
- raise FileNotFoundError
-
-
-@runtime_checkable
-class Traversable(Protocol):
- """
- An object with a subset of pathlib.Path methods suitable for
- traversing directories and opening files.
- """
-
- @abc.abstractmethod
- def iterdir(self):
- """
- Yield Traversable objects in self
- """
-
- def read_bytes(self):
- """
- Read contents of self as bytes
- """
- with self.open('rb') as strm:
- return strm.read()
-
- def read_text(self, encoding=None):
- """
- Read contents of self as text
- """
- with self.open(encoding=encoding) as strm:
- return strm.read()
-
- @abc.abstractmethod
- def is_dir(self) -> bool:
- """
- Return True if self is a directory
- """
-
- @abc.abstractmethod
- def is_file(self) -> bool:
- """
- Return True if self is a file
- """
-
- @abc.abstractmethod
- def joinpath(self, child):
- """
- Return Traversable child in self
- """
-
- def __truediv__(self, child):
- """
- Return Traversable child in self
- """
- return self.joinpath(child)
-
- @abc.abstractmethod
- def open(self, mode='r', *args, **kwargs):
- """
- mode may be 'r' or 'rb' to open as text or binary. Return a handle
- suitable for reading (same as pathlib.Path.open).
-
- When opening as text, accepts encoding parameters such as those
- accepted by io.TextIOWrapper.
- """
-
- @abc.abstractproperty
- def name(self) -> str:
- """
- The base name of this object without any parent references.
- """
-
-
-class TraversableResources(ResourceReader):
- """
- The required interface for providing traversable
- resources.
- """
-
- @abc.abstractmethod
- def files(self):
- """Return a Traversable object for the loaded package."""
-
- def open_resource(self, resource):
- return self.files().joinpath(resource).open('rb')
-
- def resource_path(self, resource):
- raise FileNotFoundError(resource)
-
- def is_resource(self, path):
- return self.files().joinpath(path).is_file()
-
- def contents(self):
- return (item.name for item in self.files().iterdir())
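
Because Traversable is runtime-checkable (on Python 3.8+, where typing.Protocol is available), pathlib.Path already satisfies it structurally, so code written against the protocol also works for zipfile.Path and MultiplexedPath. A small sketch:

import pathlib

def walk(node, depth=0):
    # Relies only on the Traversable protocol: name, is_dir(), iterdir().
    print('  ' * depth + node.name)
    if node.is_dir():
        for child in node.iterdir():
            walk(child, depth + 1)

assert isinstance(pathlib.Path('.'), Traversable)  # structural check
walk(pathlib.Path('.'))
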
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/readers.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/readers.py
deleted file mode 100644
index f1190ca..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/readers.py
+++ /dev/null
@@ -1,122 +0,0 @@
-import collections
-import pathlib
-import operator
-
-from . import abc
-
-from ._itertools import unique_everseen
-from ._compat import ZipPath
-
-
-def remove_duplicates(items):
- return iter(collections.OrderedDict.fromkeys(items))
-
-
-class FileReader(abc.TraversableResources):
- def __init__(self, loader):
- self.path = pathlib.Path(loader.path).parent
-
- def resource_path(self, resource):
- """
- Return the file system path to prevent
- `resources.path()` from creating a temporary
- copy.
- """
- return str(self.path.joinpath(resource))
-
- def files(self):
- return self.path
-
-
-class ZipReader(abc.TraversableResources):
- def __init__(self, loader, module):
- _, _, name = module.rpartition('.')
- self.prefix = loader.prefix.replace('\\', '/') + name + '/'
- self.archive = loader.archive
-
- def open_resource(self, resource):
- try:
- return super().open_resource(resource)
- except KeyError as exc:
- raise FileNotFoundError(exc.args[0])
-
- def is_resource(self, path):
- # workaround for `zipfile.Path.is_file` returning true
- # for non-existent paths.
- target = self.files().joinpath(path)
- return target.is_file() and target.exists()
-
- def files(self):
- return ZipPath(self.archive, self.prefix)
-
-
-class MultiplexedPath(abc.Traversable):
- """
- Given a series of Traversable objects, implement a merged
- version of the interface across all objects. Useful for
- namespace packages which may be multihomed at a single
- name.
- """
-
- def __init__(self, *paths):
- self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
- if not self._paths:
- message = 'MultiplexedPath must contain at least one path'
- raise FileNotFoundError(message)
- if not all(path.is_dir() for path in self._paths):
- raise NotADirectoryError('MultiplexedPath only supports directories')
-
- def iterdir(self):
- files = (file for path in self._paths for file in path.iterdir())
- return unique_everseen(files, key=operator.attrgetter('name'))
-
- def read_bytes(self):
- raise FileNotFoundError(f'{self} is not a file')
-
- def read_text(self, *args, **kwargs):
- raise FileNotFoundError(f'{self} is not a file')
-
- def is_dir(self):
- return True
-
- def is_file(self):
- return False
-
- def joinpath(self, child):
- # first try to find child in current paths
- for file in self.iterdir():
- if file.name == child:
- return file
- # if it does not exist, construct it with the first path
- return self._paths[0] / child
-
- __truediv__ = joinpath
-
- def open(self, *args, **kwargs):
- raise FileNotFoundError(f'{self} is not a file')
-
- @property
- def name(self):
- return self._paths[0].name
-
- def __repr__(self):
- paths = ', '.join(f"'{path}'" for path in self._paths)
- return f'MultiplexedPath({paths})'
-
-
-class NamespaceReader(abc.TraversableResources):
- def __init__(self, namespace_path):
- if 'NamespacePath' not in str(namespace_path):
- raise ValueError('Invalid path')
- self.path = MultiplexedPath(*list(namespace_path))
-
- def resource_path(self, resource):
- """
- Return the file system path to prevent
- `resources.path()` from creating a temporary
- copy.
- """
- return str(self.path.joinpath(resource))
-
- def files(self):
- return self.path
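
A sketch of MultiplexedPath merging two homes of one namespace package; the /tmp directories are placeholders and must already exist:

# Given /tmp/a/ns/one.txt and /tmp/b/ns/two.txt on disk:
merged = MultiplexedPath('/tmp/a/ns', '/tmp/b/ns')

# iterdir() chains both homes, deduplicated by file name:
print([p.name for p in merged.iterdir()])  # ['one.txt', 'two.txt']

# joinpath() returns an existing child from either home, falling back
# to a (possibly nonexistent) child of the first path:
print(merged / 'two.txt')  # /tmp/b/ns/two.txt
print(merged / 'new.txt')  # /tmp/a/ns/new.txt
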
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/simple.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/simple.py
deleted file mode 100644
index da073cb..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/importlib_resources/simple.py
+++ /dev/null
@@ -1,116 +0,0 @@
-"""
-Interface adapters for low-level readers.
-"""
-
-import abc
-import io
-import itertools
-from typing import BinaryIO, List
-
-from .abc import Traversable, TraversableResources
-
-
-class SimpleReader(abc.ABC):
- """
- The minimum, low-level interface required from a resource
- provider.
- """
-
- @abc.abstractproperty
- def package(self):
- # type: () -> str
- """
- The name of the package for which this reader loads resources.
- """
-
- @abc.abstractmethod
- def children(self):
- # type: () -> List['SimpleReader']
- """
- Obtain an iterable of SimpleReader for available
- child containers (e.g. directories).
- """
-
- @abc.abstractmethod
- def resources(self):
- # type: () -> List[str]
- """
- Obtain available named resources for this virtual package.
- """
-
- @abc.abstractmethod
- def open_binary(self, resource):
- # type: (str) -> BinaryIO
- """
- Obtain a File-like for a named resource.
- """
-
- @property
- def name(self):
- return self.package.split('.')[-1]
-
-
-class ResourceHandle(Traversable):
- """
- Handle to a named resource in a ResourceReader.
- """
-
- def __init__(self, parent, name):
- # type: (ResourceContainer, str) -> None
- self.parent = parent
- self.name = name # type: ignore
-
- def is_file(self):
- return True
-
- def is_dir(self):
- return False
-
- def open(self, mode='r', *args, **kwargs):
- stream = self.parent.reader.open_binary(self.name)
- if 'b' not in mode:
-            stream = io.TextIOWrapper(stream, *args, **kwargs)
- return stream
-
- def joinpath(self, name):
- raise RuntimeError("Cannot traverse into a resource")
-
-
-class ResourceContainer(Traversable):
- """
- Traversable container for a package's resources via its reader.
- """
-
- def __init__(self, reader):
- # type: (SimpleReader) -> None
- self.reader = reader
-
- def is_dir(self):
- return True
-
- def is_file(self):
- return False
-
- def iterdir(self):
-        files = (ResourceHandle(self, name) for name in self.reader.resources())
- dirs = map(ResourceContainer, self.reader.children())
- return itertools.chain(files, dirs)
-
- def open(self, *args, **kwargs):
- raise IsADirectoryError()
-
- def joinpath(self, name):
- return next(
- traversable for traversable in self.iterdir() if traversable.name == name
- )
-
-
-class TraversableReader(TraversableResources, SimpleReader):
- """
- A TraversableResources based on SimpleReader. Resource providers
- may derive from this class to provide the TraversableResources
- interface by supplying the SimpleReader interface.
- """
-
- def files(self):
- return ResourceContainer(self)
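
A minimal in-memory provider sketch built on TraversableReader: supply the four SimpleReader members and the Traversable side comes for free. DictReader and its data are illustrative, not part of the vendored code:

import io

class DictReader(TraversableReader):
    """Serve resources from a {name: bytes} mapping."""

    def __init__(self, package, data):
        self._package = package
        self._data = data

    @property
    def package(self):
        return self._package

    def children(self):
        return []  # flat container: no subdirectories

    def resources(self):
        return list(self._data)

    def open_binary(self, resource):
        return io.BytesIO(self._data[resource])

reader = DictReader('demo', {'greeting.txt': b'hello'})
print(reader.files().joinpath('greeting.txt').read_text())  # hello
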
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__init__.py
+++ /dev/null
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 67aa412..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/context.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/context.cpython-311.pyc
deleted file mode 100644
index 0cb1a60..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/context.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/functools.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/functools.cpython-311.pyc
deleted file mode 100644
index 4682c17..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/__pycache__/functools.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/context.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/context.py
deleted file mode 100644
index 87a4e3d..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/context.py
+++ /dev/null
@@ -1,213 +0,0 @@
-import os
-import subprocess
-import contextlib
-import functools
-import tempfile
-import shutil
-import operator
-
-
-@contextlib.contextmanager
-def pushd(dir):
- orig = os.getcwd()
- os.chdir(dir)
- try:
- yield dir
- finally:
- os.chdir(orig)
-
-
-@contextlib.contextmanager
-def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
- """
- Get a tarball, extract it, change to that directory, yield, then
- clean up.
- `runner` is the function to invoke commands.
- `pushd` is a context manager for changing the directory.
- """
- if target_dir is None:
- target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
- if runner is None:
- runner = functools.partial(subprocess.check_call, shell=True)
-    # In the tar command, use --strip-components=1 to strip the first path
-    # and then use -C to cause the files to be extracted to {target_dir}.
-    # This ensures that we always know where the files were extracted.
- runner('mkdir {target_dir}'.format(**vars()))
- try:
- getter = 'wget {url} -O -'
- extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
- cmd = ' | '.join((getter, extract))
- runner(cmd.format(compression=infer_compression(url), **vars()))
- with pushd(target_dir):
- yield target_dir
- finally:
- runner('rm -Rf {target_dir}'.format(**vars()))
-
-
-def infer_compression(url):
- """
- Given a URL or filename, infer the compression code for tar.
- """
- # cheat and just assume it's the last two characters
- compression_indicator = url[-2:]
- mapping = dict(gz='z', bz='j', xz='J')
- # Assume 'z' (gzip) if no match
- return mapping.get(compression_indicator, 'z')
-
-
-@contextlib.contextmanager
-def temp_dir(remover=shutil.rmtree):
- """
- Create a temporary directory context. Pass a custom remover
- to override the removal behavior.
- """
- temp_dir = tempfile.mkdtemp()
- try:
- yield temp_dir
- finally:
- remover(temp_dir)
-
-
-@contextlib.contextmanager
-def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
- """
- Check out the repo indicated by url.
-
- If dest_ctx is supplied, it should be a context manager
- to yield the target directory for the check out.
- """
- exe = 'git' if 'git' in url else 'hg'
- with dest_ctx() as repo_dir:
- cmd = [exe, 'clone', url, repo_dir]
- if branch:
- cmd.extend(['--branch', branch])
-        stdout = subprocess.DEVNULL if quiet else None
-        subprocess.check_call(cmd, stdout=stdout)
- yield repo_dir
-
-
-@contextlib.contextmanager
-def null():
- yield
-
-
-class ExceptionTrap:
- """
- A context manager that will catch certain exceptions and provide an
- indication they occurred.
-
- >>> with ExceptionTrap() as trap:
- ... raise Exception()
- >>> bool(trap)
- True
-
- >>> with ExceptionTrap() as trap:
- ... pass
- >>> bool(trap)
- False
-
- >>> with ExceptionTrap(ValueError) as trap:
- ... raise ValueError("1 + 1 is not 3")
- >>> bool(trap)
- True
-
- >>> with ExceptionTrap(ValueError) as trap:
- ... raise Exception()
- Traceback (most recent call last):
- ...
- Exception
-
- >>> bool(trap)
- False
- """
-
- exc_info = None, None, None
-
- def __init__(self, exceptions=(Exception,)):
- self.exceptions = exceptions
-
- def __enter__(self):
- return self
-
- @property
- def type(self):
- return self.exc_info[0]
-
- @property
- def value(self):
- return self.exc_info[1]
-
- @property
- def tb(self):
- return self.exc_info[2]
-
- def __exit__(self, *exc_info):
- type = exc_info[0]
- matches = type and issubclass(type, self.exceptions)
- if matches:
- self.exc_info = exc_info
- return matches
-
- def __bool__(self):
- return bool(self.type)
-
- def raises(self, func, *, _test=bool):
- """
- Wrap func and replace the result with the truth
- value of the trap (True if an exception occurred).
-
-        First, give the decorator an alias to support Python 3.8
-        syntax (arbitrary decorator expressions require Python 3.9).
-
- >>> raises = ExceptionTrap(ValueError).raises
-
- Now decorate a function that always fails.
-
- >>> @raises
- ... def fail():
- ... raise ValueError('failed')
- >>> fail()
- True
- """
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- with ExceptionTrap(self.exceptions) as trap:
- func(*args, **kwargs)
- return _test(trap)
-
- return wrapper
-
- def passes(self, func):
- """
- Wrap func and replace the result with the truth
- value of the trap (True if no exception).
-
-        First, give the decorator an alias to support Python 3.8
-        syntax (arbitrary decorator expressions require Python 3.9).
-
- >>> passes = ExceptionTrap(ValueError).passes
-
- Now decorate a function that always fails.
-
- >>> @passes
- ... def fail():
- ... raise ValueError('failed')
-
- >>> fail()
- False
- """
- return self.raises(func, _test=operator.not_)
-
-
-class suppress(contextlib.suppress, contextlib.ContextDecorator):
- """
- A version of contextlib.suppress with decorator support.
-
- >>> @suppress(KeyError)
- ... def key_error():
- ... {}['']
- >>> key_error()
- """
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/functools.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/functools.py
deleted file mode 100644
index bbd8b29..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/functools.py
+++ /dev/null
@@ -1,525 +0,0 @@
-import functools
-import time
-import inspect
-import collections
-import types
-import itertools
-
-from setuptools.extern import more_itertools
-
-from typing import Callable, TypeVar
-
-
-CallableT = TypeVar("CallableT", bound=Callable[..., object])
-
-
-def compose(*funcs):
- """
- Compose any number of unary functions into a single unary function.
-
- >>> import textwrap
- >>> expected = str.strip(textwrap.dedent(compose.__doc__))
- >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
- >>> strip_and_dedent(compose.__doc__) == expected
- True
-
- Compose also allows the innermost function to take arbitrary arguments.
-
- >>> round_three = lambda x: round(x, ndigits=3)
- >>> f = compose(round_three, int.__truediv__)
- >>> [f(3*x, x+1) for x in range(1,10)]
- [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
- """
-
- def compose_two(f1, f2):
- return lambda *args, **kwargs: f1(f2(*args, **kwargs))
-
- return functools.reduce(compose_two, funcs)
-
-
-def method_caller(method_name, *args, **kwargs):
- """
- Return a function that will call a named method on the
- target object with optional positional and keyword
- arguments.
-
- >>> lower = method_caller('lower')
- >>> lower('MyString')
- 'mystring'
- """
-
- def call_method(target):
- func = getattr(target, method_name)
- return func(*args, **kwargs)
-
- return call_method
-
-
-def once(func):
- """
- Decorate func so it's only ever called the first time.
-
-    This decorator ensures that an expensive or non-idempotent function
-    is called at most once; subsequent calls return the cached first result.
-
- >>> add_three = once(lambda a: a+3)
- >>> add_three(3)
- 6
- >>> add_three(9)
- 6
- >>> add_three('12')
- 6
-
- To reset the stored value, simply clear the property ``saved_result``.
-
- >>> del add_three.saved_result
- >>> add_three(9)
- 12
- >>> add_three(8)
- 12
-
- Or invoke 'reset()' on it.
-
- >>> add_three.reset()
- >>> add_three(-3)
- 0
- >>> add_three(0)
- 0
- """
-
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- if not hasattr(wrapper, 'saved_result'):
- wrapper.saved_result = func(*args, **kwargs)
- return wrapper.saved_result
-
- wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
- return wrapper
-
-
-def method_cache(
- method: CallableT,
- cache_wrapper: Callable[
- [CallableT], CallableT
- ] = functools.lru_cache(), # type: ignore[assignment]
-) -> CallableT:
- """
- Wrap lru_cache to support storing the cache data in the object instances.
-
- Abstracts the common paradigm where the method explicitly saves an
- underscore-prefixed protected property on first call and returns that
- subsequently.
-
- >>> class MyClass:
- ... calls = 0
- ...
- ... @method_cache
- ... def method(self, value):
- ... self.calls += 1
- ... return value
-
- >>> a = MyClass()
- >>> a.method(3)
- 3
- >>> for x in range(75):
- ... res = a.method(x)
- >>> a.calls
- 75
-
- Note that the apparent behavior will be exactly like that of lru_cache
- except that the cache is stored on each instance, so values in one
- instance will not flush values from another, and when an instance is
- deleted, so are the cached values for that instance.
-
- >>> b = MyClass()
- >>> for x in range(35):
- ... res = b.method(x)
- >>> b.calls
- 35
- >>> a.method(0)
- 0
- >>> a.calls
- 75
-
- Note that if method had been decorated with ``functools.lru_cache()``,
- a.calls would have been 76 (due to the cached value of 0 having been
- flushed by the 'b' instance).
-
- Clear the cache with ``.cache_clear()``
-
- >>> a.method.cache_clear()
-
- Same for a method that hasn't yet been called.
-
- >>> c = MyClass()
- >>> c.method.cache_clear()
-
- Another cache wrapper may be supplied:
-
- >>> cache = functools.lru_cache(maxsize=2)
- >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
- >>> a = MyClass()
- >>> a.method2()
- 3
-
- Caution - do not subsequently wrap the method with another decorator, such
- as ``@property``, which changes the semantics of the function.
-
- See also
- http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
- for another implementation and additional justification.
- """
-
- def wrapper(self: object, *args: object, **kwargs: object) -> object:
- # it's the first call, replace the method with a cached, bound method
- bound_method: CallableT = types.MethodType( # type: ignore[assignment]
- method, self
- )
- cached_method = cache_wrapper(bound_method)
- setattr(self, method.__name__, cached_method)
- return cached_method(*args, **kwargs)
-
- # Support cache clear even before cache has been created.
- wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
-
- return ( # type: ignore[return-value]
- _special_method_cache(method, cache_wrapper) or wrapper
- )
-
-
-def _special_method_cache(method, cache_wrapper):
- """
- Because Python treats special methods differently, it's not
- possible to use instance attributes to implement the cached
- methods.
-
- Instead, install the wrapper method under a different name
- and return a simple proxy to that wrapper.
-
- https://github.com/jaraco/jaraco.functools/issues/5
- """
- name = method.__name__
- special_names = '__getattr__', '__getitem__'
- if name not in special_names:
- return
-
- wrapper_name = '__cached' + name
-
- def proxy(self, *args, **kwargs):
- if wrapper_name not in vars(self):
- bound = types.MethodType(method, self)
- cache = cache_wrapper(bound)
- setattr(self, wrapper_name, cache)
- else:
- cache = getattr(self, wrapper_name)
- return cache(*args, **kwargs)
-
- return proxy
-
-
-def apply(transform):
- """
- Decorate a function with a transform function that is
- invoked on results returned from the decorated function.
-
- >>> @apply(reversed)
- ... def get_numbers(start):
- ... "doc for get_numbers"
- ... return range(start, start+3)
- >>> list(get_numbers(4))
- [6, 5, 4]
- >>> get_numbers.__doc__
- 'doc for get_numbers'
- """
-
- def wrap(func):
- return functools.wraps(func)(compose(transform, func))
-
- return wrap
-
-
-def result_invoke(action):
- r"""
- Decorate a function with an action function that is
- invoked on the results returned from the decorated
- function (for its side-effect), then return the original
- result.
-
- >>> @result_invoke(print)
- ... def add_two(a, b):
- ... return a + b
- >>> x = add_two(2, 3)
- 5
- >>> x
- 5
- """
-
- def wrap(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- result = func(*args, **kwargs)
- action(result)
- return result
-
- return wrapper
-
- return wrap
-
-
-def call_aside(f, *args, **kwargs):
- """
- Call a function for its side effect after initialization.
-
- >>> @call_aside
- ... def func(): print("called")
- called
- >>> func()
- called
-
- Use functools.partial to pass parameters to the initial call
-
- >>> @functools.partial(call_aside, name='bingo')
- ... def func(name): print("called with", name)
- called with bingo
- """
- f(*args, **kwargs)
- return f
-
-
-class Throttler:
- """
- Rate-limit a function (or other callable)
- """
-
- def __init__(self, func, max_rate=float('Inf')):
- if isinstance(func, Throttler):
- func = func.func
- self.func = func
- self.max_rate = max_rate
- self.reset()
-
- def reset(self):
- self.last_called = 0
-
- def __call__(self, *args, **kwargs):
- self._wait()
- return self.func(*args, **kwargs)
-
- def _wait(self):
- "ensure at least 1/max_rate seconds from last call"
- elapsed = time.time() - self.last_called
- must_wait = 1 / self.max_rate - elapsed
- time.sleep(max(0, must_wait))
- self.last_called = time.time()
-
- def __get__(self, obj, type=None):
- return first_invoke(self._wait, functools.partial(self.func, obj))
-
-
-def first_invoke(func1, func2):
- """
- Return a function that when invoked will invoke func1 without
- any parameters (for its side-effect) and then invoke func2
- with whatever parameters were passed, returning its result.
- """
-
- def wrapper(*args, **kwargs):
- func1()
- return func2(*args, **kwargs)
-
- return wrapper
-
-
-def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
- """
- Given a callable func, trap the indicated exceptions
- for up to 'retries' times, invoking cleanup on the
- exception. On the final attempt, allow any exceptions
- to propagate.
- """
- attempts = itertools.count() if retries == float('inf') else range(retries)
- for attempt in attempts:
- try:
- return func()
- except trap:
- cleanup()
-
- return func()
-
-
-def retry(*r_args, **r_kwargs):
- """
- Decorator wrapper for retry_call. Accepts arguments to retry_call
- except func and then returns a decorator for the decorated function.
-
- Ex:
-
- >>> @retry(retries=3)
- ... def my_func(a, b):
- ... "this is my funk"
- ... print(a, b)
- >>> my_func.__doc__
- 'this is my funk'
- """
-
- def decorate(func):
- @functools.wraps(func)
- def wrapper(*f_args, **f_kwargs):
- bound = functools.partial(func, *f_args, **f_kwargs)
- return retry_call(bound, *r_args, **r_kwargs)
-
- return wrapper
-
- return decorate
-
-
-def print_yielded(func):
- """
- Convert a generator into a function that prints all yielded elements
-
- >>> @print_yielded
- ... def x():
- ... yield 3; yield None
- >>> x()
- 3
- None
- """
- print_all = functools.partial(map, print)
- print_results = compose(more_itertools.consume, print_all, func)
- return functools.wraps(func)(print_results)
-
-
-def pass_none(func):
- """
- Wrap func so it's not called if its first param is None
-
- >>> print_text = pass_none(print)
- >>> print_text('text')
- text
- >>> print_text(None)
- """
-
- @functools.wraps(func)
- def wrapper(param, *args, **kwargs):
- if param is not None:
- return func(param, *args, **kwargs)
-
- return wrapper
-
-
-def assign_params(func, namespace):
- """
- Assign parameters from namespace where func solicits.
-
- >>> def func(x, y=3):
- ... print(x, y)
- >>> assigned = assign_params(func, dict(x=2, z=4))
- >>> assigned()
- 2 3
-
- The usual errors are raised if a function doesn't receive
- its required parameters:
-
- >>> assigned = assign_params(func, dict(y=3, z=4))
- >>> assigned()
- Traceback (most recent call last):
- TypeError: func() ...argument...
-
- It even works on methods:
-
- >>> class Handler:
- ... def meth(self, arg):
- ... print(arg)
- >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
- crystal
- """
- sig = inspect.signature(func)
- params = sig.parameters.keys()
- call_ns = {k: namespace[k] for k in params if k in namespace}
- return functools.partial(func, **call_ns)
-
-
-def save_method_args(method):
- """
-    Wrap a method such that when it is called, the args and kwargs are
-    saved on the instance.
-
- >>> class MyClass:
- ... @save_method_args
- ... def method(self, a, b):
- ... print(a, b)
- >>> my_ob = MyClass()
- >>> my_ob.method(1, 2)
- 1 2
- >>> my_ob._saved_method.args
- (1, 2)
- >>> my_ob._saved_method.kwargs
- {}
- >>> my_ob.method(a=3, b='foo')
- 3 foo
- >>> my_ob._saved_method.args
- ()
- >>> my_ob._saved_method.kwargs == dict(a=3, b='foo')
- True
-
-    The arguments are stored on the instance, allowing
-    different instances to save different args.
-
- >>> your_ob = MyClass()
- >>> your_ob.method({str('x'): 3}, b=[4])
- {'x': 3} [4]
- >>> your_ob._saved_method.args
- ({'x': 3},)
- >>> my_ob._saved_method.args
- ()
- """
- args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs')
-
- @functools.wraps(method)
- def wrapper(self, *args, **kwargs):
- attr_name = '_saved_' + method.__name__
- attr = args_and_kwargs(args, kwargs)
- setattr(self, attr_name, attr)
- return method(self, *args, **kwargs)
-
- return wrapper
-
-
-def except_(*exceptions, replace=None, use=None):
- """
- Replace the indicated exceptions, if raised, with the indicated
- literal replacement or evaluated expression (if present).
-
- >>> safe_int = except_(ValueError)(int)
- >>> safe_int('five')
- >>> safe_int('5')
- 5
-
- Specify a literal replacement with ``replace``.
-
- >>> safe_int_r = except_(ValueError, replace=0)(int)
- >>> safe_int_r('five')
- 0
-
- Provide an expression to ``use`` to pass through particular parameters.
-
- >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
- >>> safe_int_pt('five')
- 'five'
-
- """
-
- def decorate(func):
- @functools.wraps(func)
- def wrapper(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except exceptions:
- try:
- return eval(use)
- except TypeError:
- return replace
-
- return wrapper
-
- return decorate
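
Throttler and retry_call above carry no doctests; a combined usage sketch (flaky() is illustrative):

import random
import time

# Throttler: at most two calls per second, enforced by sleeping in _wait().
slow_print = Throttler(print, max_rate=2)
for n in range(4):
    slow_print(n, round(time.time(), 2))

# retry_call: up to retries + 1 attempts; only trapped exception types
# are retried, and the final attempt propagates any exception.
def flaky():
    if random.random() < 0.5:
        raise ConnectionError('transient')
    return 'ok'

print(retry_call(flaky, retries=2, trap=ConnectionError))
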
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__init__.py
deleted file mode 100644
index a0306d5..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__init__.py
+++ /dev/null
@@ -1,599 +0,0 @@
-import re
-import itertools
-import textwrap
-import functools
-
-try:
- from importlib.resources import files # type: ignore
-except ImportError: # pragma: nocover
- from setuptools.extern.importlib_resources import files # type: ignore
-
-from setuptools.extern.jaraco.functools import compose, method_cache
-from setuptools.extern.jaraco.context import ExceptionTrap
-
-
-def substitution(old, new):
- """
- Return a function that will perform a substitution on a string
- """
- return lambda s: s.replace(old, new)
-
-
-def multi_substitution(*substitutions):
- """
- Take a sequence of pairs specifying substitutions, and create
- a function that performs those substitutions.
-
- >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
- 'baz'
- """
- substitutions = itertools.starmap(substitution, substitutions)
- # compose function applies last function first, so reverse the
- # substitutions to get the expected order.
- substitutions = reversed(tuple(substitutions))
- return compose(*substitutions)
-
-
-class FoldedCase(str):
- """
-    A case-insensitive string class; behaves just like str
-    except that it compares equal when the only variation is case.
-
- >>> s = FoldedCase('hello world')
-
- >>> s == 'Hello World'
- True
-
- >>> 'Hello World' == s
- True
-
- >>> s != 'Hello World'
- False
-
- >>> s.index('O')
- 4
-
- >>> s.split('O')
- ['hell', ' w', 'rld']
-
- >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
- ['alpha', 'Beta', 'GAMMA']
-
- Sequence membership is straightforward.
-
- >>> "Hello World" in [s]
- True
- >>> s in ["Hello World"]
- True
-
- You may test for set inclusion, but candidate and elements
- must both be folded.
-
- >>> FoldedCase("Hello World") in {s}
- True
- >>> s in {FoldedCase("Hello World")}
- True
-
- String inclusion works as long as the FoldedCase object
- is on the right.
-
- >>> "hello" in FoldedCase("Hello World")
- True
-
- But not if the FoldedCase object is on the left:
-
- >>> FoldedCase('hello') in 'Hello World'
- False
-
- In that case, use ``in_``:
-
- >>> FoldedCase('hello').in_('Hello World')
- True
-
- >>> FoldedCase('hello') > FoldedCase('Hello')
- False
- """
-
- def __lt__(self, other):
- return self.lower() < other.lower()
-
- def __gt__(self, other):
- return self.lower() > other.lower()
-
- def __eq__(self, other):
- return self.lower() == other.lower()
-
- def __ne__(self, other):
- return self.lower() != other.lower()
-
- def __hash__(self):
- return hash(self.lower())
-
- def __contains__(self, other):
- return super().lower().__contains__(other.lower())
-
- def in_(self, other):
- "Does self appear in other?"
- return self in FoldedCase(other)
-
- # cache lower since it's likely to be called frequently.
- @method_cache
- def lower(self):
- return super().lower()
-
- def index(self, sub):
- return self.lower().index(sub.lower())
-
- def split(self, splitter=' ', maxsplit=0):
- pattern = re.compile(re.escape(splitter), re.I)
- return pattern.split(self, maxsplit)
-
-
-# Python 3.8 compatibility
-_unicode_trap = ExceptionTrap(UnicodeDecodeError)
-
-
-@_unicode_trap.passes
-def is_decodable(value):
- r"""
- Return True if the supplied value is decodable (using the default
- encoding).
-
- >>> is_decodable(b'\xff')
- False
- >>> is_decodable(b'\x32')
- True
- """
- value.decode()
-
-
-def is_binary(value):
- r"""
- Return True if the value appears to be binary (that is, it's a byte
- string and isn't decodable).
-
- >>> is_binary(b'\xff')
- True
- >>> is_binary('\xff')
- False
- """
- return isinstance(value, bytes) and not is_decodable(value)
-
-
-def trim(s):
- r"""
- Trim something like a docstring to remove the whitespace that
- is common due to indentation and formatting.
-
- >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
- 'foo = bar\n\tbar = baz'
- """
- return textwrap.dedent(s).strip()
-
-
-def wrap(s):
- """
- Wrap lines of text, retaining existing newlines as
- paragraph markers.
-
- >>> print(wrap(lorem_ipsum))
- Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
- eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad
- minim veniam, quis nostrud exercitation ullamco laboris nisi ut
- aliquip ex ea commodo consequat. Duis aute irure dolor in
- reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla
- pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
- culpa qui officia deserunt mollit anim id est laborum.
- <BLANKLINE>
- Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam
- varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus
- magna felis sollicitudin mauris. Integer in mauris eu nibh euismod
- gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis
- risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue,
- eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas
- fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla
- a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis,
- neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing
- sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque
- nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus
- quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis,
- molestie eu, feugiat in, orci. In hac habitasse platea dictumst.
- """
- paragraphs = s.splitlines()
- wrapped = ('\n'.join(textwrap.wrap(para)) for para in paragraphs)
- return '\n\n'.join(wrapped)
-
-
-def unwrap(s):
- r"""
- Given a multi-line string, return an unwrapped version.
-
- >>> wrapped = wrap(lorem_ipsum)
- >>> wrapped.count('\n')
- 20
- >>> unwrapped = unwrap(wrapped)
- >>> unwrapped.count('\n')
- 1
- >>> print(unwrapped)
- Lorem ipsum dolor sit amet, consectetur adipiscing ...
- Curabitur pretium tincidunt lacus. Nulla gravida orci ...
-
- """
- paragraphs = re.split(r'\n\n+', s)
- cleaned = (para.replace('\n', ' ') for para in paragraphs)
- return '\n'.join(cleaned)
-
-
-class Splitter(object):
- """object that will split a string with the given arguments for each call
-
- >>> s = Splitter(',')
- >>> s('hello, world, this is your, master calling')
- ['hello', ' world', ' this is your', ' master calling']
- """
-
- def __init__(self, *args):
- self.args = args
-
- def __call__(self, s):
- return s.split(*self.args)
-
-
-def indent(string, prefix=' ' * 4):
- """
- >>> indent('foo')
- ' foo'
- """
- return prefix + string
-
-
-class WordSet(tuple):
- """
- Given an identifier, return the words that identifier represents,
- whether in camel case, underscore-separated, etc.
-
- >>> WordSet.parse("camelCase")
- ('camel', 'Case')
-
- >>> WordSet.parse("under_sep")
- ('under', 'sep')
-
- Acronyms should be retained
-
- >>> WordSet.parse("firstSNL")
- ('first', 'SNL')
-
- >>> WordSet.parse("you_and_I")
- ('you', 'and', 'I')
-
- >>> WordSet.parse("A simple test")
- ('A', 'simple', 'test')
-
- Multiple caps should not interfere with the first cap of another word.
-
- >>> WordSet.parse("myABCClass")
- ('my', 'ABC', 'Class')
-
- The result is a WordSet, so you can get the form you need.
-
- >>> WordSet.parse("myABCClass").underscore_separated()
- 'my_ABC_Class'
-
- >>> WordSet.parse('a-command').camel_case()
- 'ACommand'
-
- >>> WordSet.parse('someIdentifier').lowered().space_separated()
- 'some identifier'
-
- Slices of the result should return another WordSet.
-
- >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
- 'out_of_context'
-
- >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
- 'word set'
-
- >>> example = WordSet.parse('figured it out')
- >>> example.headless_camel_case()
- 'figuredItOut'
- >>> example.dash_separated()
- 'figured-it-out'
-
- """
-
- _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
-
- def capitalized(self):
- return WordSet(word.capitalize() for word in self)
-
- def lowered(self):
- return WordSet(word.lower() for word in self)
-
- def camel_case(self):
- return ''.join(self.capitalized())
-
- def headless_camel_case(self):
- words = iter(self)
- first = next(words).lower()
- new_words = itertools.chain((first,), WordSet(words).camel_case())
- return ''.join(new_words)
-
- def underscore_separated(self):
- return '_'.join(self)
-
- def dash_separated(self):
- return '-'.join(self)
-
- def space_separated(self):
- return ' '.join(self)
-
- def trim_right(self, item):
- """
- Remove the item from the end of the set.
-
- >>> WordSet.parse('foo bar').trim_right('foo')
- ('foo', 'bar')
- >>> WordSet.parse('foo bar').trim_right('bar')
- ('foo',)
- >>> WordSet.parse('').trim_right('bar')
- ()
- """
- return self[:-1] if self and self[-1] == item else self
-
- def trim_left(self, item):
- """
- Remove the item from the beginning of the set.
-
- >>> WordSet.parse('foo bar').trim_left('foo')
- ('bar',)
- >>> WordSet.parse('foo bar').trim_left('bar')
- ('foo', 'bar')
- >>> WordSet.parse('').trim_left('bar')
- ()
- """
- return self[1:] if self and self[0] == item else self
-
- def trim(self, item):
- """
- >>> WordSet.parse('foo bar').trim('foo')
- ('bar',)
- """
- return self.trim_left(item).trim_right(item)
-
- def __getitem__(self, item):
- result = super(WordSet, self).__getitem__(item)
- if isinstance(item, slice):
- result = WordSet(result)
- return result
-
- @classmethod
- def parse(cls, identifier):
- matches = cls._pattern.finditer(identifier)
- return WordSet(match.group(0) for match in matches)
-
- @classmethod
- def from_class_name(cls, subject):
- return cls.parse(subject.__class__.__name__)
-
-
-# for backward compatibility
-words = WordSet.parse
-
-
-def simple_html_strip(s):
- r"""
- Remove HTML from the string `s`.
-
- >>> str(simple_html_strip(''))
- ''
-
- >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
- A stormy day in paradise
-
- >>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
- Somebody tell the truth.
-
- >>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
- What about
- multiple lines?
- """
- html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
- texts = (match.group(3) or '' for match in html_stripper.finditer(s))
- return ''.join(texts)
-
-
-class SeparatedValues(str):
- """
- A string separated by a separator. Overrides __iter__ for getting
- the values.
-
- >>> list(SeparatedValues('a,b,c'))
- ['a', 'b', 'c']
-
- Whitespace is stripped and empty values are discarded.
-
- >>> list(SeparatedValues(' a, b , c, '))
- ['a', 'b', 'c']
- """
-
- separator = ','
-
- def __iter__(self):
- parts = self.split(self.separator)
- return filter(None, (part.strip() for part in parts))
-
-
-class Stripper:
- r"""
- Given a series of lines, find the common prefix and strip it from them.
-
- >>> lines = [
- ... 'abcdefg\n',
- ... 'abc\n',
- ... 'abcde\n',
- ... ]
- >>> res = Stripper.strip_prefix(lines)
- >>> res.prefix
- 'abc'
- >>> list(res.lines)
- ['defg\n', '\n', 'de\n']
-
- If no prefix is common, nothing should be stripped.
-
- >>> lines = [
- ... 'abcd\n',
- ... '1234\n',
- ... ]
- >>> res = Stripper.strip_prefix(lines)
- >>> res.prefix
- ''
- >>> list(res.lines)
- ['abcd\n', '1234\n']
- """
-
- def __init__(self, prefix, lines):
- self.prefix = prefix
- self.lines = map(self, lines)
-
- @classmethod
- def strip_prefix(cls, lines):
- prefix_lines, lines = itertools.tee(lines)
- prefix = functools.reduce(cls.common_prefix, prefix_lines)
- return cls(prefix, lines)
-
- def __call__(self, line):
- if not self.prefix:
- return line
- null, prefix, rest = line.partition(self.prefix)
- return rest
-
- @staticmethod
- def common_prefix(s1, s2):
- """
- Return the common prefix of two lines.
- """
- index = min(len(s1), len(s2))
- while s1[:index] != s2[:index]:
- index -= 1
- return s1[:index]
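-
-
-# Editor's sketch (illustrative, not part of the vendored source):
-# strip_prefix folds common_prefix over *all* lines, so the prefix is the
-# longest leading string shared by every line, not just by the first two.
-# This demo helper is hypothetical:
-def _demo_stripper():
- res = Stripper.strip_prefix(['>>> a\n', '>>> b\n', '>> c\n'])
- assert res.prefix == '>>'
- assert list(res.lines) == ['> a\n', '> b\n', ' c\n']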
-
-
-def remove_prefix(text, prefix):
- """
- Remove the prefix from the text if it exists.
-
- >>> remove_prefix('underwhelming performance', 'underwhelming ')
- 'performance'
-
- >>> remove_prefix('something special', 'sample')
- 'something special'
- """
- null, prefix, rest = text.rpartition(prefix)
- return rest
-
-
-def remove_suffix(text, suffix):
- """
- Remove the suffix from the text if it exists.
-
- >>> remove_suffix('name.git', '.git')
- 'name'
-
- >>> remove_suffix('something special', 'sample')
- 'something special'
- """
- rest, suffix, null = text.partition(suffix)
- return rest
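-
-
-# Editor's note (illustrative, not part of the vendored source): these
-# helpers are built on rpartition/partition, so when the affix also occurs
-# elsewhere in the string they strip more than str.removeprefix and
-# str.removesuffix (Python 3.9+) would. This demo helper is hypothetical:
-def _demo_affix_caveat():
- assert remove_prefix('name.git', 'name') == '.git'
- # rpartition keys on the *last* occurrence of the prefix:
- assert remove_prefix('abcab', 'ab') == ''  # str.removeprefix: 'cab'
- # partition keys on the *first* occurrence of the suffix:
- assert remove_suffix('abcab', 'ab') == ''  # str.removesuffix: 'abc'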
-
-
-def normalize_newlines(text):
- r"""
- Replace alternate newlines with the canonical newline.
-
- >>> normalize_newlines('Lorem Ipsum\u2029')
- 'Lorem Ipsum\n'
- >>> normalize_newlines('Lorem Ipsum\r\n')
- 'Lorem Ipsum\n'
- >>> normalize_newlines('Lorem Ipsum\x85')
- 'Lorem Ipsum\n'
- """
- newlines = ['\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029']
- pattern = '|'.join(newlines)
- return re.sub(pattern, '\n', text)
-
-
-def _nonblank(str):
- return str and not str.startswith('#')
-
-
-@functools.singledispatch
-def yield_lines(iterable):
- r"""
- Yield valid lines of a string or iterable.
-
- >>> list(yield_lines(''))
- []
- >>> list(yield_lines(['foo', 'bar']))
- ['foo', 'bar']
- >>> list(yield_lines('foo\nbar'))
- ['foo', 'bar']
- >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
- ['foo', 'baz #comment']
- >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
- ['foo', 'bar', 'baz', 'bing']
- """
- return itertools.chain.from_iterable(map(yield_lines, iterable))
-
-
-@yield_lines.register(str)
-def _(text):
- return filter(_nonblank, map(str.strip, text.splitlines()))
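-
-
-# Editor's sketch (illustrative, not part of the vendored source): the
-# singledispatch pair above recurses through arbitrarily nested iterables
-# and bottoms out at str, where lines are stripped and blank or
-# '#'-prefixed lines are dropped. This demo helper is hypothetical:
-def _demo_yield_lines():
- nested = ['a\nb', ['c', '#comment\nd']]
- assert list(yield_lines(nested)) == ['a', 'b', 'c', 'd']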
-
-
-def drop_comment(line):
- """
- Drop comments.
-
- >>> drop_comment('foo # bar')
- 'foo'
-
- A hash without a space may be in a URL.
-
- >>> drop_comment('http://example.com/foo#bar')
- 'http://example.com/foo#bar'
- """
- return line.partition(' #')[0]
-
-
-def join_continuation(lines):
- r"""
- Join lines continued by a trailing backslash.
-
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar', 'baz']))
- ['foobar', 'baz']
- >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
- ['foobarbaz']
-
- Not sure why, but...
- The character preceding the backslash is also elided.
-
- >>> list(join_continuation(['goo\\', 'dly']))
- ['godly']
-
- A terrible idea, but...
- If no line is available to continue, suppress the lines.
-
- >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
- ['foo']
- """
- lines = iter(lines)
- for item in lines:
- while item.endswith('\\'):
- try:
- item = item[:-2].strip() + next(lines)
- except StopIteration:
- return
- yield item
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 1303ba4..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/jaraco/text/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py
deleted file mode 100644
index 19a169f..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .more import * # noqa
-from .recipes import * # noqa
-
-__version__ = '8.8.0'
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index a22e50d..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-311.pyc
deleted file mode 100644
index d13ab0c..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/more.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/recipes.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/recipes.cpython-311.pyc
deleted file mode 100644
index 875ec3a..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/__pycache__/recipes.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/more.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/more.py
deleted file mode 100644
index e6fca4d..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/more.py
+++ /dev/null
@@ -1,3824 +0,0 @@
-import warnings
-
-from collections import Counter, defaultdict, deque, abc
-from collections.abc import Sequence
-from functools import partial, reduce, wraps
-from heapq import merge, heapify, heapreplace, heappop
-from itertools import (
- chain,
- compress,
- count,
- cycle,
- dropwhile,
- groupby,
- islice,
- repeat,
- starmap,
- takewhile,
- tee,
- zip_longest,
-)
-from math import exp, factorial, floor, log
-from queue import Empty, Queue
-from random import random, randrange, uniform
-from operator import itemgetter, mul, sub, gt, lt
-from sys import hexversion, maxsize
-from time import monotonic
-
-from .recipes import (
- consume,
- flatten,
- pairwise,
- powerset,
- take,
- unique_everseen,
-)
-
-__all__ = [
- 'AbortThread',
- 'adjacent',
- 'always_iterable',
- 'always_reversible',
- 'bucket',
- 'callback_iter',
- 'chunked',
- 'circular_shifts',
- 'collapse',
- 'collate',
- 'consecutive_groups',
- 'consumer',
- 'countable',
- 'count_cycle',
- 'mark_ends',
- 'difference',
- 'distinct_combinations',
- 'distinct_permutations',
- 'distribute',
- 'divide',
- 'exactly_n',
- 'filter_except',
- 'first',
- 'groupby_transform',
- 'ilen',
- 'interleave_longest',
- 'interleave',
- 'intersperse',
- 'islice_extended',
- 'iterate',
- 'ichunked',
- 'is_sorted',
- 'last',
- 'locate',
- 'lstrip',
- 'make_decorator',
- 'map_except',
- 'map_reduce',
- 'nth_or_last',
- 'nth_permutation',
- 'nth_product',
- 'numeric_range',
- 'one',
- 'only',
- 'padded',
- 'partitions',
- 'set_partitions',
- 'peekable',
- 'repeat_last',
- 'replace',
- 'rlocate',
- 'rstrip',
- 'run_length',
- 'sample',
- 'seekable',
- 'SequenceView',
- 'side_effect',
- 'sliced',
- 'sort_together',
- 'split_at',
- 'split_after',
- 'split_before',
- 'split_when',
- 'split_into',
- 'spy',
- 'stagger',
- 'strip',
- 'substrings',
- 'substrings_indexes',
- 'time_limited',
- 'unique_to_each',
- 'unzip',
- 'windowed',
- 'with_iter',
- 'UnequalIterablesError',
- 'zip_equal',
- 'zip_offset',
- 'windowed_complete',
- 'all_unique',
- 'value_chain',
- 'product_index',
- 'combination_index',
- 'permutation_index',
-]
-
-_marker = object()
-
-
-def chunked(iterable, n, strict=False):
- """Break *iterable* into lists of length *n*:
-
- >>> list(chunked([1, 2, 3, 4, 5, 6], 3))
- [[1, 2, 3], [4, 5, 6]]
-
- By default, the last yielded list will have fewer than *n* elements
- if the length of *iterable* is not divisible by *n*:
-
- >>> list(chunked([1, 2, 3, 4, 5, 6, 7, 8], 3))
- [[1, 2, 3], [4, 5, 6], [7, 8]]
-
- To use a fill-in value instead, see the :func:`grouper` recipe.
-
- If the length of *iterable* is not divisible by *n* and *strict* is
- ``True``, then ``ValueError`` will be raised before the last
- list is yielded.
-
- """
- iterator = iter(partial(take, n, iter(iterable)), [])
- if strict:
-
- def ret():
- for chunk in iterator:
- if len(chunk) != n:
- raise ValueError('iterable is not divisible by n.')
- yield chunk
-
- return iter(ret())
- else:
- return iterator
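-
-
-# Editor's note (illustrative, not part of the vendored source): the
-# two-argument form iter(callable, sentinel) above calls take(n, it)
-# repeatedly and stops as soon as take returns the empty-list sentinel,
-# which keeps chunked lazy. This demo helper is hypothetical:
-def _demo_chunked():
- chunks = chunked(iter(range(7)), 3)  # accepts any iterable, lazily
- assert next(chunks) == [0, 1, 2]
- assert list(chunks) == [[3, 4, 5], [6]]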
-
-
-def first(iterable, default=_marker):
- """Return the first item of *iterable*, or *default* if *iterable* is
- empty.
-
- >>> first([0, 1, 2, 3])
- 0
- >>> first([], 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
-
- :func:`first` is useful when you have a generator of expensive-to-retrieve
- values and want any arbitrary one. It is marginally shorter than
- ``next(iter(iterable), default)``.
-
- """
- try:
- return next(iter(iterable))
- except StopIteration as e:
- if default is _marker:
- raise ValueError(
- 'first() was called on an empty iterable, and no '
- 'default value was provided.'
- ) from e
- return default
-
-
-def last(iterable, default=_marker):
- """Return the last item of *iterable*, or *default* if *iterable* is
- empty.
-
- >>> last([0, 1, 2, 3])
- 3
- >>> last([], 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
- """
- try:
- if isinstance(iterable, Sequence):
- return iterable[-1]
- # Work around https://bugs.python.org/issue38525
- elif hasattr(iterable, '__reversed__') and (hexversion != 0x030800F0):
- return next(reversed(iterable))
- else:
- return deque(iterable, maxlen=1)[-1]
- except (IndexError, TypeError, StopIteration):
- if default is _marker:
- raise ValueError(
- 'last() was called on an empty iterable, and no default was '
- 'provided.'
- )
- return default
-
-
-def nth_or_last(iterable, n, default=_marker):
- """Return the nth or the last item of *iterable*,
- or *default* if *iterable* is empty.
-
- >>> nth_or_last([0, 1, 2, 3], 2)
- 2
- >>> nth_or_last([0, 1], 2)
- 1
- >>> nth_or_last([], 0, 'some default')
- 'some default'
-
- If *default* is not provided and there are no items in the iterable,
- raise ``ValueError``.
- """
- return last(islice(iterable, n + 1), default=default)
-
-
-class peekable:
- """Wrap an iterator to allow lookahead and prepending elements.
-
- Call :meth:`peek` on the result to get the value that will be returned
- by :func:`next`. This won't advance the iterator:
-
- >>> p = peekable(['a', 'b'])
- >>> p.peek()
- 'a'
- >>> next(p)
- 'a'
-
- Pass :meth:`peek` a default value to return that instead of raising
- ``StopIteration`` when the iterator is exhausted.
-
- >>> p = peekable([])
- >>> p.peek('hi')
- 'hi'
-
- peekables also offer a :meth:`prepend` method, which "inserts" items
- at the head of the iterable:
-
- >>> p = peekable([1, 2, 3])
- >>> p.prepend(10, 11, 12)
- >>> next(p)
- 10
- >>> p.peek()
- 11
- >>> list(p)
- [11, 12, 1, 2, 3]
-
- peekables can be indexed. Index 0 is the item that will be returned by
- :func:`next`, index 1 is the item after that, and so on.
- The values up to the given index will be cached.
-
- >>> p = peekable(['a', 'b', 'c', 'd'])
- >>> p[0]
- 'a'
- >>> p[1]
- 'b'
- >>> next(p)
- 'a'
-
- Negative indexes are supported, but be aware that they will cache the
- remaining items in the source iterator, which may require significant
- storage.
-
- To check whether a peekable is exhausted, check its truth value:
-
- >>> p = peekable(['a', 'b'])
- >>> if p: # peekable has items
- ... list(p)
- ['a', 'b']
- >>> if not p: # peekable is exhausted
- ... list(p)
- []
-
- """
-
- def __init__(self, iterable):
- self._it = iter(iterable)
- self._cache = deque()
-
- def __iter__(self):
- return self
-
- def __bool__(self):
- try:
- self.peek()
- except StopIteration:
- return False
- return True
-
- def peek(self, default=_marker):
- """Return the item that will be next returned from ``next()``.
-
- Return ``default`` if there are no items left. If ``default`` is not
- provided, raise ``StopIteration``.
-
- """
- if not self._cache:
- try:
- self._cache.append(next(self._it))
- except StopIteration:
- if default is _marker:
- raise
- return default
- return self._cache[0]
-
- def prepend(self, *items):
- """Stack up items to be the next ones returned from ``next()`` or
- ``self.peek()``. The items will be returned in
- first in, first out order::
-
- >>> p = peekable([1, 2, 3])
- >>> p.prepend(10, 11, 12)
- >>> next(p)
- 10
- >>> list(p)
- [11, 12, 1, 2, 3]
-
- It is possible, by prepending items, to "resurrect" a peekable that
- previously raised ``StopIteration``.
-
- >>> p = peekable([])
- >>> next(p)
- Traceback (most recent call last):
- ...
- StopIteration
- >>> p.prepend(1)
- >>> next(p)
- 1
- >>> next(p)
- Traceback (most recent call last):
- ...
- StopIteration
-
- """
- self._cache.extendleft(reversed(items))
-
- def __next__(self):
- if self._cache:
- return self._cache.popleft()
-
- return next(self._it)
-
- def _get_slice(self, index):
- # Normalize the slice's arguments
- step = 1 if (index.step is None) else index.step
- if step > 0:
- start = 0 if (index.start is None) else index.start
- stop = maxsize if (index.stop is None) else index.stop
- elif step < 0:
- start = -1 if (index.start is None) else index.start
- stop = (-maxsize - 1) if (index.stop is None) else index.stop
- else:
- raise ValueError('slice step cannot be zero')
-
- # If either the start or stop index is negative, we'll need to cache
- # the rest of the iterable in order to slice from the right side.
- if (start < 0) or (stop < 0):
- self._cache.extend(self._it)
- # Otherwise we'll need to find the rightmost index and cache to that
- # point.
- else:
- n = min(max(start, stop) + 1, maxsize)
- cache_len = len(self._cache)
- if n >= cache_len:
- self._cache.extend(islice(self._it, n - cache_len))
-
- return list(self._cache)[index]
-
- def __getitem__(self, index):
- if isinstance(index, slice):
- return self._get_slice(index)
-
- cache_len = len(self._cache)
- if index < 0:
- self._cache.extend(self._it)
- elif index >= cache_len:
- self._cache.extend(islice(self._it, index + 1 - cache_len))
-
- return self._cache[index]
-
-
-def collate(*iterables, **kwargs):
- """Return a sorted merge of the items from each of several already-sorted
- *iterables*.
-
- >>> list(collate('ACDZ', 'AZ', 'JKL'))
- ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z']
-
- Works lazily, keeping only the next value from each iterable in memory. Use
- :func:`collate` to, for example, perform an n-way mergesort of items that
- don't fit in memory.
-
- If a *key* function is specified, the iterables will be sorted according
- to its result:
-
- >>> key = lambda s: int(s) # Sort by numeric value, not by string
- >>> list(collate(['1', '10'], ['2', '11'], key=key))
- ['1', '2', '10', '11']
-
-
- If the *iterables* are sorted in descending order, set *reverse* to
- ``True``:
-
- >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True))
- [5, 4, 3, 2, 1, 0]
-
- If the elements of the passed-in iterables are out of order, you might get
- unexpected results.
-
- On Python 3.5+, this function is an alias for :func:`heapq.merge`.
-
- """
- warnings.warn(
- "collate is no longer part of more_itertools, use heapq.merge",
- DeprecationWarning,
- )
- return merge(*iterables, **kwargs)
-
-
-def consumer(func):
- """Decorator that automatically advances a PEP-342-style "reverse iterator"
- to its first yield point so you don't have to call ``next()`` on it
- manually.
-
- >>> @consumer
- ... def tally():
- ... i = 0
- ... while True:
- ... print('Thing number %s is %s.' % (i, (yield)))
- ... i += 1
- ...
- >>> t = tally()
- >>> t.send('red')
- Thing number 0 is red.
- >>> t.send('fish')
- Thing number 1 is fish.
-
- Without the decorator, you would have to call ``next(t)`` before
- ``t.send()`` could be used.
-
- """
-
- @wraps(func)
- def wrapper(*args, **kwargs):
- gen = func(*args, **kwargs)
- next(gen)
- return gen
-
- return wrapper
-
-
-def ilen(iterable):
- """Return the number of items in *iterable*.
-
- >>> ilen(x for x in range(1000000) if x % 3 == 0)
- 333334
-
- This consumes the iterable, so handle with care.
-
- """
- # This approach was selected because benchmarks showed it's likely the
- # fastest of the known implementations at the time of writing.
- # See GitHub tracker: #236, #230.
- counter = count()
- deque(zip(iterable, counter), maxlen=0)
- return next(counter)
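-
-
-# Editor's note (illustrative, not part of the vendored source): zip
-# advances count() in lockstep with the iterable, deque(..., maxlen=0)
-# drains the pairs at C speed, and next(counter) then reveals how many
-# items were consumed. This demo helper is hypothetical:
-def _demo_ilen():
- assert ilen(iter('abc')) == 3
- assert ilen(x for x in range(10) if x % 2) == 5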
-
-
-def iterate(func, start):
- """Return ``start``, ``func(start)``, ``func(func(start))``, ...
-
- >>> from itertools import islice
- >>> list(islice(iterate(lambda x: 2*x, 1), 10))
- [1, 2, 4, 8, 16, 32, 64, 128, 256, 512]
-
- """
- while True:
- yield start
- start = func(start)
-
-
-def with_iter(context_manager):
- """Wrap an iterable in a ``with`` statement, so it closes once exhausted.
-
- For example, this will close the file when the iterator is exhausted::
-
- upper_lines = (line.upper() for line in with_iter(open('foo')))
-
- Any context manager which returns an iterable is a candidate for
- ``with_iter``.
-
- """
- with context_manager as iterable:
- yield from iterable
-
-
-def one(iterable, too_short=None, too_long=None):
- """Return the first item from *iterable*, which is expected to contain only
- that item. Raise an exception if *iterable* is empty or has more than one
- item.
-
- :func:`one` is useful for ensuring that an iterable contains only one item.
- For example, it can be used to retrieve the result of a database query
- that is expected to return a single row.
-
- If *iterable* is empty, ``ValueError`` will be raised. You may specify a
- different exception with the *too_short* keyword:
-
- >>> it = []
- >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: too few items in iterable (expected 1)
- >>> too_short = IndexError('too few items')
- >>> one(it, too_short=too_short) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- IndexError: too few items
-
- Similarly, if *iterable* contains more than one item, ``ValueError`` will
- be raised. You may specify a different exception with the *too_long*
- keyword:
-
- >>> it = ['too', 'many']
- >>> one(it) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: Expected exactly one item in iterable, but got 'too',
- 'many', and perhaps more.
- >>> too_long = RuntimeError
- >>> one(it, too_long=too_long) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- RuntimeError
-
- Note that :func:`one` attempts to advance *iterable* twice to ensure there
- is only one item. See :func:`spy` or :func:`peekable` to check iterable
- contents less destructively.
-
- """
- it = iter(iterable)
-
- try:
- first_value = next(it)
- except StopIteration as e:
- raise (
- too_short or ValueError('too few items in iterable (expected 1)')
- ) from e
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- 'Expected exactly one item in iterable, but got {!r}, {!r}, '
- 'and perhaps more.'.format(first_value, second_value)
- )
- raise too_long or ValueError(msg)
-
- return first_value
-
-
-def distinct_permutations(iterable, r=None):
- """Yield successive distinct permutations of the elements in *iterable*.
-
- >>> sorted(distinct_permutations([1, 0, 1]))
- [(0, 1, 1), (1, 0, 1), (1, 1, 0)]
-
- Equivalent to ``set(permutations(iterable))``, except duplicates are not
- generated and thrown away. For larger input sequences this is much more
- efficient.
-
- Duplicate permutations arise when there are duplicated elements in the
- input iterable. The number of items returned is
- `n! / (x_1! * x_2! * ... * x_k!)`, where `n` is the total number of
- items input, and each `x_i` is the count of one of the `k` distinct
- items in the input sequence.
-
- If *r* is given, only the *r*-length permutations are yielded.
-
- >>> sorted(distinct_permutations([1, 0, 1], r=2))
- [(0, 1), (1, 0), (1, 1)]
- >>> sorted(distinct_permutations(range(3), r=2))
- [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
-
- """
- # Algorithm: https://w.wiki/Qai
- def _full(A):
- while True:
- # Yield the permutation we have
- yield tuple(A)
-
- # Find the largest index i such that A[i] < A[i + 1]
- for i in range(size - 2, -1, -1):
- if A[i] < A[i + 1]:
- break
- # If no such index exists, this permutation is the last one
- else:
- return
-
- # Find the largest index j greater than i such that A[i] < A[j]
- for j in range(size - 1, i, -1):
- if A[i] < A[j]:
- break
-
- # Swap the value of A[i] with that of A[j], then reverse the
- # sequence from A[i + 1] to form the new permutation
- A[i], A[j] = A[j], A[i]
- A[i + 1 :] = A[: i - size : -1] # A[i + 1:][::-1]
-
- # Algorithm: modified from the above
- def _partial(A, r):
- # Split A into the first r items and the last r items
- head, tail = A[:r], A[r:]
- right_head_indexes = range(r - 1, -1, -1)
- left_tail_indexes = range(len(tail))
-
- while True:
- # Yield the permutation we have
- yield tuple(head)
-
- # Starting from the right, find the first index of the head with
- # value smaller than the maximum value of the tail - call it i.
- pivot = tail[-1]
- for i in right_head_indexes:
- if head[i] < pivot:
- break
- pivot = head[i]
- else:
- return
-
- # Starting from the left, find the first value of the tail
- # with a value greater than head[i] and swap.
- for j in left_tail_indexes:
- if tail[j] > head[i]:
- head[i], tail[j] = tail[j], head[i]
- break
- # If we didn't find one, start from the right and find the first
- # index of the head with a value greater than head[i] and swap.
- else:
- for j in right_head_indexes:
- if head[j] > head[i]:
- head[i], head[j] = head[j], head[i]
- break
-
- # Reverse head[i + 1:] and swap it with tail[:r - (i + 1)]
- tail += head[: i - r : -1] # head[i + 1:][::-1]
- i += 1
- head[i:], tail[:] = tail[: r - i], tail[r - i :]
-
- items = sorted(iterable)
-
- size = len(items)
- if r is None:
- r = size
-
- if 0 < r <= size:
- return _full(items) if (r == size) else _partial(items, r)
-
- return iter(() if r else ((),))
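-
-
-# Editor's note (illustrative, not part of the vendored source): the
-# multinomial count from the docstring, in a hypothetical demo -- four
-# items with 'a' repeated three times yield 4!/3! = 4 permutations:
-def _demo_distinct_permutations():
- assert len(list(distinct_permutations('aaab'))) == 4
- assert ('b', 'a', 'a', 'a') in set(distinct_permutations('aaab'))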
-
-
-def intersperse(e, iterable, n=1):
- """Intersperse filler element *e* among the items in *iterable*, leaving
- *n* items between each filler element.
-
- >>> list(intersperse('!', [1, 2, 3, 4, 5]))
- [1, '!', 2, '!', 3, '!', 4, '!', 5]
-
- >>> list(intersperse(None, [1, 2, 3, 4, 5], n=2))
- [1, 2, None, 3, 4, None, 5]
-
- """
- if n == 0:
- raise ValueError('n must be > 0')
- elif n == 1:
- # interleave(repeat(e), iterable) -> e, x_0, e, e, x_1, e, x_2...
- # islice(..., 1, None) -> x_0, e, e, x_1, e, x_2...
- return islice(interleave(repeat(e), iterable), 1, None)
- else:
- # interleave(filler, chunks) -> [e], [x_0, x_1], [e], [x_2, x_3]...
- # islice(..., 1, None) -> [x_0, x_1], [e], [x_2, x_3]...
- # flatten(...) -> x_0, x_1, e, x_2, x_3...
- filler = repeat([e])
- chunks = chunked(iterable, n)
- return flatten(islice(interleave(filler, chunks), 1, None))
-
-
-def unique_to_each(*iterables):
- """Return the elements from each of the input iterables that aren't in the
- other input iterables.
-
- For example, suppose you have a set of packages, each with a set of
- dependencies::
-
- {'pkg_1': {'A', 'B'}, 'pkg_2': {'B', 'C'}, 'pkg_3': {'B', 'D'}}
-
- If you remove one package, which dependencies can also be removed?
-
- If ``pkg_1`` is removed, then ``A`` is no longer necessary - it is not
- associated with ``pkg_2`` or ``pkg_3``. Similarly, ``C`` is only needed for
- ``pkg_2``, and ``D`` is only needed for ``pkg_3``::
-
- >>> unique_to_each({'A', 'B'}, {'B', 'C'}, {'B', 'D'})
- [['A'], ['C'], ['D']]
-
- If there are duplicates in one input iterable that aren't in the others
- they will be duplicated in the output. Input order is preserved::
-
- >>> unique_to_each("mississippi", "missouri")
- [['p', 'p'], ['o', 'u', 'r']]
-
- It is assumed that the elements of each iterable are hashable.
-
- """
- pool = [list(it) for it in iterables]
- counts = Counter(chain.from_iterable(map(set, pool)))
- uniques = {element for element in counts if counts[element] == 1}
- return [list(filter(uniques.__contains__, it)) for it in pool]
-
-
-def windowed(seq, n, fillvalue=None, step=1):
- """Return a sliding window of width *n* over the given iterable.
-
- >>> all_windows = windowed([1, 2, 3, 4, 5], 3)
- >>> list(all_windows)
- [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
-
- When the window is larger than the iterable, *fillvalue* is used in place
- of missing values:
-
- >>> list(windowed([1, 2, 3], 4))
- [(1, 2, 3, None)]
-
- Each window will advance in increments of *step*:
-
- >>> list(windowed([1, 2, 3, 4, 5, 6], 3, fillvalue='!', step=2))
- [(1, 2, 3), (3, 4, 5), (5, 6, '!')]
-
- To slide into the iterable's items, use :func:`chain` to add filler items
- to the left:
-
- >>> iterable = [1, 2, 3, 4]
- >>> n = 3
- >>> padding = [None] * (n - 1)
- >>> list(windowed(chain(padding, iterable), 3))
- [(None, None, 1), (None, 1, 2), (1, 2, 3), (2, 3, 4)]
- """
- if n < 0:
- raise ValueError('n must be >= 0')
- if n == 0:
- yield tuple()
- return
- if step < 1:
- raise ValueError('step must be >= 1')
-
- window = deque(maxlen=n)
- i = n
- for _ in map(window.append, seq):
- i -= 1
- if not i:
- i = step
- yield tuple(window)
-
- size = len(window)
- if size < n:
- yield tuple(chain(window, repeat(fillvalue, n - size)))
- elif 0 < i < min(step, n):
- window += (fillvalue,) * i
- yield tuple(window)
-
-
-def substrings(iterable):
- """Yield all of the substrings of *iterable*.
-
- >>> [''.join(s) for s in substrings('more')]
- ['m', 'o', 'r', 'e', 'mo', 'or', 're', 'mor', 'ore', 'more']
-
- Note that non-string iterables can also be subdivided.
-
- >>> list(substrings([0, 1, 2]))
- [(0,), (1,), (2,), (0, 1), (1, 2), (0, 1, 2)]
-
- """
- # The length-1 substrings
- seq = []
- for item in iter(iterable):
- seq.append(item)
- yield (item,)
- seq = tuple(seq)
- item_count = len(seq)
-
- # And the rest
- for n in range(2, item_count + 1):
- for i in range(item_count - n + 1):
- yield seq[i : i + n]
-
-
-def substrings_indexes(seq, reverse=False):
- """Yield all substrings and their positions in *seq*
-
- The items yielded will be a tuple of the form ``(substr, i, j)``, where
- ``substr == seq[i:j]``.
-
- This function only works for iterables that support slicing, such as
- ``str`` objects.
-
- >>> for item in substrings_indexes('more'):
- ... print(item)
- ('m', 0, 1)
- ('o', 1, 2)
- ('r', 2, 3)
- ('e', 3, 4)
- ('mo', 0, 2)
- ('or', 1, 3)
- ('re', 2, 4)
- ('mor', 0, 3)
- ('ore', 1, 4)
- ('more', 0, 4)
-
- Set *reverse* to ``True`` to yield the same items in the opposite order.
-
-
- """
- r = range(1, len(seq) + 1)
- if reverse:
- r = reversed(r)
- return (
- (seq[i : i + L], i, i + L) for L in r for i in range(len(seq) - L + 1)
- )
-
-
-class bucket:
- """Wrap *iterable* and return an object that buckets it iterable into
- child iterables based on a *key* function.
-
- >>> iterable = ['a1', 'b1', 'c1', 'a2', 'b2', 'c2', 'b3']
- >>> s = bucket(iterable, key=lambda x: x[0]) # Bucket by 1st character
- >>> sorted(list(s)) # Get the keys
- ['a', 'b', 'c']
- >>> a_iterable = s['a']
- >>> next(a_iterable)
- 'a1'
- >>> next(a_iterable)
- 'a2'
- >>> list(s['b'])
- ['b1', 'b2', 'b3']
-
- The original iterable will be advanced and its items will be cached until
- they are used by the child iterables. This may require significant storage.
-
- By default, attempting to select a bucket to which no items belong will
- exhaust the iterable and cache all values.
- If you specify a *validator* function, selected buckets will instead be
- checked against it.
-
- >>> from itertools import count
- >>> it = count(1, 2) # Infinite sequence of odd numbers
- >>> key = lambda x: x % 10 # Bucket by last digit
- >>> validator = lambda x: x in {1, 3, 5, 7, 9} # Odd digits only
- >>> s = bucket(it, key=key, validator=validator)
- >>> 2 in s
- False
- >>> list(s[2])
- []
-
- """
-
- def __init__(self, iterable, key, validator=None):
- self._it = iter(iterable)
- self._key = key
- self._cache = defaultdict(deque)
- self._validator = validator or (lambda x: True)
-
- def __contains__(self, value):
- if not self._validator(value):
- return False
-
- try:
- item = next(self[value])
- except StopIteration:
- return False
- else:
- self._cache[value].appendleft(item)
-
- return True
-
- def _get_values(self, value):
- """
- Helper to yield items from the parent iterator that match *value*.
- Items that don't match are stored in the local cache as they
- are encountered.
- """
- while True:
- # If we've cached some items that match the target value, emit
- # the first one and evict it from the cache.
- if self._cache[value]:
- yield self._cache[value].popleft()
- # Otherwise we need to advance the parent iterator to search for
- # a matching item, caching the rest.
- else:
- while True:
- try:
- item = next(self._it)
- except StopIteration:
- return
- item_value = self._key(item)
- if item_value == value:
- yield item
- break
- elif self._validator(item_value):
- self._cache[item_value].append(item)
-
- def __iter__(self):
- for item in self._it:
- item_value = self._key(item)
- if self._validator(item_value):
- self._cache[item_value].append(item)
-
- yield from self._cache.keys()
-
- def __getitem__(self, value):
- if not self._validator(value):
- return iter(())
-
- return self._get_values(value)
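-
-
-# Editor's sketch (illustrative, not part of the vendored source): items
-# that don't match the requested key are parked in the per-key cache and
-# replayed when their own bucket is consumed. This demo helper is
-# hypothetical:
-def _demo_bucket():
- s = bucket(iter('AaBbCc'), key=str.islower)
- lower = s[True]
- assert next(lower) == 'a'  # 'A' was cached for the False bucket
- assert list(s[False]) == ['A', 'B', 'C']
- assert list(lower) == ['b', 'c']  # replayed from the cache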
-
-
-def spy(iterable, n=1):
- """Return a 2-tuple with a list containing the first *n* elements of
- *iterable*, and an iterator with the same items as *iterable*.
- This allows you to "look ahead" at the items in the iterable without
- advancing it.
-
- There is one item in the list by default:
-
- >>> iterable = 'abcdefg'
- >>> head, iterable = spy(iterable)
- >>> head
- ['a']
- >>> list(iterable)
- ['a', 'b', 'c', 'd', 'e', 'f', 'g']
-
- You may use unpacking to retrieve items instead of lists:
-
- >>> (head,), iterable = spy('abcdefg')
- >>> head
- 'a'
- >>> (first, second), iterable = spy('abcdefg', 2)
- >>> first
- 'a'
- >>> second
- 'b'
-
- The number of items requested can be larger than the number of items in
- the iterable:
-
- >>> iterable = [1, 2, 3, 4, 5]
- >>> head, iterable = spy(iterable, 10)
- >>> head
- [1, 2, 3, 4, 5]
- >>> list(iterable)
- [1, 2, 3, 4, 5]
-
- """
- it = iter(iterable)
- head = take(n, it)
-
- return head.copy(), chain(head, it)
-
-
-def interleave(*iterables):
- """Return a new iterable yielding from each iterable in turn,
- until the shortest is exhausted.
-
- >>> list(interleave([1, 2, 3], [4, 5], [6, 7, 8]))
- [1, 4, 6, 2, 5, 7]
-
- For a version that doesn't terminate after the shortest iterable is
- exhausted, see :func:`interleave_longest`.
-
- """
- return chain.from_iterable(zip(*iterables))
-
-
-def interleave_longest(*iterables):
- """Return a new iterable yielding from each iterable in turn,
- skipping any that are exhausted.
-
- >>> list(interleave_longest([1, 2, 3], [4, 5], [6, 7, 8]))
- [1, 4, 6, 2, 5, 7, 3, 8]
-
- This function produces the same output as :func:`roundrobin`, but may
- perform better for some inputs (in particular when the number of iterables
- is large).
-
- """
- i = chain.from_iterable(zip_longest(*iterables, fillvalue=_marker))
- return (x for x in i if x is not _marker)
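-
-
-# Editor's note (illustrative, not part of the vendored source): because
-# None can be a legitimate item, exhausted slots are padded with the
-# module-private _marker object and filtered out by identity. This demo
-# helper is hypothetical:
-def _demo_interleave_longest():
- assert list(interleave_longest([None, 1], [2])) == [None, 2, 1]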
-
-
-def collapse(iterable, base_type=None, levels=None):
- """Flatten an iterable with multiple levels of nesting (e.g., a list of
- lists of tuples) into non-iterable types.
-
- >>> iterable = [(1, 2), ([3, 4], [[5], [6]])]
- >>> list(collapse(iterable))
- [1, 2, 3, 4, 5, 6]
-
- Binary and text strings are not considered iterable and
- will not be collapsed.
-
- To avoid collapsing other types, specify *base_type*:
-
- >>> iterable = ['ab', ('cd', 'ef'), ['gh', 'ij']]
- >>> list(collapse(iterable, base_type=tuple))
- ['ab', ('cd', 'ef'), 'gh', 'ij']
-
- Specify *levels* to stop flattening after a certain level:
-
- >>> iterable = [('a', ['b']), ('c', ['d'])]
- >>> list(collapse(iterable)) # Fully flattened
- ['a', 'b', 'c', 'd']
- >>> list(collapse(iterable, levels=1)) # Only one level flattened
- ['a', ['b'], 'c', ['d']]
-
- """
-
- def walk(node, level):
- if (
- ((levels is not None) and (level > levels))
- or isinstance(node, (str, bytes))
- or ((base_type is not None) and isinstance(node, base_type))
- ):
- yield node
- return
-
- try:
- tree = iter(node)
- except TypeError:
- yield node
- return
- else:
- for child in tree:
- yield from walk(child, level + 1)
-
- yield from walk(iterable, 0)
-
-
-def side_effect(func, iterable, chunk_size=None, before=None, after=None):
- """Invoke *func* on each item in *iterable* (or on each *chunk_size* group
- of items) before yielding the item.
-
- `func` must be a function that takes a single argument. Its return value
- will be discarded.
-
- *before* and *after* are optional functions that take no arguments. They
- will be executed before iteration starts and after it ends, respectively.
-
- `side_effect` can be used for logging, updating progress bars, or anything
- that is not functionally "pure."
-
- Emitting a status message:
-
- >>> from more_itertools import consume
- >>> func = lambda item: print('Received {}'.format(item))
- >>> consume(side_effect(func, range(2)))
- Received 0
- Received 1
-
- Operating on chunks of items:
-
- >>> pair_sums = []
- >>> func = lambda chunk: pair_sums.append(sum(chunk))
- >>> list(side_effect(func, [0, 1, 2, 3, 4, 5], 2))
- [0, 1, 2, 3, 4, 5]
- >>> list(pair_sums)
- [1, 5, 9]
-
- Writing to a file-like object:
-
- >>> from io import StringIO
- >>> from more_itertools import consume
- >>> f = StringIO()
- >>> func = lambda x: print(x, file=f)
- >>> before = lambda: print(u'HEADER', file=f)
- >>> after = f.close
- >>> it = [u'a', u'b', u'c']
- >>> consume(side_effect(func, it, before=before, after=after))
- >>> f.closed
- True
-
- """
- try:
- if before is not None:
- before()
-
- if chunk_size is None:
- for item in iterable:
- func(item)
- yield item
- else:
- for chunk in chunked(iterable, chunk_size):
- func(chunk)
- yield from chunk
- finally:
- if after is not None:
- after()
-
-
-def sliced(seq, n, strict=False):
- """Yield slices of length *n* from the sequence *seq*.
-
- >>> list(sliced((1, 2, 3, 4, 5, 6), 3))
- [(1, 2, 3), (4, 5, 6)]
-
- By default, the last yielded slice will have fewer than *n* elements
- if the length of *seq* is not divisible by *n*:
-
- >>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
- [(1, 2, 3), (4, 5, 6), (7, 8)]
-
- If the length of *seq* is not divisible by *n* and *strict* is
- ``True``, then ``ValueError`` will be raised before the last
- slice is yielded.
-
- This function will only work for iterables that support slicing.
- For non-sliceable iterables, see :func:`chunked`.
-
- """
- iterator = takewhile(len, (seq[i : i + n] for i in count(0, n)))
- if strict:
-
- def ret():
- for _slice in iterator:
- if len(_slice) != n:
- raise ValueError("seq is not divisible by n.")
- yield _slice
-
- return iter(ret())
- else:
- return iterator
-
-
-def split_at(iterable, pred, maxsplit=-1, keep_separator=False):
- """Yield lists of items from *iterable*, where each list is delimited by
- an item where callable *pred* returns ``True``.
-
- >>> list(split_at('abcdcba', lambda x: x == 'b'))
- [['a'], ['c', 'd', 'c'], ['a']]
-
- >>> list(split_at(range(10), lambda n: n % 2 == 1))
- [[0], [2], [4], [6], [8], []]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_at(range(10), lambda n: n % 2 == 1, maxsplit=2))
- [[0], [2], [4, 5, 6, 7, 8, 9]]
-
- By default, the delimiting items are not included in the output.
- To include them, set *keep_separator* to ``True``.
-
- >>> list(split_at('abcdcba', lambda x: x == 'b', keep_separator=True))
- [['a'], ['b'], ['c', 'd', 'c'], ['b'], ['a']]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- if pred(item):
- yield buf
- if keep_separator:
- yield [item]
- if maxsplit == 1:
- yield list(it)
- return
- buf = []
- maxsplit -= 1
- else:
- buf.append(item)
- yield buf
-
-
-def split_before(iterable, pred, maxsplit=-1):
- """Yield lists of items from *iterable*, where each list ends just before
- an item for which callable *pred* returns ``True``:
-
- >>> list(split_before('OneTwo', lambda s: s.isupper()))
- [['O', 'n', 'e'], ['T', 'w', 'o']]
-
- >>> list(split_before(range(10), lambda n: n % 3 == 0))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_before(range(10), lambda n: n % 3 == 0, maxsplit=2))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- if pred(item) and buf:
- yield buf
- if maxsplit == 1:
- yield [item] + list(it)
- return
- buf = []
- maxsplit -= 1
- buf.append(item)
- if buf:
- yield buf
-
-
-def split_after(iterable, pred, maxsplit=-1):
- """Yield lists of items from *iterable*, where each list ends with an
- item where callable *pred* returns ``True``:
-
- >>> list(split_after('one1two2', lambda s: s.isdigit()))
- [['o', 'n', 'e', '1'], ['t', 'w', 'o', '2']]
-
- >>> list(split_after(range(10), lambda n: n % 3 == 0))
- [[0], [1, 2, 3], [4, 5, 6], [7, 8, 9]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_after(range(10), lambda n: n % 3 == 0, maxsplit=2))
- [[0], [1, 2, 3], [4, 5, 6, 7, 8, 9]]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- buf = []
- it = iter(iterable)
- for item in it:
- buf.append(item)
- if pred(item) and buf:
- yield buf
- if maxsplit == 1:
- yield list(it)
- return
- buf = []
- maxsplit -= 1
- if buf:
- yield buf
-
-
-def split_when(iterable, pred, maxsplit=-1):
- """Split *iterable* into pieces based on the output of *pred*.
- *pred* should be a function that takes successive pairs of items and
- returns ``True`` if the iterable should be split in between them.
-
- For example, to find runs of increasing numbers, split the iterable when
- element ``i`` is larger than element ``i + 1``:
-
- >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2], lambda x, y: x > y))
- [[1, 2, 3, 3], [2, 5], [2, 4], [2]]
-
- At most *maxsplit* splits are done. If *maxsplit* is not specified or -1,
- then there is no limit on the number of splits:
-
- >>> list(split_when([1, 2, 3, 3, 2, 5, 2, 4, 2],
- ... lambda x, y: x > y, maxsplit=2))
- [[1, 2, 3, 3], [2, 5], [2, 4, 2]]
-
- """
- if maxsplit == 0:
- yield list(iterable)
- return
-
- it = iter(iterable)
- try:
- cur_item = next(it)
- except StopIteration:
- return
-
- buf = [cur_item]
- for next_item in it:
- if pred(cur_item, next_item):
- yield buf
- if maxsplit == 1:
- yield [next_item] + list(it)
- return
- buf = []
- maxsplit -= 1
-
- buf.append(next_item)
- cur_item = next_item
-
- yield buf
-
-
-def split_into(iterable, sizes):
- """Yield a list of sequential items from *iterable* of length 'n' for each
- integer 'n' in *sizes*.
-
- >>> list(split_into([1,2,3,4,5,6], [1,2,3]))
- [[1], [2, 3], [4, 5, 6]]
-
- If the sum of *sizes* is smaller than the length of *iterable*, then the
- remaining items of *iterable* will not be returned.
-
- >>> list(split_into([1,2,3,4,5,6], [2,3]))
- [[1, 2], [3, 4, 5]]
-
- If the sum of *sizes* is larger than the length of *iterable*, fewer items
- will be returned in the iteration that overruns *iterable* and further
- lists will be empty:
-
- >>> list(split_into([1,2,3,4], [1,2,3,4]))
- [[1], [2, 3], [4], []]
-
- When a ``None`` object is encountered in *sizes*, the returned list will
- contain items up to the end of *iterable*, the same way that
- :func:`itertools.islice` does:
-
- >>> list(split_into([1,2,3,4,5,6,7,8,9,0], [2,3,None]))
- [[1, 2], [3, 4, 5], [6, 7, 8, 9, 0]]
-
- :func:`split_into` can be useful for grouping a series of items where the
- sizes of the groups are not uniform. An example would be where in a row
- from a table, multiple columns represent elements of the same feature
- (e.g. a point represented by x,y,z) but, the format is not the same for
- all columns.
- """
- # convert the iterable argument into an iterator so its contents can
- # be consumed by islice in case it is a generator
- it = iter(iterable)
-
- for size in sizes:
- if size is None:
- yield list(it)
- return
- else:
- yield list(islice(it, size))
-
-
-def padded(iterable, fillvalue=None, n=None, next_multiple=False):
- """Yield the elements from *iterable*, followed by *fillvalue*, such that
- at least *n* items are emitted.
-
- >>> list(padded([1, 2, 3], '?', 5))
- [1, 2, 3, '?', '?']
-
- If *next_multiple* is ``True``, *fillvalue* will be emitted until the
- number of items emitted is a multiple of *n*::
-
- >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
- [1, 2, 3, 4, None, None]
-
- If *n* is ``None``, *fillvalue* will be emitted indefinitely.
-
- """
- it = iter(iterable)
- if n is None:
- yield from chain(it, repeat(fillvalue))
- elif n < 1:
- raise ValueError('n must be at least 1')
- else:
- item_count = 0
- for item in it:
- yield item
- item_count += 1
-
- remaining = (n - item_count) % n if next_multiple else n - item_count
- for _ in range(remaining):
- yield fillvalue
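-
-
-# Editor's note (illustrative, not part of the vendored source): with
-# next_multiple=True, (n - item_count) % n computes just enough fill to
-# round the total up to the next multiple of n, and no fill at all when
-# it already is one. This demo helper is hypothetical:
-def _demo_padded():
- expected = [1, 2, 3, 4, 5, 0, 0, 0]
- assert list(padded([1, 2, 3, 4, 5], 0, n=4, next_multiple=True)) == expected
- assert list(padded([1, 2, 3, 4], 0, n=4, next_multiple=True)) == [1, 2, 3, 4]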
-
-
-def repeat_last(iterable, default=None):
- """After the *iterable* is exhausted, keep yielding its last element.
-
- >>> list(islice(repeat_last(range(3)), 5))
- [0, 1, 2, 2, 2]
-
- If the iterable is empty, yield *default* forever::
-
- >>> list(islice(repeat_last(range(0), 42), 5))
- [42, 42, 42, 42, 42]
-
- """
- item = _marker
- for item in iterable:
- yield item
- final = default if item is _marker else item
- yield from repeat(final)
-
-
-def distribute(n, iterable):
- """Distribute the items from *iterable* among *n* smaller iterables.
-
- >>> group_1, group_2 = distribute(2, [1, 2, 3, 4, 5, 6])
- >>> list(group_1)
- [1, 3, 5]
- >>> list(group_2)
- [2, 4, 6]
-
- If the length of *iterable* is not evenly divisible by *n*, then the
- length of the returned iterables will not be identical:
-
- >>> children = distribute(3, [1, 2, 3, 4, 5, 6, 7])
- >>> [list(c) for c in children]
- [[1, 4, 7], [2, 5], [3, 6]]
-
- If the length of *iterable* is smaller than *n*, then the last returned
- iterables will be empty:
-
- >>> children = distribute(5, [1, 2, 3])
- >>> [list(c) for c in children]
- [[1], [2], [3], [], []]
-
- This function uses :func:`itertools.tee` and may require significant
- storage. If you need the order of items in the smaller iterables to match the
- original iterable, see :func:`divide`.
-
- """
- if n < 1:
- raise ValueError('n must be at least 1')
-
- children = tee(iterable, n)
- return [islice(it, index, None, n) for index, it in enumerate(children)]
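-
-
-# Editor's note (illustrative, not part of the vendored source): every
-# child is the same tee'd stream viewed through islice with a distinct
-# start offset and a stride of n, so items are dealt round-robin rather
-# than in contiguous runs (contrast with divide, defined further below).
-# This demo helper is hypothetical:
-def _demo_distribute_vs_divide():
- a, b = distribute(2, [1, 2, 3, 4, 5, 6])
- assert [list(a), list(b)] == [[1, 3, 5], [2, 4, 6]]
- c, d = divide(2, [1, 2, 3, 4, 5, 6])
- assert [list(c), list(d)] == [[1, 2, 3], [4, 5, 6]]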
-
-
-def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None):
- """Yield tuples whose elements are offset from *iterable*.
- The amount by which the `i`-th item in each tuple is offset is given by
- the `i`-th item in *offsets*.
-
- >>> list(stagger([0, 1, 2, 3]))
- [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
- >>> list(stagger(range(8), offsets=(0, 2, 4)))
- [(0, 2, 4), (1, 3, 5), (2, 4, 6), (3, 5, 7)]
-
- By default, the sequence will end when the final element of a tuple is the
- last item in the iterable. To continue until the first element of a tuple
- is the last item in the iterable, set *longest* to ``True``::
-
- >>> list(stagger([0, 1, 2, 3], longest=True))
- [(None, 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, None), (3, None, None)]
-
- By default, ``None`` will be used to replace offsets beyond the end of the
- sequence. Specify *fillvalue* to use some other value.
-
- """
- children = tee(iterable, len(offsets))
-
- return zip_offset(
- *children, offsets=offsets, longest=longest, fillvalue=fillvalue
- )
-
-
-class UnequalIterablesError(ValueError):
- def __init__(self, details=None):
- msg = 'Iterables have different lengths'
- if details is not None:
- msg += (': index 0 has length {}; index {} has length {}').format(
- *details
- )
-
- super().__init__(msg)
-
-
-def _zip_equal_generator(iterables):
- for combo in zip_longest(*iterables, fillvalue=_marker):
- for val in combo:
- if val is _marker:
- raise UnequalIterablesError()
- yield combo
-
-
-def zip_equal(*iterables):
- """``zip`` the input *iterables* together, but raise
- ``UnequalIterablesError`` if they aren't all the same length.
-
- >>> it_1 = range(3)
- >>> it_2 = iter('abc')
- >>> list(zip_equal(it_1, it_2))
- [(0, 'a'), (1, 'b'), (2, 'c')]
-
- >>> it_1 = range(3)
- >>> it_2 = iter('abcd')
- >>> list(zip_equal(it_1, it_2)) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- more_itertools.more.UnequalIterablesError: Iterables have different
- lengths
-
- """
- if hexversion >= 0x30A00A6:
- warnings.warn(
- (
- 'zip_equal will be removed in a future version of '
- 'more-itertools. Use the builtin zip function with '
- 'strict=True instead.'
- ),
- DeprecationWarning,
- )
- # Check whether the iterables are all the same size.
- try:
- first_size = len(iterables[0])
- for i, it in enumerate(iterables[1:], 1):
- size = len(it)
- if size != first_size:
- break
- else:
- # If we didn't break out, we can use the built-in zip.
- return zip(*iterables)
-
- # If we did break out, there was a mismatch.
- raise UnequalIterablesError(details=(first_size, i, size))
- # If any one of the iterables didn't have a length, start reading
- # them until one runs out.
- except TypeError:
- return _zip_equal_generator(iterables)
-
-
-def zip_offset(*iterables, offsets, longest=False, fillvalue=None):
- """``zip`` the input *iterables* together, but offset the `i`-th iterable
- by the `i`-th item in *offsets*.
-
- >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1)))
- [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e')]
-
- This can be used as a lightweight alternative to SciPy or pandas to analyze
- data sets in which some series have a lead or lag relationship.
-
- By default, the sequence will end when the shortest iterable is exhausted.
- To continue until the longest iterable is exhausted, set *longest* to
- ``True``.
-
- >>> list(zip_offset('0123', 'abcdef', offsets=(0, 1), longest=True))
- [('0', 'b'), ('1', 'c'), ('2', 'd'), ('3', 'e'), (None, 'f')]
-
- By default, ``None`` will be used to replace offsets beyond the end of the
- sequence. Specify *fillvalue* to use some other value.
-
- """
- if len(iterables) != len(offsets):
- raise ValueError("Number of iterables and offsets didn't match")
-
- staggered = []
- for it, n in zip(iterables, offsets):
- if n < 0:
- staggered.append(chain(repeat(fillvalue, -n), it))
- elif n > 0:
- staggered.append(islice(it, n, None))
- else:
- staggered.append(it)
-
- if longest:
- return zip_longest(*staggered, fillvalue=fillvalue)
-
- return zip(*staggered)
-
-
-def sort_together(iterables, key_list=(0,), key=None, reverse=False):
- """Return the input iterables sorted together, with *key_list* as the
- priority for sorting. All iterables are trimmed to the length of the
- shortest one.
-
- This can be used like the sorting function in a spreadsheet. If each
- iterable represents a column of data, the key list determines which
- columns are used for sorting.
-
- By default, all iterables are sorted using the ``0``-th iterable::
-
- >>> iterables = [(4, 3, 2, 1), ('a', 'b', 'c', 'd')]
- >>> sort_together(iterables)
- [(1, 2, 3, 4), ('d', 'c', 'b', 'a')]
-
- Set a different key list to sort according to another iterable.
- Specifying multiple keys dictates how ties are broken::
-
- >>> iterables = [(3, 1, 2), (0, 1, 0), ('c', 'b', 'a')]
- >>> sort_together(iterables, key_list=(1, 2))
- [(2, 3, 1), (0, 0, 1), ('a', 'c', 'b')]
-
- To sort by a function of the elements of the iterable, pass a *key*
- function. Its arguments are the elements of the iterables corresponding to
- the key list::
-
- >>> names = ('a', 'b', 'c')
- >>> lengths = (1, 2, 3)
- >>> widths = (5, 2, 1)
- >>> def area(length, width):
- ... return length * width
- >>> sort_together([names, lengths, widths], key_list=(1, 2), key=area)
- [('c', 'b', 'a'), (3, 2, 1), (1, 2, 5)]
-
- Set *reverse* to ``True`` to sort in descending order.
-
- >>> sort_together([(1, 2, 3), ('c', 'b', 'a')], reverse=True)
- [(3, 2, 1), ('a', 'b', 'c')]
-
- """
- if key is None:
- # if there is no key function, the key argument to sorted is an
- # itemgetter
- key_argument = itemgetter(*key_list)
- else:
- # if there is a key function, call it with the items at the offsets
- # specified by the key function as arguments
- key_list = list(key_list)
- if len(key_list) == 1:
- # if key_list contains a single item, pass the item at that offset
- # as the only argument to the key function
- key_offset = key_list[0]
- key_argument = lambda zipped_items: key(zipped_items[key_offset])
- else:
- # if key_list contains multiple items, use itemgetter to return a
- # tuple of items, which we pass as *args to the key function
- get_key_items = itemgetter(*key_list)
- key_argument = lambda zipped_items: key(
- *get_key_items(zipped_items)
- )
-
- return list(
- zip(*sorted(zip(*iterables), key=key_argument, reverse=reverse))
- )
-
-
-def unzip(iterable):
- """The inverse of :func:`zip`, this function disaggregates the elements
- of the zipped *iterable*.
-
- The ``i``-th iterable contains the ``i``-th element from each element
- of the zipped iterable. The first element is used to determine the
- length of the remaining elements.
-
- >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
- >>> letters, numbers = unzip(iterable)
- >>> list(letters)
- ['a', 'b', 'c', 'd']
- >>> list(numbers)
- [1, 2, 3, 4]
-
- This is similar to using ``zip(*iterable)``, but it avoids reading
- *iterable* into memory. Note, however, that this function uses
- :func:`itertools.tee` and thus may require significant storage.
-
- """
- head, iterable = spy(iter(iterable))
- if not head:
- # empty iterable, e.g. zip([], [], [])
- return ()
- # spy returns a one-length iterable as head
- head = head[0]
- iterables = tee(iterable, len(head))
-
- def itemgetter(i):
- def getter(obj):
- try:
- return obj[i]
- except IndexError:
- # basically if we have an iterable like
- # iter([(1, 2, 3), (4, 5), (6,)])
- # the second unzipped iterable would fail at the third tuple
- # since it would try to access tup[1]
- # same with the third unzipped iterable and the second tuple
- # to support these "improperly zipped" iterables,
- # we create a custom itemgetter
- # which just stops the unzipped iterables
- # at first length mismatch
- raise StopIteration
-
- return getter
-
- return tuple(map(itemgetter(i), it) for i, it in enumerate(iterables))
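-
-
-# Editor's note (illustrative, not part of the vendored source): the
-# StopIteration raised by `getter` propagates out of map.__next__, which
-# consumers read as exhaustion; inside a generator, PEP 479 would turn it
-# into RuntimeError, hence map rather than a genexp. This demo helper is
-# hypothetical:
-def _demo_unzip_ragged():
- letters, numbers = unzip([('a', 1), ('b', 2), ('c',)])
- assert list(letters) == ['a', 'b', 'c']
- assert list(numbers) == [1, 2]  # stops at the first short tuple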
-
-
-def divide(n, iterable):
- """Divide the elements from *iterable* into *n* parts, maintaining
- order.
-
- >>> group_1, group_2 = divide(2, [1, 2, 3, 4, 5, 6])
- >>> list(group_1)
- [1, 2, 3]
- >>> list(group_2)
- [4, 5, 6]
-
- If the length of *iterable* is not evenly divisible by *n*, then the
- length of the returned iterables will not be identical:
-
- >>> children = divide(3, [1, 2, 3, 4, 5, 6, 7])
- >>> [list(c) for c in children]
- [[1, 2, 3], [4, 5], [6, 7]]
-
- If the length of the iterable is smaller than n, then the last returned
- iterables will be empty:
-
- >>> children = divide(5, [1, 2, 3])
- >>> [list(c) for c in children]
- [[1], [2], [3], [], []]
-
- This function will exhaust the iterable before returning and may require
- significant storage. If order is not important, see :func:`distribute`,
- which does not first pull the iterable into memory.
-
- """
- if n < 1:
- raise ValueError('n must be at least 1')
-
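-    # Use the input directly if it supports slicing; otherwise realize it
-    # as a tuple so it can be sliced below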
- try:
- iterable[:0]
- except TypeError:
- seq = tuple(iterable)
- else:
- seq = iterable
-
- q, r = divmod(len(seq), n)
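-    # q is the minimum size of each part; the first r parts each get one
-    # extra item. For example, divmod(7, 3) == (2, 1) gives sizes 3, 2, 2.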
-
- ret = []
- stop = 0
- for i in range(1, n + 1):
- start = stop
- stop += q + 1 if i <= r else q
- ret.append(iter(seq[start:stop]))
-
- return ret
-
-
-def always_iterable(obj, base_type=(str, bytes)):
- """If *obj* is iterable, return an iterator over its items::
-
- >>> obj = (1, 2, 3)
- >>> list(always_iterable(obj))
- [1, 2, 3]
-
- If *obj* is not iterable, return a one-item iterable containing *obj*::
-
- >>> obj = 1
- >>> list(always_iterable(obj))
- [1]
-
- If *obj* is ``None``, return an empty iterable:
-
- >>> obj = None
- >>> list(always_iterable(None))
- []
-
- By default, binary and text strings are not considered iterable::
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj))
- ['foo']
-
- If *base_type* is set, objects for which ``isinstance(obj, base_type)``
- returns ``True`` won't be considered iterable.
-
- >>> obj = {'a': 1}
- >>> list(always_iterable(obj)) # Iterate over the dict's keys
- ['a']
- >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
- [{'a': 1}]
-
- Set *base_type* to ``None`` to avoid any special handling and treat objects
- Python considers iterable as iterable:
-
- >>> obj = 'foo'
- >>> list(always_iterable(obj, base_type=None))
- ['f', 'o', 'o']
- """
- if obj is None:
- return iter(())
-
- if (base_type is not None) and isinstance(obj, base_type):
- return iter((obj,))
-
- try:
- return iter(obj)
- except TypeError:
- return iter((obj,))
-
-
-def adjacent(predicate, iterable, distance=1):
- """Return an iterable over `(bool, item)` tuples where the `item` is
- drawn from *iterable* and the `bool` indicates whether
- that item satisfies the *predicate* or is adjacent to an item that does.
-
- For example, to find whether items are adjacent to a ``3``::
-
- >>> list(adjacent(lambda x: x == 3, range(6)))
- [(False, 0), (False, 1), (True, 2), (True, 3), (True, 4), (False, 5)]
-
- Set *distance* to change what counts as adjacent. For example, to find
- whether items are two places away from a ``3``:
-
- >>> list(adjacent(lambda x: x == 3, range(6), distance=2))
- [(False, 0), (True, 1), (True, 2), (True, 3), (True, 4), (True, 5)]
-
- This is useful for contextualizing the results of a search function.
- For example, a code comparison tool might want to identify lines that
- have changed, but also surrounding lines to give the viewer of the diff
- context.
-
- The predicate function will only be called once for each item in the
- iterable.
-
- See also :func:`groupby_transform`, which can be used with this function
- to group ranges of items with the same `bool` value.
-
- """
- # Allow distance=0 mainly for testing that it reproduces results with map()
- if distance < 0:
- raise ValueError('distance must be at least 0')
-
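-    # Pad the predicate results with `distance` False values on each side
-    # so that every item has a full window of width 2 * distance + 1
-    # centered on it; an item is marked True if anything in its window is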
- i1, i2 = tee(iterable)
- padding = [False] * distance
- selected = chain(padding, map(predicate, i1), padding)
- adjacent_to_selected = map(any, windowed(selected, 2 * distance + 1))
- return zip(adjacent_to_selected, i2)
-
-
-def groupby_transform(iterable, keyfunc=None, valuefunc=None, reducefunc=None):
- """An extension of :func:`itertools.groupby` that can apply transformations
- to the grouped data.
-
- * *keyfunc* is a function computing a key value for each item in *iterable*
- * *valuefunc* is a function that transforms the individual items from
- *iterable* after grouping
- * *reducefunc* is a function that transforms each group of items
-
- >>> iterable = 'aAAbBBcCC'
- >>> keyfunc = lambda k: k.upper()
- >>> valuefunc = lambda v: v.lower()
- >>> reducefunc = lambda g: ''.join(g)
- >>> list(groupby_transform(iterable, keyfunc, valuefunc, reducefunc))
- [('A', 'aaa'), ('B', 'bbb'), ('C', 'ccc')]
-
- Each optional argument defaults to an identity function if not specified.
-
- :func:`groupby_transform` is useful when grouping elements of an iterable
- using a separate iterable as the key. To do this, :func:`zip` the iterables
- and pass a *keyfunc* that extracts the first element and a *valuefunc*
- that extracts the second element::
-
- >>> from operator import itemgetter
- >>> keys = [0, 0, 1, 1, 1, 2, 2, 2, 3]
- >>> values = 'abcdefghi'
- >>> iterable = zip(keys, values)
- >>> grouper = groupby_transform(iterable, itemgetter(0), itemgetter(1))
- >>> [(k, ''.join(g)) for k, g in grouper]
- [(0, 'ab'), (1, 'cde'), (2, 'fgh'), (3, 'i')]
-
- Note that the order of items in the iterable is significant.
- Only adjacent items are grouped together, so if you don't want any
- duplicate groups, you should sort the iterable by the key function.
-
- """
- ret = groupby(iterable, keyfunc)
- if valuefunc:
- ret = ((k, map(valuefunc, g)) for k, g in ret)
- if reducefunc:
- ret = ((k, reducefunc(g)) for k, g in ret)
-
- return ret
-
-
-class numeric_range(abc.Sequence, abc.Hashable):
- """An extension of the built-in ``range()`` function whose arguments can
- be any orderable numeric type.
-
- With only *stop* specified, *start* defaults to ``0`` and *step*
- defaults to ``1``. The output items will match the type of *stop*:
-
- >>> list(numeric_range(3.5))
- [0.0, 1.0, 2.0, 3.0]
-
- With only *start* and *stop* specified, *step* defaults to ``1``. The
- output items will match the type of *start*:
-
- >>> from decimal import Decimal
- >>> start = Decimal('2.1')
- >>> stop = Decimal('5.1')
- >>> list(numeric_range(start, stop))
- [Decimal('2.1'), Decimal('3.1'), Decimal('4.1')]
-
- With *start*, *stop*, and *step* specified the output items will match
- the type of ``start + step``:
-
- >>> from fractions import Fraction
- >>> start = Fraction(1, 2) # Start at 1/2
- >>> stop = Fraction(5, 2) # End at 5/2
- >>> step = Fraction(1, 2) # Count by 1/2
- >>> list(numeric_range(start, stop, step))
- [Fraction(1, 2), Fraction(1, 1), Fraction(3, 2), Fraction(2, 1)]
-
- If *step* is zero, ``ValueError`` is raised. Negative steps are supported:
-
- >>> list(numeric_range(3, -1, -1.0))
- [3.0, 2.0, 1.0, 0.0]
-
- Be aware of the limitations of floating point numbers; the representation
- of the yielded numbers may be surprising.
-
- ``datetime.datetime`` objects can be used for *start* and *stop*, if *step*
- is a ``datetime.timedelta`` object:
-
- >>> import datetime
- >>> start = datetime.datetime(2019, 1, 1)
- >>> stop = datetime.datetime(2019, 1, 3)
- >>> step = datetime.timedelta(days=1)
- >>> items = iter(numeric_range(start, stop, step))
- >>> next(items)
- datetime.datetime(2019, 1, 1, 0, 0)
- >>> next(items)
- datetime.datetime(2019, 1, 2, 0, 0)
-
- """
-
- _EMPTY_HASH = hash(range(0, 0))
-
- def __init__(self, *args):
- argc = len(args)
- if argc == 1:
- (self._stop,) = args
- self._start = type(self._stop)(0)
- self._step = type(self._stop - self._start)(1)
- elif argc == 2:
- self._start, self._stop = args
- self._step = type(self._stop - self._start)(1)
- elif argc == 3:
- self._start, self._stop, self._step = args
- elif argc == 0:
- raise TypeError(
- 'numeric_range expected at least '
- '1 argument, got {}'.format(argc)
- )
- else:
- raise TypeError(
- 'numeric_range expected at most '
- '3 arguments, got {}'.format(argc)
- )
-
- self._zero = type(self._step)(0)
- if self._step == self._zero:
- raise ValueError('numeric_range() arg 3 must not be zero')
- self._growing = self._step > self._zero
- self._init_len()
-
- def __bool__(self):
- if self._growing:
- return self._start < self._stop
- else:
- return self._start > self._stop
-
- def __contains__(self, elem):
- if self._growing:
- if self._start <= elem < self._stop:
- return (elem - self._start) % self._step == self._zero
- else:
- if self._start >= elem > self._stop:
- return (self._start - elem) % (-self._step) == self._zero
-
- return False
-
- def __eq__(self, other):
- if isinstance(other, numeric_range):
- empty_self = not bool(self)
- empty_other = not bool(other)
- if empty_self or empty_other:
- return empty_self and empty_other # True if both empty
- else:
- return (
- self._start == other._start
- and self._step == other._step
- and self._get_by_index(-1) == other._get_by_index(-1)
- )
- else:
- return False
-
- def __getitem__(self, key):
- if isinstance(key, int):
- return self._get_by_index(key)
- elif isinstance(key, slice):
- step = self._step if key.step is None else key.step * self._step
-
- if key.start is None or key.start <= -self._len:
- start = self._start
- elif key.start >= self._len:
- start = self._stop
- else: # -self._len < key.start < self._len
- start = self._get_by_index(key.start)
-
- if key.stop is None or key.stop >= self._len:
- stop = self._stop
- elif key.stop <= -self._len:
- stop = self._start
- else: # -self._len < key.stop < self._len
- stop = self._get_by_index(key.stop)
-
- return numeric_range(start, stop, step)
- else:
- raise TypeError(
- 'numeric range indices must be '
- 'integers or slices, not {}'.format(type(key).__name__)
- )
-
- def __hash__(self):
- if self:
- return hash((self._start, self._get_by_index(-1), self._step))
- else:
- return self._EMPTY_HASH
-
- def __iter__(self):
- values = (self._start + (n * self._step) for n in count())
- if self._growing:
- return takewhile(partial(gt, self._stop), values)
- else:
- return takewhile(partial(lt, self._stop), values)
-
- def __len__(self):
- return self._len
-
- def _init_len(self):
- if self._growing:
- start = self._start
- stop = self._stop
- step = self._step
- else:
- start = self._stop
- stop = self._start
- step = -self._step
- distance = stop - start
- if distance <= self._zero:
- self._len = 0
- else: # distance > 0 and step > 0: regular euclidean division
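-            # e.g. numeric_range(1, 10, 2): distance = 9, step = 2, so
-            # divmod gives q = 4, r = 1 and the length is 5 (1, 3, 5, 7, 9)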
- q, r = divmod(distance, step)
- self._len = int(q) + int(r != self._zero)
-
- def __reduce__(self):
- return numeric_range, (self._start, self._stop, self._step)
-
- def __repr__(self):
- if self._step == 1:
- return "numeric_range({}, {})".format(
- repr(self._start), repr(self._stop)
- )
- else:
- return "numeric_range({}, {}, {})".format(
- repr(self._start), repr(self._stop), repr(self._step)
- )
-
- def __reversed__(self):
- return iter(
- numeric_range(
- self._get_by_index(-1), self._start - self._step, -self._step
- )
- )
-
- def count(self, value):
- return int(value in self)
-
- def index(self, value):
- if self._growing:
- if self._start <= value < self._stop:
- q, r = divmod(value - self._start, self._step)
- if r == self._zero:
- return int(q)
- else:
- if self._start >= value > self._stop:
- q, r = divmod(self._start - value, -self._step)
- if r == self._zero:
- return int(q)
-
- raise ValueError("{} is not in numeric range".format(value))
-
- def _get_by_index(self, i):
- if i < 0:
- i += self._len
- if i < 0 or i >= self._len:
- raise IndexError("numeric range object index out of range")
- return self._start + i * self._step
-
-
-def count_cycle(iterable, n=None):
- """Cycle through the items from *iterable* up to *n* times, yielding
- the number of completed cycles along with each item. If *n* is omitted the
- process repeats indefinitely.
-
- >>> list(count_cycle('AB', 3))
- [(0, 'A'), (0, 'B'), (1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')]
-
- """
- iterable = tuple(iterable)
- if not iterable:
- return iter(())
- counter = count() if n is None else range(n)
- return ((i, item) for i in counter for item in iterable)
-
-
-def mark_ends(iterable):
- """Yield 3-tuples of the form ``(is_first, is_last, item)``.
-
- >>> list(mark_ends('ABC'))
- [(True, False, 'A'), (False, False, 'B'), (False, True, 'C')]
-
- Use this when looping over an iterable to take special action on its first
- and/or last items:
-
- >>> iterable = ['Header', 100, 200, 'Footer']
- >>> total = 0
- >>> for is_first, is_last, item in mark_ends(iterable):
- ... if is_first:
- ... continue # Skip the header
- ... if is_last:
- ... continue # Skip the footer
- ... total += item
- >>> print(total)
- 300
- """
- it = iter(iterable)
-
- try:
- b = next(it)
- except StopIteration:
- return
-
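-    # Keep one item of lookahead in b; when the source is exhausted, the
-    # item still held in a is known to be the last one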
- try:
- for i in count():
- a = b
- b = next(it)
- yield i == 0, False, a
-
- except StopIteration:
- yield i == 0, True, a
-
-
-def locate(iterable, pred=bool, window_size=None):
- """Yield the index of each item in *iterable* for which *pred* returns
- ``True``.
-
- *pred* defaults to :func:`bool`, which will select truthy items:
-
- >>> list(locate([0, 1, 1, 0, 1, 0, 0]))
- [1, 2, 4]
-
- Set *pred* to a custom function to, e.g., find the indexes for a particular
- item.
-
- >>> list(locate(['a', 'b', 'c', 'b'], lambda x: x == 'b'))
- [1, 3]
-
- If *window_size* is given, then the *pred* function will be called with
- that many items. This enables searching for sub-sequences:
-
- >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
- >>> pred = lambda *args: args == (1, 2, 3)
- >>> list(locate(iterable, pred=pred, window_size=3))
- [1, 5, 9]
-
- Use with :func:`seekable` to find indexes and then retrieve the associated
- items:
-
- >>> from itertools import count
- >>> from more_itertools import seekable
- >>> source = (3 * n + 1 if (n % 2) else n // 2 for n in count())
- >>> it = seekable(source)
- >>> pred = lambda x: x > 100
- >>> indexes = locate(it, pred=pred)
- >>> i = next(indexes)
- >>> it.seek(i)
- >>> next(it)
- 106
-
- """
- if window_size is None:
- return compress(count(), map(pred, iterable))
-
- if window_size < 1:
- raise ValueError('window size must be at least 1')
-
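-    # If the iterable is shorter than window_size, windowed() pads its
-    # single window with _marker so pred still gets window_size arguments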
- it = windowed(iterable, window_size, fillvalue=_marker)
- return compress(count(), starmap(pred, it))
-
-
-def lstrip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the beginning
- for which *pred* returns ``True``.
-
- For example, to remove a set of items from the start of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(lstrip(iterable, pred))
- [1, 2, None, 3, False, None]
-
-    This function is analogous to :func:`str.lstrip`, and is essentially
-    a wrapper for :func:`itertools.dropwhile`.
-
- """
- return dropwhile(pred, iterable)
-
-
-def rstrip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the end
- for which *pred* returns ``True``.
-
- For example, to remove a set of items from the end of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(rstrip(iterable, pred))
- [None, False, None, 1, 2, None, 3]
-
- This function is analogous to :func:`str.rstrip`.
-
- """
- cache = []
- cache_append = cache.append
- cache_clear = cache.clear
- for x in iterable:
- if pred(x):
- cache_append(x)
- else:
- yield from cache
- cache_clear()
- yield x
-
-
-def strip(iterable, pred):
- """Yield the items from *iterable*, but strip any from the
- beginning and end for which *pred* returns ``True``.
-
- For example, to remove a set of items from both ends of an iterable:
-
- >>> iterable = (None, False, None, 1, 2, None, 3, False, None)
- >>> pred = lambda x: x in {None, False, ''}
- >>> list(strip(iterable, pred))
- [1, 2, None, 3]
-
- This function is analogous to :func:`str.strip`.
-
- """
- return rstrip(lstrip(iterable, pred), pred)
-
-
-class islice_extended:
- """An extension of :func:`itertools.islice` that supports negative values
- for *stop*, *start*, and *step*.
-
- >>> iterable = iter('abcdefgh')
- >>> list(islice_extended(iterable, -4, -1))
- ['e', 'f', 'g']
-
- Slices with negative values require some caching of *iterable*, but this
- function takes care to minimize the amount of memory required.
-
- For example, you can use a negative step with an infinite iterator:
-
- >>> from itertools import count
- >>> list(islice_extended(count(), 110, 99, -2))
- [110, 108, 106, 104, 102, 100]
-
- You can also use slice notation directly:
-
- >>> iterable = map(str, count())
- >>> it = islice_extended(iterable)[10:20:2]
- >>> list(it)
- ['10', '12', '14', '16', '18']
-
- """
-
- def __init__(self, iterable, *args):
- it = iter(iterable)
- if args:
- self._iterable = _islice_helper(it, slice(*args))
- else:
- self._iterable = it
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self._iterable)
-
- def __getitem__(self, key):
- if isinstance(key, slice):
- return islice_extended(_islice_helper(self._iterable, key))
-
- raise TypeError('islice_extended.__getitem__ argument must be a slice')
-
-
-def _islice_helper(it, s):
- start = s.start
- stop = s.stop
- if s.step == 0:
- raise ValueError('step argument must be a non-zero integer or None.')
- step = s.step or 1
-
- if step > 0:
- start = 0 if (start is None) else start
-
- if start < 0:
- # Consume all but the last -start items
- cache = deque(enumerate(it, 1), maxlen=-start)
- len_iter = cache[-1][0] if cache else 0
-
- # Adjust start to be positive
- i = max(len_iter + start, 0)
-
- # Adjust stop to be positive
- if stop is None:
- j = len_iter
- elif stop >= 0:
- j = min(stop, len_iter)
- else:
- j = max(len_iter + stop, 0)
-
- # Slice the cache
- n = j - i
- if n <= 0:
- return
-
- for index, item in islice(cache, 0, n, step):
- yield item
- elif (stop is not None) and (stop < 0):
- # Advance to the start position
- next(islice(it, start, start), None)
-
- # When stop is negative, we have to carry -stop items while
- # iterating
- cache = deque(islice(it, -stop), maxlen=-stop)
-
- for index, item in enumerate(it):
- cached_item = cache.popleft()
- if index % step == 0:
- yield cached_item
- cache.append(item)
- else:
- # When both start and stop are positive we have the normal case
- yield from islice(it, start, stop, step)
- else:
- start = -1 if (start is None) else start
-
- if (stop is not None) and (stop < 0):
- # Consume all but the last items
- n = -stop - 1
- cache = deque(enumerate(it, 1), maxlen=n)
- len_iter = cache[-1][0] if cache else 0
-
- # If start and stop are both negative they are comparable and
- # we can just slice. Otherwise we can adjust start to be negative
- # and then slice.
- if start < 0:
- i, j = start, stop
- else:
- i, j = min(start - len_iter, -1), None
-
- for index, item in list(cache)[i:j:step]:
- yield item
- else:
- # Advance to the stop position
- if stop is not None:
- m = stop + 1
- next(islice(it, m, m), None)
-
- # stop is positive, so if start is negative they are not comparable
- # and we need the rest of the items.
- if start < 0:
- i = start
- n = None
- # stop is None and start is positive, so we just need items up to
- # the start index.
- elif stop is None:
- i = None
- n = start + 1
- # Both stop and start are positive, so they are comparable.
- else:
- i = None
- n = start - stop
- if n <= 0:
- return
-
- cache = list(islice(it, n))
-
- yield from cache[i::step]
-
-
-def always_reversible(iterable):
- """An extension of :func:`reversed` that supports all iterables, not
- just those which implement the ``Reversible`` or ``Sequence`` protocols.
-
- >>> print(*always_reversible(x for x in range(3)))
- 2 1 0
-
- If the iterable is already reversible, this function returns the
- result of :func:`reversed()`. If the iterable is not reversible,
- this function will cache the remaining items in the iterable and
- yield them in reverse order, which may require significant storage.
- """
- try:
- return reversed(iterable)
- except TypeError:
- return reversed(list(iterable))
-
-
-def consecutive_groups(iterable, ordering=lambda x: x):
- """Yield groups of consecutive items using :func:`itertools.groupby`.
-    The *ordering* function determines whether two items are adjacent by
-    returning their position; items whose positions differ by exactly 1
-    are placed in the same group.
-
- By default, the ordering function is the identity function. This is
- suitable for finding runs of numbers:
-
- >>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
- >>> for group in consecutive_groups(iterable):
- ... print(list(group))
- [1]
- [10, 11, 12]
- [20]
- [30, 31, 32, 33]
- [40]
-
- For finding runs of adjacent letters, try using the :meth:`index` method
- of a string of letters:
-
- >>> from string import ascii_lowercase
- >>> iterable = 'abcdfgilmnop'
- >>> ordering = ascii_lowercase.index
- >>> for group in consecutive_groups(iterable, ordering):
- ... print(list(group))
- ['a', 'b', 'c', 'd']
- ['f', 'g']
- ['i']
- ['l', 'm', 'n', 'o', 'p']
-
-    Each group of consecutive items is an iterator that shares its source
-    with *iterable*. When an output group is advanced, the previous group is
- no longer available unless its elements are copied (e.g., into a ``list``).
-
- >>> iterable = [1, 2, 11, 12, 21, 22]
- >>> saved_groups = []
- >>> for group in consecutive_groups(iterable):
- ... saved_groups.append(list(group)) # Copy group elements
- >>> saved_groups
- [[1, 2], [11, 12], [21, 22]]
-
- """
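-    # Consecutive items have a constant difference between their
-    # enumeration index and their ordering value, so that difference is the
-    # groupby key. e.g. [1, 10, 11, 12] gives keys -1, -9, -9, -9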
- for k, g in groupby(
- enumerate(iterable), key=lambda x: x[0] - ordering(x[1])
- ):
- yield map(itemgetter(1), g)
-
-
-def difference(iterable, func=sub, *, initial=None):
- """This function is the inverse of :func:`itertools.accumulate`. By default
- it will compute the first difference of *iterable* using
- :func:`operator.sub`:
-
- >>> from itertools import accumulate
- >>> iterable = accumulate([0, 1, 2, 3, 4]) # produces 0, 1, 3, 6, 10
- >>> list(difference(iterable))
- [0, 1, 2, 3, 4]
-
- *func* defaults to :func:`operator.sub`, but other functions can be
- specified. They will be applied as follows::
-
- A, B, C, D, ... --> A, func(B, A), func(C, B), func(D, C), ...
-
- For example, to do progressive division:
-
- >>> iterable = [1, 2, 6, 24, 120]
- >>> func = lambda x, y: x // y
- >>> list(difference(iterable, func))
- [1, 2, 3, 4, 5]
-
- If the *initial* keyword is set, the first element will be skipped when
- computing successive differences.
-
- >>> it = [10, 11, 13, 16] # from accumulate([1, 2, 3], initial=10)
- >>> list(difference(it, initial=10))
- [1, 2, 3]
-
- """
- a, b = tee(iterable)
- try:
- first = [next(b)]
- except StopIteration:
- return iter([])
-
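-    # When initial is given, the first item of the input is the initial
-    # value itself, not a first difference, so it is dropped from the output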
- if initial is not None:
- first = []
-
- return chain(first, starmap(func, zip(b, a)))
-
-
-class SequenceView(Sequence):
- """Return a read-only view of the sequence object *target*.
-
- :class:`SequenceView` objects are analogous to Python's built-in
- "dictionary view" types. They provide a dynamic view of a sequence's items,
- meaning that when the sequence updates, so does the view.
-
- >>> seq = ['0', '1', '2']
- >>> view = SequenceView(seq)
- >>> view
- SequenceView(['0', '1', '2'])
- >>> seq.append('3')
- >>> view
- SequenceView(['0', '1', '2', '3'])
-
- Sequence views support indexing, slicing, and length queries. They act
- like the underlying sequence, except they don't allow assignment:
-
- >>> view[1]
- '1'
- >>> view[1:-1]
- ['1', '2']
- >>> len(view)
- 4
-
- Sequence views are useful as an alternative to copying, as they don't
- require (much) extra storage.
-
- """
-
- def __init__(self, target):
- if not isinstance(target, Sequence):
- raise TypeError
- self._target = target
-
- def __getitem__(self, index):
- return self._target[index]
-
- def __len__(self):
- return len(self._target)
-
- def __repr__(self):
- return '{}({})'.format(self.__class__.__name__, repr(self._target))
-
-
-class seekable:
- """Wrap an iterator to allow for seeking backward and forward. This
- progressively caches the items in the source iterable so they can be
- re-visited.
-
- Call :meth:`seek` with an index to seek to that position in the source
- iterable.
-
- To "reset" an iterator, seek to ``0``:
-
- >>> from itertools import count
- >>> it = seekable((str(n) for n in count()))
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> it.seek(0)
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> next(it)
- '3'
-
- You can also seek forward:
-
- >>> it = seekable((str(n) for n in range(20)))
- >>> it.seek(10)
- >>> next(it)
- '10'
- >>> it.seek(20) # Seeking past the end of the source isn't a problem
- >>> list(it)
- []
- >>> it.seek(0) # Resetting works even after hitting the end
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
-
- Call :meth:`peek` to look ahead one item without advancing the iterator:
-
- >>> it = seekable('1234')
- >>> it.peek()
- '1'
- >>> list(it)
- ['1', '2', '3', '4']
- >>> it.peek(default='empty')
- 'empty'
-
- Before the iterator is at its end, calling :func:`bool` on it will return
-    ``True``. After, it will return ``False``:
-
- >>> it = seekable('5678')
- >>> bool(it)
- True
- >>> list(it)
- ['5', '6', '7', '8']
- >>> bool(it)
- False
-
- You may view the contents of the cache with the :meth:`elements` method.
- That returns a :class:`SequenceView`, a view that updates automatically:
-
- >>> it = seekable((str(n) for n in range(10)))
- >>> next(it), next(it), next(it)
- ('0', '1', '2')
- >>> elements = it.elements()
- >>> elements
- SequenceView(['0', '1', '2'])
- >>> next(it)
- '3'
- >>> elements
- SequenceView(['0', '1', '2', '3'])
-
- By default, the cache grows as the source iterable progresses, so beware of
- wrapping very large or infinite iterables. Supply *maxlen* to limit the
- size of the cache (this of course limits how far back you can seek).
-
- >>> from itertools import count
- >>> it = seekable((str(n) for n in count()), maxlen=2)
- >>> next(it), next(it), next(it), next(it)
- ('0', '1', '2', '3')
- >>> list(it.elements())
- ['2', '3']
- >>> it.seek(0)
- >>> next(it), next(it), next(it), next(it)
- ('2', '3', '4', '5')
- >>> next(it)
- '6'
-
- """
-
- def __init__(self, iterable, maxlen=None):
- self._source = iter(iterable)
- if maxlen is None:
- self._cache = []
- else:
- self._cache = deque([], maxlen)
- self._index = None
-
- def __iter__(self):
- return self
-
- def __next__(self):
- if self._index is not None:
- try:
- item = self._cache[self._index]
- except IndexError:
- self._index = None
- else:
- self._index += 1
- return item
-
- item = next(self._source)
- self._cache.append(item)
- return item
-
- def __bool__(self):
- try:
- self.peek()
- except StopIteration:
- return False
- return True
-
- def peek(self, default=_marker):
- try:
- peeked = next(self)
- except StopIteration:
- if default is _marker:
- raise
- return default
- if self._index is None:
- self._index = len(self._cache)
- self._index -= 1
- return peeked
-
- def elements(self):
- return SequenceView(self._cache)
-
- def seek(self, index):
- self._index = index
- remainder = index - len(self._cache)
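-        # Seeking past the cached region advances the source iterator,
-        # caching the items it yields along the way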
- if remainder > 0:
- consume(self, remainder)
-
-
-class run_length:
- """
- :func:`run_length.encode` compresses an iterable with run-length encoding.
- It yields groups of repeated items with the count of how many times they
- were repeated:
-
- >>> uncompressed = 'abbcccdddd'
- >>> list(run_length.encode(uncompressed))
- [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
-
- :func:`run_length.decode` decompresses an iterable that was previously
- compressed with run-length encoding. It yields the items of the
- decompressed iterable:
-
- >>> compressed = [('a', 1), ('b', 2), ('c', 3), ('d', 4)]
- >>> list(run_length.decode(compressed))
- ['a', 'b', 'b', 'c', 'c', 'c', 'd', 'd', 'd', 'd']
-
- """
-
- @staticmethod
- def encode(iterable):
- return ((k, ilen(g)) for k, g in groupby(iterable))
-
- @staticmethod
- def decode(iterable):
- return chain.from_iterable(repeat(k, n) for k, n in iterable)
-
-
-def exactly_n(iterable, n, predicate=bool):
- """Return ``True`` if exactly ``n`` items in the iterable are ``True``
- according to the *predicate* function.
-
- >>> exactly_n([True, True, False], 2)
- True
- >>> exactly_n([True, True, False], 1)
- False
- >>> exactly_n([0, 1, 2, 3, 4, 5], 3, lambda x: x < 3)
- True
-
- The iterable will be advanced until ``n + 1`` truthy items are encountered,
- so avoid calling it on infinite iterables.
-
- """
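-    # Take up to n + 1 matching items: getting exactly n back distinguishes
-    # "exactly n" from "n or more" while examining as few items as possible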
- return len(take(n + 1, filter(predicate, iterable))) == n
-
-
-def circular_shifts(iterable):
- """Return a list of circular shifts of *iterable*.
-
- >>> circular_shifts(range(4))
- [(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
- """
- lst = list(iterable)
- return take(len(lst), windowed(cycle(lst), len(lst)))
-
-
-def make_decorator(wrapping_func, result_index=0):
- """Return a decorator version of *wrapping_func*, which is a function that
- modifies an iterable. *result_index* is the position in that function's
- signature where the iterable goes.
-
- This lets you use itertools on the "production end," i.e. at function
- definition. This can augment what the function returns without changing the
- function's code.
-
- For example, to produce a decorator version of :func:`chunked`:
-
- >>> from more_itertools import chunked
- >>> chunker = make_decorator(chunked, result_index=0)
- >>> @chunker(3)
- ... def iter_range(n):
- ... return iter(range(n))
- ...
- >>> list(iter_range(9))
- [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
-
- To only allow truthy items to be returned:
-
- >>> truth_serum = make_decorator(filter, result_index=1)
- >>> @truth_serum(bool)
- ... def boolean_test():
- ... return [0, 1, '', ' ', False, True]
- ...
- >>> list(boolean_test())
- [1, ' ', True]
-
- The :func:`peekable` and :func:`seekable` wrappers make for practical
- decorators:
-
- >>> from more_itertools import peekable
- >>> peekable_function = make_decorator(peekable)
- >>> @peekable_function()
- ... def str_range(*args):
- ... return (str(x) for x in range(*args))
- ...
- >>> it = str_range(1, 20, 2)
- >>> next(it), next(it), next(it)
- ('1', '3', '5')
- >>> it.peek()
- '7'
- >>> next(it)
- '7'
-
- """
- # See https://sites.google.com/site/bbayles/index/decorator_factory for
- # notes on how this works.
- def decorator(*wrapping_args, **wrapping_kwargs):
- def outer_wrapper(f):
- def inner_wrapper(*args, **kwargs):
- result = f(*args, **kwargs)
- wrapping_args_ = list(wrapping_args)
- wrapping_args_.insert(result_index, result)
- return wrapping_func(*wrapping_args_, **wrapping_kwargs)
-
- return inner_wrapper
-
- return outer_wrapper
-
- return decorator
-
-
-def map_reduce(iterable, keyfunc, valuefunc=None, reducefunc=None):
- """Return a dictionary that maps the items in *iterable* to categories
- defined by *keyfunc*, transforms them with *valuefunc*, and
- then summarizes them by category with *reducefunc*.
-
- *valuefunc* defaults to the identity function if it is unspecified.
- If *reducefunc* is unspecified, no summarization takes place:
-
- >>> keyfunc = lambda x: x.upper()
- >>> result = map_reduce('abbccc', keyfunc)
- >>> sorted(result.items())
- [('A', ['a']), ('B', ['b', 'b']), ('C', ['c', 'c', 'c'])]
-
- Specifying *valuefunc* transforms the categorized items:
-
- >>> keyfunc = lambda x: x.upper()
- >>> valuefunc = lambda x: 1
- >>> result = map_reduce('abbccc', keyfunc, valuefunc)
- >>> sorted(result.items())
- [('A', [1]), ('B', [1, 1]), ('C', [1, 1, 1])]
-
- Specifying *reducefunc* summarizes the categorized items:
-
- >>> keyfunc = lambda x: x.upper()
- >>> valuefunc = lambda x: 1
- >>> reducefunc = sum
- >>> result = map_reduce('abbccc', keyfunc, valuefunc, reducefunc)
- >>> sorted(result.items())
- [('A', 1), ('B', 2), ('C', 3)]
-
- You may want to filter the input iterable before applying the map/reduce
- procedure:
-
- >>> all_items = range(30)
- >>> items = [x for x in all_items if 10 <= x <= 20] # Filter
- >>> keyfunc = lambda x: x % 2 # Evens map to 0; odds to 1
- >>> categories = map_reduce(items, keyfunc=keyfunc)
- >>> sorted(categories.items())
- [(0, [10, 12, 14, 16, 18, 20]), (1, [11, 13, 15, 17, 19])]
- >>> summaries = map_reduce(items, keyfunc=keyfunc, reducefunc=sum)
- >>> sorted(summaries.items())
- [(0, 90), (1, 75)]
-
- Note that all items in the iterable are gathered into a list before the
- summarization step, which may require significant storage.
-
- The returned object is a :obj:`collections.defaultdict` with the
- ``default_factory`` set to ``None``, such that it behaves like a normal
- dictionary.
-
- """
- valuefunc = (lambda x: x) if (valuefunc is None) else valuefunc
-
- ret = defaultdict(list)
- for item in iterable:
- key = keyfunc(item)
- value = valuefunc(item)
- ret[key].append(value)
-
- if reducefunc is not None:
- for key, value_list in ret.items():
- ret[key] = reducefunc(value_list)
-
- ret.default_factory = None
- return ret
-
-
-def rlocate(iterable, pred=bool, window_size=None):
- """Yield the index of each item in *iterable* for which *pred* returns
- ``True``, starting from the right and moving left.
-
- *pred* defaults to :func:`bool`, which will select truthy items:
-
- >>> list(rlocate([0, 1, 1, 0, 1, 0, 0])) # Truthy at 1, 2, and 4
- [4, 2, 1]
-
- Set *pred* to a custom function to, e.g., find the indexes for a particular
- item:
-
- >>> iterable = iter('abcb')
- >>> pred = lambda x: x == 'b'
- >>> list(rlocate(iterable, pred))
- [3, 1]
-
- If *window_size* is given, then the *pred* function will be called with
- that many items. This enables searching for sub-sequences:
-
- >>> iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
- >>> pred = lambda *args: args == (1, 2, 3)
- >>> list(rlocate(iterable, pred=pred, window_size=3))
- [9, 5, 1]
-
- Beware, this function won't return anything for infinite iterables.
- If *iterable* is reversible, ``rlocate`` will reverse it and search from
- the right. Otherwise, it will search from the left and return the results
- in reverse order.
-
-    See :func:`locate` for other example applications.
-
- """
- if window_size is None:
- try:
- len_iter = len(iterable)
- return (len_iter - i - 1 for i in locate(reversed(iterable), pred))
- except TypeError:
- pass
-
- return reversed(list(locate(iterable, pred, window_size)))
-
-
-def replace(iterable, pred, substitutes, count=None, window_size=1):
- """Yield the items from *iterable*, replacing the items for which *pred*
- returns ``True`` with the items from the iterable *substitutes*.
-
- >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1]
- >>> pred = lambda x: x == 0
- >>> substitutes = (2, 3)
- >>> list(replace(iterable, pred, substitutes))
- [1, 1, 2, 3, 1, 1, 2, 3, 1, 1]
-
- If *count* is given, the number of replacements will be limited:
-
- >>> iterable = [1, 1, 0, 1, 1, 0, 1, 1, 0]
- >>> pred = lambda x: x == 0
- >>> substitutes = [None]
- >>> list(replace(iterable, pred, substitutes, count=2))
- [1, 1, None, 1, 1, None, 1, 1, 0]
-
- Use *window_size* to control the number of items passed as arguments to
- *pred*. This allows for locating and replacing subsequences.
-
- >>> iterable = [0, 1, 2, 5, 0, 1, 2, 5]
- >>> window_size = 3
- >>> pred = lambda *args: args == (0, 1, 2) # 3 items passed to pred
- >>> substitutes = [3, 4] # Splice in these items
- >>> list(replace(iterable, pred, substitutes, window_size=window_size))
- [3, 4, 5, 3, 4, 5]
-
- """
- if window_size < 1:
- raise ValueError('window_size must be at least 1')
-
- # Save the substitutes iterable, since it's used more than once
- substitutes = tuple(substitutes)
-
- # Add padding such that the number of windows matches the length of the
- # iterable
- it = chain(iterable, [_marker] * (window_size - 1))
- windows = windowed(it, window_size)
-
- n = 0
- for w in windows:
- # If the current window matches our predicate (and we haven't hit
- # our maximum number of replacements), splice in the substitutes
- # and then consume the following windows that overlap with this one.
- # For example, if the iterable is (0, 1, 2, 3, 4...)
- # and the window size is 2, we have (0, 1), (1, 2), (2, 3)...
- # If the predicate matches on (0, 1), we need to zap (0, 1) and (1, 2)
- if pred(*w):
- if (count is None) or (n < count):
- n += 1
- yield from substitutes
- consume(windows, window_size - 1)
- continue
-
- # If there was no match (or we've reached the replacement limit),
- # yield the first item from the window.
- if w and (w[0] is not _marker):
- yield w[0]
-
-
-def partitions(iterable):
- """Yield all possible order-preserving partitions of *iterable*.
-
- >>> iterable = 'abc'
- >>> for part in partitions(iterable):
- ... print([''.join(p) for p in part])
- ['abc']
- ['a', 'bc']
- ['ab', 'c']
- ['a', 'b', 'c']
-
- This is unrelated to :func:`partition`.
-
- """
- sequence = list(iterable)
- n = len(sequence)
- for i in powerset(range(1, n)):
- yield [sequence[i:j] for i, j in zip((0,) + i, i + (n,))]
-
-
-def set_partitions(iterable, k=None):
- """
- Yield the set partitions of *iterable* into *k* parts. Set partitions are
- not order-preserving.
-
- >>> iterable = 'abc'
- >>> for part in set_partitions(iterable, 2):
- ... print([''.join(p) for p in part])
- ['a', 'bc']
- ['ab', 'c']
- ['b', 'ac']
-
- If *k* is not given, every set partition is generated.
-
- >>> iterable = 'abc'
- >>> for part in set_partitions(iterable):
- ... print([''.join(p) for p in part])
- ['abc']
- ['a', 'bc']
- ['ab', 'c']
- ['b', 'ac']
- ['a', 'b', 'c']
-
- """
- L = list(iterable)
- n = len(L)
- if k is not None:
- if k < 1:
- raise ValueError(
- "Can't partition in a negative or zero number of groups"
- )
- elif k > n:
- return
-
- def set_partitions_helper(L, k):
- n = len(L)
- if k == 1:
- yield [L]
- elif n == k:
- yield [[s] for s in L]
- else:
- e, *M = L
- for p in set_partitions_helper(M, k - 1):
- yield [[e], *p]
- for p in set_partitions_helper(M, k):
- for i in range(len(p)):
- yield p[:i] + [[e] + p[i]] + p[i + 1 :]
-
- if k is None:
- for k in range(1, n + 1):
- yield from set_partitions_helper(L, k)
- else:
- yield from set_partitions_helper(L, k)
-
-
-class time_limited:
- """
- Yield items from *iterable* until *limit_seconds* have passed.
- If the time limit expires before all items have been yielded, the
-    ``timed_out`` attribute will be set to ``True``.
-
- >>> from time import sleep
- >>> def generator():
- ... yield 1
- ... yield 2
- ... sleep(0.2)
- ... yield 3
- >>> iterable = time_limited(0.1, generator())
- >>> list(iterable)
- [1, 2]
- >>> iterable.timed_out
- True
-
- Note that the time is checked before each item is yielded, and iteration
- stops if the time elapsed is greater than *limit_seconds*. If your time
- limit is 1 second, but it takes 2 seconds to generate the first item from
- the iterable, the function will run for 2 seconds and not yield anything.
-
- """
-
- def __init__(self, limit_seconds, iterable):
- if limit_seconds < 0:
-            raise ValueError('limit_seconds must be non-negative')
- self.limit_seconds = limit_seconds
- self._iterable = iter(iterable)
- self._start_time = monotonic()
- self.timed_out = False
-
- def __iter__(self):
- return self
-
- def __next__(self):
- item = next(self._iterable)
- if monotonic() - self._start_time > self.limit_seconds:
- self.timed_out = True
- raise StopIteration
-
- return item
-
-
-def only(iterable, default=None, too_long=None):
- """If *iterable* has only one item, return it.
- If it has zero items, return *default*.
- If it has more than one item, raise the exception given by *too_long*,
- which is ``ValueError`` by default.
-
- >>> only([], default='missing')
- 'missing'
- >>> only([1])
- 1
- >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ValueError: Expected exactly one item in iterable, but got 1, 2,
-    and perhaps more.
- >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- TypeError
-
- Note that :func:`only` attempts to advance *iterable* twice to ensure there
- is only one item. See :func:`spy` or :func:`peekable` to check
- iterable contents less destructively.
- """
- it = iter(iterable)
- first_value = next(it, default)
-
- try:
- second_value = next(it)
- except StopIteration:
- pass
- else:
- msg = (
- 'Expected exactly one item in iterable, but got {!r}, {!r}, '
- 'and perhaps more.'.format(first_value, second_value)
- )
- raise too_long or ValueError(msg)
-
- return first_value
-
-
-def ichunked(iterable, n):
- """Break *iterable* into sub-iterables with *n* elements each.
- :func:`ichunked` is like :func:`chunked`, but it yields iterables
- instead of lists.
-
- If the sub-iterables are read in order, the elements of *iterable*
- won't be stored in memory.
- If they are read out of order, :func:`itertools.tee` is used to cache
- elements as necessary.
-
- >>> from itertools import count
- >>> all_chunks = ichunked(count(), 4)
- >>> c_1, c_2, c_3 = next(all_chunks), next(all_chunks), next(all_chunks)
- >>> list(c_2) # c_1's elements have been cached; c_3's haven't been
- [4, 5, 6, 7]
- >>> list(c_1)
- [0, 1, 2, 3]
- >>> list(c_3)
- [8, 9, 10, 11]
-
- """
- source = iter(iterable)
-
- while True:
- # Check to see whether we're at the end of the source iterable
- item = next(source, _marker)
- if item is _marker:
- return
-
- # Clone the source and yield an n-length slice
- source, it = tee(chain([item], source))
- yield islice(it, n)
-
- # Advance the source iterable
- consume(source, n)
-
-
-def distinct_combinations(iterable, r):
- """Yield the distinct combinations of *r* items taken from *iterable*.
-
- >>> list(distinct_combinations([0, 0, 1], 2))
- [(0, 0), (0, 1)]
-
-    Equivalent to ``set(combinations(iterable, r))``, except duplicates are
- generated and thrown away. For larger input sequences this is much more
- efficient.
-
- """
- if r < 0:
- raise ValueError('r must be non-negative')
- elif r == 0:
- yield ()
- return
- pool = tuple(iterable)
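-    # Maintain a stack of generators, one per position in the combination;
-    # each yields the distinct pool items available for that position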
- generators = [unique_everseen(enumerate(pool), key=itemgetter(1))]
- current_combo = [None] * r
- level = 0
- while generators:
- try:
- cur_idx, p = next(generators[-1])
- except StopIteration:
- generators.pop()
- level -= 1
- continue
- current_combo[level] = p
- if level + 1 == r:
- yield tuple(current_combo)
- else:
- generators.append(
- unique_everseen(
- enumerate(pool[cur_idx + 1 :], cur_idx + 1),
- key=itemgetter(1),
- )
- )
- level += 1
-
-
-def filter_except(validator, iterable, *exceptions):
- """Yield the items from *iterable* for which the *validator* function does
- not raise one of the specified *exceptions*.
-
- *validator* is called for each item in *iterable*.
- It should be a function that accepts one argument and raises an exception
- if that item is not valid.
-
- >>> iterable = ['1', '2', 'three', '4', None]
- >>> list(filter_except(int, iterable, ValueError, TypeError))
- ['1', '2', '4']
-
- If an exception other than one given by *exceptions* is raised by
- *validator*, it is raised like normal.
- """
- for item in iterable:
- try:
- validator(item)
- except exceptions:
- pass
- else:
- yield item
-
-
-def map_except(function, iterable, *exceptions):
- """Transform each item from *iterable* with *function* and yield the
- result, unless *function* raises one of the specified *exceptions*.
-
- *function* is called to transform each item in *iterable*.
-    It should accept one argument.
-
- >>> iterable = ['1', '2', 'three', '4', None]
- >>> list(map_except(int, iterable, ValueError, TypeError))
- [1, 2, 4]
-
- If an exception other than one given by *exceptions* is raised by
- *function*, it is raised like normal.
- """
- for item in iterable:
- try:
- yield function(item)
- except exceptions:
- pass
-
-
-def _sample_unweighted(iterable, k):
- # Implementation of "Algorithm L" from the 1994 paper by Kim-Hung Li:
- # "Reservoir-Sampling Algorithms of Time Complexity O(n(1+log(N/n)))".
-
- # Fill up the reservoir (collection of samples) with the first `k` samples
- reservoir = take(k, iterable)
-
- # Generate random number that's the largest in a sample of k U(0,1) numbers
- # Largest order statistic: https://en.wikipedia.org/wiki/Order_statistic
- W = exp(log(random()) / k)
-
- # The number of elements to skip before changing the reservoir is a random
- # number with a geometric distribution. Sample it using random() and logs.
- next_index = k + floor(log(random()) / log(1 - W))
-
- for index, element in enumerate(iterable, k):
-
- if index == next_index:
- reservoir[randrange(k)] = element
- # The new W is the largest in a sample of k U(0, `old_W`) numbers
- W *= exp(log(random()) / k)
- next_index += floor(log(random()) / log(1 - W)) + 1
-
- return reservoir
-
-
-def _sample_weighted(iterable, k, weights):
- # Implementation of "A-ExpJ" from the 2006 paper by Efraimidis et al. :
- # "Weighted random sampling with a reservoir".
-
- # Log-transform for numerical stability for weights that are small/large
- weight_keys = (log(random()) / weight for weight in weights)
-
- # Fill up the reservoir (collection of samples) with the first `k`
- # weight-keys and elements, then heapify the list.
- reservoir = take(k, zip(weight_keys, iterable))
- heapify(reservoir)
-
- # The number of jumps before changing the reservoir is a random variable
- # with an exponential distribution. Sample it using random() and logs.
- smallest_weight_key, _ = reservoir[0]
- weights_to_skip = log(random()) / smallest_weight_key
-
- for weight, element in zip(weights, iterable):
- if weight >= weights_to_skip:
- # The notation here is consistent with the paper, but we store
- # the weight-keys in log-space for better numerical stability.
- smallest_weight_key, _ = reservoir[0]
- t_w = exp(weight * smallest_weight_key)
- r_2 = uniform(t_w, 1) # generate U(t_w, 1)
- weight_key = log(r_2) / weight
- heapreplace(reservoir, (weight_key, element))
- smallest_weight_key, _ = reservoir[0]
- weights_to_skip = log(random()) / smallest_weight_key
- else:
- weights_to_skip -= weight
-
- # Equivalent to [element for weight_key, element in sorted(reservoir)]
- return [heappop(reservoir)[1] for _ in range(k)]
-
-
-def sample(iterable, k, weights=None):
- """Return a *k*-length list of elements chosen (without replacement)
- from the *iterable*. Like :func:`random.sample`, but works on iterables
- of unknown length.
-
- >>> iterable = range(100)
- >>> sample(iterable, 5) # doctest: +SKIP
- [81, 60, 96, 16, 4]
-
- An iterable with *weights* may also be given:
-
- >>> iterable = range(100)
- >>> weights = (i * i + 1 for i in range(100))
-    >>> sample(iterable, 5, weights=weights)  # doctest: +SKIP
- [79, 67, 74, 66, 78]
-
- The algorithm can also be used to generate weighted random permutations.
- The relative weight of each item determines the probability that it
- appears late in the permutation.
-
- >>> data = "abcdefgh"
- >>> weights = range(1, len(data) + 1)
- >>> sample(data, k=len(data), weights=weights) # doctest: +SKIP
- ['c', 'a', 'b', 'e', 'g', 'd', 'h', 'f']
- """
- if k == 0:
- return []
-
- iterable = iter(iterable)
- if weights is None:
- return _sample_unweighted(iterable, k)
- else:
- weights = iter(weights)
- return _sample_weighted(iterable, k, weights)
-
-
-def is_sorted(iterable, key=None, reverse=False):
- """Returns ``True`` if the items of iterable are in sorted order, and
- ``False`` otherwise. *key* and *reverse* have the same meaning that they do
- in the built-in :func:`sorted` function.
-
- >>> is_sorted(['1', '2', '3', '4', '5'], key=int)
- True
- >>> is_sorted([5, 4, 3, 1, 2], reverse=True)
- False
-
- The function returns ``False`` after encountering the first out-of-order
- item. If there are no out-of-order items, the iterable is exhausted.
- """
-
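-    # The iterable is sorted iff no adjacent pair is out of order; any()
-    # short-circuits at the first violation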
- compare = lt if reverse else gt
- it = iterable if (key is None) else map(key, iterable)
- return not any(starmap(compare, pairwise(it)))
-
-
-class AbortThread(BaseException):
- pass
-
-
-class callback_iter:
- """Convert a function that uses callbacks to an iterator.
-
- Let *func* be a function that takes a `callback` keyword argument.
- For example:
-
- >>> def func(callback=None):
- ... for i, c in [(1, 'a'), (2, 'b'), (3, 'c')]:
- ... if callback:
- ... callback(i, c)
- ... return 4
-
- Use ``with callback_iter(func)`` to get an iterator over the parameters
- that are delivered to the callback.
-
- >>> with callback_iter(func) as it:
- ... for args, kwargs in it:
- ... print(args)
- (1, 'a')
- (2, 'b')
- (3, 'c')
-
- The function will be called in a background thread. The ``done`` property
- indicates whether it has completed execution.
-
- >>> it.done
- True
-
- If it completes successfully, its return value will be available
- in the ``result`` property.
-
- >>> it.result
- 4
-
- Notes:
-
- * If the function uses some keyword argument besides ``callback``, supply
- *callback_kwd*.
- * If it finished executing, but raised an exception, accessing the
- ``result`` property will raise the same exception.
- * If it hasn't finished executing, accessing the ``result``
- property from within the ``with`` block will raise ``RuntimeError``.
- * If it hasn't finished executing, accessing the ``result`` property from
- outside the ``with`` block will raise a
- ``more_itertools.AbortThread`` exception.
-    * Provide *wait_seconds* to adjust how frequently it is polled for
-      output.
-
- """
-
- def __init__(self, func, callback_kwd='callback', wait_seconds=0.1):
- self._func = func
- self._callback_kwd = callback_kwd
- self._aborted = False
- self._future = None
- self._wait_seconds = wait_seconds
-        self._executor = ThreadPoolExecutor(max_workers=1)
- self._iterator = self._reader()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self._aborted = True
- self._executor.shutdown()
-
- def __iter__(self):
- return self
-
- def __next__(self):
- return next(self._iterator)
-
- @property
- def done(self):
- if self._future is None:
- return False
- return self._future.done()
-
- @property
- def result(self):
- if not self.done:
- raise RuntimeError('Function has not yet completed')
-
- return self._future.result()
-
- def _reader(self):
- q = Queue()
-
- def callback(*args, **kwargs):
- if self._aborted:
- raise AbortThread('canceled by user')
-
- q.put((args, kwargs))
-
- self._future = self._executor.submit(
- self._func, **{self._callback_kwd: callback}
- )
-
- while True:
- try:
- item = q.get(timeout=self._wait_seconds)
- except Empty:
- pass
- else:
- q.task_done()
- yield item
-
- if self._future.done():
- break
-
- remaining = []
- while True:
- try:
- item = q.get_nowait()
- except Empty:
- break
- else:
- q.task_done()
- remaining.append(item)
- q.join()
- yield from remaining
-
-
-def windowed_complete(iterable, n):
- """
- Yield ``(beginning, middle, end)`` tuples, where:
-
- * Each ``middle`` has *n* items from *iterable*
- * Each ``beginning`` has the items before the ones in ``middle``
- * Each ``end`` has the items after the ones in ``middle``
-
- >>> iterable = range(7)
- >>> n = 3
- >>> for beginning, middle, end in windowed_complete(iterable, n):
- ... print(beginning, middle, end)
- () (0, 1, 2) (3, 4, 5, 6)
- (0,) (1, 2, 3) (4, 5, 6)
- (0, 1) (2, 3, 4) (5, 6)
- (0, 1, 2) (3, 4, 5) (6,)
- (0, 1, 2, 3) (4, 5, 6) ()
-
-    Note that *n* must be at least 0 and at most equal to the length of
- *iterable*.
-
- This function will exhaust the iterable and may require significant
- storage.
- """
- if n < 0:
- raise ValueError('n must be >= 0')
-
- seq = tuple(iterable)
- size = len(seq)
-
- if n > size:
- raise ValueError('n must be <= len(seq)')
-
- for i in range(size - n + 1):
- beginning = seq[:i]
- middle = seq[i : i + n]
- end = seq[i + n :]
- yield beginning, middle, end
-
-
-def all_unique(iterable, key=None):
- """
- Returns ``True`` if all the elements of *iterable* are unique (no two
- elements are equal).
-
- >>> all_unique('ABCB')
- False
-
- If a *key* function is specified, it will be used to make comparisons.
-
- >>> all_unique('ABCb')
- True
- >>> all_unique('ABCb', str.lower)
- False
-
- The function returns as soon as the first non-unique element is
- encountered. Iterables with a mix of hashable and unhashable items can
- be used, but the function will be slower for unhashable items.
- """
- seenset = set()
- seenset_add = seenset.add
- seenlist = []
- seenlist_add = seenlist.append
- for element in map(key, iterable) if key else iterable:
- try:
- if element in seenset:
- return False
- seenset_add(element)
- except TypeError:
- if element in seenlist:
- return False
- seenlist_add(element)
- return True
-
-
-def nth_product(index, *args):
- """Equivalent to ``list(product(*args))[index]``.
-
- The products of *args* can be ordered lexicographically.
- :func:`nth_product` computes the product at sort position *index* without
- computing the previous products.
-
- >>> nth_product(8, range(2), range(2), range(2), range(2))
- (1, 0, 0, 0)
-
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pools = list(map(tuple, reversed(args)))
- ns = list(map(len, pools))
-
- c = reduce(mul, ns)
-
- if index < 0:
- index += c
-
- if not 0 <= index < c:
- raise IndexError
-
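-    # Decompose index as a mixed-radix number: each pool contributes one
-    # digit, working from the last (least significant) pool to the first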
- result = []
- for pool, n in zip(pools, ns):
- result.append(pool[index % n])
- index //= n
-
- return tuple(reversed(result))
-
-
-def nth_permutation(iterable, r, index):
-    """Equivalent to ``list(permutations(iterable, r))[index]``
-
- The subsequences of *iterable* that are of length *r* where order is
- important can be ordered lexicographically. :func:`nth_permutation`
- computes the subsequence at sort position *index* directly, without
- computing the previous subsequences.
-
- >>> nth_permutation('ghijk', 2, 5)
- ('h', 'i')
-
-    ``ValueError`` will be raised if *r* is negative or greater than the length
- of *iterable*.
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pool = list(iterable)
- n = len(pool)
-
- if r is None or r == n:
- r, c = n, factorial(n)
- elif not 0 <= r < n:
- raise ValueError
- else:
- c = factorial(n) // factorial(n - r)
-
- if index < 0:
- index += c
-
- if not 0 <= index < c:
- raise IndexError
-
- if c == 0:
- return tuple()
-
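-    # Decode the index in the factorial number system: each digit selects
-    # which of the remaining pool items comes next, and pool.pop() below
-    # removes it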
- result = [0] * r
- q = index * factorial(n) // c if r < n else index
- for d in range(1, n + 1):
- q, i = divmod(q, d)
- if 0 <= n - d < r:
- result[n - d] = i
- if q == 0:
- break
-
- return tuple(map(pool.pop, result))
-
-
-def value_chain(*args):
- """Yield all arguments passed to the function in the same order in which
- they were passed. If an argument itself is iterable then iterate over its
- values.
-
- >>> list(value_chain(1, 2, 3, [4, 5, 6]))
- [1, 2, 3, 4, 5, 6]
-
- Binary and text strings are not considered iterable and are emitted
- as-is:
-
- >>> list(value_chain('12', '34', ['56', '78']))
- ['12', '34', '56', '78']
-
- Multiple levels of nesting are not flattened.
-
- """
- for value in args:
- if isinstance(value, (str, bytes)):
- yield value
- continue
- try:
- yield from value
- except TypeError:
- yield value
-
-
-def product_index(element, *args):
- """Equivalent to ``list(product(*args)).index(element)``
-
- The products of *args* can be ordered lexicographically.
- :func:`product_index` computes the first index of *element* without
- computing the previous products.
-
- >>> product_index([8, 2], range(10), range(5))
- 42
-
- ``ValueError`` will be raised if the given *element* isn't in the product
- of *args*.
- """
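-    # Accumulate a mixed-radix number: each pool contributes one digit (the
-    # position of the matching item), weighted by the sizes of later pools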
- index = 0
-
- for x, pool in zip_longest(element, args, fillvalue=_marker):
- if x is _marker or pool is _marker:
- raise ValueError('element is not a product of args')
-
- pool = tuple(pool)
- index = index * len(pool) + pool.index(x)
-
- return index
-
-
-def combination_index(element, iterable):
- """Equivalent to ``list(combinations(iterable, r)).index(element)``
-
- The subsequences of *iterable* that are of length *r* can be ordered
- lexicographically. :func:`combination_index` computes the index of the
- first *element*, without computing the previous combinations.
-
- >>> combination_index('adf', 'abcdefg')
- 10
-
- ``ValueError`` will be raised if the given *element* isn't one of the
- combinations of *iterable*.
- """
- element = enumerate(element)
- k, y = next(element, (None, None))
- if k is None:
- return 0
-
- indexes = []
- pool = enumerate(iterable)
- for n, x in pool:
- if x == y:
- indexes.append(n)
- tmp, y = next(element, (None, None))
- if tmp is None:
- break
- else:
- k = tmp
- else:
- raise ValueError('element is not a combination of iterable')
-
- n, _ = last(pool, default=(n, None))
-
-    # Python versions below 3.8 don't have math.comb, so compute the
-    # binomial coefficients with factorials
- index = 1
- for i, j in enumerate(reversed(indexes), start=1):
- j = n - j
- if i <= j:
- index += factorial(j) // (factorial(i) * factorial(j - i))
-
- return factorial(n + 1) // (factorial(k + 1) * factorial(n - k)) - index
-
-
-def permutation_index(element, iterable):
-    """Equivalent to ``list(permutations(iterable, r)).index(element)``
-
- The subsequences of *iterable* that are of length *r* where order is
- important can be ordered lexicographically. :func:`permutation_index`
- computes the index of the first *element* directly, without computing
- the previous permutations.
-
- >>> permutation_index([1, 3, 2], range(5))
- 19
-
- ``ValueError`` will be raised if the given *element* isn't one of the
- permutations of *iterable*.
- """
- index = 0
- pool = list(iterable)
- for i, x in zip(range(len(pool), -1, -1), element):
- r = pool.index(x)
- index = index * i + r
- del pool[r]
-
- return index
-
-
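This is the Lehmer code of *element*: at each step, `pool.index(x)` counts how many unused items sort before `x`, and the running index is carried in a shrinking factorial base. A cross-check sketch, under the same standalone-install assumption:

    # Sketch: permutation_index agrees with itertools.permutations.
    from itertools import permutations
    from more_itertools import permutation_index

    listing = list(permutations(range(5), 3))
    assert permutation_index([1, 3, 2], range(5)) == listing.index((1, 3, 2))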
-class countable:
- """Wrap *iterable* and keep a count of how many items have been consumed.
-
- The ``items_seen`` attribute starts at ``0`` and increments as the iterable
- is consumed:
-
- >>> iterable = map(str, range(10))
- >>> it = countable(iterable)
- >>> it.items_seen
- 0
- >>> next(it), next(it)
- ('0', '1')
- >>> list(it)
- ['2', '3', '4', '5', '6', '7', '8', '9']
- >>> it.items_seen
- 10
- """
-
- def __init__(self, iterable):
- self._it = iter(iterable)
- self.items_seen = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- item = next(self._it)
- self.items_seen += 1
-
- return item
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py
deleted file mode 100644
index 521abd7..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/more_itertools/recipes.py
+++ /dev/null
@@ -1,620 +0,0 @@
-"""Imported from the recipes section of the itertools documentation.
-
-All functions taken from the recipes section of the itertools library docs
-[1]_.
-Some backward-compatible usability improvements have been made.
-
-.. [1] http://docs.python.org/library/itertools.html#recipes
-
-"""
-import warnings
-from collections import deque
-from itertools import (
- chain,
- combinations,
- count,
- cycle,
- groupby,
- islice,
- repeat,
- starmap,
- tee,
- zip_longest,
-)
-import operator
-from random import randrange, sample, choice
-
-__all__ = [
- 'all_equal',
- 'consume',
- 'convolve',
- 'dotproduct',
- 'first_true',
- 'flatten',
- 'grouper',
- 'iter_except',
- 'ncycles',
- 'nth',
- 'nth_combination',
- 'padnone',
- 'pad_none',
- 'pairwise',
- 'partition',
- 'powerset',
- 'prepend',
- 'quantify',
- 'random_combination_with_replacement',
- 'random_combination',
- 'random_permutation',
- 'random_product',
- 'repeatfunc',
- 'roundrobin',
- 'tabulate',
- 'tail',
- 'take',
- 'unique_everseen',
- 'unique_justseen',
-]
-
-
-def take(n, iterable):
- """Return first *n* items of the iterable as a list.
-
- >>> take(3, range(10))
- [0, 1, 2]
-
- If there are fewer than *n* items in the iterable, all of them are
- returned.
-
- >>> take(10, range(3))
- [0, 1, 2]
-
- """
- return list(islice(iterable, n))
-
-
-def tabulate(function, start=0):
- """Return an iterator over the results of ``func(start)``,
- ``func(start + 1)``, ``func(start + 2)``...
-
- *func* should be a function that accepts one integer argument.
-
- If *start* is not specified it defaults to 0. It will be incremented each
- time the iterator is advanced.
-
- >>> square = lambda x: x ** 2
- >>> iterator = tabulate(square, -3)
- >>> take(4, iterator)
- [9, 4, 1, 0]
-
- """
- return map(function, count(start))
-
-
-def tail(n, iterable):
- """Return an iterator over the last *n* items of *iterable*.
-
- >>> t = tail(3, 'ABCDEFG')
- >>> list(t)
- ['E', 'F', 'G']
-
- """
- return iter(deque(iterable, maxlen=n))
-
-
-def consume(iterator, n=None):
- """Advance *iterable* by *n* steps. If *n* is ``None``, consume it
- entirely.
-
- Efficiently exhausts an iterator without returning values. Defaults to
- consuming the whole iterator, but an optional second argument may be
- provided to limit consumption.
-
- >>> i = (x for x in range(10))
- >>> next(i)
- 0
- >>> consume(i, 3)
- >>> next(i)
- 4
- >>> consume(i)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- If the iterator has fewer items remaining than the provided limit, the
- whole iterator will be consumed.
-
- >>> i = (x for x in range(3))
- >>> consume(i, 5)
- >>> next(i)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- StopIteration
-
- """
- # Use functions that consume iterators at C speed.
- if n is None:
- # feed the entire iterator into a zero-length deque
- deque(iterator, maxlen=0)
- else:
- # advance to the empty slice starting at position n
- next(islice(iterator, n, n), None)
-
-
-def nth(iterable, n, default=None):
- """Returns the nth item or a default value.
-
- >>> l = range(10)
- >>> nth(l, 3)
- 3
- >>> nth(l, 20, "zebra")
- 'zebra'
-
- """
- return next(islice(iterable, n, None), default)
-
-
-def all_equal(iterable):
- """
- Returns ``True`` if all the elements are equal to each other.
-
- >>> all_equal('aaaa')
- True
- >>> all_equal('aaab')
- False
-
- """
- g = groupby(iterable)
- return next(g, True) and not next(g, False)
-
-
-def quantify(iterable, pred=bool):
- """Return the how many times the predicate is true.
-
- >>> quantify([True, False, True])
- 2
-
- """
- return sum(map(pred, iterable))
-
-
-def pad_none(iterable):
- """Returns the sequence of elements and then returns ``None`` indefinitely.
-
- >>> take(5, pad_none(range(3)))
- [0, 1, 2, None, None]
-
- Useful for emulating the behavior of the built-in :func:`map` function.
-
- See also :func:`padded`.
-
- """
- return chain(iterable, repeat(None))
-
-
-padnone = pad_none
-
-
-def ncycles(iterable, n):
- """Returns the sequence elements *n* times
-
- >>> list(ncycles(["a", "b"], 3))
- ['a', 'b', 'a', 'b', 'a', 'b']
-
- """
- return chain.from_iterable(repeat(tuple(iterable), n))
-
-
-def dotproduct(vec1, vec2):
- """Returns the dot product of the two iterables.
-
- >>> dotproduct([10, 10], [20, 20])
- 400
-
- """
- return sum(map(operator.mul, vec1, vec2))
-
-
-def flatten(listOfLists):
- """Return an iterator flattening one level of nesting in a list of lists.
-
- >>> list(flatten([[0, 1], [2, 3]]))
- [0, 1, 2, 3]
-
- See also :func:`collapse`, which can flatten multiple levels of nesting.
-
- """
- return chain.from_iterable(listOfLists)
-
-
-def repeatfunc(func, times=None, *args):
- """Call *func* with *args* repeatedly, returning an iterable over the
- results.
-
- If *times* is specified, the iterable will terminate after that many
- repetitions:
-
- >>> from operator import add
- >>> times = 4
- >>> args = 3, 5
- >>> list(repeatfunc(add, times, *args))
- [8, 8, 8, 8]
-
- If *times* is ``None`` the iterable will not terminate:
-
- >>> from random import randrange
- >>> times = None
- >>> args = 1, 11
- >>> take(6, repeatfunc(randrange, times, *args)) # doctest:+SKIP
- [2, 4, 8, 1, 8, 4]
-
- """
- if times is None:
- return starmap(func, repeat(args))
- return starmap(func, repeat(args, times))
-
-
-def _pairwise(iterable):
- """Returns an iterator of paired items, overlapping, from the original
-
- >>> take(4, pairwise(count()))
- [(0, 1), (1, 2), (2, 3), (3, 4)]
-
- On Python 3.10 and above, this is an alias for :func:`itertools.pairwise`.
-
- """
- a, b = tee(iterable)
- next(b, None)
- yield from zip(a, b)
-
-
-try:
- from itertools import pairwise as itertools_pairwise
-except ImportError:
- pairwise = _pairwise
-else:
-
- def pairwise(iterable):
- yield from itertools_pairwise(iterable)
-
- pairwise.__doc__ = _pairwise.__doc__
-
-
-def grouper(iterable, n, fillvalue=None):
- """Collect data into fixed-length chunks or blocks.
-
- >>> list(grouper('ABCDEFG', 3, 'x'))
- [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
-
- """
- if isinstance(iterable, int):
- warnings.warn(
- "grouper expects iterable as first parameter", DeprecationWarning
- )
- n, iterable = iterable, n
- args = [iter(iterable)] * n
- return zip_longest(fillvalue=fillvalue, *args)
-
-
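The `[iter(iterable)] * n` idiom above works because the list holds the *same* iterator object `n` times, so each output tuple advances it `n` steps. The mechanics in isolation:

    # The same iterator repeated: zip_longest pulls n consecutive items per tuple.
    from itertools import zip_longest

    it = iter('ABCDEFG')
    print(list(zip_longest(it, it, it, fillvalue='x')))
    # [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]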
-def roundrobin(*iterables):
- """Yields an item from each iterable, alternating between them.
-
- >>> list(roundrobin('ABC', 'D', 'EF'))
- ['A', 'D', 'E', 'B', 'F', 'C']
-
- This function produces the same output as :func:`interleave_longest`, but
- may perform better for some inputs (in particular when the number of
- iterables is small).
-
- """
- # Recipe credited to George Sakkis
- pending = len(iterables)
- nexts = cycle(iter(it).__next__ for it in iterables)
- while pending:
- try:
- for next in nexts:
- yield next()
- except StopIteration:
- pending -= 1
- nexts = cycle(islice(nexts, pending))
-
-
-def partition(pred, iterable):
- """
- Returns a 2-tuple of iterables derived from the input iterable.
- The first yields the items that have ``pred(item) == False``.
- The second yields the items that have ``pred(item) == True``.
-
- >>> is_odd = lambda x: x % 2 != 0
- >>> iterable = range(10)
- >>> even_items, odd_items = partition(is_odd, iterable)
- >>> list(even_items), list(odd_items)
- ([0, 2, 4, 6, 8], [1, 3, 5, 7, 9])
-
- If *pred* is None, :func:`bool` is used.
-
- >>> iterable = [0, 1, False, True, '', ' ']
- >>> false_items, true_items = partition(None, iterable)
- >>> list(false_items), list(true_items)
- ([0, False, ''], [1, True, ' '])
-
- """
- if pred is None:
- pred = bool
-
- evaluations = ((pred(x), x) for x in iterable)
- t1, t2 = tee(evaluations)
- return (
- (x for (cond, x) in t1 if not cond),
- (x for (cond, x) in t2 if cond),
- )
-
-
-def powerset(iterable):
- """Yields all possible subsets of the iterable.
-
- >>> list(powerset([1, 2, 3]))
- [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]
-
- :func:`powerset` will operate on iterables that aren't :class:`set`
- instances, so repeated elements in the input will produce repeated elements
- in the output. Use :func:`unique_everseen` on the input to avoid generating
- duplicates:
-
- >>> seq = [1, 1, 0]
- >>> list(powerset(seq))
- [(), (1,), (1,), (0,), (1, 1), (1, 0), (1, 0), (1, 1, 0)]
- >>> from more_itertools import unique_everseen
- >>> list(powerset(unique_everseen(seq)))
- [(), (1,), (0,), (1, 0)]
-
- """
- s = list(iterable)
- return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))
-
-
-def unique_everseen(iterable, key=None):
- """
- Yield unique elements, preserving order.
-
- >>> list(unique_everseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D']
- >>> list(unique_everseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'D']
-
- Sequences with a mix of hashable and unhashable items can be used.
- The function will be slower (i.e., `O(n^2)`) for unhashable items.
-
- Remember that ``list`` objects are unhashable - you can use the *key*
- parameter to transform the list to a tuple (which is hashable) to
- avoid a slowdown.
-
- >>> iterable = ([1, 2], [2, 3], [1, 2])
- >>> list(unique_everseen(iterable)) # Slow
- [[1, 2], [2, 3]]
- >>> list(unique_everseen(iterable, key=tuple)) # Faster
- [[1, 2], [2, 3]]
-
- Similarly, you may want to convert unhashable ``set`` objects with
- ``key=frozenset``. For ``dict`` objects,
- ``key=lambda x: frozenset(x.items())`` can be used.
-
- """
- seenset = set()
- seenset_add = seenset.add
- seenlist = []
- seenlist_add = seenlist.append
- use_key = key is not None
-
- for element in iterable:
- k = key(element) if use_key else element
- try:
- if k not in seenset:
- seenset_add(k)
- yield element
- except TypeError:
- if k not in seenlist:
- seenlist_add(k)
- yield element
-
-
-def unique_justseen(iterable, key=None):
- """Yields elements in order, ignoring serial duplicates
-
- >>> list(unique_justseen('AAAABBBCCDAABBB'))
- ['A', 'B', 'C', 'D', 'A', 'B']
- >>> list(unique_justseen('ABBCcAD', str.lower))
- ['A', 'B', 'C', 'A', 'D']
-
- """
- return map(next, map(operator.itemgetter(1), groupby(iterable, key)))
-
-
-def iter_except(func, exception, first=None):
- """Yields results from a function repeatedly until an exception is raised.
-
- Converts a call-until-exception interface to an iterator interface.
- Like ``iter(func, sentinel)``, but uses an exception instead of a sentinel
- to end the loop.
-
- >>> l = [0, 1, 2]
- >>> list(iter_except(l.pop, IndexError))
- [2, 1, 0]
-
- """
- try:
- if first is not None:
- yield first()
- while 1:
- yield func()
- except exception:
- pass
-
-
-def first_true(iterable, default=None, pred=None):
- """
- Returns the first true value in the iterable.
-
- If no true value is found, returns *default*
-
- If *pred* is not None, returns the first item for which
- ``pred(item) == True``.
-
- >>> first_true(range(10))
- 1
- >>> first_true(range(10), pred=lambda x: x > 5)
- 6
- >>> first_true(range(10), default='missing', pred=lambda x: x > 9)
- 'missing'
-
- """
- return next(filter(pred, iterable), default)
-
-
-def random_product(*args, repeat=1):
- """Draw an item at random from each of the input iterables.
-
- >>> random_product('abc', range(4), 'XYZ') # doctest:+SKIP
- ('c', 3, 'Z')
-
- If *repeat* is provided as a keyword argument, that many items will be
- drawn from each iterable.
-
- >>> random_product('abcd', range(4), repeat=2) # doctest:+SKIP
- ('a', 2, 'd', 3)
-
- This is equivalent to taking a random selection from
- ``itertools.product(*args, **kwargs)``.
-
- """
- pools = [tuple(pool) for pool in args] * repeat
- return tuple(choice(pool) for pool in pools)
-
-
-def random_permutation(iterable, r=None):
- """Return a random *r* length permutation of the elements in *iterable*.
-
- If *r* is not specified or is ``None``, then *r* defaults to the length of
- *iterable*.
-
- >>> random_permutation(range(5)) # doctest:+SKIP
- (3, 4, 0, 1, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.permutations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- r = len(pool) if r is None else r
- return tuple(sample(pool, r))
-
-
-def random_combination(iterable, r):
- """Return a random *r* length subsequence of the elements in *iterable*.
-
- >>> random_combination(range(5), 3) # doctest:+SKIP
- (2, 3, 4)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(sample(range(n), r))
- return tuple(pool[i] for i in indices)
-
-
-def random_combination_with_replacement(iterable, r):
- """Return a random *r* length subsequence of elements in *iterable*,
- allowing individual elements to be repeated.
-
- >>> random_combination_with_replacement(range(3), 5) # doctest:+SKIP
- (0, 0, 1, 2, 2)
-
- This is equivalent to taking a random selection from
- ``itertools.combinations_with_replacement(iterable, r)``.
-
- """
- pool = tuple(iterable)
- n = len(pool)
- indices = sorted(randrange(n) for i in range(r))
- return tuple(pool[i] for i in indices)
-
-
-def nth_combination(iterable, r, index):
- """Equivalent to ``list(combinations(iterable, r))[index]``.
-
- The subsequences of *iterable* that are of length *r* can be ordered
- lexicographically. :func:`nth_combination` computes the subsequence at
- sort position *index* directly, without computing the previous
- subsequences.
-
- >>> nth_combination(range(5), 3, 5)
- (0, 3, 4)
-
- ``ValueError`` will be raised if *r* is negative or greater than the length
- of *iterable*.
- ``IndexError`` will be raised if the given *index* is invalid.
- """
- pool = tuple(iterable)
- n = len(pool)
- if (r < 0) or (r > n):
- raise ValueError
-
- c = 1
- k = min(r, n - r)
- for i in range(1, k + 1):
- c = c * (n - k + i) // i
-
- if index < 0:
- index += c
-
- if (index < 0) or (index >= c):
- raise IndexError
-
- result = []
- while r:
- c, n, r = c * r // n, n - 1, r - 1
- while index >= c:
- index -= c
- c, n = c * (n - r) // n, n - 1
- result.append(pool[-1 - n])
-
- return tuple(result)
-
-
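The loop walks the combinatorial number system top-down: `c` tracks how many combinations start with the current candidate, and *index* shrinks as candidates are skipped. A cross-check sketch, assuming a standalone `more_itertools` install:

    # Sketch: nth_combination agrees with the brute-force listing.
    from itertools import combinations
    from more_itertools import nth_combination

    listing = list(combinations(range(5), 3))
    assert nth_combination(range(5), 3, 5) == listing[5]  # (0, 3, 4)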
-def prepend(value, iterator):
- """Yield *value*, followed by the elements in *iterator*.
-
- >>> value = '0'
- >>> iterator = ['1', '2', '3']
- >>> list(prepend(value, iterator))
- ['0', '1', '2', '3']
-
- To prepend multiple values, see :func:`itertools.chain`
- or :func:`value_chain`.
-
- """
- return chain([value], iterator)
-
-
-def convolve(signal, kernel):
- """Convolve the iterable *signal* with the iterable *kernel*.
-
- >>> signal = (1, 2, 3, 4, 5)
- >>> kernel = [3, 2, 1]
- >>> list(convolve(signal, kernel))
- [3, 8, 14, 20, 26, 14, 5]
-
- Note: the input arguments are not interchangeable, as the *kernel*
- is immediately consumed and stored.
-
- """
- kernel = tuple(kernel)[::-1]
- n = len(kernel)
- window = deque([0], maxlen=n) * n
- for x in chain(signal, repeat(0, n - 1)):
- window.append(x)
- yield sum(map(operator.mul, kernel, window))
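The deque above is a fixed-size window over the zero-padded signal, and each output is the dot product of the reversed kernel with that window (a "full" convolution). The same computation without the window machinery, as a sketch:

    # Sketch: output k of convolve(signal, kernel) is
    # sum(signal[k - i] * kernel[i]) over the indices where both exist.
    signal, kernel = (1, 2, 3, 4, 5), [3, 2, 1]
    out = [
        sum(signal[k - i] * kernel[i]
            for i in range(len(kernel)) if 0 <= k - i < len(signal))
        for k in range(len(signal) + len(kernel) - 1)
    ]
    print(out)  # [3, 8, 14, 20, 26, 14, 5]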
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/ordered_set.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/ordered_set.py
deleted file mode 100644
index 1487600..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/ordered_set.py
+++ /dev/null
@@ -1,488 +0,0 @@
-"""
-An OrderedSet is a custom MutableSet that remembers its order, so that every
-entry has an index that can be looked up.
-
-Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
-and released under the MIT license.
-"""
-import itertools as it
-from collections import deque
-
-try:
- # Python 3
- from collections.abc import MutableSet, Sequence
-except ImportError:
- # Python 2.7
- from collections import MutableSet, Sequence
-
-SLICE_ALL = slice(None)
-__version__ = "3.1"
-
-
-def is_iterable(obj):
- """
- Are we being asked to look up a list of things, instead of a single thing?
- We check for the `__iter__` attribute so that this can cover types that
- don't have to be known by this module, such as NumPy arrays.
-
- Strings, however, should be considered as atomic values to look up, not
- iterables. The same goes for tuples, since they are immutable and therefore
- valid entries.
-
- We don't need to check for the Python 2 `unicode` type, because it doesn't
- have an `__iter__` attribute anyway.
- """
- return (
- hasattr(obj, "__iter__")
- and not isinstance(obj, str)
- and not isinstance(obj, tuple)
- )
-
-
-class OrderedSet(MutableSet, Sequence):
- """
- An OrderedSet is a custom MutableSet that remembers its order, so that
- every entry has an index that can be looked up.
-
- Example:
- >>> OrderedSet([1, 1, 2, 3, 2])
- OrderedSet([1, 2, 3])
- """
-
- def __init__(self, iterable=None):
- self.items = []
- self.map = {}
- if iterable is not None:
- self |= iterable
-
- def __len__(self):
- """
- Returns the number of unique elements in the ordered set
-
- Example:
- >>> len(OrderedSet([]))
- 0
- >>> len(OrderedSet([1, 2]))
- 2
- """
- return len(self.items)
-
- def __getitem__(self, index):
- """
- Get the item at a given index.
-
- If `index` is a slice, you will get back that slice of items, as a
- new OrderedSet.
-
- If `index` is a list or a similar iterable, you'll get a list of
- items corresponding to those indices. This is similar to NumPy's
- "fancy indexing". The result is not an OrderedSet because you may ask
- for duplicate indices, and the number of elements returned should be
- the number of elements asked for.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset[1]
- 2
- """
- if isinstance(index, slice) and index == SLICE_ALL:
- return self.copy()
- elif is_iterable(index):
- return [self.items[i] for i in index]
- elif hasattr(index, "__index__") or isinstance(index, slice):
- result = self.items[index]
- if isinstance(result, list):
- return self.__class__(result)
- else:
- return result
- else:
- raise TypeError("Don't know how to index an OrderedSet by %r" % index)
-
- def copy(self):
- """
- Return a shallow copy of this object.
-
- Example:
- >>> this = OrderedSet([1, 2, 3])
- >>> other = this.copy()
- >>> this == other
- True
- >>> this is other
- False
- """
- return self.__class__(self)
-
- def __getstate__(self):
- if len(self) == 0:
- # The state can't be an empty list.
- # We need to return a truthy value, or else __setstate__ won't be run.
- #
- # This could have been done more gracefully by always putting the state
- # in a tuple, but this way is backwards- and forwards- compatible with
- # previous versions of OrderedSet.
- return (None,)
- else:
- return list(self)
-
- def __setstate__(self, state):
- if state == (None,):
- self.__init__([])
- else:
- self.__init__(state)
-
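The `(None,)` sentinel matters because pickle skips `__setstate__` entirely when `__getstate__` returns a falsy value, and an empty list is falsy. A round-trip sketch exercising both branches (assumes the standalone `ordered_set` module):

    # Sketch: pickle round-trips for empty and non-empty sets.
    import pickle
    from ordered_set import OrderedSet

    for original in (OrderedSet(), OrderedSet([3, 1, 2])):
        restored = pickle.loads(pickle.dumps(original))
        assert list(restored) == list(original)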
- def __contains__(self, key):
- """
- Test if the item is in this ordered set
-
- Example:
- >>> 1 in OrderedSet([1, 3, 2])
- True
- >>> 5 in OrderedSet([1, 3, 2])
- False
- """
- return key in self.map
-
- def add(self, key):
- """
- Add `key` as an item to this OrderedSet, then return its index.
-
- If `key` is already in the OrderedSet, return the index it already
- had.
-
- Example:
- >>> oset = OrderedSet()
- >>> oset.append(3)
- 0
- >>> print(oset)
- OrderedSet([3])
- """
- if key not in self.map:
- self.map[key] = len(self.items)
- self.items.append(key)
- return self.map[key]
-
- append = add
-
- def update(self, sequence):
- """
- Update the set with the given iterable sequence, then return the index
- of the last element inserted.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.update([3, 1, 5, 1, 4])
- 4
- >>> print(oset)
- OrderedSet([1, 2, 3, 5, 4])
- """
- item_index = None
- try:
- for item in sequence:
- item_index = self.add(item)
- except TypeError:
- raise ValueError(
- "Argument needs to be an iterable, got %s" % type(sequence)
- )
- return item_index
-
- def index(self, key):
- """
- Get the index of a given entry, raising a KeyError if it's not
- present.
-
- `key` can be an iterable of entries that is not a string, in which case
- this returns a list of indices.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.index(2)
- 1
- """
- if is_iterable(key):
- return [self.index(subkey) for subkey in key]
- return self.map[key]
-
- # Provide some compatibility with pd.Index
- get_loc = index
- get_indexer = index
-
- def pop(self):
- """
- Remove and return the last element from the set.
-
- Raises KeyError if the set is empty.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.pop()
- 3
- """
- if not self.items:
- raise KeyError("Set is empty")
-
- elem = self.items[-1]
- del self.items[-1]
- del self.map[elem]
- return elem
-
- def discard(self, key):
- """
- Remove an element. Do not raise an exception if absent.
-
- The MutableSet mixin uses this to implement the .remove() method, which
- *does* raise an error when asked to remove a non-existent item.
-
- Example:
- >>> oset = OrderedSet([1, 2, 3])
- >>> oset.discard(2)
- >>> print(oset)
- OrderedSet([1, 3])
- >>> oset.discard(2)
- >>> print(oset)
- OrderedSet([1, 3])
- """
- if key in self:
- i = self.map[key]
- del self.items[i]
- del self.map[key]
- for k, v in self.map.items():
- if v >= i:
- self.map[k] = v - 1
-
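Note that this makes `discard` O(n): every entry of `self.map` stored after the removed slot is shifted down by one, as the loop above does. The shift is observable through `index`:

    >>> oset = OrderedSet([10, 20, 30])
    >>> oset.index(30)
    2
    >>> oset.discard(20)
    >>> oset.index(30)
    1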
- def clear(self):
- """
- Remove all items from this OrderedSet.
- """
- del self.items[:]
- self.map.clear()
-
- def __iter__(self):
- """
- Example:
- >>> list(iter(OrderedSet([1, 2, 3])))
- [1, 2, 3]
- """
- return iter(self.items)
-
- def __reversed__(self):
- """
- Example:
- >>> list(reversed(OrderedSet([1, 2, 3])))
- [3, 2, 1]
- """
- return reversed(self.items)
-
- def __repr__(self):
- if not self:
- return "%s()" % (self.__class__.__name__,)
- return "%s(%r)" % (self.__class__.__name__, list(self))
-
- def __eq__(self, other):
- """
- Returns true if the containers have the same items. If `other` is a
- Sequence, then order is checked, otherwise it is ignored.
-
- Example:
- >>> oset = OrderedSet([1, 3, 2])
- >>> oset == [1, 3, 2]
- True
- >>> oset == [1, 2, 3]
- False
- >>> oset == [2, 3]
- False
- >>> oset == OrderedSet([3, 2, 1])
- False
- """
- # In Python 2 deque is not a Sequence, so treat it as one for
- # consistent behavior with Python 3.
- if isinstance(other, (Sequence, deque)):
- # Check that this OrderedSet contains the same elements, in the
- # same order, as the other object.
- return list(self) == list(other)
- try:
- other_as_set = set(other)
- except TypeError:
- # If `other` can't be converted into a set, it's not equal.
- return False
- else:
- return set(self) == other_as_set
-
- def union(self, *sets):
- """
- Combines all unique items.
- Each item's order is defined by its first appearance.
-
- Example:
- >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
- >>> print(oset)
- OrderedSet([3, 1, 4, 5, 2, 0])
- >>> oset.union([8, 9])
- OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
- >>> oset | {10}
- OrderedSet([3, 1, 4, 5, 2, 0, 10])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- containers = map(list, it.chain([self], sets))
- items = it.chain.from_iterable(containers)
- return cls(items)
-
- def __and__(self, other):
- # the parent implementation of this is backwards
- return self.intersection(other)
-
- def intersection(self, *sets):
- """
- Returns elements in common between all sets. Order is defined only
- by the first set.
-
- Example:
- >>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3])
- >>> print(oset)
- OrderedSet([1, 2, 3])
- >>> oset.intersection([2, 4, 5], [1, 2, 3, 4])
- OrderedSet([2])
- >>> oset.intersection()
- OrderedSet([1, 2, 3])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- if sets:
- common = set.intersection(*map(set, sets))
- items = (item for item in self if item in common)
- else:
- items = self
- return cls(items)
-
- def difference(self, *sets):
- """
- Returns all elements that are in this set but not the others.
-
- Example:
- >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]))
- OrderedSet([1, 3])
- >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3]))
- OrderedSet([1])
- >>> OrderedSet([1, 2, 3]) - OrderedSet([2])
- OrderedSet([1, 3])
- >>> OrderedSet([1, 2, 3]).difference()
- OrderedSet([1, 2, 3])
- """
- cls = self.__class__
- if sets:
- other = set.union(*map(set, sets))
- items = (item for item in self if item not in other)
- else:
- items = self
- return cls(items)
-
- def issubset(self, other):
- """
- Report whether another set contains this set.
-
- Example:
- >>> OrderedSet([1, 2, 3]).issubset({1, 2})
- False
- >>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4})
- True
- >>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5})
- False
- """
- if len(self) > len(other): # Fast check for obvious cases
- return False
- return all(item in other for item in self)
-
- def issuperset(self, other):
- """
- Report whether this set contains another set.
-
- Example:
- >>> OrderedSet([1, 2]).issuperset([1, 2, 3])
- False
- >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
- True
- >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3})
- False
- """
- if len(self) < len(other): # Fast check for obvious cases
- return False
- return all(item in self for item in other)
-
- def symmetric_difference(self, other):
- """
- Return the symmetric difference of two OrderedSets as a new set.
- That is, the new set will contain all elements that are in exactly
- one of the sets.
-
- Their order will be preserved, with elements from `self` preceding
- elements from `other`.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.symmetric_difference(other)
- OrderedSet([4, 5, 9, 2])
- """
- cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
- diff1 = cls(self).difference(other)
- diff2 = cls(other).difference(self)
- return diff1.union(diff2)
-
- def _update_items(self, items):
- """
- Replace the 'items' list of this OrderedSet with a new one, updating
- self.map accordingly.
- """
- self.items = items
- self.map = {item: idx for (idx, item) in enumerate(items)}
-
- def difference_update(self, *sets):
- """
- Update this OrderedSet to remove items from one or more other sets.
-
- Example:
- >>> this = OrderedSet([1, 2, 3])
- >>> this.difference_update(OrderedSet([2, 4]))
- >>> print(this)
- OrderedSet([1, 3])
-
- >>> this = OrderedSet([1, 2, 3, 4, 5])
- >>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6]))
- >>> print(this)
- OrderedSet([3, 5])
- """
- items_to_remove = set()
- for other in sets:
- items_to_remove |= set(other)
- self._update_items([item for item in self.items if item not in items_to_remove])
-
- def intersection_update(self, other):
- """
- Update this OrderedSet to keep only items in another set, preserving
- their order in this set.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.intersection_update(other)
- >>> print(this)
- OrderedSet([1, 3, 7])
- """
- other = set(other)
- self._update_items([item for item in self.items if item in other])
-
- def symmetric_difference_update(self, other):
- """
- Update this OrderedSet to remove items from another set, then
- add items from the other set that were not present in this set.
-
- Example:
- >>> this = OrderedSet([1, 4, 3, 5, 7])
- >>> other = OrderedSet([9, 7, 1, 3, 2])
- >>> this.symmetric_difference_update(other)
- >>> print(this)
- OrderedSet([4, 5, 9, 2])
- """
- items_to_add = [item for item in other if item not in self]
- items_to_remove = set(other)
- self._update_items(
- [item for item in self.items if item not in items_to_remove] + items_to_add
- )
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__about__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__about__.py
deleted file mode 100644
index 3551bc2..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__about__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
-
-__title__ = "packaging"
-__summary__ = "Core utilities for Python packages"
-__uri__ = "https://github.com/pypa/packaging"
-
-__version__ = "21.3"
-
-__author__ = "Donald Stufft and individual contributors"
-__email__ = "donald@stufft.io"
-
-__license__ = "BSD-2-Clause or Apache-2.0"
-__copyright__ = "2014-2019 %s" % __author__
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__init__.py
deleted file mode 100644
index 3c50c5d..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-from .__about__ import (
- __author__,
- __copyright__,
- __email__,
- __license__,
- __summary__,
- __title__,
- __uri__,
- __version__,
-)
-
-__all__ = [
- "__title__",
- "__summary__",
- "__uri__",
- "__version__",
- "__author__",
- "__email__",
- "__license__",
- "__copyright__",
-]
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-311.pyc
deleted file mode 100644
index a3c748c..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 7da354c..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_manylinux.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_manylinux.cpython-311.pyc
deleted file mode 100644
index 18f5613..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_manylinux.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_musllinux.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_musllinux.cpython-311.pyc
deleted file mode 100644
index 5284f6a..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_musllinux.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-311.pyc
deleted file mode 100644
index e813336..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-311.pyc
deleted file mode 100644
index aa44949..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-311.pyc
deleted file mode 100644
index 63e418f..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-311.pyc
deleted file mode 100644
index 66e8935..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/tags.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/tags.cpython-311.pyc
deleted file mode 100644
index eff3ace..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/tags.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-311.pyc
deleted file mode 100644
index f591d4d..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-311.pyc
deleted file mode 100644
index 12eff75..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_manylinux.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_manylinux.py
deleted file mode 100644
index 4c379aa..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_manylinux.py
+++ /dev/null
@@ -1,301 +0,0 @@
-import collections
-import functools
-import os
-import re
-import struct
-import sys
-import warnings
-from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
-
-
-# Python does not provide platform information at sufficient granularity to
-# identify the architecture of the running executable in some cases, so we
-# determine it dynamically by reading the information from the running
-# process. This only applies on Linux, which uses the ELF format.
-class _ELFFileHeader:
- # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
- class _InvalidELFFileHeader(ValueError):
- """
- An invalid ELF file header was found.
- """
-
- ELF_MAGIC_NUMBER = 0x7F454C46
- ELFCLASS32 = 1
- ELFCLASS64 = 2
- ELFDATA2LSB = 1
- ELFDATA2MSB = 2
- EM_386 = 3
- EM_S390 = 22
- EM_ARM = 40
- EM_X86_64 = 62
- EF_ARM_ABIMASK = 0xFF000000
- EF_ARM_ABI_VER5 = 0x05000000
- EF_ARM_ABI_FLOAT_HARD = 0x00000400
-
- def __init__(self, file: IO[bytes]) -> None:
- def unpack(fmt: str) -> int:
- try:
- data = file.read(struct.calcsize(fmt))
- result: Tuple[int, ...] = struct.unpack(fmt, data)
- except struct.error:
- raise _ELFFileHeader._InvalidELFFileHeader()
- return result[0]
-
- self.e_ident_magic = unpack(">I")
- if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_class = unpack("B")
- if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_data = unpack("B")
- if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
- raise _ELFFileHeader._InvalidELFFileHeader()
- self.e_ident_version = unpack("B")
- self.e_ident_osabi = unpack("B")
- self.e_ident_abiversion = unpack("B")
- self.e_ident_pad = file.read(7)
- format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
- format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
- format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
- format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
- self.e_type = unpack(format_h)
- self.e_machine = unpack(format_h)
- self.e_version = unpack(format_i)
- self.e_entry = unpack(format_p)
- self.e_phoff = unpack(format_p)
- self.e_shoff = unpack(format_p)
- self.e_flags = unpack(format_i)
- self.e_ehsize = unpack(format_h)
- self.e_phentsize = unpack(format_h)
- self.e_phnum = unpack(format_h)
- self.e_shentsize = unpack(format_h)
- self.e_shnum = unpack(format_h)
- self.e_shstrndx = unpack(format_h)
-
-
-def _get_elf_header() -> Optional[_ELFFileHeader]:
- try:
- with open(sys.executable, "rb") as f:
- elf_header = _ELFFileHeader(f)
- except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
- return None
- return elf_header
-
-
-def _is_linux_armhf() -> bool:
- # hard-float ABI can be detected from the ELF header of the running
- # process
- # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
- elf_header = _get_elf_header()
- if elf_header is None:
- return False
- result = elf_header.e_ident_class == elf_header.ELFCLASS32
- result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
- result &= elf_header.e_machine == elf_header.EM_ARM
- result &= (
- elf_header.e_flags & elf_header.EF_ARM_ABIMASK
- ) == elf_header.EF_ARM_ABI_VER5
- result &= (
- elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
- ) == elf_header.EF_ARM_ABI_FLOAT_HARD
- return result
-
-
-def _is_linux_i686() -> bool:
- elf_header = _get_elf_header()
- if elf_header is None:
- return False
- result = elf_header.e_ident_class == elf_header.ELFCLASS32
- result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
- result &= elf_header.e_machine == elf_header.EM_386
- return result
-
-
-def _have_compatible_abi(arch: str) -> bool:
- if arch == "armv7l":
- return _is_linux_armhf()
- if arch == "i686":
- return _is_linux_i686()
- return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
-
-
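On Linux, the header parser above can be pointed at the running interpreter to see which checks apply. A sketch, assuming packaging 21.3's private `packaging._manylinux` module layout (`None` is returned off Linux or for non-ELF executables):

    # Sketch: inspect the running interpreter's ELF header.
    from packaging._manylinux import _get_elf_header

    header = _get_elf_header()
    if header is not None:
        print("64-bit:", header.e_ident_class == header.ELFCLASS64)
        print("x86_64:", header.e_machine == header.EM_X86_64)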
-# If glibc ever changes its major version, we need to know what the last
-# minor version was, so we can build the complete list of all versions.
-# For now, guess what the highest minor version might be, assume it will
-# be 50 for testing. Once this actually happens, update the dictionary
-# with the actual value.
-_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
-
-
-class _GLibCVersion(NamedTuple):
- major: int
- minor: int
-
-
-def _glibc_version_string_confstr() -> Optional[str]:
- """
- Primary implementation of glibc_version_string using os.confstr.
- """
- # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
- # to be broken or missing. This strategy is used in the standard library
- # platform module.
- # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
- try:
- # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
- version_string = os.confstr("CS_GNU_LIBC_VERSION")
- assert version_string is not None
- _, version = version_string.split()
- except (AssertionError, AttributeError, OSError, ValueError):
- # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
- return None
- return version
-
-
-def _glibc_version_string_ctypes() -> Optional[str]:
- """
- Fallback implementation of glibc_version_string using ctypes.
- """
- try:
- import ctypes
- except ImportError:
- return None
-
- # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
- # manpage says, "If filename is NULL, then the returned handle is for the
- # main program". This way we can let the linker do the work to figure out
- # which libc our process is actually using.
- #
- # We must also handle the special case where the executable is not a
- # dynamically linked executable. This can occur when using musl libc,
- # for example. In this situation, dlopen() will error, leading to an
- # OSError. Interestingly, at least in the case of musl, there is no
- # errno set on the OSError. The single string argument used to construct
- # OSError comes from libc itself and is therefore not portable to
- # hard code here. In any case, failure to call dlopen() means we
- # can't proceed, so we bail on our attempt.
- try:
- process_namespace = ctypes.CDLL(None)
- except OSError:
- return None
-
- try:
- gnu_get_libc_version = process_namespace.gnu_get_libc_version
- except AttributeError:
- # Symbol doesn't exist -> therefore, we are not linked to
- # glibc.
- return None
-
- # Call gnu_get_libc_version, which returns a string like "2.5"
- gnu_get_libc_version.restype = ctypes.c_char_p
- version_str: str = gnu_get_libc_version()
- # py2 / py3 compatibility:
- if not isinstance(version_str, str):
- version_str = version_str.decode("ascii")
-
- return version_str
-
-
-def _glibc_version_string() -> Optional[str]:
- """Returns glibc version string, or None if not using glibc."""
- return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
-
-
-def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
- """Parse glibc version.
-
- We use a regexp instead of str.split because we want to discard any
- random junk that might come after the minor version -- this might happen
- in patched/forked versions of glibc (e.g. Linaro's version of glibc
- uses version strings like "2.20-2014.11"). See gh-3588.
- """
- m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
- if not m:
- warnings.warn(
- "Expected glibc version with 2 components major.minor,"
- " got: %s" % version_str,
- RuntimeWarning,
- )
- return -1, -1
- return int(m.group("major")), int(m.group("minor"))
-
-
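For the Linaro-style strings mentioned in the docstring, the regexp keeps only the leading major.minor pair. A sketch, run in this module's namespace:

    # Sketch: trailing vendor junk is discarded; malformed input warns
    # and maps to (-1, -1).
    print(_parse_glibc_version("2.20-2014.11"))  # (2, 20)
    print(_parse_glibc_version("2.31"))          # (2, 31)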
-@functools.lru_cache()
-def _get_glibc_version() -> Tuple[int, int]:
- version_str = _glibc_version_string()
- if version_str is None:
- return (-1, -1)
- return _parse_glibc_version(version_str)
-
-
-# From PEP 513, PEP 600
-def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
- sys_glibc = _get_glibc_version()
- if sys_glibc < version:
- return False
- # Check for presence of _manylinux module.
- try:
- import _manylinux # noqa
- except ImportError:
- return True
- if hasattr(_manylinux, "manylinux_compatible"):
- result = _manylinux.manylinux_compatible(version[0], version[1], arch)
- if result is not None:
- return bool(result)
- return True
- if version == _GLibCVersion(2, 5):
- if hasattr(_manylinux, "manylinux1_compatible"):
- return bool(_manylinux.manylinux1_compatible)
- if version == _GLibCVersion(2, 12):
- if hasattr(_manylinux, "manylinux2010_compatible"):
- return bool(_manylinux.manylinux2010_compatible)
- if version == _GLibCVersion(2, 17):
- if hasattr(_manylinux, "manylinux2014_compatible"):
- return bool(_manylinux.manylinux2014_compatible)
- return True
-
-
-_LEGACY_MANYLINUX_MAP = {
- # CentOS 7 w/ glibc 2.17 (PEP 599)
- (2, 17): "manylinux2014",
- # CentOS 6 w/ glibc 2.12 (PEP 571)
- (2, 12): "manylinux2010",
- # CentOS 5 w/ glibc 2.5 (PEP 513)
- (2, 5): "manylinux1",
-}
-
-
-def platform_tags(linux: str, arch: str) -> Iterator[str]:
- if not _have_compatible_abi(arch):
- return
- # Oldest glibc to be supported regardless of architecture is (2, 17).
- too_old_glibc2 = _GLibCVersion(2, 16)
- if arch in {"x86_64", "i686"}:
- # On x86_64/i686, the oldest supported glibc is (2, 5).
- too_old_glibc2 = _GLibCVersion(2, 4)
- current_glibc = _GLibCVersion(*_get_glibc_version())
- glibc_max_list = [current_glibc]
- # We can assume compatibility across glibc major versions.
- # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
- #
- # Build a list of maximum glibc versions so that we can
- # output the canonical list of all glibc from current_glibc
- # down to too_old_glibc2, including all intermediary versions.
- for glibc_major in range(current_glibc.major - 1, 1, -1):
- glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
- glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
- for glibc_max in glibc_max_list:
- if glibc_max.major == too_old_glibc2.major:
- min_minor = too_old_glibc2.minor
- else:
- # For other glibc major versions, the oldest supported is (x, 0).
- min_minor = -1
- for glibc_minor in range(glibc_max.minor, min_minor, -1):
- glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
- tag = "manylinux_{}_{}".format(*glibc_version)
- if _is_compatible(tag, arch, glibc_version):
- yield linux.replace("linux", tag)
- # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
- if glibc_version in _LEGACY_MANYLINUX_MAP:
- legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
- if _is_compatible(legacy_tag, arch, glibc_version):
- yield linux.replace("linux", legacy_tag)
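End to end, the generator yields every candidate tag from the current glibc down to the floor, newest first, splicing in the legacy aliases where they apply. A usage sketch, under the same packaging 21.3 assumption:

    # Sketch: enumerate manylinux tags for an x86_64 glibc system.
    from packaging._manylinux import platform_tags

    for tag in platform_tags("linux_x86_64", "x86_64"):
        print(tag)
    # e.g. manylinux_2_31_x86_64 ... manylinux_2_17_x86_64 manylinux2014_x86_64
    # ... manylinux_2_5_x86_64 manylinux1_x86_64 on a glibc 2.31 machine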
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_musllinux.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_musllinux.py
deleted file mode 100644
index 8ac3059..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_musllinux.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""PEP 656 support.
-
-This module implements logic to detect if the currently running Python is
-linked against musl, and what musl version is used.
-"""
-
-import contextlib
-import functools
-import operator
-import os
-import re
-import struct
-import subprocess
-import sys
-from typing import IO, Iterator, NamedTuple, Optional, Tuple
-
-
-def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
- return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
-
-
-def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
- """Detect musl libc location by parsing the Python executable.
-
- Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
- ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
- """
- f.seek(0)
- try:
- ident = _read_unpacked(f, "16B")
- except struct.error:
- return None
- if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
- return None
- f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
-
- try:
- # e_fmt: Format for program header.
- # p_fmt: Format for section header.
- # p_idx: Indexes to find p_type, p_offset, and p_filesz.
- e_fmt, p_fmt, p_idx = {
- 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
- 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
- }[ident[4]]
- except KeyError:
- return None
- else:
- p_get = operator.itemgetter(*p_idx)
-
- # Find the interpreter section and return its content.
- try:
- _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
- except struct.error:
- return None
- for i in range(e_phnum + 1):
- f.seek(e_phoff + e_phentsize * i)
- try:
- p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
- except struct.error:
- return None
- if p_type != 3: # Not PT_INTERP.
- continue
- f.seek(p_offset)
- interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
- if "musl" not in interpreter:
- return None
- return interpreter
- return None
-
-
-class _MuslVersion(NamedTuple):
- major: int
- minor: int
-
-
-def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
- lines = [n for n in (n.strip() for n in output.splitlines()) if n]
- if len(lines) < 2 or lines[0][:4] != "musl":
- return None
- m = re.match(r"Version (\d+)\.(\d+)", lines[1])
- if not m:
- return None
- return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
-
-
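For the loader banner shown in :func:`_get_musl_version` below, the parser keeps the major and minor components of the second line. A sketch, run in this module's namespace:

    # Sketch: parsing the musl loader's stderr banner.
    banner = "musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader"
    print(_parse_musl_version(banner))  # _MuslVersion(major=1, minor=2)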
-@functools.lru_cache()
-def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
- """Detect currently-running musl runtime version.
-
- This is done by checking the specified executable's dynamic linking
- information, and invoking the loader to parse its output for a version
- string. If the loader is musl, the output would be something like::
-
- musl libc (x86_64)
- Version 1.2.2
- Dynamic Program Loader
- """
- with contextlib.ExitStack() as stack:
- try:
- f = stack.enter_context(open(executable, "rb"))
- except OSError:
- return None
- ld = _parse_ld_musl_from_elf(f)
- if not ld:
- return None
- proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
- return _parse_musl_version(proc.stderr)
-
-
-def platform_tags(arch: str) -> Iterator[str]:
- """Generate musllinux tags compatible to the current platform.
-
- :param arch: Should be the part of platform tag after the ``linux_``
- prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
- prerequisite for the current platform to be musllinux-compatible.
-
- :returns: An iterator of compatible musllinux tags.
- """
- sys_musl = _get_musl_version(sys.executable)
- if sys_musl is None: # Python not dynamically linked against musl.
- return
- for minor in range(sys_musl.minor, -1, -1):
- yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
-
-
-if __name__ == "__main__": # pragma: no cover
- import sysconfig
-
- plat = sysconfig.get_platform()
- assert plat.startswith("linux-"), "not linux"
-
- print("plat:", plat)
- print("musl:", _get_musl_version(sys.executable))
- print("tags:", end=" ")
- for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
- print(t, end="\n ")
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_structures.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_structures.py
deleted file mode 100644
index 90a6465..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/_structures.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-
-class InfinityType:
- def __repr__(self) -> str:
- return "Infinity"
-
- def __hash__(self) -> int:
- return hash(repr(self))
-
- def __lt__(self, other: object) -> bool:
- return False
-
- def __le__(self, other: object) -> bool:
- return False
-
- def __eq__(self, other: object) -> bool:
- return isinstance(other, self.__class__)
-
- def __gt__(self, other: object) -> bool:
- return True
-
- def __ge__(self, other: object) -> bool:
- return True
-
- def __neg__(self: object) -> "NegativeInfinityType":
- return NegativeInfinity
-
-
-Infinity = InfinityType()
-
-
-class NegativeInfinityType:
- def __repr__(self) -> str:
- return "-Infinity"
-
- def __hash__(self) -> int:
- return hash(repr(self))
-
- def __lt__(self, other: object) -> bool:
- return True
-
- def __le__(self, other: object) -> bool:
- return True
-
- def __eq__(self, other: object) -> bool:
- return isinstance(other, self.__class__)
-
- def __gt__(self, other: object) -> bool:
- return False
-
- def __ge__(self, other: object) -> bool:
- return False
-
- def __neg__(self: object) -> InfinityType:
- return Infinity
-
-
-NegativeInfinity = NegativeInfinityType()
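These singletons compare greater (respectively less) than any other value, which makes them convenient extremes in version sort keys. A minimal sketch, assuming packaging 21.3's module path:

    # Sketch: Infinity and NegativeInfinity as sort-key sentinels.
    from packaging._structures import Infinity, NegativeInfinity

    keys = [(1, 2), (1, Infinity), (1, NegativeInfinity)]
    print(sorted(keys))  # [(1, -Infinity), (1, 2), (1, Infinity)]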
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/markers.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/markers.py
deleted file mode 100644
index eb0541b..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/markers.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import operator
-import os
-import platform
-import sys
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
-
-from setuptools.extern.pyparsing import ( # noqa: N817
- Forward,
- Group,
- Literal as L,
- ParseException,
- ParseResults,
- QuotedString,
- ZeroOrMore,
- stringEnd,
- stringStart,
-)
-
-from .specifiers import InvalidSpecifier, Specifier
-
-__all__ = [
- "InvalidMarker",
- "UndefinedComparison",
- "UndefinedEnvironmentName",
- "Marker",
- "default_environment",
-]
-
-Operator = Callable[[str, str], bool]
-
-
-class InvalidMarker(ValueError):
- """
- An invalid marker was found; users should refer to PEP 508.
- """
-
-
-class UndefinedComparison(ValueError):
- """
- An invalid operation was attempted on a value that doesn't support it.
- """
-
-
-class UndefinedEnvironmentName(ValueError):
- """
- A name was used that does not exist inside of the
- environment.
- """
-
-
-class Node:
- def __init__(self, value: Any) -> None:
- self.value = value
-
- def __str__(self) -> str:
- return str(self.value)
-
- def __repr__(self) -> str:
- return f"<{self.__class__.__name__}('{self}')>"
-
- def serialize(self) -> str:
- raise NotImplementedError
-
-
-class Variable(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-class Value(Node):
- def serialize(self) -> str:
- return f'"{self}"'
-
-
-class Op(Node):
- def serialize(self) -> str:
- return str(self)
-
-
-VARIABLE = (
- L("implementation_version")
- | L("platform_python_implementation")
- | L("implementation_name")
- | L("python_full_version")
- | L("platform_release")
- | L("platform_version")
- | L("platform_machine")
- | L("platform_system")
- | L("python_version")
- | L("sys_platform")
- | L("os_name")
- | L("os.name") # PEP-345
- | L("sys.platform") # PEP-345
- | L("platform.version") # PEP-345
- | L("platform.machine") # PEP-345
- | L("platform.python_implementation") # PEP-345
- | L("python_implementation") # undocumented setuptools legacy
- | L("extra") # PEP-508
-)
-ALIASES = {
- "os.name": "os_name",
- "sys.platform": "sys_platform",
- "platform.version": "platform_version",
- "platform.machine": "platform_machine",
- "platform.python_implementation": "platform_python_implementation",
- "python_implementation": "platform_python_implementation",
-}
-VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
-
-VERSION_CMP = (
- L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
-)
-
-MARKER_OP = VERSION_CMP | L("not in") | L("in")
-MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
-
-MARKER_VALUE = QuotedString("'") | QuotedString('"')
-MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
-
-BOOLOP = L("and") | L("or")
-
-MARKER_VAR = VARIABLE | MARKER_VALUE
-
-MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
-MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
-
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-
-MARKER_EXPR = Forward()
-MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
-MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
-
-MARKER = stringStart + MARKER_EXPR + stringEnd
-
-
-def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
- if isinstance(results, ParseResults):
- return [_coerce_parse_result(i) for i in results]
- else:
- return results
-
-
-def _format_marker(
- marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
-) -> str:
-
- assert isinstance(marker, (list, tuple, str))
-
- # Sometimes we have a structure like [[...]] which is a single item list
- # where the single item is itself its own list. In that case we want to
- # skip the rest of this function so that we don't get extraneous () on
- # the outside.
- if (
- isinstance(marker, list)
- and len(marker) == 1
- and isinstance(marker[0], (list, tuple))
- ):
- return _format_marker(marker[0])
-
- if isinstance(marker, list):
- inner = (_format_marker(m, first=False) for m in marker)
- if first:
- return " ".join(inner)
- else:
- return "(" + " ".join(inner) + ")"
- elif isinstance(marker, tuple):
- return " ".join([m.serialize() for m in marker])
- else:
- return marker
-
-
-_operators: Dict[str, Operator] = {
- "in": lambda lhs, rhs: lhs in rhs,
- "not in": lambda lhs, rhs: lhs not in rhs,
- "<": operator.lt,
- "<=": operator.le,
- "==": operator.eq,
- "!=": operator.ne,
- ">=": operator.ge,
- ">": operator.gt,
-}
-
-
-def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
- try:
- spec = Specifier("".join([op.serialize(), rhs]))
- except InvalidSpecifier:
- pass
- else:
- return spec.contains(lhs)
-
- oper: Optional[Operator] = _operators.get(op.serialize())
- if oper is None:
- raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
-
- return oper(lhs, rhs)
-
-
-class Undefined:
- pass
-
-
-_undefined = Undefined()
-
-
-def _get_env(environment: Dict[str, str], name: str) -> str:
- value: Union[str, Undefined] = environment.get(name, _undefined)
-
- if isinstance(value, Undefined):
- raise UndefinedEnvironmentName(
- f"{name!r} does not exist in evaluation environment."
- )
-
- return value
-
-
-def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
- groups: List[List[bool]] = [[]]
-
- for marker in markers:
- assert isinstance(marker, (list, tuple, str))
-
- if isinstance(marker, list):
- groups[-1].append(_evaluate_markers(marker, environment))
- elif isinstance(marker, tuple):
- lhs, op, rhs = marker
-
- if isinstance(lhs, Variable):
- lhs_value = _get_env(environment, lhs.value)
- rhs_value = rhs.value
- else:
- lhs_value = lhs.value
- rhs_value = _get_env(environment, rhs.value)
-
- groups[-1].append(_eval_op(lhs_value, op, rhs_value))
- else:
- assert marker in ["and", "or"]
- if marker == "or":
- groups.append([])
-
- return any(all(item) for item in groups)
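-
-# Illustrative sketch (not part of the original module): for markers
-# parsed from "A or B and C", "or" starts a new group, so the result is
-# any(all(g) for g in groups). A tiny hand-built example:
-#
-#     >>> _evaluate_markers(
-#     ...     [(Variable("os_name"), Op("=="), Value("posix"))],
-#     ...     {"os_name": "posix"})
-#     True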
-
-
-def format_full_version(info: "sys._version_info") -> str:
- version = "{0.major}.{0.minor}.{0.micro}".format(info)
- kind = info.releaselevel
- if kind != "final":
- version += kind[0] + str(info.serial)
- return version
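-
-# Illustrative sketch (not part of the original module; the values are
-# hypothetical):
-#
-#     >>> from collections import namedtuple
-#     >>> VI = namedtuple("VI", "major minor micro releaselevel serial")
-#     >>> format_full_version(VI(3, 11, 0, "final", 0))
-#     '3.11.0'
-#     >>> format_full_version(VI(3, 11, 0, "candidate", 1))
-#     '3.11.0c1'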
-
-
-def default_environment() -> Dict[str, str]:
- iver = format_full_version(sys.implementation.version)
- implementation_name = sys.implementation.name
- return {
- "implementation_name": implementation_name,
- "implementation_version": iver,
- "os_name": os.name,
- "platform_machine": platform.machine(),
- "platform_release": platform.release(),
- "platform_system": platform.system(),
- "platform_version": platform.version(),
- "python_full_version": platform.python_version(),
- "platform_python_implementation": platform.python_implementation(),
- "python_version": ".".join(platform.python_version_tuple()[:2]),
- "sys_platform": sys.platform,
- }
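-
-# Illustrative sketch (hypothetical values): on CPython 3.11.4 on Linux
-# this returns entries such as {"implementation_name": "cpython",
-# "python_full_version": "3.11.4", "python_version": "3.11",
-# "sys_platform": "linux", ...} -- note that "python_version" carries
-# only the major.minor pair.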
-
-
-class Marker:
- def __init__(self, marker: str) -> None:
- try:
- self._markers = _coerce_parse_result(MARKER.parseString(marker))
- except ParseException as e:
- raise InvalidMarker(
- f"Invalid marker: {marker!r}, parse error at "
- f"{marker[e.loc : e.loc + 8]!r}"
- )
-
- def __str__(self) -> str:
- return _format_marker(self._markers)
-
- def __repr__(self) -> str:
- return f"<Marker('{self}')>"
-
- def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
- """Evaluate a marker.
-
- Return the boolean from evaluating the given marker against the
- environment. environment is an optional argument to override all or
- part of the determined environment.
-
- The environment is determined from the current Python process.
- """
- current_environment = default_environment()
- if environment is not None:
- current_environment.update(environment)
-
- return _evaluate_markers(self._markers, current_environment)
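-
-# Illustrative usage (a sketch, not part of the original module; the
-# results assume a hypothetical POSIX interpreter on Python 3.8+):
-#
-#     >>> m = Marker("python_version >= '3.8' and os_name == 'posix'")
-#     >>> m.evaluate()                    # uses default_environment()
-#     True
-#     >>> m.evaluate({"os_name": "nt"})   # override part of the environment
-#     False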
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/requirements.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/requirements.py
deleted file mode 100644
index 0d93231..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/requirements.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-import string
-import urllib.parse
-from typing import List, Optional as TOptional, Set
-
-from setuptools.extern.pyparsing import ( # noqa
- Combine,
- Literal as L,
- Optional,
- ParseException,
- Regex,
- Word,
- ZeroOrMore,
- originalTextFor,
- stringEnd,
- stringStart,
-)
-
-from .markers import MARKER_EXPR, Marker
-from .specifiers import LegacySpecifier, Specifier, SpecifierSet
-
-
-class InvalidRequirement(ValueError):
- """
- An invalid requirement was found; users should refer to PEP 508.
- """
-
-
-ALPHANUM = Word(string.ascii_letters + string.digits)
-
-LBRACKET = L("[").suppress()
-RBRACKET = L("]").suppress()
-LPAREN = L("(").suppress()
-RPAREN = L(")").suppress()
-COMMA = L(",").suppress()
-SEMICOLON = L(";").suppress()
-AT = L("@").suppress()
-
-PUNCTUATION = Word("-_.")
-IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
-IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
-
-NAME = IDENTIFIER("name")
-EXTRA = IDENTIFIER
-
-URI = Regex(r"[^ ]+")("url")
-URL = AT + URI
-
-EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
-EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
-
-VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
-VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
-
-VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
-VERSION_MANY = Combine(
- VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
-)("_raw_spec")
-_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
-_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
-
-VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
-VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
-
-MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
-MARKER_EXPR.setParseAction(
- lambda s, l, t: Marker(s[t._original_start : t._original_end])
-)
-MARKER_SEPARATOR = SEMICOLON
-MARKER = MARKER_SEPARATOR + MARKER_EXPR
-
-VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
-URL_AND_MARKER = URL + Optional(MARKER)
-
-NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
-
-REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
-# setuptools.extern.pyparsing isn't thread-safe during initialization, so we
-# trigger parsing eagerly here; see issue #104
-REQUIREMENT.parseString("x[]")
-
-
-class Requirement:
- """Parse a requirement.
-
- Parse a given requirement string into its parts, such as name, specifier,
- URL, and extras. Raises InvalidRequirement on a badly-formed requirement
- string.
- """
-
- # TODO: Can we test whether something is contained within a requirement?
- # If so how do we do that? Do we need to test against the _name_ of
- # the thing as well as the version? What about the markers?
- # TODO: Can we normalize the name and extra name?
-
- def __init__(self, requirement_string: str) -> None:
- try:
- req = REQUIREMENT.parseString(requirement_string)
- except ParseException as e:
- raise InvalidRequirement(
- f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
- )
-
- self.name: str = req.name
- if req.url:
- parsed_url = urllib.parse.urlparse(req.url)
- if parsed_url.scheme == "file":
- if urllib.parse.urlunparse(parsed_url) != req.url:
- raise InvalidRequirement("Invalid URL given")
- elif not (parsed_url.scheme and parsed_url.netloc) or (
- not parsed_url.scheme and not parsed_url.netloc
- ):
- raise InvalidRequirement(f"Invalid URL: {req.url}")
- self.url: TOptional[str] = req.url
- else:
- self.url = None
- self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
- self.specifier: SpecifierSet = SpecifierSet(req.specifier)
- self.marker: TOptional[Marker] = req.marker if req.marker else None
-
- def __str__(self) -> str:
- parts: List[str] = [self.name]
-
- if self.extras:
- formatted_extras = ",".join(sorted(self.extras))
- parts.append(f"[{formatted_extras}]")
-
- if self.specifier:
- parts.append(str(self.specifier))
-
- if self.url:
- parts.append(f"@ {self.url}")
- if self.marker:
- parts.append(" ")
-
- if self.marker:
- parts.append(f"; {self.marker}")
-
- return "".join(parts)
-
- def __repr__(self) -> str:
- return f"<Requirement('{self}')>"
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/specifiers.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/specifiers.py
deleted file mode 100644
index 0e218a6..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/specifiers.py
+++ /dev/null
@@ -1,802 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import abc
-import functools
-import itertools
-import re
-import warnings
-from typing import (
- Callable,
- Dict,
- Iterable,
- Iterator,
- List,
- Optional,
- Pattern,
- Set,
- Tuple,
- TypeVar,
- Union,
-)
-
-from .utils import canonicalize_version
-from .version import LegacyVersion, Version, parse
-
-ParsedVersion = Union[Version, LegacyVersion]
-UnparsedVersion = Union[Version, LegacyVersion, str]
-VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
-CallableOperator = Callable[[ParsedVersion, str], bool]
-
-
-class InvalidSpecifier(ValueError):
- """
- An invalid specifier was found; users should refer to PEP 440.
- """
-
-
-class BaseSpecifier(metaclass=abc.ABCMeta):
- @abc.abstractmethod
- def __str__(self) -> str:
- """
- Returns the str representation of this Specifier-like object. This
- should be representative of the Specifier itself.
- """
-
- @abc.abstractmethod
- def __hash__(self) -> int:
- """
- Returns a hash value for this Specifier-like object.
- """
-
- @abc.abstractmethod
- def __eq__(self, other: object) -> bool:
- """
- Returns a boolean representing whether or not the two Specifier-like
- objects are equal.
- """
-
- @abc.abstractproperty
- def prereleases(self) -> Optional[bool]:
- """
- Returns whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- """
- Sets whether or not pre-releases as a whole are allowed by this
- specifier.
- """
-
- @abc.abstractmethod
- def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
- """
- Determines if the given item is contained within this specifier.
- """
-
- @abc.abstractmethod
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
- """
- Takes an iterable of items and filters it so that only items
- contained within this specifier are yielded.
- """
-
-
-class _IndividualSpecifier(BaseSpecifier):
-
- _operators: Dict[str, str] = {}
- _regex: Pattern[str]
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- match = self._regex.search(spec)
- if not match:
- raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
-
- self._spec: Tuple[str, str] = (
- match.group("operator").strip(),
- match.group("version").strip(),
- )
-
- # Store whether or not this Specifier should accept prereleases
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
-
- def __str__(self) -> str:
- return "{}{}".format(*self._spec)
-
- @property
- def _canonical_spec(self) -> Tuple[str, str]:
- return self._spec[0], canonicalize_version(self._spec[1])
-
- def __hash__(self) -> int:
- return hash(self._canonical_spec)
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, str):
- try:
- other = self.__class__(str(other))
- except InvalidSpecifier:
- return NotImplemented
- elif not isinstance(other, self.__class__):
- return NotImplemented
-
- return self._canonical_spec == other._canonical_spec
-
- def _get_operator(self, op: str) -> CallableOperator:
- operator_callable: CallableOperator = getattr(
- self, f"_compare_{self._operators[op]}"
- )
- return operator_callable
-
- def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
- if not isinstance(version, (LegacyVersion, Version)):
- version = parse(version)
- return version
-
- @property
- def operator(self) -> str:
- return self._spec[0]
-
- @property
- def version(self) -> str:
- return self._spec[1]
-
- @property
- def prereleases(self) -> Optional[bool]:
- return self._prereleases
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: str) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Determine if prereleases are to be allowed or not.
- if prereleases is None:
- prereleases = self.prereleases
-
- # Normalize item to a Version or LegacyVersion; this allows us to have
- # a shortcut for ``"2.0" in Specifier(">=2")``.
- normalized_item = self._coerce_version(item)
-
- # Determine if we should be supporting prereleases in this specifier
- # or not; if we do not support prereleases, then we can short-circuit
- # the logic if this version is a prerelease.
- if normalized_item.is_prerelease and not prereleases:
- return False
-
- # Actually do the comparison to determine if this item is contained
- # within this Specifier or not.
- operator_callable: CallableOperator = self._get_operator(self.operator)
- return operator_callable(normalized_item, self.version)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- yielded = False
- found_prereleases = []
-
- kw = {"prereleases": prereleases if prereleases is not None else True}
-
- # Attempt to iterate over all the values in the iterable and if any of
- # them match, yield them.
- for version in iterable:
- parsed_version = self._coerce_version(version)
-
- if self.contains(parsed_version, **kw):
- # If our version is a prerelease, and we were not set to allow
- # prereleases, then we'll store it for later in case nothing
- # else matches this specifier.
- if parsed_version.is_prerelease and not (
- prereleases or self.prereleases
- ):
- found_prereleases.append(version)
- # Either this is not a prerelease, or we should have been
- # accepting prereleases from the beginning.
- else:
- yielded = True
- yield version
-
- # Now that we've iterated over everything, determine if we've yielded
- # any values; if we have not, and we have any prereleases stored up,
- # then we will go ahead and yield the prereleases.
- if not yielded and found_prereleases:
- for version in found_prereleases:
- yield version
-
-
-class LegacySpecifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(==|!=|<=|>=|<|>))
- \s*
- (?P<version>
- [^,;\s)]* # Since this is a "legacy" specifier, and the version
- # string can be just about anything, we match everything
- # except for whitespace, a semi-colon for marker support,
- # a closing paren since versions can be enclosed in
- # them, and a comma since it's a version separator.
- )
- """
-
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- _operators = {
- "==": "equal",
- "!=": "not_equal",
- "<=": "less_than_equal",
- ">=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- }
-
- def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
- super().__init__(spec, prereleases)
-
- warnings.warn(
- "Creating a LegacyVersion has been deprecated and will be "
- "removed in the next major release",
- DeprecationWarning,
- )
-
- def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
- if not isinstance(version, LegacyVersion):
- version = LegacyVersion(str(version))
- return version
-
- def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective == self._coerce_version(spec)
-
- def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective != self._coerce_version(spec)
-
- def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective <= self._coerce_version(spec)
-
- def _compare_greater_than_equal(
- self, prospective: LegacyVersion, spec: str
- ) -> bool:
- return prospective >= self._coerce_version(spec)
-
- def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective < self._coerce_version(spec)
-
- def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
- return prospective > self._coerce_version(spec)
-
-
-def _require_version_compare(
- fn: Callable[["Specifier", ParsedVersion, str], bool]
-) -> Callable[["Specifier", ParsedVersion, str], bool]:
- @functools.wraps(fn)
- def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
- if not isinstance(prospective, Version):
- return False
- return fn(self, prospective, spec)
-
- return wrapped
-
-
-class Specifier(_IndividualSpecifier):
-
- _regex_str = r"""
- (?P<operator>(~=|==|!=|<=|>=|<|>|===))
- (?P<version>
- (?:
- # The identity operators allow for an escape hatch that will
- # do an exact string match of the version you wish to install.
- # This will not be parsed by PEP 440 and we cannot determine
- # any semantic meaning from it. This operator is discouraged
- # but included entirely as an escape hatch.
- (?<====) # Only match for the identity operator
- \s*
- [^\s]* # We just match everything, except for whitespace
- # since we are only testing for strict identity.
- )
- |
- (?:
- # The (non)equality operators allow for wild card and local
- # versions to be specified so we have to define these two
- # operators separately to enable that.
- (?<===|!=) # Only match for equals and not equals
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)* # release
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
-
- # You cannot use a wild card and a dev or local version
- # together so group them with a | and make them optional.
- (?:
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
- |
- \.\* # Wild card syntax of .*
- )?
- )
- |
- (?:
- # The compatible operator requires at least two digits in the
- # release segment.
- (?<=~=) # Only match for the compatible operator
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- )
- |
- (?:
- # All other operators only allow a subset of what the
- # (non)equality operators do. Specifically they do not allow
- # local versions to be specified nor do they allow the prefix
- # matching wild cards.
- (?<!==|!=|~=) # We have special cases for these
- # operators so we want to make sure they
- # don't match here.
-
- \s*
- v?
- (?:[0-9]+!)? # epoch
- [0-9]+(?:\.[0-9]+)* # release
- (?: # pre release
- [-_\.]?
- (a|b|c|rc|alpha|beta|pre|preview)
- [-_\.]?
- [0-9]*
- )?
- (?: # post release
- (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
- )?
- (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
- )
- )
- """
-
- _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- _operators = {
- "~=": "compatible",
- "==": "equal",
- "!=": "not_equal",
- "<=": "less_than_equal",
- ">=": "greater_than_equal",
- "<": "less_than",
- ">": "greater_than",
- "===": "arbitrary",
- }
-
- @_require_version_compare
- def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # Compatible releases have an equivalent combination of >= and ==. That
- # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
- # implement this in terms of the other specifiers instead of
- # implementing it ourselves. The only thing we need to do is construct
- # the other specifiers.
-
- # We want everything but the last item in the version, but we want to
- # ignore suffix segments.
- prefix = ".".join(
- list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
- )
-
- # Add the prefix notation to the end of our string
- prefix += ".*"
-
- return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
- prospective, prefix
- )
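-
- # Illustrative sketch (not part of the original module):
- # Specifier("~=2.2").contains("2.5") is True (>=2.2 together with
- # ==2.*), while Specifier("~=2.2").contains("3.0") is False because
- # the ==2.* half fails.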
-
- @_require_version_compare
- def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # We need special logic to handle prefix matching
- if spec.endswith(".*"):
- # In the case of prefix matching we want to ignore local segment.
- prospective = Version(prospective.public)
- # Split the spec out by dots, and pretend that there is an implicit
- # dot in between a release segment and a pre-release segment.
- split_spec = _version_split(spec[:-2]) # Remove the trailing .*
-
- # Split the prospective version out by dots, and pretend that there
- # is an implicit dot in between a release segment and a pre-release
- # segment.
- split_prospective = _version_split(str(prospective))
-
- # Shorten the prospective version to be the same length as the spec
- # so that we can determine if the specifier is a prefix of the
- # prospective version or not.
- shortened_prospective = split_prospective[: len(split_spec)]
-
- # Pad out our two sides with zeros so that they both equal the same
- # length.
- padded_spec, padded_prospective = _pad_version(
- split_spec, shortened_prospective
- )
-
- return padded_prospective == padded_spec
- else:
- # Convert our spec string into a Version
- spec_version = Version(spec)
-
- # If the specifier does not have a local segment, then we want to
- # act as if the prospective version also does not have a local
- # segment.
- if not spec_version.local:
- prospective = Version(prospective.public)
-
- return prospective == spec_version
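-
- # Illustrative sketch (not part of the original module): with spec
- # "2.2.*", the prospective "2.2.1" splits to ["2", "2", "1"], is
- # shortened to ["2", "2"], and matches, while "2.3" does not. Without
- # the ".*" suffix, "2.2+local" still equals "2.2" because the local
- # segment is dropped when the spec has none.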
-
- @_require_version_compare
- def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
- return not self._compare_equal(prospective, spec)
-
- @_require_version_compare
- def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) <= Version(spec)
-
- @_require_version_compare
- def _compare_greater_than_equal(
- self, prospective: ParsedVersion, spec: str
- ) -> bool:
-
- # NB: Local version identifiers are NOT permitted in the version
- # specifier, so local version labels can be universally removed from
- # the prospective version.
- return Version(prospective.public) >= Version(spec)
-
- @_require_version_compare
- def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is less than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective < spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes a pre-release version, we do not accept pre-release
- # versions for the version mentioned in the specifier (e.g. <3.1 should
- # not match 3.1.dev0, but should match 3.0.dev0).
- if not spec.is_prerelease and prospective.is_prerelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that the prospective version is
- # both less than the spec version *and* not a pre-release of the same
- # version in the spec.
- return True
-
- @_require_version_compare
- def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
-
- # Convert our spec to a Version instance, since we'll want to work with
- # it as a version.
- spec = Version(spec_str)
-
- # Check to see if the prospective version is greater than the spec
- # version. If it's not we can short circuit and just return False now
- # instead of doing extra unneeded work.
- if not prospective > spec:
- return False
-
- # This special case is here so that, unless the specifier itself
- # includes a post-release version, we do not accept
- # post-release versions for the version mentioned in the specifier
- # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
- if not spec.is_postrelease and prospective.is_postrelease:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # Ensure that we do not allow a local version of the version mentioned
- # in the specifier, which is technically greater than, to match.
- if prospective.local is not None:
- if Version(prospective.base_version) == Version(spec.base_version):
- return False
-
- # If we've gotten to here, it means that the prospective version is
- # both greater than the spec version *and* not a post-release or local
- # version of the same version in the spec.
- return True
-
- def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
- return str(prospective).lower() == str(spec).lower()
-
- @property
- def prereleases(self) -> bool:
-
- # If there is an explicit prereleases set for this, then we'll just
- # blindly use that.
- if self._prereleases is not None:
- return self._prereleases
-
- # Look at our specifier and determine if it uses an inclusive
- # operator, and if it does, whether it includes an explicit
- # prerelease.
- operator, version = self._spec
- if operator in ["==", ">=", "<=", "~=", "==="]:
- # The == specifier can include a trailing .*; if it does, we
- # want to remove it before parsing.
- if operator == "==" and version.endswith(".*"):
- version = version[:-2]
-
- # Parse the version, and if it is a pre-release then this
- # specifier allows pre-releases.
- if parse(version).is_prerelease:
- return True
-
- return False
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
-
-_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
-
-
-def _version_split(version: str) -> List[str]:
- result: List[str] = []
- for item in version.split("."):
- match = _prefix_regex.search(item)
- if match:
- result.extend(match.groups())
- else:
- result.append(item)
- return result
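-
-# Illustrative sketch (not part of the original module):
-# _version_split("2.2rc1") returns ["2", "2", "rc1"] -- the pre-release
-# part is split off as if an implicit dot preceded it.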
-
-
-def _is_not_suffix(segment: str) -> bool:
- return not any(
- segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
- )
-
-
-def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
- left_split, right_split = [], []
-
- # Get the release segment of our versions
- left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
- right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
-
- # Get the rest of our versions
- left_split.append(left[len(left_split[0]) :])
- right_split.append(right[len(right_split[0]) :])
-
- # Insert our padding
- left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
- right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
-
- return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
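-
-# Illustrative sketch (not part of the original module):
-# _pad_version(["1", "2"], ["1", "2", "3"]) returns
-# (["1", "2", "0"], ["1", "2", "3"]), so "1.2" and "1.2.3" compare over
-# release segments of equal length.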
-
-
-class SpecifierSet(BaseSpecifier):
- def __init__(
- self, specifiers: str = "", prereleases: Optional[bool] = None
- ) -> None:
-
- # Split on "," to break each individual specifier into its own item, and
- # strip each item to remove leading/trailing whitespace.
- split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
-
- # Parse each individual specifier, attempting first to make it a
- # Specifier and falling back to a LegacySpecifier.
- parsed: Set[_IndividualSpecifier] = set()
- for specifier in split_specifiers:
- try:
- parsed.add(Specifier(specifier))
- except InvalidSpecifier:
- parsed.add(LegacySpecifier(specifier))
-
- # Turn our parsed specifiers into a frozen set and save them for later.
- self._specs = frozenset(parsed)
-
- # Store our prereleases value so we can use it later to determine if
- # we accept prereleases or not.
- self._prereleases = prereleases
-
- def __repr__(self) -> str:
- pre = (
- f", prereleases={self.prereleases!r}"
- if self._prereleases is not None
- else ""
- )
-
- return f"<SpecifierSet({str(self)!r}{pre})>"
-
- def __str__(self) -> str:
- return ",".join(sorted(str(s) for s in self._specs))
-
- def __hash__(self) -> int:
- return hash(self._specs)
-
- def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
- if isinstance(other, str):
- other = SpecifierSet(other)
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- specifier = SpecifierSet()
- specifier._specs = frozenset(self._specs | other._specs)
-
- if self._prereleases is None and other._prereleases is not None:
- specifier._prereleases = other._prereleases
- elif self._prereleases is not None and other._prereleases is None:
- specifier._prereleases = self._prereleases
- elif self._prereleases == other._prereleases:
- specifier._prereleases = self._prereleases
- else:
- raise ValueError(
- "Cannot combine SpecifierSets with True and False prerelease "
- "overrides."
- )
-
- return specifier
-
- def __eq__(self, other: object) -> bool:
- if isinstance(other, (str, _IndividualSpecifier)):
- other = SpecifierSet(str(other))
- elif not isinstance(other, SpecifierSet):
- return NotImplemented
-
- return self._specs == other._specs
-
- def __len__(self) -> int:
- return len(self._specs)
-
- def __iter__(self) -> Iterator[_IndividualSpecifier]:
- return iter(self._specs)
-
- @property
- def prereleases(self) -> Optional[bool]:
-
- # If we have been given an explicit prerelease modifier, then we'll
- # pass that through here.
- if self._prereleases is not None:
- return self._prereleases
-
- # If we don't have any specifiers, and we don't have a forced value,
- # then we'll just return None since we don't know if this should have
- # pre-releases or not.
- if not self._specs:
- return None
-
- # Otherwise we'll see if any of the given specifiers accept
- # prereleases; if any of them do, we'll return True, otherwise False.
- return any(s.prereleases for s in self._specs)
-
- @prereleases.setter
- def prereleases(self, value: bool) -> None:
- self._prereleases = value
-
- def __contains__(self, item: UnparsedVersion) -> bool:
- return self.contains(item)
-
- def contains(
- self, item: UnparsedVersion, prereleases: Optional[bool] = None
- ) -> bool:
-
- # Ensure that our item is a Version or LegacyVersion instance.
- if not isinstance(item, (LegacyVersion, Version)):
- item = parse(item)
-
- # Determine if we're forcing a prerelease or not; if we're not forcing
- # one for this particular contains call, then we'll use whatever the
- # SpecifierSet thinks for whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # We can determine if we're going to allow pre-releases by looking to
- # see if any of the underlying items supports them. If none of them do
- # and this item is a pre-release then we do not allow it and we can
- # short circuit that here.
- # Note: This means that 1.0.dev1 would not be contained in something
- # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
- if not prereleases and item.is_prerelease:
- return False
-
- # We simply dispatch to the underlying specs here to make sure that the
- # given version is contained within all of them.
- # Note: This use of all() here means that an empty set of specifiers
- # will always return True, this is an explicit design decision.
- return all(s.contains(item, prereleases=prereleases) for s in self._specs)
-
- def filter(
- self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
- ) -> Iterable[VersionTypeVar]:
-
- # Determine if we're forcing a prerelease or not; if we're not forcing
- # one for this particular filter call, then we'll use whatever the
- # SpecifierSet thinks for whether or not we should support prereleases.
- if prereleases is None:
- prereleases = self.prereleases
-
- # If we have any specifiers, then we want to wrap our iterable in the
- # filter method of each one; this acts as a logical AND across all the
- # specifiers.
- if self._specs:
- for spec in self._specs:
- iterable = spec.filter(iterable, prereleases=bool(prereleases))
- return iterable
- # If we do not have any specifiers, then we need to have a rough filter
- # which will filter out any pre-releases, unless there are no final
- # releases, and which will filter out LegacyVersion in general.
- else:
- filtered: List[VersionTypeVar] = []
- found_prereleases: List[VersionTypeVar] = []
-
- item: UnparsedVersion
- parsed_version: Union[Version, LegacyVersion]
-
- for item in iterable:
- # Ensure that we have some kind of Version class for this item.
- if not isinstance(item, (LegacyVersion, Version)):
- parsed_version = parse(item)
- else:
- parsed_version = item
-
- # Filter out any item which is parsed as a LegacyVersion
- if isinstance(parsed_version, LegacyVersion):
- continue
-
- # Store any item which is a pre-release for later unless we've
- # already found a final version or we are accepting prereleases
- if parsed_version.is_prerelease and not prereleases:
- if not filtered:
- found_prereleases.append(item)
- else:
- filtered.append(item)
-
- # If we've found no items except for pre-releases, then we'll go
- # ahead and use the pre-releases
- if not filtered and found_prereleases and prereleases is None:
- return found_prereleases
-
- return filtered
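-
-# Illustrative usage (a sketch, not part of the original module):
-#
-#     >>> spec_set = SpecifierSet(">=1.0,<2.0")
-#     >>> "1.4" in spec_set
-#     True
-#     >>> list(spec_set.filter(["0.9", "1.0", "1.5a1", "2.0"]))
-#     ['1.0']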
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/tags.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/tags.py
deleted file mode 100644
index 9a3d25a..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/tags.py
+++ /dev/null
@@ -1,487 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import logging
-import platform
-import sys
-import sysconfig
-from importlib.machinery import EXTENSION_SUFFIXES
-from typing import (
- Dict,
- FrozenSet,
- Iterable,
- Iterator,
- List,
- Optional,
- Sequence,
- Tuple,
- Union,
- cast,
-)
-
-from . import _manylinux, _musllinux
-
-logger = logging.getLogger(__name__)
-
-PythonVersion = Sequence[int]
-MacVersion = Tuple[int, int]
-
-INTERPRETER_SHORT_NAMES: Dict[str, str] = {
- "python": "py", # Generic.
- "cpython": "cp",
- "pypy": "pp",
- "ironpython": "ip",
- "jython": "jy",
-}
-
-
-_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
-
-
-class Tag:
- """
- A representation of the tag triple for a wheel.
-
- Instances are considered immutable and thus are hashable. Equality checking
- is also supported.
- """
-
- __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
-
- def __init__(self, interpreter: str, abi: str, platform: str) -> None:
- self._interpreter = interpreter.lower()
- self._abi = abi.lower()
- self._platform = platform.lower()
- # The __hash__ of every single element in a Set[Tag] will be evaluated each time
- # that a set calls its `.isdisjoint()` method, which may be called hundreds of
- # times when scanning a page of links for packages with tags matching that
- # Set[Tag]. Pre-computing the value here produces significant speedups for
- # downstream consumers.
- self._hash = hash((self._interpreter, self._abi, self._platform))
-
- @property
- def interpreter(self) -> str:
- return self._interpreter
-
- @property
- def abi(self) -> str:
- return self._abi
-
- @property
- def platform(self) -> str:
- return self._platform
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, Tag):
- return NotImplemented
-
- return (
- (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
- and (self._platform == other._platform)
- and (self._abi == other._abi)
- and (self._interpreter == other._interpreter)
- )
-
- def __hash__(self) -> int:
- return self._hash
-
- def __str__(self) -> str:
- return f"{self._interpreter}-{self._abi}-{self._platform}"
-
- def __repr__(self) -> str:
- return f"<{self} @ {id(self)}>"
-
-
-def parse_tag(tag: str) -> FrozenSet[Tag]:
- """
- Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
-
- Returning a set is required due to the possibility that the tag is a
- compressed tag set.
- """
- tags = set()
- interpreters, abis, platforms = tag.split("-")
- for interpreter in interpreters.split("."):
- for abi in abis.split("."):
- for platform_ in platforms.split("."):
- tags.add(Tag(interpreter, abi, platform_))
- return frozenset(tags)
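-
-# Illustrative sketch (not part of the original module): a compressed
-# tag set expands multiplicatively, e.g.
-#
-#     >>> sorted(str(t) for t in parse_tag("cp38.cp39-none-any"))
-#     ['cp38-none-any', 'cp39-none-any']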
-
-
-def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
- value = sysconfig.get_config_var(name)
- if value is None and warn:
- logger.debug(
- "Config variable '%s' is unset, Python ABI tag may be incorrect", name
- )
- return value
-
-
-def _normalize_string(string: str) -> str:
- return string.replace(".", "_").replace("-", "_")
-
-
-def _abi3_applies(python_version: PythonVersion) -> bool:
- """
- Determine if the Python version supports abi3.
-
- PEP 384 was first implemented in Python 3.2.
- """
- return len(python_version) > 1 and tuple(python_version) >= (3, 2)
-
-
-def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
- py_version = tuple(py_version) # To allow for version comparison.
- abis = []
- version = _version_nodot(py_version[:2])
- debug = pymalloc = ucs4 = ""
- with_debug = _get_config_var("Py_DEBUG", warn)
- has_refcount = hasattr(sys, "gettotalrefcount")
- # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
- # extension modules is the best option.
- # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
- has_ext = "_d.pyd" in EXTENSION_SUFFIXES
- if with_debug or (with_debug is None and (has_refcount or has_ext)):
- debug = "d"
- if py_version < (3, 8):
- with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
- if with_pymalloc or with_pymalloc is None:
- pymalloc = "m"
- if py_version < (3, 3):
- unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
- if unicode_size == 4 or (
- unicode_size is None and sys.maxunicode == 0x10FFFF
- ):
- ucs4 = "u"
- elif debug:
- # Debug builds can also load "normal" extension modules.
- # We can also assume no UCS-4 or pymalloc requirement.
- abis.append(f"cp{version}")
- abis.insert(
- 0,
- "cp{version}{debug}{pymalloc}{ucs4}".format(
- version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
- ),
- )
- return abis
-
-
-def cpython_tags(
- python_version: Optional[PythonVersion] = None,
- abis: Optional[Iterable[str]] = None,
- platforms: Optional[Iterable[str]] = None,
- *,
- warn: bool = False,
-) -> Iterator[Tag]:
- """
- Yields the tags for a CPython interpreter.
-
- The tags consist of:
- - cp<python_version>-<abi>-<platform>
- - cp<python_version>-abi3-<platform>
- - cp<python_version>-none-<platform>
- - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
-
- If python_version only specifies a major version then user-provided ABIs and
- the 'none' ABI tag will be used.
-
- If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
- their normal position and not at the beginning.
- """
- if not python_version:
- python_version = sys.version_info[:2]
-
- interpreter = f"cp{_version_nodot(python_version[:2])}"
-
- if abis is None:
- if len(python_version) > 1:
- abis = _cpython_abis(python_version, warn)
- else:
- abis = []
- abis = list(abis)
- # 'abi3' and 'none' are explicitly handled later.
- for explicit_abi in ("abi3", "none"):
- try:
- abis.remove(explicit_abi)
- except ValueError:
- pass
-
- platforms = list(platforms or platform_tags())
- for abi in abis:
- for platform_ in platforms:
- yield Tag(interpreter, abi, platform_)
- if _abi3_applies(python_version):
- yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
- yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
-
- if _abi3_applies(python_version):
- for minor_version in range(python_version[1] - 1, 1, -1):
- for platform_ in platforms:
- interpreter = "cp{version}".format(
- version=_version_nodot((python_version[0], minor_version))
- )
- yield Tag(interpreter, "abi3", platform_)
-
-
-def _generic_abi() -> Iterator[str]:
- abi = sysconfig.get_config_var("SOABI")
- if abi:
- yield _normalize_string(abi)
-
-
-def generic_tags(
- interpreter: Optional[str] = None,
- abis: Optional[Iterable[str]] = None,
- platforms: Optional[Iterable[str]] = None,
- *,
- warn: bool = False,
-) -> Iterator[Tag]:
- """
- Yields the tags for a generic interpreter.
-
- The tags consist of:
- - <interpreter>-<abi>-<platform>
-
- The "none" ABI will be added if it was not explicitly provided.
- """
- if not interpreter:
- interp_name = interpreter_name()
- interp_version = interpreter_version(warn=warn)
- interpreter = "".join([interp_name, interp_version])
- if abis is None:
- abis = _generic_abi()
- platforms = list(platforms or platform_tags())
- abis = list(abis)
- if "none" not in abis:
- abis.append("none")
- for abi in abis:
- for platform_ in platforms:
- yield Tag(interpreter, abi, platform_)
-
-
-def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
- """
- Yields Python versions in descending order.
-
- After the latest version, the major-only version will be yielded, and then
- all previous versions of that major version.
- """
- if len(py_version) > 1:
- yield f"py{_version_nodot(py_version[:2])}"
- yield f"py{py_version[0]}"
- if len(py_version) > 1:
- for minor in range(py_version[1] - 1, -1, -1):
- yield f"py{_version_nodot((py_version[0], minor))}"
-
-
-def compatible_tags(
- python_version: Optional[PythonVersion] = None,
- interpreter: Optional[str] = None,
- platforms: Optional[Iterable[str]] = None,
-) -> Iterator[Tag]:
- """
- Yields the sequence of tags that are compatible with a specific version of Python.
-
- The tags consist of:
- - py*-none-<platform>
- - <interpreter>-none-any # ... if `interpreter` is provided.
- - py*-none-any
- """
- if not python_version:
- python_version = sys.version_info[:2]
- platforms = list(platforms or platform_tags())
- for version in _py_interpreter_range(python_version):
- for platform_ in platforms:
- yield Tag(version, "none", platform_)
- if interpreter:
- yield Tag(interpreter, "none", "any")
- for version in _py_interpreter_range(python_version):
- yield Tag(version, "none", "any")
-
-
-def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
- if not is_32bit:
- return arch
-
- if arch.startswith("ppc"):
- return "ppc"
-
- return "i386"
-
-
-def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
- formats = [cpu_arch]
- if cpu_arch == "x86_64":
- if version < (10, 4):
- return []
- formats.extend(["intel", "fat64", "fat32"])
-
- elif cpu_arch == "i386":
- if version < (10, 4):
- return []
- formats.extend(["intel", "fat32", "fat"])
-
- elif cpu_arch == "ppc64":
- # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
- if version > (10, 5) or version < (10, 4):
- return []
- formats.append("fat64")
-
- elif cpu_arch == "ppc":
- if version > (10, 6):
- return []
- formats.extend(["fat32", "fat"])
-
- if cpu_arch in {"arm64", "x86_64"}:
- formats.append("universal2")
-
- if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
- formats.append("universal")
-
- return formats
-
-
-def mac_platforms(
- version: Optional[MacVersion] = None, arch: Optional[str] = None
-) -> Iterator[str]:
- """
- Yields the platform tags for a macOS system.
-
- The `version` parameter is a two-item tuple specifying the macOS version to
- generate platform tags for. The `arch` parameter is the CPU architecture to
- generate platform tags for. Both parameters default to the appropriate value
- for the current system.
- """
- version_str, _, cpu_arch = platform.mac_ver()
- if version is None:
- version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
- if arch is None:
- arch = _mac_arch(cpu_arch)
-
- if (10, 0) <= version < (11, 0):
- # Prior to Mac OS 11, each yearly release of Mac OS bumped the
- # "minor" version number. The major version was always 10.
- for minor_version in range(version[1], -1, -1):
- compat_version = 10, minor_version
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=10, minor=minor_version, binary_format=binary_format
- )
-
- if version >= (11, 0):
- # Starting with Mac OS 11, each yearly release bumps the major version
- # number. The minor versions are now the midyear updates.
- for major_version in range(version[0], 10, -1):
- compat_version = major_version, 0
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=major_version, minor=0, binary_format=binary_format
- )
-
- if version >= (11, 0):
- # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
- # Arm64 support was introduced in 11.0, so no Arm binaries from previous
- # releases exist.
- #
- # However, the "universal2" binary format can have a
- # macOS version earlier than 11.0 when the x86_64 part of the binary supports
- # that version of macOS.
- if arch == "x86_64":
- for minor_version in range(16, 3, -1):
- compat_version = 10, minor_version
- binary_formats = _mac_binary_formats(compat_version, arch)
- for binary_format in binary_formats:
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=compat_version[0],
- minor=compat_version[1],
- binary_format=binary_format,
- )
- else:
- for minor_version in range(16, 3, -1):
- compat_version = 10, minor_version
- binary_format = "universal2"
- yield "macosx_{major}_{minor}_{binary_format}".format(
- major=compat_version[0],
- minor=compat_version[1],
- binary_format=binary_format,
- )
-
-
-def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
- linux = _normalize_string(sysconfig.get_platform())
- if is_32bit:
- if linux == "linux_x86_64":
- linux = "linux_i686"
- elif linux == "linux_aarch64":
- linux = "linux_armv7l"
- _, arch = linux.split("_", 1)
- yield from _manylinux.platform_tags(linux, arch)
- yield from _musllinux.platform_tags(arch)
- yield linux
-
-
-def _generic_platforms() -> Iterator[str]:
- yield _normalize_string(sysconfig.get_platform())
-
-
-def platform_tags() -> Iterator[str]:
- """
- Provides the platform tags for this installation.
- """
- if platform.system() == "Darwin":
- return mac_platforms()
- elif platform.system() == "Linux":
- return _linux_platforms()
- else:
- return _generic_platforms()
-
-
-def interpreter_name() -> str:
- """
- Returns the name of the running interpreter.
- """
- name = sys.implementation.name
- return INTERPRETER_SHORT_NAMES.get(name) or name
-
-
-def interpreter_version(*, warn: bool = False) -> str:
- """
- Returns the version of the running interpreter.
- """
- version = _get_config_var("py_version_nodot", warn=warn)
- if version:
- version = str(version)
- else:
- version = _version_nodot(sys.version_info[:2])
- return version
-
-
-def _version_nodot(version: PythonVersion) -> str:
- return "".join(map(str, version))
-
-
-def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
- """
- Returns the sequence of tag triples for the running interpreter.
-
- The order of the sequence corresponds to priority order for the
- interpreter, from most to least important.
- """
-
- interp_name = interpreter_name()
- if interp_name == "cp":
- yield from cpython_tags(warn=warn)
- else:
- yield from generic_tags()
-
- if interp_name == "pp":
- yield from compatible_tags(interpreter="pp3")
- else:
- yield from compatible_tags()
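-
-# Illustrative sketch (not part of the original module; the real output
-# depends on the interpreter and platform): on a hypothetical CPython
-# 3.11 the iteration starts with cp311-cp311-<platform> tags, then
-# cp311-abi3-<platform>, then cp311-none-<platform>, then older abi3
-# tags down to cp32, and finally the compatible_tags() fallbacks ending
-# in the pyXY-none-any tags.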
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/utils.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/utils.py
deleted file mode 100644
index bab11b8..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/utils.py
+++ /dev/null
@@ -1,136 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import re
-from typing import FrozenSet, NewType, Tuple, Union, cast
-
-from .tags import Tag, parse_tag
-from .version import InvalidVersion, Version
-
-BuildTag = Union[Tuple[()], Tuple[int, str]]
-NormalizedName = NewType("NormalizedName", str)
-
-
-class InvalidWheelFilename(ValueError):
- """
- An invalid wheel filename was found; users should refer to PEP 427.
- """
-
-
-class InvalidSdistFilename(ValueError):
- """
- An invalid sdist filename was found; users should refer to the packaging user guide.
- """
-
-
-_canonicalize_regex = re.compile(r"[-_.]+")
-# PEP 427: The build number must start with a digit.
-_build_tag_regex = re.compile(r"(\d+)(.*)")
-
-
-def canonicalize_name(name: str) -> NormalizedName:
- # This is taken from PEP 503.
- value = _canonicalize_regex.sub("-", name).lower()
- return cast(NormalizedName, value)
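-
-# Illustrative sketch (not part of the original module): runs of "-",
-# "_", and "." collapse to a single "-" and the result is lowercased:
-#
-#     >>> canonicalize_name("Foo.Bar__baz")
-#     'foo-bar-baz'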
-
-
-def canonicalize_version(version: Union[Version, str]) -> str:
- """
- This is very similar to Version.__str__, but has one subtle difference
- with the way it handles the release segment.
- """
- if isinstance(version, str):
- try:
- parsed = Version(version)
- except InvalidVersion:
- # Legacy versions cannot be normalized
- return version
- else:
- parsed = version
-
- parts = []
-
- # Epoch
- if parsed.epoch != 0:
- parts.append(f"{parsed.epoch}!")
-
- # Release segment
- # NB: This strips trailing '.0's to normalize
- parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
-
- # Pre-release
- if parsed.pre is not None:
- parts.append("".join(str(x) for x in parsed.pre))
-
- # Post-release
- if parsed.post is not None:
- parts.append(f".post{parsed.post}")
-
- # Development release
- if parsed.dev is not None:
- parts.append(f".dev{parsed.dev}")
-
- # Local version segment
- if parsed.local is not None:
- parts.append(f"+{parsed.local}")
-
- return "".join(parts)
-
-
-def parse_wheel_filename(
- filename: str,
-) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
- if not filename.endswith(".whl"):
- raise InvalidWheelFilename(
- f"Invalid wheel filename (extension must be '.whl'): {filename}"
- )
-
- filename = filename[:-4]
- dashes = filename.count("-")
- if dashes not in (4, 5):
- raise InvalidWheelFilename(
- f"Invalid wheel filename (wrong number of parts): {filename}"
- )
-
- parts = filename.split("-", dashes - 2)
- name_part = parts[0]
- # See PEP 427 for the rules on escaping the project name
- if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
- raise InvalidWheelFilename(f"Invalid project name: {filename}")
- name = canonicalize_name(name_part)
- version = Version(parts[1])
- if dashes == 5:
- build_part = parts[2]
- build_match = _build_tag_regex.match(build_part)
- if build_match is None:
- raise InvalidWheelFilename(
- f"Invalid build number: {build_part} in '{filename}'"
- )
- build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
- else:
- build = ()
- tags = parse_tag(parts[-1])
- return (name, version, build, tags)
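-
-# Illustrative sketch (not part of the original module):
-#
-#     >>> name, ver, build, tags = parse_wheel_filename("pip-21.0-py3-none-any.whl")
-#     >>> name, str(ver), build, sorted(str(t) for t in tags)
-#     ('pip', '21.0', (), ['py3-none-any'])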
-
-
-def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
- if filename.endswith(".tar.gz"):
- file_stem = filename[: -len(".tar.gz")]
- elif filename.endswith(".zip"):
- file_stem = filename[: -len(".zip")]
- else:
- raise InvalidSdistFilename(
- f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
- f" {filename}"
- )
-
- # We are requiring a PEP 440 version, which cannot contain dashes,
- # so we split on the last dash.
- name_part, sep, version_part = file_stem.rpartition("-")
- if not sep:
- raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
-
- name = canonicalize_name(name_part)
- version = Version(version_part)
- return (name, version)
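-
-# Illustrative sketch (not part of the original module); note the split
-# on the last dash:
-#
-#     >>> name, ver = parse_sdist_filename("foo-bar-1.0.tar.gz")
-#     >>> name, str(ver)
-#     ('foo-bar', '1.0')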
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/version.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/version.py
deleted file mode 100644
index de9a09a..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/packaging/version.py
+++ /dev/null
@@ -1,504 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-
-import collections
-import itertools
-import re
-import warnings
-from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
-
-from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
-
-__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
-
-InfiniteTypes = Union[InfinityType, NegativeInfinityType]
-PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
-SubLocalType = Union[InfiniteTypes, int, str]
-LocalType = Union[
- NegativeInfinityType,
- Tuple[
- Union[
- SubLocalType,
- Tuple[SubLocalType, str],
- Tuple[NegativeInfinityType, SubLocalType],
- ],
- ...,
- ],
-]
-CmpKey = Tuple[
- int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
-]
-LegacyCmpKey = Tuple[int, Tuple[str, ...]]
-VersionComparisonMethod = Callable[
- [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
-]
-
-_Version = collections.namedtuple(
- "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
-)
-
-
-def parse(version: str) -> Union["LegacyVersion", "Version"]:
- """
- Parse the given version string and return either a :class:`Version` object
- or a :class:`LegacyVersion` object, depending on whether the given
- version is a valid PEP 440 version or a legacy version.
- """
- try:
- return Version(version)
- except InvalidVersion:
- return LegacyVersion(version)
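-
-# Illustrative sketch (not part of the original module; LegacyVersion
-# additionally emits a DeprecationWarning):
-#
-#     >>> parse("1.0.post1")
-#     <Version('1.0.post1')>
-#     >>> parse("french toast")    # not PEP 440, falls back to legacy
-#     <LegacyVersion('french toast')>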
-
-
-class InvalidVersion(ValueError):
- """
- An invalid version was found; users should refer to PEP 440.
- """
-
-
-class _BaseVersion:
- _key: Union[CmpKey, LegacyCmpKey]
-
- def __hash__(self) -> int:
- return hash(self._key)
-
- # Please keep the duplicated `isinstance` check
- # in the six comparisons hereunder
- # unless you find a way to avoid adding overhead function calls.
- def __lt__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key < other._key
-
- def __le__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key <= other._key
-
- def __eq__(self, other: object) -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key == other._key
-
- def __ge__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key >= other._key
-
- def __gt__(self, other: "_BaseVersion") -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key > other._key
-
- def __ne__(self, other: object) -> bool:
- if not isinstance(other, _BaseVersion):
- return NotImplemented
-
- return self._key != other._key
-
-
-class LegacyVersion(_BaseVersion):
- def __init__(self, version: str) -> None:
- self._version = str(version)
- self._key = _legacy_cmpkey(self._version)
-
- warnings.warn(
- "Creating a LegacyVersion has been deprecated and will be "
- "removed in the next major release",
- DeprecationWarning,
- )
-
- def __str__(self) -> str:
- return self._version
-
- def __repr__(self) -> str:
- return f"<LegacyVersion('{self}')>"
-
- @property
- def public(self) -> str:
- return self._version
-
- @property
- def base_version(self) -> str:
- return self._version
-
- @property
- def epoch(self) -> int:
- return -1
-
- @property
- def release(self) -> None:
- return None
-
- @property
- def pre(self) -> None:
- return None
-
- @property
- def post(self) -> None:
- return None
-
- @property
- def dev(self) -> None:
- return None
-
- @property
- def local(self) -> None:
- return None
-
- @property
- def is_prerelease(self) -> bool:
- return False
-
- @property
- def is_postrelease(self) -> bool:
- return False
-
- @property
- def is_devrelease(self) -> bool:
- return False
-
-
-_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
-
-_legacy_version_replacement_map = {
- "pre": "c",
- "preview": "c",
- "-": "final-",
- "rc": "c",
- "dev": "@",
-}
-
-
-def _parse_version_parts(s: str) -> Iterator[str]:
- for part in _legacy_version_component_re.split(s):
- part = _legacy_version_replacement_map.get(part, part)
-
- if not part or part == ".":
- continue
-
- if part[:1] in "0123456789":
- # pad for numeric comparison
- yield part.zfill(8)
- else:
- yield "*" + part
-
- # ensure that alpha/beta/candidate are before final
- yield "*final"
-
-
-def _legacy_cmpkey(version: str) -> LegacyCmpKey:
-
-    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
-    # epoch greater than or equal to 0. This will effectively sort the
-    # LegacyVersion, which uses the de facto standard originally implemented
-    # by setuptools, before all PEP 440 versions.
- epoch = -1
-
-    # This scheme is taken from pkg_resources.parse_version in setuptools,
-    # prior to its adoption of the packaging library.
- parts: List[str] = []
- for part in _parse_version_parts(version.lower()):
- if part.startswith("*"):
- # remove "-" before a prerelease tag
- if part < "*final":
- while parts and parts[-1] == "*final-":
- parts.pop()
-
- # remove trailing zeros from each series of numeric parts
- while parts and parts[-1] == "00000000":
- parts.pop()
-
- parts.append(part)
-
- return epoch, tuple(parts)
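-
-# Illustrative sketch (not part of the vendored source): numeric parts are
-# zero-padded and tagged parts sort below "*final", so a release beats its rc.
-def _demo_legacy_cmpkey() -> None:
-    assert _legacy_cmpkey("1.0") > _legacy_cmpkey("1.0rc1")
-    assert _legacy_cmpkey("1.0")[0] == -1  # legacy epoch sorts before PEP 440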
-
-
-# Deliberately not anchored to the start and end of the string, to make it
-# easier for 3rd party code to reuse
-VERSION_PATTERN = r"""
- v?
- (?:
- (?:(?P<epoch>[0-9]+)!)? # epoch
- (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
- (?P<pre> # pre-release
- [-_\.]?
- (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
- [-_\.]?
- (?P<pre_n>[0-9]+)?
- )?
- (?P<post> # post release
- (?:-(?P<post_n1>[0-9]+))
- |
- (?:
- [-_\.]?
- (?P<post_l>post|rev|r)
- [-_\.]?
- (?P<post_n2>[0-9]+)?
- )
- )?
- (?P<dev> # dev release
- [-_\.]?
- (?P<dev_l>dev)
- [-_\.]?
- (?P<dev_n>[0-9]+)?
- )?
- )
- (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
-"""
-
-
-class Version(_BaseVersion):
-
- _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
-
- def __init__(self, version: str) -> None:
-
- # Validate the version and parse it into pieces
- match = self._regex.search(version)
- if not match:
- raise InvalidVersion(f"Invalid version: '{version}'")
-
- # Store the parsed out pieces of the version
- self._version = _Version(
- epoch=int(match.group("epoch")) if match.group("epoch") else 0,
- release=tuple(int(i) for i in match.group("release").split(".")),
- pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
- post=_parse_letter_version(
- match.group("post_l"), match.group("post_n1") or match.group("post_n2")
- ),
- dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
- local=_parse_local_version(match.group("local")),
- )
-
- # Generate a key which will be used for sorting
- self._key = _cmpkey(
- self._version.epoch,
- self._version.release,
- self._version.pre,
- self._version.post,
- self._version.dev,
- self._version.local,
- )
-
- def __repr__(self) -> str:
- return f"<Version('{self}')>"
-
- def __str__(self) -> str:
- parts = []
-
- # Epoch
- if self.epoch != 0:
- parts.append(f"{self.epoch}!")
-
- # Release segment
- parts.append(".".join(str(x) for x in self.release))
-
- # Pre-release
- if self.pre is not None:
- parts.append("".join(str(x) for x in self.pre))
-
- # Post-release
- if self.post is not None:
- parts.append(f".post{self.post}")
-
- # Development release
- if self.dev is not None:
- parts.append(f".dev{self.dev}")
-
- # Local version segment
- if self.local is not None:
- parts.append(f"+{self.local}")
-
- return "".join(parts)
-
- @property
- def epoch(self) -> int:
- _epoch: int = self._version.epoch
- return _epoch
-
- @property
- def release(self) -> Tuple[int, ...]:
- _release: Tuple[int, ...] = self._version.release
- return _release
-
- @property
- def pre(self) -> Optional[Tuple[str, int]]:
- _pre: Optional[Tuple[str, int]] = self._version.pre
- return _pre
-
- @property
- def post(self) -> Optional[int]:
- return self._version.post[1] if self._version.post else None
-
- @property
- def dev(self) -> Optional[int]:
- return self._version.dev[1] if self._version.dev else None
-
- @property
- def local(self) -> Optional[str]:
- if self._version.local:
- return ".".join(str(x) for x in self._version.local)
- else:
- return None
-
- @property
- def public(self) -> str:
- return str(self).split("+", 1)[0]
-
- @property
- def base_version(self) -> str:
- parts = []
-
- # Epoch
- if self.epoch != 0:
- parts.append(f"{self.epoch}!")
-
- # Release segment
- parts.append(".".join(str(x) for x in self.release))
-
- return "".join(parts)
-
- @property
- def is_prerelease(self) -> bool:
- return self.dev is not None or self.pre is not None
-
- @property
- def is_postrelease(self) -> bool:
- return self.post is not None
-
- @property
- def is_devrelease(self) -> bool:
- return self.dev is not None
-
- @property
- def major(self) -> int:
- return self.release[0] if len(self.release) >= 1 else 0
-
- @property
- def minor(self) -> int:
- return self.release[1] if len(self.release) >= 2 else 0
-
- @property
- def micro(self) -> int:
- return self.release[2] if len(self.release) >= 3 else 0
-
-
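-# Illustrative sketch (not part of the vendored source): the accessors above,
-# exercised on a fully loaded version string.
-def _demo_version_fields() -> None:
-    v = Version("1!2.3.4rc1.post2.dev3+ubuntu.1")
-    assert (v.epoch, v.release, v.pre) == (1, (2, 3, 4), ("rc", 1))
-    assert (v.post, v.dev, v.local) == (2, 3, "ubuntu.1")
-    assert v.public == "1!2.3.4rc1.post2.dev3"
-    assert v.base_version == "1!2.3.4"
-
-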
-def _parse_letter_version(
- letter: str, number: Union[str, bytes, SupportsInt]
-) -> Optional[Tuple[str, int]]:
-
- if letter:
- # We consider there to be an implicit 0 in a pre-release if there is
- # not a numeral associated with it.
- if number is None:
- number = 0
-
- # We normalize any letters to their lower case form
- letter = letter.lower()
-
- # We consider some words to be alternate spellings of other words and
- # in those cases we want to normalize the spellings to our preferred
- # spelling.
- if letter == "alpha":
- letter = "a"
- elif letter == "beta":
- letter = "b"
- elif letter in ["c", "pre", "preview"]:
- letter = "rc"
- elif letter in ["rev", "r"]:
- letter = "post"
-
- return letter, int(number)
- if not letter and number:
-        # We assume that if we are given a number but not a letter, then this
-        # is using the implicit post release syntax (e.g. 1.0-1)
- letter = "post"
-
- return letter, int(number)
-
- return None
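-
-# Illustrative sketch (not part of the vendored source): the spelling and
-# implicit-number normalization performed above.
-def _demo_parse_letter_version() -> None:
-    assert _parse_letter_version("alpha", None) == ("a", 0)
-    assert _parse_letter_version("rev", "4") == ("post", 4)
-    assert _parse_letter_version(None, "1") == ("post", 1)  # implicit post: 1.0-1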
-
-
-_local_version_separators = re.compile(r"[\._-]")
-
-
-def _parse_local_version(local: str) -> Optional[LocalType]:
- """
- Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
- """
- if local is not None:
- return tuple(
- part.lower() if not part.isdigit() else int(part)
- for part in _local_version_separators.split(local)
- )
- return None
-
-
-def _cmpkey(
- epoch: int,
- release: Tuple[int, ...],
- pre: Optional[Tuple[str, int]],
- post: Optional[Tuple[str, int]],
- dev: Optional[Tuple[str, int]],
- local: Optional[Tuple[SubLocalType]],
-) -> CmpKey:
-
-    # When we compare a release version, we want to compare it with all of the
-    # trailing zeros removed. So we'll reverse the list, drop all of the
-    # now-leading zeros until we come to something non-zero, then re-reverse
-    # the rest back into the correct order, make it a tuple, and use that for
-    # our sorting key.
- _release = tuple(
- reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
- )
-
- # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
- # We'll do this by abusing the pre segment, but we _only_ want to do this
- # if there is not a pre or a post segment. If we have one of those then
- # the normal sorting rules will handle this case correctly.
- if pre is None and post is None and dev is not None:
- _pre: PrePostDevType = NegativeInfinity
- # Versions without a pre-release (except as noted above) should sort after
- # those with one.
- elif pre is None:
- _pre = Infinity
- else:
- _pre = pre
-
- # Versions without a post segment should sort before those with one.
- if post is None:
- _post: PrePostDevType = NegativeInfinity
-
- else:
- _post = post
-
- # Versions without a development segment should sort after those with one.
- if dev is None:
- _dev: PrePostDevType = Infinity
-
- else:
- _dev = dev
-
- if local is None:
- # Versions without a local segment should sort before those with one.
- _local: LocalType = NegativeInfinity
- else:
- # Versions with a local segment need that segment parsed to implement
- # the sorting rules in PEP440.
- # - Alpha numeric segments sort before numeric segments
- # - Alpha numeric segments sort lexicographically
- # - Numeric segments sort numerically
- # - Shorter versions sort before longer versions when the prefixes
- # match exactly
- _local = tuple(
- (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
- )
-
- return epoch, _release, _pre, _post, _dev, _local
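-
-# Illustrative sketch (not part of the vendored source): the sentinel values
-# above yield the full PEP 440 total ordering.
-def _demo_cmpkey_ordering() -> None:
-    expected = ["1.0.dev0", "1.0a0", "1.0", "1.0+local", "1.0.post0"]
-    assert sorted(expected, key=Version) == expected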
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__init__.py
deleted file mode 100644
index 7802ff1..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__init__.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# module pyparsing.py
-#
-# Copyright (c) 2003-2022 Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-__doc__ = """
-pyparsing module - Classes and methods to define and execute parsing grammars
-=============================================================================
-
-The pyparsing module is an alternative approach to creating and
-executing simple grammars, vs. the traditional lex/yacc approach, or the
-use of regular expressions. With pyparsing, you don't need to learn
-a new syntax for defining grammars or matching expressions - the parsing
-module provides a library of classes that you use to construct the
-grammar directly in Python.
-
-Here is a program to parse "Hello, World!" (or any greeting of the form
-``"<salutation>, <addressee>!"``), built up using :class:`Word`,
-:class:`Literal`, and :class:`And` elements
-(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
-and the strings are auto-converted to :class:`Literal` expressions)::
-
- from pyparsing import Word, alphas
-
- # define grammar of a greeting
- greet = Word(alphas) + "," + Word(alphas) + "!"
-
- hello = "Hello, World!"
- print(hello, "->", greet.parse_string(hello))
-
-The program outputs the following::
-
- Hello, World! -> ['Hello', ',', 'World', '!']
-
-The Python representation of the grammar is quite readable, owing to the
-self-explanatory class names, and the use of :class:`'+'<And>`,
-:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
-
-The :class:`ParseResults` object returned from
-:class:`ParserElement.parseString` can be
-accessed as a nested list, a dictionary, or an object with named
-attributes.
-
-The pyparsing module handles some of the problems that are typically
-vexing when writing text parsers:
-
- - extra or missing whitespace (the above program will also handle
- "Hello,World!", "Hello , World !", etc.)
- - quoted strings
- - embedded comments
-
-
-Getting Started -
------------------
-Visit the classes :class:`ParserElement` and :class:`ParseResults` to
-see the base classes that most other pyparsing
-classes inherit from. Use the docstrings for examples of how to:
-
- - construct literal match expressions from :class:`Literal` and
- :class:`CaselessLiteral` classes
- - construct character word-group expressions using the :class:`Word`
- class
- - see how to create repetitive expressions using :class:`ZeroOrMore`
- and :class:`OneOrMore` classes
- - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
- and :class:`'&'<Each>` operators to combine simple expressions into
- more complex ones
- - associate names with your parsed results using
- :class:`ParserElement.setResultsName`
- - access the parsed data, which is returned as a :class:`ParseResults`
- object
- - find some helpful expression short-cuts like :class:`delimitedList`
- and :class:`oneOf`
- - find more useful common expressions in the :class:`pyparsing_common`
- namespace class
-"""
-from typing import NamedTuple
-
-
-class version_info(NamedTuple):
- major: int
- minor: int
- micro: int
- releaselevel: str
- serial: int
-
- @property
- def __version__(self):
- return (
- "{}.{}.{}".format(self.major, self.minor, self.micro)
- + (
- "{}{}{}".format(
- "r" if self.releaselevel[0] == "c" else "",
- self.releaselevel[0],
- self.serial,
- ),
- "",
- )[self.releaselevel == "final"]
- )
-
- def __str__(self):
- return "{} {} / {}".format(__name__, self.__version__, __version_time__)
-
- def __repr__(self):
- return "{}.{}({})".format(
- __name__,
- type(self).__name__,
- ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
- )
-
-
-__version_info__ = version_info(3, 0, 9, "final", 0)
-__version_time__ = "05 May 2022 07:02 UTC"
-__version__ = __version_info__.__version__
-__versionTime__ = __version_time__
-__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
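-
-# Illustrative sketch (not part of the vendored source): how the release level
-# folds into the rendered version string.
-def _demo_version_info() -> None:
-    assert version_info(3, 0, 9, "final", 0).__version__ == "3.0.9"
-    assert version_info(3, 1, 0, "candidate", 2).__version__ == "3.1.0rc2"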
-
-from .util import *
-from .exceptions import *
-from .actions import *
-from .core import __diag__, __compat__
-from .results import *
-from .core import *
-from .core import _builtin_exprs as core_builtin_exprs
-from .helpers import *
-from .helpers import _builtin_exprs as helper_builtin_exprs
-
-from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
-from .testing import pyparsing_test as testing
-from .common import (
- pyparsing_common as common,
- _builtin_exprs as common_builtin_exprs,
-)
-
-# define backward compat synonyms
-if "pyparsing_unicode" not in globals():
- pyparsing_unicode = unicode
-if "pyparsing_common" not in globals():
- pyparsing_common = common
-if "pyparsing_test" not in globals():
- pyparsing_test = testing
-
-core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
-
-
-__all__ = [
- "__version__",
- "__version_time__",
- "__author__",
- "__compat__",
- "__diag__",
- "And",
- "AtLineStart",
- "AtStringStart",
- "CaselessKeyword",
- "CaselessLiteral",
- "CharsNotIn",
- "Combine",
- "Dict",
- "Each",
- "Empty",
- "FollowedBy",
- "Forward",
- "GoToColumn",
- "Group",
- "IndentedBlock",
- "Keyword",
- "LineEnd",
- "LineStart",
- "Literal",
- "Located",
- "PrecededBy",
- "MatchFirst",
- "NoMatch",
- "NotAny",
- "OneOrMore",
- "OnlyOnce",
- "OpAssoc",
- "Opt",
- "Optional",
- "Or",
- "ParseBaseException",
- "ParseElementEnhance",
- "ParseException",
- "ParseExpression",
- "ParseFatalException",
- "ParseResults",
- "ParseSyntaxException",
- "ParserElement",
- "PositionToken",
- "QuotedString",
- "RecursiveGrammarException",
- "Regex",
- "SkipTo",
- "StringEnd",
- "StringStart",
- "Suppress",
- "Token",
- "TokenConverter",
- "White",
- "Word",
- "WordEnd",
- "WordStart",
- "ZeroOrMore",
- "Char",
- "alphanums",
- "alphas",
- "alphas8bit",
- "any_close_tag",
- "any_open_tag",
- "c_style_comment",
- "col",
- "common_html_entity",
- "counted_array",
- "cpp_style_comment",
- "dbl_quoted_string",
- "dbl_slash_comment",
- "delimited_list",
- "dict_of",
- "empty",
- "hexnums",
- "html_comment",
- "identchars",
- "identbodychars",
- "java_style_comment",
- "line",
- "line_end",
- "line_start",
- "lineno",
- "make_html_tags",
- "make_xml_tags",
- "match_only_at_col",
- "match_previous_expr",
- "match_previous_literal",
- "nested_expr",
- "null_debug_action",
- "nums",
- "one_of",
- "printables",
- "punc8bit",
- "python_style_comment",
- "quoted_string",
- "remove_quotes",
- "replace_with",
- "replace_html_entity",
- "rest_of_line",
- "sgl_quoted_string",
- "srange",
- "string_end",
- "string_start",
- "trace_parse_action",
- "unicode_string",
- "with_attribute",
- "indentedBlock",
- "original_text_for",
- "ungroup",
- "infix_notation",
- "locatedExpr",
- "with_class",
- "CloseMatch",
- "token_map",
- "pyparsing_common",
- "pyparsing_unicode",
- "unicode_set",
- "condition_as_parse_action",
- "pyparsing_test",
- # pre-PEP8 compatibility names
- "__versionTime__",
- "anyCloseTag",
- "anyOpenTag",
- "cStyleComment",
- "commonHTMLEntity",
- "countedArray",
- "cppStyleComment",
- "dblQuotedString",
- "dblSlashComment",
- "delimitedList",
- "dictOf",
- "htmlComment",
- "javaStyleComment",
- "lineEnd",
- "lineStart",
- "makeHTMLTags",
- "makeXMLTags",
- "matchOnlyAtCol",
- "matchPreviousExpr",
- "matchPreviousLiteral",
- "nestedExpr",
- "nullDebugAction",
- "oneOf",
- "opAssoc",
- "pythonStyleComment",
- "quotedString",
- "removeQuotes",
- "replaceHTMLEntity",
- "replaceWith",
- "restOfLine",
- "sglQuotedString",
- "stringEnd",
- "stringStart",
- "traceParseAction",
- "unicodeString",
- "withAttribute",
- "indentedBlock",
- "originalTextFor",
- "infixNotation",
- "locatedExpr",
- "withClass",
- "tokenMap",
- "conditionAsParseAction",
- "autoname_elements",
-]
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 762830a..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/actions.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/actions.cpython-311.pyc
deleted file mode 100644
index 4d772a0..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/actions.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/common.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/common.cpython-311.pyc
deleted file mode 100644
index 3d91e23..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/common.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/core.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/core.cpython-311.pyc
deleted file mode 100644
index 50e8401..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/core.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/exceptions.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/exceptions.cpython-311.pyc
deleted file mode 100644
index c83bae1..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/exceptions.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/helpers.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/helpers.cpython-311.pyc
deleted file mode 100644
index e05181f..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/helpers.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/results.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/results.cpython-311.pyc
deleted file mode 100644
index de724b5..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/results.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/testing.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/testing.cpython-311.pyc
deleted file mode 100644
index 916d287..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/testing.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/unicode.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/unicode.cpython-311.pyc
deleted file mode 100644
index 0934ad5..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/unicode.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/util.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/util.cpython-311.pyc
deleted file mode 100644
index d9fb0a0..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/__pycache__/util.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py
deleted file mode 100644
index f72c66e..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/actions.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# actions.py
-
-from .exceptions import ParseException
-from .util import col
-
-
-class OnlyOnce:
- """
- Wrapper for parse actions, to ensure they are only called once.
- """
-
- def __init__(self, method_call):
- from .core import _trim_arity
-
- self.callable = _trim_arity(method_call)
- self.called = False
-
- def __call__(self, s, l, t):
- if not self.called:
- results = self.callable(s, l, t)
- self.called = True
- return results
- raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
-
- def reset(self):
- """
- Allow the associated parse action to be called once more.
- """
-
- self.called = False
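-
-# Illustrative sketch (not part of the vendored source): a second call raises
-# ParseException until reset() re-arms the wrapped action.
-def _demo_only_once():
-    action = OnlyOnce(lambda t: t)
-    action("text", 0, ["tok"])
-    try:
-        action("text", 0, ["tok"])  # raises: already called
-    except ParseException:
-        action.reset()  # the action may now be called once more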
-
-
-def match_only_at_col(n):
- """
- Helper method for defining parse actions that require matching at
- a specific column in the input text.
- """
-
- def verify_col(strg, locn, toks):
- if col(locn, strg) != n:
- raise ParseException(strg, locn, "matched token not at column {}".format(n))
-
- return verify_col
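-
-# Illustrative sketch (not part of the vendored source): only match a word
-# that starts in column 1 of its line.
-def _demo_match_only_at_col():
-    from .core import Word, alphas
-
-    word = Word(alphas).set_parse_action(match_only_at_col(1))
-    return word.parse_string("abc")  # matching elsewhere raises ParseException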
-
-
-def replace_with(repl_str):
- """
- Helper method for common parse actions that simply return
- a literal value. Especially useful when used with
- :class:`transform_string<ParserElement.transform_string>` ().
-
- Example::
-
- num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
- na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
- term = na | num
-
- term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
- """
- return lambda s, l, t: [repl_str]
-
-
-def remove_quotes(s, l, t):
- """
- Helper parse action for removing quotation marks from parsed
- quoted strings.
-
- Example::
-
- # by default, quotation marks are included in parsed results
- quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
- # use remove_quotes to strip quotation marks from parsed results
- quoted_string.set_parse_action(remove_quotes)
- quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
- """
- return t[0][1:-1]
-
-
-def with_attribute(*args, **attr_dict):
- """
- Helper to create a validating parse action to be used with start
- tags created with :class:`make_xml_tags` or
- :class:`make_html_tags`. Use ``with_attribute`` to qualify
- a starting tag with a required attribute value, to avoid false
- matches on common tags such as ``<TD>`` or ``<DIV>``.
-
- Call ``with_attribute`` with a series of attribute names and
-    values. Specify the list of filter attribute names and values as:
-
-    - keyword arguments, as in ``(align="right")``,
-    - an explicit dict with the ``**`` operator, when an attribute
-      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``, or
-    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
-
-    For attribute names with a namespace prefix, you must use the second
-    form. Attribute names are matched case-insensitively.
-
- If just testing for ``class`` (with or without a namespace), use
- :class:`with_class`.
-
- To verify that the attribute exists, but without specifying a value,
- pass ``with_attribute.ANY_VALUE`` as the value.
-
- Example::
-
- html = '''
- <div>
- Some text
- <div type="grid">1 4 0 1 0</div>
- <div type="graph">1,3 2,3 1,1</div>
- <div>this has no type</div>
- </div>
-
- '''
- div,div_end = make_html_tags("div")
-
- # only match div tag having a type attribute with value "grid"
- div_grid = div().set_parse_action(with_attribute(type="grid"))
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.search_string(html):
- print(grid_header.body)
-
- # construct a match with any div tag having a type attribute, regardless of the value
- div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.search_string(html):
- print(div_header.body)
-
- prints::
-
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- if args:
- attrs = args[:]
- else:
- attrs = attr_dict.items()
- attrs = [(k, v) for k, v in attrs]
-
- def pa(s, l, tokens):
- for attrName, attrValue in attrs:
- if attrName not in tokens:
- raise ParseException(s, l, "no matching attribute " + attrName)
- if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
- raise ParseException(
- s,
- l,
- "attribute {!r} has value {!r}, must be {!r}".format(
- attrName, tokens[attrName], attrValue
- ),
- )
-
- return pa
-
-
-with_attribute.ANY_VALUE = object()
-
-
-def with_class(classname, namespace=""):
- """
- Simplified version of :class:`with_attribute` when
- matching on a div class - made difficult because ``class`` is
- a reserved word in Python.
-
- Example::
-
- html = '''
- <div>
- Some text
- <div class="grid">1 4 0 1 0</div>
- <div class="graph">1,3 2,3 1,1</div>
- <div>this &lt;div&gt; has no class</div>
- </div>
-
- '''
- div,div_end = make_html_tags("div")
- div_grid = div().set_parse_action(with_class("grid"))
-
- grid_expr = div_grid + SkipTo(div | div_end)("body")
- for grid_header in grid_expr.search_string(html):
- print(grid_header.body)
-
-        div_any_type = div().set_parse_action(with_class(with_attribute.ANY_VALUE))
- div_expr = div_any_type + SkipTo(div | div_end)("body")
- for div_header in div_expr.search_string(html):
- print(div_header.body)
-
- prints::
-
- 1 4 0 1 0
-
- 1 4 0 1 0
- 1,3 2,3 1,1
- """
- classattr = "{}:class".format(namespace) if namespace else "class"
- return with_attribute(**{classattr: classname})
-
-
-# pre-PEP8 compatibility symbols
-replaceWith = replace_with
-removeQuotes = remove_quotes
-withAttribute = with_attribute
-withClass = with_class
-matchOnlyAtCol = match_only_at_col
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/common.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/common.py
deleted file mode 100644
index 1859fb7..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/common.py
+++ /dev/null
@@ -1,424 +0,0 @@
-# common.py
-from .core import *
-from .helpers import delimited_list, any_open_tag, any_close_tag
-from datetime import datetime
-
-
-# some other useful expressions - using lower-case class name since we are really using this as a namespace
-class pyparsing_common:
- """Here are some common low-level expressions that may be useful in
- jump-starting parser development:
-
- - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
- :class:`scientific notation<sci_real>`)
- - common :class:`programming identifiers<identifier>`
- - network addresses (:class:`MAC<mac_address>`,
- :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
- - ISO8601 :class:`dates<iso8601_date>` and
- :class:`datetime<iso8601_datetime>`
- - :class:`UUID<uuid>`
- - :class:`comma-separated list<comma_separated_list>`
- - :class:`url`
-
- Parse actions:
-
- - :class:`convertToInteger`
- - :class:`convertToFloat`
- - :class:`convertToDate`
- - :class:`convertToDatetime`
- - :class:`stripHTMLTags`
- - :class:`upcaseTokens`
- - :class:`downcaseTokens`
-
- Example::
-
- pyparsing_common.number.runTests('''
- # any int or real number, returned as the appropriate type
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.fnumber.runTests('''
- # any int or real number, returned as float
- 100
- -100
- +100
- 3.14159
- 6.02e23
- 1e-12
- ''')
-
- pyparsing_common.hex_integer.runTests('''
- # hex numbers
- 100
- FF
- ''')
-
- pyparsing_common.fraction.runTests('''
- # fractions
- 1/2
- -3/4
- ''')
-
- pyparsing_common.mixed_integer.runTests('''
- # mixed fractions
- 1
- 1/2
- -3/4
- 1-3/4
- ''')
-
- import uuid
- pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
- pyparsing_common.uuid.runTests('''
- # uuid
- 12345678-1234-5678-1234-567812345678
- ''')
-
- prints::
-
- # any int or real number, returned as the appropriate type
- 100
- [100]
-
- -100
- [-100]
-
- +100
- [100]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # any int or real number, returned as float
- 100
- [100.0]
-
- -100
- [-100.0]
-
- +100
- [100.0]
-
- 3.14159
- [3.14159]
-
- 6.02e23
- [6.02e+23]
-
- 1e-12
- [1e-12]
-
- # hex numbers
- 100
- [256]
-
- FF
- [255]
-
- # fractions
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- # mixed fractions
- 1
- [1]
-
- 1/2
- [0.5]
-
- -3/4
- [-0.75]
-
- 1-3/4
- [1.75]
-
- # uuid
- 12345678-1234-5678-1234-567812345678
- [UUID('12345678-1234-5678-1234-567812345678')]
- """
-
- convert_to_integer = token_map(int)
- """
- Parse action for converting parsed integers to Python int
- """
-
- convert_to_float = token_map(float)
- """
- Parse action for converting parsed numbers to Python float
- """
-
- integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
- """expression that parses an unsigned integer, returns an int"""
-
- hex_integer = (
- Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
- )
- """expression that parses a hexadecimal integer, returns an int"""
-
- signed_integer = (
- Regex(r"[+-]?\d+")
- .set_name("signed integer")
- .set_parse_action(convert_to_integer)
- )
- """expression that parses an integer with optional leading sign, returns an int"""
-
- fraction = (
- signed_integer().set_parse_action(convert_to_float)
- + "/"
- + signed_integer().set_parse_action(convert_to_float)
- ).set_name("fraction")
- """fractional expression of an integer divided by an integer, returns a float"""
- fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
-
- mixed_integer = (
- fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
- ).set_name("fraction or mixed integer-fraction")
- """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
- mixed_integer.add_parse_action(sum)
-
- real = (
- Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
- .set_name("real number")
- .set_parse_action(convert_to_float)
- )
- """expression that parses a floating point number and returns a float"""
-
- sci_real = (
- Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
- .set_name("real number with scientific notation")
- .set_parse_action(convert_to_float)
- )
- """expression that parses a floating point number with optional
- scientific notation and returns a float"""
-
- # streamlining this expression makes the docs nicer-looking
-    number = (sci_real | real | signed_integer).set_name("number").streamline()
- """any numeric expression, returns the corresponding Python type"""
-
- fnumber = (
- Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
- .set_name("fnumber")
- .set_parse_action(convert_to_float)
- )
- """any int or real number, returned as float"""
-
- identifier = Word(identchars, identbodychars).set_name("identifier")
- """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
-
- ipv4_address = Regex(
- r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
- ).set_name("IPv4 address")
- "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
-
- _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
- _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
- "full IPv6 address"
- )
- _short_ipv6_address = (
- Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
- + "::"
- + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
- ).set_name("short IPv6 address")
- _short_ipv6_address.add_condition(
- lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
- )
- _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
- ipv6_address = Combine(
- (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
- "IPv6 address"
- )
- ).set_name("IPv6 address")
- "IPv6 address (long, short, or mixed form)"
-
- mac_address = Regex(
- r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
- ).set_name("MAC address")
- "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
-
- @staticmethod
- def convert_to_date(fmt: str = "%Y-%m-%d"):
- """
- Helper to create a parse action for converting parsed date string to Python datetime.date
-
- Params -
- - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
-
- Example::
-
- date_expr = pyparsing_common.iso8601_date.copy()
- date_expr.setParseAction(pyparsing_common.convertToDate())
- print(date_expr.parseString("1999-12-31"))
-
- prints::
-
- [datetime.date(1999, 12, 31)]
- """
-
- def cvt_fn(ss, ll, tt):
- try:
- return datetime.strptime(tt[0], fmt).date()
- except ValueError as ve:
- raise ParseException(ss, ll, str(ve))
-
- return cvt_fn
-
- @staticmethod
- def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
- """Helper to create a parse action for converting parsed
- datetime string to Python datetime.datetime
-
- Params -
- - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
-
- Example::
-
- dt_expr = pyparsing_common.iso8601_datetime.copy()
- dt_expr.setParseAction(pyparsing_common.convertToDatetime())
- print(dt_expr.parseString("1999-12-31T23:59:59.999"))
-
- prints::
-
- [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
- """
-
- def cvt_fn(s, l, t):
- try:
- return datetime.strptime(t[0], fmt)
- except ValueError as ve:
- raise ParseException(s, l, str(ve))
-
- return cvt_fn
-
- iso8601_date = Regex(
- r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
- ).set_name("ISO8601 date")
- "ISO8601 date (``yyyy-mm-dd``)"
-
- iso8601_datetime = Regex(
- r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
- ).set_name("ISO8601 datetime")
- "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
-
- uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
- "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
-
- _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
-
- @staticmethod
- def strip_html_tags(s: str, l: int, tokens: ParseResults):
- """Parse action to remove HTML tags from web page HTML source
-
- Example::
-
- # strip HTML links from normal text
- text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
- td, td_end = makeHTMLTags("TD")
- table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
- print(table_text.parseString(text).body)
-
- Prints::
-
- More info at the pyparsing wiki page
- """
- return pyparsing_common._html_stripper.transform_string(tokens[0])
-
- _commasepitem = (
- Combine(
- OneOrMore(
- ~Literal(",")
- + ~LineEnd()
- + Word(printables, exclude_chars=",")
- + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
- )
- )
- .streamline()
- .set_name("commaItem")
- )
- comma_separated_list = delimited_list(
- Opt(quoted_string.copy() | _commasepitem, default="")
- ).set_name("comma separated list")
- """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
-
- upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
- """Parse action to convert tokens to upper case."""
-
- downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
- """Parse action to convert tokens to lower case."""
-
- # fmt: off
- url = Regex(
- # https://mathiasbynens.be/demo/url-regex
- # https://gist.github.com/dperini/729294
- r"^" +
- # protocol identifier (optional)
- # short syntax // still required
- r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
- # user:pass BasicAuth (optional)
- r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
- r"(?P<host>" +
- # IP address exclusion
- # private & local networks
- r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
- r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
- r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
- # IP address dotted notation octets
- # excludes loopback network 0.0.0.0
- # excludes reserved space >= 224.0.0.0
- # excludes network & broadcast addresses
- # (first & last IP address of each class)
- r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
- r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
- r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
- r"|" +
- # host & domain names, may end with dot
- # can be replaced by a shortest alternative
- # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
- r"(?:" +
- r"(?:" +
- r"[a-z0-9\u00a1-\uffff]" +
- r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
- r")?" +
- r"[a-z0-9\u00a1-\uffff]\." +
- r")+" +
- # TLD identifier name, may end with dot
- r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
- r")" +
- # port number (optional)
- r"(:(?P<port>\d{2,5}))?" +
- # resource path (optional)
- r"(?P<path>\/[^?# ]*)?" +
- # query string (optional)
- r"(\?(?P<query>[^#]*))?" +
- # fragment (optional)
- r"(#(?P<fragment>\S*))?" +
- r"$"
- ).set_name("url")
- # fmt: on
-
- # pre-PEP8 compatibility names
- convertToInteger = convert_to_integer
- convertToFloat = convert_to_float
- convertToDate = convert_to_date
- convertToDatetime = convert_to_datetime
- stripHTMLTags = strip_html_tags
- upcaseTokens = upcase_tokens
- downcaseTokens = downcase_tokens
-
-
-_builtin_exprs = [
- v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
-]
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/core.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/core.py
deleted file mode 100644
index 9acba3f..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/core.py
+++ /dev/null
@@ -1,5814 +0,0 @@
-#
-# core.py
-#
-import os
-import typing
-from typing import (
- NamedTuple,
- Union,
- Callable,
- Any,
- Generator,
- Tuple,
- List,
- TextIO,
- Set,
- Sequence,
-)
-from abc import ABC, abstractmethod
-from enum import Enum
-import string
-import copy
-import warnings
-import re
-import sys
-from collections.abc import Iterable
-import traceback
-import types
-from operator import itemgetter
-from functools import wraps
-from threading import RLock
-from pathlib import Path
-
-from .util import (
- _FifoCache,
- _UnboundedCache,
- __config_flags,
- _collapse_string_to_ranges,
- _escape_regex_range_chars,
- _bslash,
- _flatten,
- LRUMemo as _LRUMemo,
- UnboundedMemo as _UnboundedMemo,
-)
-from .exceptions import *
-from .actions import *
-from .results import ParseResults, _ParseResultsWithOffset
-from .unicode import pyparsing_unicode
-
-_MAX_INT = sys.maxsize
-str_type: Tuple[type, ...] = (str, bytes)
-
-#
-# Copyright (c) 2003-2022 Paul T. McGuire
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-
-
-if sys.version_info >= (3, 8):
- from functools import cached_property
-else:
-
- class cached_property:
- def __init__(self, func):
- self._func = func
-
- def __get__(self, instance, owner=None):
- ret = instance.__dict__[self._func.__name__] = self._func(instance)
- return ret
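-
-# Illustrative sketch (not part of the vendored source): the backport writes
-# the computed value into the instance dict, so later reads skip the descriptor.
-def _demo_cached_property():
-    class Box:
-        @cached_property
-        def payload(self):
-            return object()
-
-    box = Box()
-    assert box.payload is box.payload  # computed once, then read from __dict__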
-
-
-class __compat__(__config_flags):
- """
- A cross-version compatibility configuration for pyparsing features that will be
- released in a future version. By setting values in this configuration to True,
- those features can be enabled in prior versions for compatibility development
- and testing.
-
- - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping
- of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
- maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1
- behavior
- """
-
- _type_desc = "compatibility"
-
- collect_all_And_tokens = True
-
- _all_names = [__ for __ in locals() if not __.startswith("_")]
- _fixed_names = """
- collect_all_And_tokens
- """.split()
-
-
-class __diag__(__config_flags):
- _type_desc = "diagnostic"
-
- warn_multiple_tokens_in_named_alternation = False
- warn_ungrouped_named_tokens_in_collection = False
- warn_name_set_on_empty_Forward = False
- warn_on_parse_using_empty_Forward = False
- warn_on_assignment_to_Forward = False
- warn_on_multiple_string_args_to_oneof = False
- warn_on_match_first_with_lshift_operator = False
- enable_debug_on_named_expressions = False
-
- _all_names = [__ for __ in locals() if not __.startswith("_")]
- _warning_names = [name for name in _all_names if name.startswith("warn")]
- _debug_names = [name for name in _all_names if name.startswith("enable_debug")]
-
- @classmethod
- def enable_all_warnings(cls) -> None:
- for name in cls._warning_names:
- cls.enable(name)
-
-
-class Diagnostics(Enum):
- """
- Diagnostic configuration (all default to disabled)
- - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
- name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
- - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
- name is defined on a containing expression with ungrouped subexpressions that also
- have results names
- - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
- with a results name, but has no contents defined
- - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
- defined in a grammar but has never had an expression attached to it
- - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
- but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
- - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
- incorrectly called with multiple str arguments
- - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
- calls to :class:`ParserElement.set_name`
-
- Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
- All warnings can be enabled by calling :class:`enable_all_warnings`.
- """
-
- warn_multiple_tokens_in_named_alternation = 0
- warn_ungrouped_named_tokens_in_collection = 1
- warn_name_set_on_empty_Forward = 2
- warn_on_parse_using_empty_Forward = 3
- warn_on_assignment_to_Forward = 4
- warn_on_multiple_string_args_to_oneof = 5
- warn_on_match_first_with_lshift_operator = 6
- enable_debug_on_named_expressions = 7
-
-
-def enable_diag(diag_enum: Diagnostics) -> None:
- """
- Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
- """
- __diag__.enable(diag_enum.name)
-
-
-def disable_diag(diag_enum: Diagnostics) -> None:
- """
- Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
- """
- __diag__.disable(diag_enum.name)
-
-
-def enable_all_warnings() -> None:
- """
- Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
- """
- __diag__.enable_all_warnings()
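-
-# Illustrative sketch (not part of the vendored source): toggling a single
-# diagnostic flag through the enum-based API.
-def _demo_diag_toggle() -> None:
-    enable_diag(Diagnostics.warn_on_assignment_to_Forward)
-    disable_diag(Diagnostics.warn_on_assignment_to_Forward)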
-
-
-# hide abstract class
-del __config_flags
-
-
-def _should_enable_warnings(
- cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
-) -> bool:
- enable = bool(warn_env_var)
- for warn_opt in cmd_line_warn_options:
- w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
- ":"
- )[:5]
- if not w_action.lower().startswith("i") and (
- not (w_message or w_category or w_module) or w_module == "pyparsing"
- ):
- enable = True
- elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
- enable = False
- return enable
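-
-# Illustrative sketch (not part of the vendored source): the env var enables
-# warnings unless an explicit "ignore" -W option overrides it.
-def _demo_should_enable_warnings() -> None:
-    assert _should_enable_warnings([], "1") is True
-    assert _should_enable_warnings(["error"], None) is True
-    assert _should_enable_warnings(["ignore"], "1") is False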
-
-
-if _should_enable_warnings(
- sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
-):
- enable_all_warnings()
-
-
-# build list of single-arg builtins that can be used as parse actions
-_single_arg_builtins = {
- sum,
- len,
- sorted,
- reversed,
- list,
- tuple,
- set,
- any,
- all,
- min,
- max,
-}
-
-_generatorType = types.GeneratorType
-ParseAction = Union[
- Callable[[], Any],
- Callable[[ParseResults], Any],
- Callable[[int, ParseResults], Any],
- Callable[[str, int, ParseResults], Any],
-]
-ParseCondition = Union[
- Callable[[], bool],
- Callable[[ParseResults], bool],
- Callable[[int, ParseResults], bool],
- Callable[[str, int, ParseResults], bool],
-]
-ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
-DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
-DebugSuccessAction = Callable[
- [str, int, int, "ParserElement", ParseResults, bool], None
-]
-DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
-
-
-alphas = string.ascii_uppercase + string.ascii_lowercase
-identchars = pyparsing_unicode.Latin1.identchars
-identbodychars = pyparsing_unicode.Latin1.identbodychars
-nums = "0123456789"
-hexnums = nums + "ABCDEFabcdef"
-alphanums = alphas + nums
-printables = "".join([c for c in string.printable if c not in string.whitespace])
-
-_trim_arity_call_line: typing.Optional[traceback.FrameSummary] = None
-
-
-def _trim_arity(func, max_limit=3):
- """decorator to trim function calls to match the arity of the target"""
- global _trim_arity_call_line
-
- if func in _single_arg_builtins:
- return lambda s, l, t: func(t)
-
- limit = 0
- found_arity = False
-
- def extract_tb(tb, limit=0):
- frames = traceback.extract_tb(tb, limit=limit)
- frame_summary = frames[-1]
- return [frame_summary[:2]]
-
- # synthesize what would be returned by traceback.extract_stack at the call to
- # user's parse action 'func', so that we don't incur call penalty at parse time
-
- # fmt: off
- LINE_DIFF = 7
- # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
- # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
- _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1])
- pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF)
-
- def wrapper(*args):
- nonlocal found_arity, limit
- while 1:
- try:
- ret = func(*args[limit:])
- found_arity = True
- return ret
- except TypeError as te:
- # re-raise TypeErrors if they did not come from our arity testing
- if found_arity:
- raise
- else:
- tb = te.__traceback__
- trim_arity_type_error = (
- extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth
- )
- del tb
-
- if trim_arity_type_error:
- if limit < max_limit:
- limit += 1
- continue
-
- raise
- # fmt: on
-
- # copy func name to wrapper for sensible debug output
- # (can't use functools.wraps, since that messes with function signature)
- func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
- wrapper.__name__ = func_name
- wrapper.__doc__ = func.__doc__
-
- return wrapper
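-
-# Illustrative sketch (not part of the vendored source): leading arguments are
-# trimmed away until the wrapped callable's arity is satisfied.
-def _demo_trim_arity():
-    wrapped = _trim_arity(lambda toks: toks)
-    assert wrapped("instring", 0, ["tok"]) == ["tok"]  # s and loc were trimmed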
-
-
-def condition_as_parse_action(
- fn: ParseCondition, message: str = None, fatal: bool = False
-) -> ParseAction:
- """
- Function to convert a simple predicate function that returns ``True`` or ``False``
-    into a parse action. Can be used in places where a parse action is required
- and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition
- to an operator level in :class:`infix_notation`).
-
- Optional keyword arguments:
-
- - ``message`` - define a custom message to be used in the raised exception
- - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately;
- otherwise will raise :class:`ParseException`
-
- """
- msg = message if message is not None else "failed user-defined condition"
- exc_type = ParseFatalException if fatal else ParseException
- fn = _trim_arity(fn)
-
- @wraps(fn)
- def pa(s, l, t):
- if not bool(fn(s, l, t)):
- raise exc_type(s, l, msg)
-
- return pa
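-
-# Illustrative sketch (not part of the vendored source): a predicate promoted
-# to a parse action raises on failure instead of returning False.
-def _demo_condition_as_parse_action():
-    alpha_only = condition_as_parse_action(
-        lambda t: t[0].isalpha(), message="alphabetic tokens only"
-    )
-    alpha_only("abc", 0, ["abc"])  # passes silently; "abc1" would raise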
-
-
-def _default_start_debug_action(
- instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False
-):
- cache_hit_str = "*" if cache_hit else ""
- print(
- (
- "{}Match {} at loc {}({},{})\n {}\n {}^".format(
- cache_hit_str,
- expr,
- loc,
- lineno(loc, instring),
- col(loc, instring),
- line(loc, instring),
- " " * (col(loc, instring) - 1),
- )
- )
- )
-
-
-def _default_success_debug_action(
- instring: str,
- startloc: int,
- endloc: int,
- expr: "ParserElement",
- toks: ParseResults,
- cache_hit: bool = False,
-):
- cache_hit_str = "*" if cache_hit else ""
- print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list()))
-
-
-def _default_exception_debug_action(
- instring: str,
- loc: int,
- expr: "ParserElement",
- exc: Exception,
- cache_hit: bool = False,
-):
- cache_hit_str = "*" if cache_hit else ""
- print(
- "{}Match {} failed, {} raised: {}".format(
- cache_hit_str, expr, type(exc).__name__, exc
- )
- )
-
-
-def null_debug_action(*args):
- """'Do-nothing' debug action, to suppress debugging output during parsing."""
-
-
-class ParserElement(ABC):
- """Abstract base level parser element class."""
-
- DEFAULT_WHITE_CHARS: str = " \n\t\r"
- verbose_stacktrace: bool = False
- _literalStringClass: typing.Optional[type] = None
-
- @staticmethod
- def set_default_whitespace_chars(chars: str) -> None:
- r"""
- Overrides the default whitespace chars
-
- Example::
-
- # default whitespace chars are space, <TAB> and newline
- Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
-
- # change to just treat newline as significant
- ParserElement.set_default_whitespace_chars(" \t")
- Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def']
- """
- ParserElement.DEFAULT_WHITE_CHARS = chars
-
-        # update whitespace for all parse expressions defined in this module
- for expr in _builtin_exprs:
- if expr.copyDefaultWhiteChars:
- expr.whiteChars = set(chars)
-
- @staticmethod
- def inline_literals_using(cls: type) -> None:
- """
- Set class to be used for inclusion of string literals into a parser.
-
- Example::
-
- # default literal class used is Literal
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31']
-
-
- # change to Suppress
- ParserElement.inline_literals_using(Suppress)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- date_str.parse_string("1999/12/31") # -> ['1999', '12', '31']
- """
- ParserElement._literalStringClass = cls
-
- class DebugActions(NamedTuple):
- debug_try: typing.Optional[DebugStartAction]
- debug_match: typing.Optional[DebugSuccessAction]
- debug_fail: typing.Optional[DebugExceptionAction]
-
- def __init__(self, savelist: bool = False):
- self.parseAction: List[ParseAction] = list()
- self.failAction: typing.Optional[ParseFailAction] = None
- self.customName = None
- self._defaultName = None
- self.resultsName = None
- self.saveAsList = savelist
- self.skipWhitespace = True
- self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
- self.copyDefaultWhiteChars = True
- # used when checking for left-recursion
- self.mayReturnEmpty = False
- self.keepTabs = False
- self.ignoreExprs: List["ParserElement"] = list()
- self.debug = False
- self.streamlined = False
- # optimize exception handling for subclasses that don't advance parse index
- self.mayIndexError = True
- self.errmsg = ""
- # mark results names as modal (report only last) or cumulative (list all)
- self.modalResults = True
- # custom debug actions
- self.debugActions = self.DebugActions(None, None, None)
- # avoid redundant calls to preParse
- self.callPreparse = True
- self.callDuringTry = False
- self.suppress_warnings_: List[Diagnostics] = []
-
- def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement":
- """
- Suppress warnings emitted for a particular diagnostic on this expression.
-
- Example::
-
- base = pp.Forward()
- base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)
-
- # statement would normally raise a warning, but is now suppressed
- print(base.parseString("x"))
-
- """
- self.suppress_warnings_.append(warning_type)
- return self
-
- def copy(self) -> "ParserElement":
- """
- Make a copy of this :class:`ParserElement`. Useful for defining
- different parse actions for the same parsing pattern, using copies of
- the original parse element.
-
- Example::
-
- integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
- integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
- integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
-
- print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M"))
-
- prints::
-
- [5120, 100, 655360, 268435456]
-
- Equivalent form of ``expr.copy()`` is just ``expr()``::
-
- integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
- """
- cpy = copy.copy(self)
- cpy.parseAction = self.parseAction[:]
- cpy.ignoreExprs = self.ignoreExprs[:]
- if self.copyDefaultWhiteChars:
- cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
- return cpy
-
- def set_results_name(
- self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
- ) -> "ParserElement":
- """
- Define name for referencing matching tokens as a nested attribute
- of the returned parse results.
-
- Normally, results names are assigned as you would assign keys in a dict:
- any existing value is overwritten by later values. If it is necessary to
- keep all values captured for a particular results name, call ``set_results_name``
- with ``list_all_matches`` = True.
-
- NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
- this is so that the client can define a basic element, such as an
- integer, and reference it in multiple places with different names.
-
- You can also set results names using the abbreviated syntax,
- ``expr("name")`` in place of ``expr.set_results_name("name")``
- - see :class:`__call__`. If ``list_all_matches`` is required, use
- ``expr("name*")``.
-
- Example::
-
- date_str = (integer.set_results_name("year") + '/'
- + integer.set_results_name("month") + '/'
- + integer.set_results_name("day"))
-
- # equivalent form:
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
- """
- listAllMatches = listAllMatches or list_all_matches
- return self._setResultsName(name, listAllMatches)
-
- def _setResultsName(self, name, listAllMatches=False):
- if name is None:
- return self
- newself = self.copy()
- if name.endswith("*"):
- name = name[:-1]
- listAllMatches = True
- newself.resultsName = name
- newself.modalResults = not listAllMatches
- return newself
-
- def set_break(self, break_flag: bool = True) -> "ParserElement":
- """
- Method to invoke the Python pdb debugger when this element is
- about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
- disable.
- """
- if break_flag:
- _parseMethod = self._parse
-
- def breaker(instring, loc, doActions=True, callPreParse=True):
- import pdb
-
- # this call to pdb.set_trace() is intentional, not a checkin error
- pdb.set_trace()
- return _parseMethod(instring, loc, doActions, callPreParse)
-
- breaker._originalParseMethod = _parseMethod
- self._parse = breaker
- else:
- if hasattr(self._parse, "_originalParseMethod"):
- self._parse = self._parse._originalParseMethod
- return self
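-
-    # A minimal usage sketch of set_break (illustrative only, assuming Word and
-    # nums from this module):
-    #
-    #   integer = Word(nums).set_break()  # pdb.set_trace() fires before each match attempt
-    #   integer.parse_string("123")       # execution stops in the debugger here
-    #
-    # Calling set_break(False) restores the parse method saved in _originalParseMethod.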
-
- def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
- """
- Define one or more actions to perform when successfully matching parse element definition.
-
- Parse actions can be called to perform data conversions, do extra validation,
- update external data structures, or enhance or replace the parsed tokens.
-        Each parse action ``fn`` is a callable with 0-3 arguments, called as
- ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
-
- - s = the original string being parsed (see note below)
- - loc = the location of the matching substring
- - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
-
- The parsed tokens are passed to the parse action as ParseResults. They can be
- modified in place using list-style append, extend, and pop operations to update
- the parsed list elements; and with dictionary-style item set and del operations
- to add, update, or remove any named results. If the tokens are modified in place,
- it is not necessary to return them with a return statement.
-
- Parse actions can also completely replace the given tokens, with another ``ParseResults``
- object, or with some entirely different object (common for parse actions that perform data
- conversions). A convenient way to build a new parse result is to define the values
- using a dict, and then create the return value using :class:`ParseResults.from_dict`.
-
- If None is passed as the ``fn`` parse action, all previously added parse actions for this
- expression are cleared.
-
- Optional keyword arguments:
-
- - call_during_try = (default= ``False``) indicate if parse action should be run during
- lookaheads and alternate testing. For parse actions that have side effects, it is
- important to only call the parse action once it is determined that it is being
- called as part of a successful parse. For parse actions that perform additional
-          validation, ``call_during_try`` should be passed as True, so that the validation
- code is included in the preliminary "try" parses.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See :class:`parse_string` for more
- information on parsing strings containing ``<TAB>`` s, and suggested
- methods to maintain a consistent view of the parsed string, the parse
- location, and line and column positions within the parsed string.
-
- Example::
-
- # parse dates in the form YYYY/MM/DD
-
- # use parse action to convert toks from str to int at parse time
- def convert_to_int(toks):
- return int(toks[0])
-
- # use a parse action to verify that the date is a valid date
- def is_valid_date(instring, loc, toks):
- from datetime import date
- year, month, day = toks[::2]
- try:
- date(year, month, day)
- except ValueError:
- raise ParseException(instring, loc, "invalid date given")
-
- integer = Word(nums)
- date_str = integer + '/' + integer + '/' + integer
-
- # add parse actions
- integer.set_parse_action(convert_to_int)
- date_str.set_parse_action(is_valid_date)
-
- # note that integer fields are now ints, not strings
- date_str.run_tests('''
- # successful parse - note that integer fields were converted to ints
- 1999/12/31
-
- # fail - invalid date
- 1999/13/31
- ''')
- """
- if list(fns) == [None]:
- self.parseAction = []
- else:
- if not all(callable(fn) for fn in fns):
- raise TypeError("parse actions must be callable")
- self.parseAction = [_trim_arity(fn) for fn in fns]
- self.callDuringTry = kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
- return self
-
- def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
- """
- Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
-
- See examples in :class:`copy`.
- """
- self.parseAction += [_trim_arity(fn) for fn in fns]
- self.callDuringTry = self.callDuringTry or kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
- return self
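-
-    # A minimal usage sketch of add_parse_action (illustrative only): actions
-    # added here run after any actions already attached, in order:
-    #
-    #   integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
-    #   integer.add_parse_action(lambda toks: toks[0] * 2)  # runs after the int conversion
-    #   integer.parse_string("21")  # -> [42]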
-
- def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
- """Add a boolean predicate function to expression's list of parse actions. See
- :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
- functions passed to ``add_condition`` need to return boolean success/fail of the condition.
-
- Optional keyword arguments:
-
- - message = define a custom message to be used in the raised exception
- - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
- ParseException
- - call_during_try = boolean to indicate if this method should be called during internal tryParse calls,
- default=False
-
- Example::
-
- integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
- year_int = integer.copy()
- year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
- date_str = year_int + '/' + integer + '/' + integer
-
- result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0),
- (line:1, col:1)
- """
- for fn in fns:
- self.parseAction.append(
- condition_as_parse_action(
- fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
- )
- )
-
- self.callDuringTry = self.callDuringTry or kwargs.get(
- "call_during_try", kwargs.get("callDuringTry", False)
- )
- return self
-
- def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
- """
- Define action to perform if parsing fails at this expression.
-        Fail action ``fn`` is a callable that takes the arguments
- ``fn(s, loc, expr, err)`` where:
-
- - s = string being parsed
- - loc = location where expression match was attempted and failed
- - expr = the parse expression that failed
- - err = the exception thrown
-
- The function returns no value. It may throw :class:`ParseFatalException`
- if it is desired to stop parsing immediately."""
- self.failAction = fn
- return self
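-
-    # A minimal usage sketch of set_fail_action (illustrative only): report
-    # where a match was attempted, then let the exception propagate:
-    #
-    #   def report_failure(s, loc, expr, err):
-    #       print("failed to match {} at loc {}".format(expr, loc))
-    #
-    #   lbrace = Literal("{").set_fail_action(report_failure)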
-
- def _skipIgnorables(self, instring, loc):
- exprsFound = True
- while exprsFound:
- exprsFound = False
- for e in self.ignoreExprs:
- try:
- while 1:
- loc, dummy = e._parse(instring, loc)
- exprsFound = True
- except ParseException:
- pass
- return loc
-
- def preParse(self, instring, loc):
- if self.ignoreExprs:
- loc = self._skipIgnorables(instring, loc)
-
- if self.skipWhitespace:
- instrlen = len(instring)
- white_chars = self.whiteChars
- while loc < instrlen and instring[loc] in white_chars:
- loc += 1
-
- return loc
-
- def parseImpl(self, instring, loc, doActions=True):
- return loc, []
-
- def postParse(self, instring, loc, tokenlist):
- return tokenlist
-
- # @profile
- def _parseNoCache(
- self, instring, loc, doActions=True, callPreParse=True
- ) -> Tuple[int, ParseResults]:
- TRY, MATCH, FAIL = 0, 1, 2
-        debugging = self.debug
- len_instring = len(instring)
-
- if debugging or self.failAction:
- # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
- try:
- if callPreParse and self.callPreparse:
- pre_loc = self.preParse(instring, loc)
- else:
- pre_loc = loc
- tokens_start = pre_loc
- if self.debugActions.debug_try:
- self.debugActions.debug_try(instring, tokens_start, self, False)
- if self.mayIndexError or pre_loc >= len_instring:
- try:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
- except IndexError:
- raise ParseException(instring, len_instring, self.errmsg, self)
- else:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
- except Exception as err:
- # print("Exception raised:", err)
- if self.debugActions.debug_fail:
- self.debugActions.debug_fail(
- instring, tokens_start, self, err, False
- )
- if self.failAction:
- self.failAction(instring, tokens_start, self, err)
- raise
- else:
- if callPreParse and self.callPreparse:
- pre_loc = self.preParse(instring, loc)
- else:
- pre_loc = loc
- tokens_start = pre_loc
- if self.mayIndexError or pre_loc >= len_instring:
- try:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
- except IndexError:
- raise ParseException(instring, len_instring, self.errmsg, self)
- else:
- loc, tokens = self.parseImpl(instring, pre_loc, doActions)
-
- tokens = self.postParse(instring, loc, tokens)
-
- ret_tokens = ParseResults(
- tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
- )
- if self.parseAction and (doActions or self.callDuringTry):
- if debugging:
- try:
- for fn in self.parseAction:
- try:
- tokens = fn(instring, tokens_start, ret_tokens)
- except IndexError as parse_action_exc:
- exc = ParseException("exception raised in parse action")
- raise exc from parse_action_exc
-
- if tokens is not None and tokens is not ret_tokens:
- ret_tokens = ParseResults(
- tokens,
- self.resultsName,
- asList=self.saveAsList
- and isinstance(tokens, (ParseResults, list)),
- modal=self.modalResults,
- )
- except Exception as err:
-                    # print("Exception raised in user parse action:", err)
- if self.debugActions.debug_fail:
- self.debugActions.debug_fail(
- instring, tokens_start, self, err, False
- )
- raise
- else:
- for fn in self.parseAction:
- try:
- tokens = fn(instring, tokens_start, ret_tokens)
- except IndexError as parse_action_exc:
- exc = ParseException("exception raised in parse action")
- raise exc from parse_action_exc
-
- if tokens is not None and tokens is not ret_tokens:
- ret_tokens = ParseResults(
- tokens,
- self.resultsName,
- asList=self.saveAsList
- and isinstance(tokens, (ParseResults, list)),
- modal=self.modalResults,
- )
- if debugging:
- # print("Matched", self, "->", ret_tokens.as_list())
- if self.debugActions.debug_match:
- self.debugActions.debug_match(
- instring, tokens_start, loc, self, ret_tokens, False
- )
-
- return loc, ret_tokens
-
- def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
- try:
- return self._parse(instring, loc, doActions=False)[0]
- except ParseFatalException:
- if raise_fatal:
- raise
- raise ParseException(instring, loc, self.errmsg, self)
-
- def can_parse_next(self, instring: str, loc: int) -> bool:
- try:
- self.try_parse(instring, loc)
- except (ParseException, IndexError):
- return False
- else:
- return True
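-
-    # A minimal usage sketch of can_parse_next (illustrative only): a cheap
-    # lookahead test at an arbitrary location:
-    #
-    #   integer = Word(nums)
-    #   integer.can_parse_next("abc123", 3)  # -> True, digits start at loc 3
-    #   integer.can_parse_next("abc123", 0)  # -> False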
-
- # cache for left-recursion in Forward references
- recursion_lock = RLock()
- recursion_memos: typing.Dict[
- Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
- ] = {}
-
- # argument cache for optimizing repeated calls when backtracking through recursive expressions
-    # this is replaced later by enable_packrat(); initialized here so that reset_cache() doesn't fail
-    packrat_cache = {}
- packrat_cache_lock = RLock()
- packrat_cache_stats = [0, 0]
-
- # this method gets repeatedly called during backtracking with the same arguments -
- # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
- def _parseCache(
- self, instring, loc, doActions=True, callPreParse=True
- ) -> Tuple[int, ParseResults]:
- HIT, MISS = 0, 1
- TRY, MATCH, FAIL = 0, 1, 2
- lookup = (self, instring, loc, callPreParse, doActions)
- with ParserElement.packrat_cache_lock:
- cache = ParserElement.packrat_cache
- value = cache.get(lookup)
- if value is cache.not_in_cache:
- ParserElement.packrat_cache_stats[MISS] += 1
- try:
- value = self._parseNoCache(instring, loc, doActions, callPreParse)
- except ParseBaseException as pe:
- # cache a copy of the exception, without the traceback
- cache.set(lookup, pe.__class__(*pe.args))
- raise
- else:
- cache.set(lookup, (value[0], value[1].copy(), loc))
- return value
- else:
- ParserElement.packrat_cache_stats[HIT] += 1
- if self.debug and self.debugActions.debug_try:
- try:
- self.debugActions.debug_try(instring, loc, self, cache_hit=True)
- except TypeError:
- pass
- if isinstance(value, Exception):
- if self.debug and self.debugActions.debug_fail:
- try:
- self.debugActions.debug_fail(
- instring, loc, self, value, cache_hit=True
- )
- except TypeError:
- pass
- raise value
-
- loc_, result, endloc = value[0], value[1].copy(), value[2]
- if self.debug and self.debugActions.debug_match:
- try:
- self.debugActions.debug_match(
- instring, loc_, endloc, self, result, cache_hit=True
- )
- except TypeError:
- pass
-
- return loc_, result
-
- _parse = _parseNoCache
-
- @staticmethod
- def reset_cache() -> None:
- ParserElement.packrat_cache.clear()
- ParserElement.packrat_cache_stats[:] = [0] * len(
- ParserElement.packrat_cache_stats
- )
- ParserElement.recursion_memos.clear()
-
- _packratEnabled = False
- _left_recursion_enabled = False
-
- @staticmethod
- def disable_memoization() -> None:
- """
-        Disables active Packrat or Left Recursion parsing and their memoization.
-
-        This method also works if neither Packrat nor Left Recursion is enabled,
-        which makes it safe to call before activating Packrat or Left Recursion
-        to clear any previous settings.
- """
- ParserElement.reset_cache()
- ParserElement._left_recursion_enabled = False
- ParserElement._packratEnabled = False
- ParserElement._parse = ParserElement._parseNoCache
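-
-    # A minimal usage sketch (illustrative only): clear any prior memoization
-    # strategy before enabling a new one:
-    #
-    #   ParserElement.disable_memoization()  # safe even if nothing was enabled
-    #   ParserElement.enable_packrat(256)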
-
- @staticmethod
- def enable_left_recursion(
-        cache_size_limit: typing.Optional[int] = None, *, force: bool = False
- ) -> None:
- """
- Enables "bounded recursion" parsing, which allows for both direct and indirect
- left-recursion. During parsing, left-recursive :class:`Forward` elements are
- repeatedly matched with a fixed recursion depth that is gradually increased
- until finding the longest match.
-
- Example::
-
- import pyparsing as pp
- pp.ParserElement.enable_left_recursion()
-
- E = pp.Forward("E")
- num = pp.Word(pp.nums)
- # match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
- E <<= E + '+' - num | num
-
- print(E.parse_string("1+2+3"))
-
- Recursion search naturally memoizes matches of ``Forward`` elements and may
- thus skip reevaluation of parse actions during backtracking. This may break
- programs with parse actions which rely on strict ordering of side-effects.
-
- Parameters:
-
- - cache_size_limit - (default=``None``) - memoize at most this many
- ``Forward`` elements during matching; if ``None`` (the default),
- memoize all ``Forward`` elements.
-
-        Bounded Recursion parsing works similarly, but not identically, to Packrat
-        parsing, so the two cannot be used together. Use ``force=True`` to disable
-        any previous, conflicting settings.
- """
- if force:
- ParserElement.disable_memoization()
- elif ParserElement._packratEnabled:
- raise RuntimeError("Packrat and Bounded Recursion are not compatible")
- if cache_size_limit is None:
- ParserElement.recursion_memos = _UnboundedMemo()
- elif cache_size_limit > 0:
- ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)
- else:
- raise NotImplementedError("Memo size of %s" % cache_size_limit)
- ParserElement._left_recursion_enabled = True
-
- @staticmethod
- def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None:
- """
- Enables "packrat" parsing, which adds memoizing to the parsing logic.
- Repeated parse attempts at the same string location (which happens
- often in many complex grammars) can immediately return a cached value,
-        instead of re-executing parsing/validating code. Memoizing is done for
-        both valid results and parsing exceptions.
-
- Parameters:
-
- - cache_size_limit - (default= ``128``) - if an integer value is provided
- will limit the size of the packrat cache; if None is passed, then
- the cache size will be unbounded; if 0 is passed, the cache will
- be effectively disabled.
-
- This speedup may break existing programs that use parse actions that
- have side-effects. For this reason, packrat parsing is disabled when
- you first import pyparsing. To activate the packrat feature, your
- program must call the class method :class:`ParserElement.enable_packrat`.
- For best results, call ``enable_packrat()`` immediately after
- importing pyparsing.
-
- Example::
-
- import pyparsing
- pyparsing.ParserElement.enable_packrat()
-
-        Packrat parsing works similarly, but not identically, to Bounded Recursion
-        parsing, so the two cannot be used together. Use ``force=True`` to disable
-        any previous, conflicting settings.
- """
- if force:
- ParserElement.disable_memoization()
- elif ParserElement._left_recursion_enabled:
- raise RuntimeError("Packrat and Bounded Recursion are not compatible")
- if not ParserElement._packratEnabled:
- ParserElement._packratEnabled = True
- if cache_size_limit is None:
- ParserElement.packrat_cache = _UnboundedCache()
- else:
- ParserElement.packrat_cache = _FifoCache(cache_size_limit)
- ParserElement._parse = ParserElement._parseCache
-
- def parse_string(
- self, instring: str, parse_all: bool = False, *, parseAll: bool = False
- ) -> ParseResults:
- """
- Parse a string with respect to the parser definition. This function is intended as the primary interface to the
- client code.
-
- :param instring: The input string to be parsed.
- :param parse_all: If set, the entire input string must match the grammar.
- :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
- :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
- :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
- an object with attributes if the given parser includes results names.
-
-        If the input string is required to match the entire grammar, the ``parse_all`` flag must be set to ``True``. This
- is also equivalent to ending the grammar with :class:`StringEnd`().
-
- To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
- converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string
- contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
- being parsed, one can ensure a consistent view of the input string by doing one of the following:
-
- - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
-        - defining your parse action using the full ``(s,loc,toks)`` signature, and referencing the input string using
-          the parse action's ``s`` argument, or
-        - explicitly expanding the tabs in your input string before calling ``parse_string``.
-
- Examples:
-
- By default, partial matches are OK.
-
- >>> res = Word('a').parse_string('aaaaabaaa')
- >>> print(res)
- ['aaaaa']
-
- The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children
- directly to see more examples.
-
-        An exception is raised if the ``parse_all`` flag is set and ``instring`` does not match the whole grammar.
-
- >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
- Traceback (most recent call last):
- ...
- pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6)
- """
- parseAll = parse_all or parseAll
-
- ParserElement.reset_cache()
- if not self.streamlined:
- self.streamline()
- for e in self.ignoreExprs:
- e.streamline()
- if not self.keepTabs:
- instring = instring.expandtabs()
- try:
- loc, tokens = self._parse(instring, 0)
- if parseAll:
- loc = self.preParse(instring, loc)
- se = Empty() + StringEnd()
- se._parse(instring, loc)
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clearing out pyparsing internal stack trace
- raise exc.with_traceback(None)
- else:
- return tokens
-
- def scan_string(
- self,
- instring: str,
- max_matches: int = _MAX_INT,
- overlap: bool = False,
- *,
- debug: bool = False,
- maxMatches: int = _MAX_INT,
- ) -> Generator[Tuple[ParseResults, int, int], None, None]:
- """
- Scan the input string for expression matches. Each match will return the
- matching tokens, start location, and end location. May be called with optional
- ``max_matches`` argument, to clip scanning after 'n' matches are found. If
- ``overlap`` is specified, then overlapping matches will be reported.
-
- Note that the start and end locations are reported relative to the string
- being parsed. See :class:`parse_string` for more information on parsing
- strings with embedded tabs.
-
- Example::
-
- source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
- print(source)
- for tokens, start, end in Word(alphas).scan_string(source):
- print(' '*start + '^'*(end-start))
- print(' '*start + tokens[0])
-
- prints::
-
- sldjf123lsdjjkf345sldkjf879lkjsfd987
- ^^^^^
- sldjf
- ^^^^^^^
- lsdjjkf
- ^^^^^^
- sldkjf
- ^^^^^^
- lkjsfd
- """
- maxMatches = min(maxMatches, max_matches)
- if not self.streamlined:
- self.streamline()
- for e in self.ignoreExprs:
- e.streamline()
-
- if not self.keepTabs:
- instring = str(instring).expandtabs()
- instrlen = len(instring)
- loc = 0
- preparseFn = self.preParse
- parseFn = self._parse
-        ParserElement.reset_cache()
- matches = 0
- try:
- while loc <= instrlen and matches < maxMatches:
- try:
- preloc = preparseFn(instring, loc)
- nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
- except ParseException:
- loc = preloc + 1
- else:
- if nextLoc > loc:
- matches += 1
- if debug:
- print(
- {
- "tokens": tokens.asList(),
- "start": preloc,
- "end": nextLoc,
- }
- )
- yield tokens, preloc, nextLoc
- if overlap:
- nextloc = preparseFn(instring, loc)
- if nextloc > loc:
- loc = nextLoc
- else:
- loc += 1
- else:
- loc = nextLoc
- else:
- loc = preloc + 1
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
-
- def transform_string(self, instring: str, *, debug: bool = False) -> str:
- """
- Extension to :class:`scan_string`, to modify matching text with modified tokens that may
- be returned from a parse action. To use ``transform_string``, define a grammar and
- attach a parse action to it that modifies the returned token list.
- Invoking ``transform_string()`` on a target string will then scan for matches,
- and replace the matched text patterns according to the logic in the parse
- action. ``transform_string()`` returns the resulting transformed string.
-
- Example::
-
- wd = Word(alphas)
- wd.set_parse_action(lambda toks: toks[0].title())
-
- print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))
-
- prints::
-
- Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
- """
- out: List[str] = []
- lastE = 0
- # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
- # keep string locs straight between transform_string and scan_string
- self.keepTabs = True
- try:
- for t, s, e in self.scan_string(instring, debug=debug):
- out.append(instring[lastE:s])
- if t:
- if isinstance(t, ParseResults):
- out += t.as_list()
- elif isinstance(t, Iterable) and not isinstance(t, str_type):
- out.extend(t)
- else:
- out.append(t)
- lastE = e
- out.append(instring[lastE:])
- out = [o for o in out if o]
- return "".join([str(s) for s in _flatten(out)])
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
-
- def search_string(
- self,
- instring: str,
- max_matches: int = _MAX_INT,
- *,
- debug: bool = False,
- maxMatches: int = _MAX_INT,
- ) -> ParseResults:
- """
- Another extension to :class:`scan_string`, simplifying the access to the tokens found
- to match the given parse expression. May be called with optional
- ``max_matches`` argument, to clip searching after 'n' matches are found.
-
- Example::
-
- # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
- cap_word = Word(alphas.upper(), alphas.lower())
-
- print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))
-
- # the sum() builtin can be used to merge results into a single ParseResults object
- print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))
-
- prints::
-
- [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
- ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
- """
- maxMatches = min(maxMatches, max_matches)
- try:
- return ParseResults(
- [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)]
- )
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
-
- def split(
- self,
- instring: str,
- maxsplit: int = _MAX_INT,
- include_separators: bool = False,
- *,
- includeSeparators=False,
- ) -> Generator[str, None, None]:
- """
- Generator method to split a string using the given expression as a separator.
- May be called with optional ``maxsplit`` argument, to limit the number of splits;
-        and the optional ``include_separators`` argument (default= ``False``), indicating whether the separating
-        matched text should be included in the split results.
-
- Example::
-
- punc = one_of(list(".,;:/-!?"))
- print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
-
- prints::
-
- ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
- """
- includeSeparators = includeSeparators or include_separators
- last = 0
- for t, s, e in self.scan_string(instring, max_matches=maxsplit):
- yield instring[last:s]
- if includeSeparators:
- yield t[0]
- last = e
- yield instring[last:]
-
- def __add__(self, other) -> "ParserElement":
- """
- Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement`
- converts them to :class:`Literal`s by default.
-
- Example::
-
- greet = Word(alphas) + "," + Word(alphas) + "!"
- hello = "Hello, World!"
- print(hello, "->", greet.parse_string(hello))
-
- prints::
-
- Hello, World! -> ['Hello', ',', 'World', '!']
-
-        ``...`` may be used as a parse expression as a short form of :class:`SkipTo`::
-
-            Literal('start') + ... + Literal('end')
-
-        is equivalent to::
-
- Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
-
- Note that the skipped text is returned with '_skipped' as a results name,
- and to support having multiple skips in the same parser, the value returned is
- a list of all skipped text.
- """
- if other is Ellipsis:
- return _PendingSkip(self)
-
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return And([self, other])
-
- def __radd__(self, other) -> "ParserElement":
- """
- Implementation of ``+`` operator when left operand is not a :class:`ParserElement`
- """
- if other is Ellipsis:
- return SkipTo(self)("_skipped*") + self
-
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other + self
-
- def __sub__(self, other) -> "ParserElement":
- """
- Implementation of ``-`` operator, returns :class:`And` with error stop
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return self + And._ErrorStop() + other
-
- def __rsub__(self, other) -> "ParserElement":
- """
- Implementation of ``-`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other - self
-
- def __mul__(self, other) -> "ParserElement":
- """
- Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
- ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
- tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
- may also include ``None`` as in:
- - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
- to ``expr*n + ZeroOrMore(expr)``
- (read as "at least n instances of ``expr``")
- - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
- (read as "0 to n instances of ``expr``")
- - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
- - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
-
- Note that ``expr*(None, n)`` does not raise an exception if
- more than n exprs exist in the input stream; that is,
- ``expr*(None, n)`` does not enforce a maximum number of expr
- occurrences. If this behavior is desired, then write
- ``expr*(None, n) + ~expr``
- """
- if other is Ellipsis:
- other = (0, None)
- elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
- other = ((0,) + other[1:] + (None,))[:2]
-
- if isinstance(other, int):
- minElements, optElements = other, 0
- elif isinstance(other, tuple):
- other = tuple(o if o is not Ellipsis else None for o in other)
- other = (other + (None, None))[:2]
- if other[0] is None:
- other = (0, other[1])
- if isinstance(other[0], int) and other[1] is None:
- if other[0] == 0:
- return ZeroOrMore(self)
- if other[0] == 1:
- return OneOrMore(self)
- else:
- return self * other[0] + ZeroOrMore(self)
- elif isinstance(other[0], int) and isinstance(other[1], int):
- minElements, optElements = other
- optElements -= minElements
- else:
- raise TypeError(
- "cannot multiply ParserElement and ({}) objects".format(
- ",".join(type(item).__name__ for item in other)
- )
- )
- else:
- raise TypeError(
- "cannot multiply ParserElement and {} objects".format(
- type(other).__name__
- )
- )
-
- if minElements < 0:
- raise ValueError("cannot multiply ParserElement by negative value")
- if optElements < 0:
- raise ValueError(
- "second tuple value must be greater or equal to first tuple value"
- )
- if minElements == optElements == 0:
- return And([])
-
- if optElements:
-
- def makeOptionalList(n):
- if n > 1:
- return Opt(self + makeOptionalList(n - 1))
- else:
- return Opt(self)
-
- if minElements:
- if minElements == 1:
- ret = self + makeOptionalList(optElements)
- else:
- ret = And([self] * minElements) + makeOptionalList(optElements)
- else:
- ret = makeOptionalList(optElements)
- else:
- if minElements == 1:
- ret = self
- else:
- ret = And([self] * minElements)
- return ret
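-
-    # A minimal usage sketch of the tuple multiplication forms (illustrative only):
-    #
-    #   ab = Literal("ab")
-    #   (ab * 3).parse_string("ababab")          # exactly 3 -> ['ab', 'ab', 'ab']
-    #   (ab * (1, 2)).parse_string("abab")       # 1 to 2    -> ['ab', 'ab']
-    #   (ab * (2, None)).parse_string("ababab")  # 2 or more -> ['ab', 'ab', 'ab']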
-
- def __rmul__(self, other) -> "ParserElement":
- return self.__mul__(other)
-
- def __or__(self, other) -> "ParserElement":
- """
- Implementation of ``|`` operator - returns :class:`MatchFirst`
- """
- if other is Ellipsis:
- return _PendingSkip(self, must_skip=True)
-
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return MatchFirst([self, other])
-
- def __ror__(self, other) -> "ParserElement":
- """
- Implementation of ``|`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other | self
-
- def __xor__(self, other) -> "ParserElement":
- """
- Implementation of ``^`` operator - returns :class:`Or`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return Or([self, other])
-
- def __rxor__(self, other) -> "ParserElement":
- """
- Implementation of ``^`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other ^ self
-
- def __and__(self, other) -> "ParserElement":
- """
- Implementation of ``&`` operator - returns :class:`Each`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return Each([self, other])
-
- def __rand__(self, other) -> "ParserElement":
- """
- Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
- """
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- if not isinstance(other, ParserElement):
- raise TypeError(
- "Cannot combine element of type {} with ParserElement".format(
- type(other).__name__
- )
- )
- return other & self
-
- def __invert__(self) -> "ParserElement":
- """
- Implementation of ``~`` operator - returns :class:`NotAny`
- """
- return NotAny(self)
-
-    # disable __iter__ so that legacy sequential access to __getitem__ is not
-    # used to iterate over a sequence
- __iter__ = None
-
- def __getitem__(self, key):
- """
- use ``[]`` indexing notation as a short form for expression repetition:
-
- - ``expr[n]`` is equivalent to ``expr*n``
- - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
- - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
- to ``expr*n + ZeroOrMore(expr)``
- (read as "at least n instances of ``expr``")
- - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
- (read as "0 to n instances of ``expr``")
- - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
- - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
-
- ``None`` may be used in place of ``...``.
-
-        Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
- if more than ``n`` ``expr``s exist in the input stream. If this behavior is
- desired, then write ``expr[..., n] + ~expr``.
- """
-
- # convert single arg keys to tuples
- try:
- if isinstance(key, str_type):
- key = (key,)
- iter(key)
- except TypeError:
- key = (key, key)
-
- if len(key) > 2:
- raise TypeError(
- "only 1 or 2 index arguments supported ({}{})".format(
- key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
- )
- )
-
- # clip to 2 elements
- ret = self * tuple(key[:2])
- return ret
-
- def __call__(self, name: str = None) -> "ParserElement":
- """
- Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
-
- If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
- passed as ``True``.
-
-        If ``name`` is omitted, this is the same as calling :class:`copy`.
-
- Example::
-
- # these are equivalent
- userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
- userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
- """
- if name is not None:
- return self._setResultsName(name)
- else:
- return self.copy()
-
- def suppress(self) -> "ParserElement":
- """
- Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
- cluttering up returned output.
- """
- return Suppress(self)
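-
-    # A minimal usage sketch of suppress (illustrative only): the punctuation
-    # must still match, but is dropped from the results:
-    #
-    #   comma = Literal(",").suppress()
-    #   csv = Word(alphas) + (comma + Word(alphas))[...]
-    #   csv.parse_string("a, b, c")  # -> ['a', 'b', 'c'], commas omitted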
-
- def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
- """
- Enables the skipping of whitespace before matching the characters in the
- :class:`ParserElement`'s defined pattern.
-
- :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
- """
- self.skipWhitespace = True
- return self
-
- def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
- """
- Disables the skipping of whitespace before matching the characters in the
- :class:`ParserElement`'s defined pattern. This is normally only used internally by
- the pyparsing module, but may be needed in some whitespace-sensitive grammars.
-
- :param recursive: If true (the default), also disable whitespace skipping in child elements (if any)
- """
- self.skipWhitespace = False
- return self
-
- def set_whitespace_chars(
- self, chars: Union[Set[str], str], copy_defaults: bool = False
- ) -> "ParserElement":
- """
- Overrides the default whitespace chars
- """
- self.skipWhitespace = True
- self.whiteChars = set(chars)
- self.copyDefaultWhiteChars = copy_defaults
- return self
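-
-    # A minimal usage sketch of set_whitespace_chars (illustrative only):
-    # restrict skipped whitespace so that newlines become significant:
-    #
-    #   value = Word(alphas).set_whitespace_chars(" \t")
-    #   value[1, ...].parse_string("a b")                   # -> ['a', 'b']
-    #   value[1, ...].parse_string("a\nb", parse_all=True)  # fails: '\n' is not skipped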
-
- def parse_with_tabs(self) -> "ParserElement":
- """
-        Overrides the default behavior of expanding ``<TAB>`` s to spaces before parsing the input string.
- Must be called before ``parse_string`` when the input grammar contains elements that
- match ``<TAB>`` characters.
- """
- self.keepTabs = True
- return self
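-
-    # A minimal usage sketch of parse_with_tabs (illustrative only): preserve
-    # literal tabs so they can be matched explicitly (White is defined later in
-    # this module):
-    #
-    #   row = Word(alphas) + White("\t").suppress() + Word(alphas)
-    #   row.parse_with_tabs().parse_string("a\tb")  # -> ['a', 'b']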
-
- def ignore(self, other: "ParserElement") -> "ParserElement":
- """
- Define expression to be ignored (e.g., comments) while doing pattern
- matching; may be called repeatedly, to define multiple comment or other
- ignorable patterns.
-
- Example::
-
- patt = Word(alphas)[1, ...]
- patt.parse_string('ablaj /* comment */ lskjd')
- # -> ['ablaj']
-
- patt.ignore(c_style_comment)
- patt.parse_string('ablaj /* comment */ lskjd')
- # -> ['ablaj', 'lskjd']
- """
- if isinstance(other, str_type):
- other = Suppress(other)
-
- if isinstance(other, Suppress):
- if other not in self.ignoreExprs:
- self.ignoreExprs.append(other)
- else:
- self.ignoreExprs.append(Suppress(other.copy()))
- return self
-
- def set_debug_actions(
- self,
- start_action: DebugStartAction,
- success_action: DebugSuccessAction,
- exception_action: DebugExceptionAction,
- ) -> "ParserElement":
- """
- Customize display of debugging messages while doing pattern matching:
-
- - ``start_action`` - method to be called when an expression is about to be parsed;
- should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
-
- - ``success_action`` - method to be called when an expression has successfully parsed;
-          should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
-
- - ``exception_action`` - method to be called when expression fails to parse;
- should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
- """
- self.debugActions = self.DebugActions(
- start_action or _default_start_debug_action,
- success_action or _default_success_debug_action,
- exception_action or _default_exception_debug_action,
- )
- self.debug = True
- return self
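-
-    # A minimal usage sketch of set_debug_actions (illustrative only): a custom
-    # "try" action matching the documented signature; passing None for the other
-    # two slots falls back to the default debug actions:
-    #
-    #   def show_try(instring, loc, expr, cache_hit):
-    #       print("trying {} at loc {}".format(expr, loc))
-    #
-    #   Word(alphas).set_debug_actions(show_try, None, None)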
-
- def set_debug(self, flag: bool = True) -> "ParserElement":
- """
- Enable display of debugging messages while doing pattern matching.
- Set ``flag`` to ``True`` to enable, ``False`` to disable.
-
- Example::
-
- wd = Word(alphas).set_name("alphaword")
- integer = Word(nums).set_name("numword")
- term = wd | integer
-
- # turn on debugging for wd
- wd.set_debug()
-
- term[1, ...].parse_string("abc 123 xyz 890")
-
- prints::
-
- Match alphaword at loc 0(1,1)
- Matched alphaword -> ['abc']
- Match alphaword at loc 3(1,4)
- Exception raised:Expected alphaword (at char 4), (line:1, col:5)
- Match alphaword at loc 7(1,8)
- Matched alphaword -> ['xyz']
- Match alphaword at loc 11(1,12)
- Exception raised:Expected alphaword (at char 12), (line:1, col:13)
- Match alphaword at loc 15(1,16)
- Exception raised:Expected alphaword (at char 15), (line:1, col:16)
-
- The output shown is that produced by the default debug actions - custom debug actions can be
- specified using :class:`set_debug_actions`. Prior to attempting
- to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
-        is shown. Then if the parse succeeds, a ``"Matched"`` message is shown; otherwise, an ``"Exception raised"``
-        message is shown.
- which makes debugging and exception messages easier to understand - for instance, the default
- name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
- """
- if flag:
- self.set_debug_actions(
- _default_start_debug_action,
- _default_success_debug_action,
- _default_exception_debug_action,
- )
- else:
- self.debug = False
- return self
-
- @property
- def default_name(self) -> str:
- if self._defaultName is None:
- self._defaultName = self._generateDefaultName()
- return self._defaultName
-
- @abstractmethod
- def _generateDefaultName(self):
- """
- Child classes must define this method, which defines how the ``default_name`` is set.
- """
-
- def set_name(self, name: str) -> "ParserElement":
- """
-        Define a name for this expression, making debugging and exception messages clearer.
-
-        Example::
-
-            Word(nums).parse_string("ABC")  # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
-            Word(nums).set_name("integer").parse_string("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
- """
- self.customName = name
- self.errmsg = "Expected " + self.name
- if __diag__.enable_debug_on_named_expressions:
- self.set_debug()
- return self
-
- @property
- def name(self) -> str:
- # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
- return self.customName if self.customName is not None else self.default_name
-
- def __str__(self) -> str:
- return self.name
-
- def __repr__(self) -> str:
- return str(self)
-
- def streamline(self) -> "ParserElement":
- self.streamlined = True
- self._defaultName = None
- return self
-
- def recurse(self) -> Sequence["ParserElement"]:
- return []
-
- def _checkRecursion(self, parseElementList):
- subRecCheckList = parseElementList[:] + [self]
- for e in self.recurse():
- e._checkRecursion(subRecCheckList)
-
- def validate(self, validateTrace=None) -> None:
- """
- Check defined expressions for valid structure, check for infinite recursive definitions.
- """
- self._checkRecursion([])
-
- def parse_file(
- self,
- file_or_filename: Union[str, Path, TextIO],
- encoding: str = "utf-8",
- parse_all: bool = False,
- *,
- parseAll: bool = False,
- ) -> ParseResults:
- """
- Execute the parse expression on the given file or filename.
- If a filename is specified (instead of a file object),
- the entire file is opened, read, and closed before parsing.
- """
- parseAll = parseAll or parse_all
- try:
- file_contents = file_or_filename.read()
- except AttributeError:
- with open(file_or_filename, "r", encoding=encoding) as f:
- file_contents = f.read()
- try:
- return self.parse_string(file_contents, parseAll)
- except ParseBaseException as exc:
- if ParserElement.verbose_stacktrace:
- raise
- else:
- # catch and re-raise exception from here, clears out pyparsing internal stack trace
- raise exc.with_traceback(None)
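-
-    # A minimal usage sketch of parse_file (illustrative only; the file name is
-    # hypothetical). A path or an open file object may be passed:
-    #
-    #   grammar = Word(alphas)[1, ...]
-    #   results = grammar.parse_file("words.txt", parse_all=True)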
-
- def __eq__(self, other):
- if self is other:
- return True
- elif isinstance(other, str_type):
- return self.matches(other, parse_all=True)
- elif isinstance(other, ParserElement):
- return vars(self) == vars(other)
- return False
-
- def __hash__(self):
- return id(self)
-
- def matches(
- self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
- ) -> bool:
- """
- Method for quick testing of a parser against a test string. Good for simple
- inline microtests of sub expressions while building up larger parser.
-
- Parameters:
- - ``test_string`` - to test against this expression for a match
- - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
-
- Example::
-
- expr = Word(nums)
- assert expr.matches("100")
- """
- parseAll = parseAll and parse_all
- try:
- self.parse_string(str(test_string), parse_all=parseAll)
- return True
- except ParseBaseException:
- return False
-
- def run_tests(
- self,
- tests: Union[str, List[str]],
- parse_all: bool = True,
- comment: typing.Optional[Union["ParserElement", str]] = "#",
- full_dump: bool = True,
- print_results: bool = True,
- failure_tests: bool = False,
- post_parse: Callable[[str, ParseResults], str] = None,
- file: typing.Optional[TextIO] = None,
- with_line_numbers: bool = False,
- *,
- parseAll: bool = True,
- fullDump: bool = True,
- printResults: bool = True,
- failureTests: bool = False,
- postParse: Callable[[str, ParseResults], str] = None,
- ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]:
- """
- Execute the parse expression on a series of test strings, showing each
- test, the parsed results or where the parse failed. Quick and easy way to
- run a parse expression against a list of sample strings.
-
- Parameters:
- - ``tests`` - a list of separate test strings, or a multiline string of test strings
- - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
- - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
- string; pass None to disable comment filtering
- - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
- if False, only dump nested list
- - ``print_results`` - (default= ``True``) prints test output to stdout
- - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
- - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
- `fn(test_string, parse_results)` and returns a string to be added to the test output
- - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
- if None, will default to ``sys.stdout``
-        - ``with_line_numbers`` - (default= ``False``) - show test strings with line and column numbers
-
- Returns: a (success, results) tuple, where success indicates that all tests succeeded
- (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
- test's output
-
- Example::
-
- number_expr = pyparsing_common.number.copy()
-
- result = number_expr.run_tests('''
- # unsigned integer
- 100
- # negative integer
- -100
- # float with scientific notation
- 6.02e23
- # integer with scientific notation
- 1e-12
- ''')
- print("Success" if result[0] else "Failed!")
-
- result = number_expr.run_tests('''
- # stray character
- 100Z
- # missing leading digit before '.'
- -.100
- # too many '.'
- 3.14.159
- ''', failure_tests=True)
- print("Success" if result[0] else "Failed!")
-
- prints::
-
- # unsigned integer
- 100
- [100]
-
- # negative integer
- -100
- [-100]
-
- # float with scientific notation
- 6.02e23
- [6.02e+23]
-
- # integer with scientific notation
- 1e-12
- [1e-12]
-
- Success
-
- # stray character
- 100Z
- ^
- FAIL: Expected end of text (at char 3), (line:1, col:4)
-
- # missing leading digit before '.'
- -.100
- ^
- FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
-
- # too many '.'
- 3.14.159
- ^
- FAIL: Expected end of text (at char 4), (line:1, col:5)
-
- Success
-
- Each test string must be on a single line. If you want to test a string that spans multiple
- lines, create a test like this::
-
- expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
-
- (Note that this is a raw string literal, you must include the leading ``'r'``.)
- """
- from .testing import pyparsing_test
-
- parseAll = parseAll and parse_all
- fullDump = fullDump and full_dump
- printResults = printResults and print_results
- failureTests = failureTests or failure_tests
- postParse = postParse or post_parse
- if isinstance(tests, str_type):
- line_strip = type(tests).strip
- tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
- if isinstance(comment, str_type):
- comment = Literal(comment)
- if file is None:
- file = sys.stdout
- print_ = file.write
-
- result: Union[ParseResults, Exception]
- allResults = []
- comments = []
- success = True
- NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
- BOM = "\ufeff"
- for t in tests:
-            if (comment is not None and comment.matches(t, False)) or (comments and not t):
- comments.append(
- pyparsing_test.with_line_numbers(t) if with_line_numbers else t
- )
- continue
- if not t:
- continue
- out = [
- "\n" + "\n".join(comments) if comments else "",
- pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
- ]
- comments = []
- try:
- # convert newline marks to actual newlines, and strip leading BOM if present
- t = NL.transform_string(t.lstrip(BOM))
- result = self.parse_string(t, parse_all=parseAll)
- except ParseBaseException as pe:
- fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
- out.append(pe.explain())
- out.append("FAIL: " + str(pe))
- if ParserElement.verbose_stacktrace:
- out.extend(traceback.format_tb(pe.__traceback__))
- success = success and failureTests
- result = pe
- except Exception as exc:
- out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc))
- if ParserElement.verbose_stacktrace:
- out.extend(traceback.format_tb(exc.__traceback__))
- success = success and failureTests
- result = exc
- else:
- success = success and not failureTests
- if postParse is not None:
- try:
- pp_value = postParse(t, result)
- if pp_value is not None:
- if isinstance(pp_value, ParseResults):
- out.append(pp_value.dump())
- else:
- out.append(str(pp_value))
- else:
- out.append(result.dump())
- except Exception as e:
- out.append(result.dump(full=fullDump))
- out.append(
- "{} failed: {}: {}".format(
- postParse.__name__, type(e).__name__, e
- )
- )
- else:
- out.append(result.dump(full=fullDump))
- out.append("")
-
- if printResults:
- print_("\n".join(out))
-
- allResults.append((t, result))
-
- return success, allResults
-
- def create_diagram(
- self,
- output_html: Union[TextIO, Path, str],
- vertical: int = 3,
- show_results_names: bool = False,
- show_groups: bool = False,
- **kwargs,
- ) -> None:
- """
- Create a railroad diagram for the parser.
-
- Parameters:
- - output_html (str or file-like object) - output target for generated
- diagram HTML
- - vertical (int) - threshold for formatting multiple alternatives vertically
- instead of horizontally (default=3)
- - show_results_names - bool flag whether diagram should show annotations for
- defined results names
- - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box
- Additional diagram-formatting keyword arguments can also be included;
- see railroad.Diagram class.
- """
-
- try:
- from .diagram import to_railroad, railroad_to_html
- except ImportError as ie:
- raise Exception(
- "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
- ) from ie
-
- self.streamline()
-
- railroad = to_railroad(
- self,
- vertical=vertical,
- show_results_names=show_results_names,
- show_groups=show_groups,
- diagram_kwargs=kwargs,
- )
- if isinstance(output_html, (str, Path)):
- with open(output_html, "w", encoding="utf-8") as diag_file:
- diag_file.write(railroad_to_html(railroad))
- else:
- # we were passed a file-like object, just write to it
- output_html.write(railroad_to_html(railroad))
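-
-    # A minimal usage sketch of create_diagram (illustrative only; the output
-    # file name is hypothetical, and the pyparsing[diagrams] extra is required):
-    #
-    #   greet = Word(alphas)("greeting") + "," + Word(alphas)("name") + "!"
-    #   greet.create_diagram("greeting_diagram.html", show_results_names=True)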
-
- setDefaultWhitespaceChars = set_default_whitespace_chars
- inlineLiteralsUsing = inline_literals_using
- setResultsName = set_results_name
- setBreak = set_break
- setParseAction = set_parse_action
- addParseAction = add_parse_action
- addCondition = add_condition
- setFailAction = set_fail_action
- tryParse = try_parse
- canParseNext = can_parse_next
- resetCache = reset_cache
- enableLeftRecursion = enable_left_recursion
- enablePackrat = enable_packrat
- parseString = parse_string
- scanString = scan_string
- searchString = search_string
- transformString = transform_string
- setWhitespaceChars = set_whitespace_chars
- parseWithTabs = parse_with_tabs
- setDebugActions = set_debug_actions
- setDebug = set_debug
- defaultName = default_name
- setName = set_name
- parseFile = parse_file
- runTests = run_tests
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
-
-class _PendingSkip(ParserElement):
-    # internal placeholder class to hold a place where '...' is added to a parser element;
-    # once another ParserElement is added, this placeholder will be replaced with a SkipTo
- def __init__(self, expr: ParserElement, must_skip: bool = False):
- super().__init__()
- self.anchor = expr
- self.must_skip = must_skip
-
- def _generateDefaultName(self):
- return str(self.anchor + Empty()).replace("Empty", "...")
-
- def __add__(self, other) -> "ParserElement":
- skipper = SkipTo(other).set_name("...")("_skipped*")
- if self.must_skip:
-
- def must_skip(t):
- if not t._skipped or t._skipped.as_list() == [""]:
- del t[0]
- t.pop("_skipped", None)
-
- def show_skip(t):
- if t._skipped.as_list()[-1:] == [""]:
- t.pop("_skipped")
- t["_skipped"] = "missing <" + repr(self.anchor) + ">"
-
- return (
- self.anchor + skipper().add_parse_action(must_skip)
- | skipper().add_parse_action(show_skip)
- ) + other
-
- return self.anchor + skipper + other
-
- def __repr__(self):
- return self.defaultName
-
- def parseImpl(self, *args):
- raise Exception(
- "use of `...` expression without following SkipTo target expression"
- )
-
-
-class Token(ParserElement):
- """Abstract :class:`ParserElement` subclass, for defining atomic
- matching patterns.
- """
-
- def __init__(self):
- super().__init__(savelist=False)
-
- def _generateDefaultName(self):
- return type(self).__name__
-
-
-class Empty(Token):
- """
- An empty token, will always match.
- """
-
- def __init__(self):
- super().__init__()
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-
-class NoMatch(Token):
- """
- A token that will never match.
- """
-
- def __init__(self):
- super().__init__()
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.errmsg = "Unmatchable token"
-
- def parseImpl(self, instring, loc, doActions=True):
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Literal(Token):
- """
- Token to exactly match a specified string.
-
- Example::
-
- Literal('blah').parse_string('blah') # -> ['blah']
- Literal('blah').parse_string('blahfooblah') # -> ['blah']
- Literal('blah').parse_string('bla') # -> Exception: Expected "blah"
-
- For case-insensitive matching, use :class:`CaselessLiteral`.
-
- For keyword matching (force word break before and after the matched string),
- use :class:`Keyword` or :class:`CaselessKeyword`.
- """
-
- def __init__(self, match_string: str = "", *, matchString: str = ""):
- super().__init__()
- match_string = matchString or match_string
- self.match = match_string
- self.matchLen = len(match_string)
- try:
- self.firstMatchChar = match_string[0]
- except IndexError:
- raise ValueError("null string passed to Literal; use Empty() instead")
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = False
- self.mayIndexError = False
-
- # Performance tuning: modify __class__ to select
- # a parseImpl optimized for single-character check
- if self.matchLen == 1 and type(self) is Literal:
- self.__class__ = _SingleCharLiteral
-
- def _generateDefaultName(self):
- return repr(self.match)
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] == self.firstMatchChar and instring.startswith(
- self.match, loc
- ):
- return loc + self.matchLen, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class _SingleCharLiteral(Literal):
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] == self.firstMatchChar:
- return loc + 1, self.match
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-ParserElement._literalStringClass = Literal
-
-
-class Keyword(Token):
- """
-    Token to exactly match a specified string as a keyword; that is, it must not
-    be immediately preceded or followed by a keyword character. Compare
- with :class:`Literal`:
-
- - ``Literal("if")`` will match the leading ``'if'`` in
- ``'ifAndOnlyIf'``.
- - ``Keyword("if")`` will not; it will only match the leading
- ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
-
- Accepts two optional constructor arguments in addition to the
- keyword string:
-
- - ``identChars`` is a string of characters that would be valid
- identifier characters, defaulting to all alphanumerics + "_" and
- "$"
- - ``caseless`` allows case-insensitive matching, default is ``False``.
-
- Example::
-
- Keyword("start").parse_string("start") # -> ['start']
- Keyword("start").parse_string("starting") # -> Exception
-
- For case-insensitive matching, use :class:`CaselessKeyword`.
- """
-
- DEFAULT_KEYWORD_CHARS = alphanums + "_$"
-
- def __init__(
- self,
- match_string: str = "",
- ident_chars: typing.Optional[str] = None,
- caseless: bool = False,
- *,
- matchString: str = "",
- identChars: typing.Optional[str] = None,
- ):
- super().__init__()
- identChars = identChars or ident_chars
- if identChars is None:
- identChars = Keyword.DEFAULT_KEYWORD_CHARS
- match_string = matchString or match_string
- self.match = match_string
- self.matchLen = len(match_string)
- try:
- self.firstMatchChar = match_string[0]
- except IndexError:
- raise ValueError("null string passed to Keyword; use Empty() instead")
- self.errmsg = "Expected {} {}".format(type(self).__name__, self.name)
- self.mayReturnEmpty = False
- self.mayIndexError = False
- self.caseless = caseless
- if caseless:
- self.caselessmatch = match_string.upper()
- identChars = identChars.upper()
- self.identChars = set(identChars)
-
- def _generateDefaultName(self):
- return repr(self.match)
-
- def parseImpl(self, instring, loc, doActions=True):
- errmsg = self.errmsg
- errloc = loc
- if self.caseless:
- if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
- if loc == 0 or instring[loc - 1].upper() not in self.identChars:
- if (
- loc >= len(instring) - self.matchLen
- or instring[loc + self.matchLen].upper() not in self.identChars
- ):
- return loc + self.matchLen, self.match
- else:
- # followed by keyword char
-                        errmsg += ", keyword was immediately followed by keyword character"
- errloc = loc + self.matchLen
- else:
- # preceded by keyword char
- errmsg += ", keyword was immediately preceded by keyword character"
- errloc = loc - 1
- # else no match just raise plain exception
-
- else:
- if (
- instring[loc] == self.firstMatchChar
- and self.matchLen == 1
- or instring.startswith(self.match, loc)
- ):
- if loc == 0 or instring[loc - 1] not in self.identChars:
- if (
- loc >= len(instring) - self.matchLen
- or instring[loc + self.matchLen] not in self.identChars
- ):
- return loc + self.matchLen, self.match
- else:
- # followed by keyword char
- errmsg += (
- ", keyword was immediately followed by keyword character"
- )
- errloc = loc + self.matchLen
- else:
- # preceded by keyword char
- errmsg += ", keyword was immediately preceded by keyword character"
- errloc = loc - 1
- # else no match just raise plain exception
-
- raise ParseException(instring, errloc, errmsg, self)
-
- @staticmethod
- def set_default_keyword_chars(chars) -> None:
- """
- Overrides the default characters used by :class:`Keyword` expressions.
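-
-        Example (illustrative)::
-
-            # treat only letters and '_' as keyword boundary characters
-            Keyword.set_default_keyword_chars(alphas + "_")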
- """
- Keyword.DEFAULT_KEYWORD_CHARS = chars
-
- setDefaultKeywordChars = set_default_keyword_chars
-
-
-class CaselessLiteral(Literal):
- """
- Token to match a specified string, ignoring case of letters.
- Note: the matched results will always be in the case of the given
- match string, NOT the case of the input text.
-
- Example::
-
- CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
- # -> ['CMD', 'CMD', 'CMD']
-
- (Contrast with example for :class:`CaselessKeyword`.)
- """
-
- def __init__(self, match_string: str = "", *, matchString: str = ""):
- match_string = matchString or match_string
- super().__init__(match_string.upper())
- # Preserve the defining literal.
- self.returnString = match_string
- self.errmsg = "Expected " + self.name
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc : loc + self.matchLen].upper() == self.match:
- return loc + self.matchLen, self.returnString
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class CaselessKeyword(Keyword):
- """
- Caseless version of :class:`Keyword`.
-
- Example::
-
- CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")
- # -> ['CMD', 'CMD']
-
- (Contrast with example for :class:`CaselessLiteral`.)
- """
-
- def __init__(
- self,
- match_string: str = "",
- ident_chars: typing.Optional[str] = None,
- *,
- matchString: str = "",
- identChars: typing.Optional[str] = None,
- ):
- identChars = identChars or ident_chars
- match_string = matchString or match_string
- super().__init__(match_string, identChars, caseless=True)
-
-
-class CloseMatch(Token):
- """A variation on :class:`Literal` which matches "close" matches,
- that is, strings with at most 'n' mismatching characters.
- :class:`CloseMatch` takes parameters:
-
- - ``match_string`` - string to be matched
- - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
- - ``max_mismatches`` - (``default=1``) maximum number of
- mismatches allowed to count as a match
-
- The results from a successful parse will contain the matched text
- from the input string and the following named results:
-
- - ``mismatches`` - a list of the positions within the
- match_string where mismatches were found
- - ``original`` - the original match_string used to compare
- against the input string
-
- If ``mismatches`` is an empty list, then the match was an exact
- match.
-
- Example::
-
- patt = CloseMatch("ATCATCGAATGGA")
- patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
- patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
-
- # exact match
- patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
-
- # close match allowing up to 2 mismatches
- patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
- patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
- """
-
- def __init__(
- self,
- match_string: str,
-        max_mismatches: typing.Optional[int] = None,
- *,
- maxMismatches: int = 1,
- caseless=False,
- ):
- maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
- super().__init__()
- self.match_string = match_string
- self.maxMismatches = maxMismatches
- self.errmsg = "Expected {!r} (with up to {} mismatches)".format(
- self.match_string, self.maxMismatches
- )
- self.caseless = caseless
- self.mayIndexError = False
- self.mayReturnEmpty = False
-
- def _generateDefaultName(self):
- return "{}:{!r}".format(type(self).__name__, self.match_string)
-
- def parseImpl(self, instring, loc, doActions=True):
- start = loc
- instrlen = len(instring)
- maxloc = start + len(self.match_string)
-
- if maxloc <= instrlen:
- match_string = self.match_string
- match_stringloc = 0
- mismatches = []
- maxMismatches = self.maxMismatches
-
- for match_stringloc, s_m in enumerate(
- zip(instring[loc:maxloc], match_string)
- ):
- src, mat = s_m
- if self.caseless:
- src, mat = src.lower(), mat.lower()
-
- if src != mat:
- mismatches.append(match_stringloc)
- if len(mismatches) > maxMismatches:
- break
- else:
- loc = start + match_stringloc + 1
- results = ParseResults([instring[start:loc]])
- results["original"] = match_string
- results["mismatches"] = mismatches
- return loc, results
-
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class Word(Token):
- """Token for matching words composed of allowed character sets.
- Parameters:
- - ``init_chars`` - string of all characters that should be used to
- match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
- if ``body_chars`` is also specified, then this is the string of
- initial characters
- - ``body_chars`` - string of characters that
- can be used for matching after a matched initial character as
- given in ``init_chars``; if omitted, same as the initial characters
- (default=``None``)
- - ``min`` - minimum number of characters to match (default=1)
- - ``max`` - maximum number of characters to match (default=0)
- - ``exact`` - exact number of characters to match (default=0)
- - ``as_keyword`` - match as a keyword (default=``False``)
- - ``exclude_chars`` - characters that might be
- found in the input ``body_chars`` string but which should not be
-      accepted for matching; useful to define a word of all
- printables except for one or two characters, for instance
- (default=``None``)
-
- :class:`srange` is useful for defining custom character set strings
- for defining :class:`Word` expressions, using range notation from
- regular expression character sets.
-
- A common mistake is to use :class:`Word` to match a specific literal
- string, as in ``Word("Address")``. Remember that :class:`Word`
- uses the string argument to define *sets* of matchable characters.
- This expression would match "Add", "AAA", "dAred", or any other word
- made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
- exact literal string, use :class:`Literal` or :class:`Keyword`.
-
- pyparsing includes helper strings for building Words:
-
- - :class:`alphas`
- - :class:`nums`
- - :class:`alphanums`
- - :class:`hexnums`
-    - :class:`alphas8bit` (alphabetic characters in Latin-1 range 128-255
-      - accented, tilded, umlauted, etc.)
-    - :class:`punc8bit` (non-alphabetic characters in Latin-1 range
-      128-255 - currency, symbols, superscripts, diacriticals, etc.)
- - :class:`printables` (any non-whitespace character)
-
- ``alphas``, ``nums``, and ``printables`` are also defined in several
-    Unicode sets - see :class:`pyparsing_unicode`.
-
- Example::
-
- # a word composed of digits
- integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
-
- # a word with a leading capital, and zero or more lowercase
- capital_word = Word(alphas.upper(), alphas.lower())
-
- # hostnames are alphanumeric, with leading alpha, and '-'
- hostname = Word(alphas, alphanums + '-')
-
- # roman numeral (not a strict parser, accepts invalid mix of characters)
- roman = Word("IVXLCDM")
-
- # any string of non-whitespace characters, except for ','
- csv_value = Word(printables, exclude_chars=",")
- """
-
- def __init__(
- self,
- init_chars: str = "",
- body_chars: typing.Optional[str] = None,
- min: int = 1,
- max: int = 0,
- exact: int = 0,
- as_keyword: bool = False,
- exclude_chars: typing.Optional[str] = None,
- *,
- initChars: typing.Optional[str] = None,
- bodyChars: typing.Optional[str] = None,
- asKeyword: bool = False,
- excludeChars: typing.Optional[str] = None,
- ):
- initChars = initChars or init_chars
- bodyChars = bodyChars or body_chars
- asKeyword = asKeyword or as_keyword
- excludeChars = excludeChars or exclude_chars
- super().__init__()
- if not initChars:
- raise ValueError(
- "invalid {}, initChars cannot be empty string".format(
- type(self).__name__
- )
- )
-
- initChars = set(initChars)
- self.initChars = initChars
- if excludeChars:
- excludeChars = set(excludeChars)
- initChars -= excludeChars
- if bodyChars:
- bodyChars = set(bodyChars) - excludeChars
- self.initCharsOrig = "".join(sorted(initChars))
-
- if bodyChars:
- self.bodyCharsOrig = "".join(sorted(bodyChars))
- self.bodyChars = set(bodyChars)
- else:
- self.bodyCharsOrig = "".join(sorted(initChars))
- self.bodyChars = set(initChars)
-
- self.maxSpecified = max > 0
-
- if min < 1:
- raise ValueError(
- "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
- )
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.asKeyword = asKeyword
-
- # see if we can make a regex for this Word
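-        # (illustrative: Word(nums) collapses to the pattern "[0-9]+", and
-        # parsing is then delegated to the faster _WordRegex subclass below)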
- if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0):
- if self.bodyChars == self.initChars:
- if max == 0:
- repeat = "+"
- elif max == 1:
- repeat = ""
- else:
- repeat = "{{{},{}}}".format(
- self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen
- )
- self.reString = "[{}]{}".format(
- _collapse_string_to_ranges(self.initChars),
- repeat,
- )
- elif len(self.initChars) == 1:
- if max == 0:
- repeat = "*"
- else:
- repeat = "{{0,{}}}".format(max - 1)
- self.reString = "{}[{}]{}".format(
- re.escape(self.initCharsOrig),
- _collapse_string_to_ranges(self.bodyChars),
- repeat,
- )
- else:
- if max == 0:
- repeat = "*"
- elif max == 2:
- repeat = ""
- else:
- repeat = "{{0,{}}}".format(max - 1)
- self.reString = "[{}][{}]{}".format(
- _collapse_string_to_ranges(self.initChars),
- _collapse_string_to_ranges(self.bodyChars),
- repeat,
- )
- if self.asKeyword:
- self.reString = r"\b" + self.reString + r"\b"
-
- try:
- self.re = re.compile(self.reString)
- except re.error:
- self.re = None
- else:
- self.re_match = self.re.match
- self.__class__ = _WordRegex
-
- def _generateDefaultName(self):
- def charsAsStr(s):
- max_repr_len = 16
- s = _collapse_string_to_ranges(s, re_escape=False)
- if len(s) > max_repr_len:
- return s[: max_repr_len - 3] + "..."
- else:
- return s
-
- if self.initChars != self.bodyChars:
- base = "W:({}, {})".format(
- charsAsStr(self.initChars), charsAsStr(self.bodyChars)
- )
- else:
- base = "W:({})".format(charsAsStr(self.initChars))
-
- # add length specification
- if self.minLen > 1 or self.maxLen != _MAX_INT:
- if self.minLen == self.maxLen:
- if self.minLen == 1:
- return base[2:]
- else:
- return base + "{{{}}}".format(self.minLen)
- elif self.maxLen == _MAX_INT:
- return base + "{{{},...}}".format(self.minLen)
- else:
- return base + "{{{},{}}}".format(self.minLen, self.maxLen)
- return base
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] not in self.initChars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- instrlen = len(instring)
- bodychars = self.bodyChars
- maxloc = start + self.maxLen
- maxloc = min(maxloc, instrlen)
- while loc < maxloc and instring[loc] in bodychars:
- loc += 1
-
- throwException = False
- if loc - start < self.minLen:
- throwException = True
- elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
- throwException = True
- elif self.asKeyword:
- if (
- start > 0
- and instring[start - 1] in bodychars
- or loc < instrlen
- and instring[loc] in bodychars
- ):
- throwException = True
-
- if throwException:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
-
-class _WordRegex(Word):
- def parseImpl(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- return loc, result.group()
-
-
-class Char(_WordRegex):
- """A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
- when defining a match of any single character in a string of
- characters.
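-
-    Example (illustrative)::
-
-        vowel = Char("aeiou")
-        vowel.parse_string("a")  # -> ['a']
-        vowel.parse_string("b")  # -> Exception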
- """
-
- def __init__(
- self,
- charset: str,
- as_keyword: bool = False,
- exclude_chars: typing.Optional[str] = None,
- *,
- asKeyword: bool = False,
- excludeChars: typing.Optional[str] = None,
- ):
- asKeyword = asKeyword or as_keyword
- excludeChars = excludeChars or exclude_chars
- super().__init__(
- charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
- )
- self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
- if asKeyword:
- self.reString = r"\b{}\b".format(self.reString)
- self.re = re.compile(self.reString)
- self.re_match = self.re.match
-
-
-class Regex(Token):
- r"""Token for matching strings that match a given regular
- expression. Defined with string specifying the regular expression in
- a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
- If the given regex contains named groups (defined using ``(?P<name>...)``),
- these will be preserved as named :class:`ParseResults`.
-
- If instead of the Python stdlib ``re`` module you wish to use a different RE module
- (such as the ``regex`` module), you can do so by building your ``Regex`` object with
- a compiled RE that was compiled using ``regex``.
-
- Example::
-
- realnum = Regex(r"[+-]?\d+\.\d*")
- # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
-        roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
-
- # named fields in a regex will be returned as named results
- date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
-
- # the Regex class will accept re's compiled using the regex module
- import regex
- parser = pp.Regex(regex.compile(r'[0-9]'))
- """
-
- def __init__(
- self,
- pattern: Any,
- flags: Union[re.RegexFlag, int] = 0,
- as_group_list: bool = False,
- as_match: bool = False,
- *,
- asGroupList: bool = False,
- asMatch: bool = False,
- ):
- """The parameters ``pattern`` and ``flags`` are passed
- to the ``re.compile()`` function as-is. See the Python
-        `re module <https://docs.python.org/3/library/re.html>`_ for an
- explanation of the acceptable patterns and flags.
- """
- super().__init__()
- asGroupList = asGroupList or as_group_list
- asMatch = asMatch or as_match
-
- if isinstance(pattern, str_type):
- if not pattern:
- raise ValueError("null string passed to Regex; use Empty() instead")
-
- self._re = None
- self.reString = self.pattern = pattern
- self.flags = flags
-
- elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
- self._re = pattern
- self.pattern = self.reString = pattern.pattern
- self.flags = flags
-
- else:
- raise TypeError(
- "Regex may only be constructed with a string or a compiled RE object"
- )
-
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.asGroupList = asGroupList
- self.asMatch = asMatch
- if self.asGroupList:
- self.parseImpl = self.parseImplAsGroupList
- if self.asMatch:
- self.parseImpl = self.parseImplAsMatch
-
- @cached_property
- def re(self):
- if self._re:
- return self._re
- else:
- try:
- return re.compile(self.pattern, self.flags)
- except re.error:
- raise ValueError(
- "invalid pattern ({!r}) passed to Regex".format(self.pattern)
- )
-
- @cached_property
- def re_match(self):
- return self.re.match
-
- @cached_property
- def mayReturnEmpty(self):
- return self.re_match("") is not None
-
- def _generateDefaultName(self):
- return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
-
- def parseImpl(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = ParseResults(result.group())
- d = result.groupdict()
- if d:
- for k, v in d.items():
- ret[k] = v
- return loc, ret
-
- def parseImplAsGroupList(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result.groups()
- return loc, ret
-
- def parseImplAsMatch(self, instring, loc, doActions=True):
- result = self.re_match(instring, loc)
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result
- return loc, ret
-
- def sub(self, repl: str) -> ParserElement:
- r"""
- Return :class:`Regex` with an attached parse action to transform the parsed
- result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
-
- Example::
-
- make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
- print(make_html.transform_string("h1:main title:"))
- # prints "<h1>main title</h1>"
- """
- if self.asGroupList:
- raise TypeError("cannot use sub() with Regex(asGroupList=True)")
-
- if self.asMatch and callable(repl):
- raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)")
-
- if self.asMatch:
-
- def pa(tokens):
- return tokens[0].expand(repl)
-
- else:
-
- def pa(tokens):
- return self.re.sub(repl, tokens[0])
-
- return self.add_parse_action(pa)
-
-
-class QuotedString(Token):
- r"""
- Token for matching strings that are delimited by quoting characters.
-
- Defined with the following parameters:
-
- - ``quote_char`` - string of one or more characters defining the
- quote delimiting string
-    - ``esc_char`` - character to escape quotes, typically backslash
- (default= ``None``)
-    - ``esc_quote`` - special quote sequence to escape an embedded quote
-      string (such as SQL's ``""`` to escape an embedded ``"``)
- (default= ``None``)
- - ``multiline`` - boolean indicating whether quotes can span
- multiple lines (default= ``False``)
- - ``unquote_results`` - boolean indicating whether the matched text
- should be unquoted (default= ``True``)
- - ``end_quote_char`` - string of one or more characters defining the
- end of the quote delimited string (default= ``None`` => same as
- quote_char)
- - ``convert_whitespace_escapes`` - convert escaped whitespace
- (``'\t'``, ``'\n'``, etc.) to actual whitespace
- (default= ``True``)
-
- Example::
-
- qs = QuotedString('"')
- print(qs.search_string('lsjdf "This is the quote" sldjf'))
- complex_qs = QuotedString('{{', end_quote_char='}}')
- print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
- sql_qs = QuotedString('"', esc_quote='""')
- print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
-
- prints::
-
- [['This is the quote']]
- [['This is the "quote"']]
- [['This is the quote with "embedded" quotes']]
- """
- ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))
-
- def __init__(
- self,
- quote_char: str = "",
- esc_char: typing.Optional[str] = None,
- esc_quote: typing.Optional[str] = None,
- multiline: bool = False,
- unquote_results: bool = True,
- end_quote_char: typing.Optional[str] = None,
- convert_whitespace_escapes: bool = True,
- *,
- quoteChar: str = "",
- escChar: typing.Optional[str] = None,
- escQuote: typing.Optional[str] = None,
- unquoteResults: bool = True,
- endQuoteChar: typing.Optional[str] = None,
- convertWhitespaceEscapes: bool = True,
- ):
- super().__init__()
- escChar = escChar or esc_char
- escQuote = escQuote or esc_quote
- unquoteResults = unquoteResults and unquote_results
- endQuoteChar = endQuoteChar or end_quote_char
- convertWhitespaceEscapes = (
- convertWhitespaceEscapes and convert_whitespace_escapes
- )
- quote_char = quoteChar or quote_char
-
-        # remove white space from quote chars - won't work anyway
- quote_char = quote_char.strip()
- if not quote_char:
- raise ValueError("quote_char cannot be the empty string")
-
- if endQuoteChar is None:
- endQuoteChar = quote_char
- else:
- endQuoteChar = endQuoteChar.strip()
- if not endQuoteChar:
- raise ValueError("endQuoteChar cannot be the empty string")
-
- self.quoteChar = quote_char
- self.quoteCharLen = len(quote_char)
- self.firstQuoteChar = quote_char[0]
- self.endQuoteChar = endQuoteChar
- self.endQuoteCharLen = len(endQuoteChar)
- self.escChar = escChar
- self.escQuote = escQuote
- self.unquoteResults = unquoteResults
- self.convertWhitespaceEscapes = convertWhitespaceEscapes
-
- sep = ""
- inner_pattern = ""
-
- if escQuote:
- inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote))
- sep = "|"
-
- if escChar:
- inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar))
- sep = "|"
- self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
-
- if len(self.endQuoteChar) > 1:
- inner_pattern += (
- "{}(?:".format(sep)
- + "|".join(
- "(?:{}(?!{}))".format(
- re.escape(self.endQuoteChar[:i]),
- re.escape(self.endQuoteChar[i:]),
- )
- for i in range(len(self.endQuoteChar) - 1, 0, -1)
- )
- + ")"
- )
- sep = "|"
-
- if multiline:
- self.flags = re.MULTILINE | re.DOTALL
- inner_pattern += r"{}(?:[^{}{}])".format(
- sep,
- _escape_regex_range_chars(self.endQuoteChar[0]),
- (_escape_regex_range_chars(escChar) if escChar is not None else ""),
- )
- else:
- self.flags = 0
- inner_pattern += r"{}(?:[^{}\n\r{}])".format(
- sep,
- _escape_regex_range_chars(self.endQuoteChar[0]),
- (_escape_regex_range_chars(escChar) if escChar is not None else ""),
- )
-
- self.pattern = "".join(
- [
- re.escape(self.quoteChar),
- "(?:",
- inner_pattern,
- ")*",
- re.escape(self.endQuoteChar),
- ]
- )
-
- try:
- self.re = re.compile(self.pattern, self.flags)
- self.reString = self.pattern
- self.re_match = self.re.match
- except re.error:
- raise ValueError(
-                "invalid pattern {!r} passed to QuotedString".format(self.pattern)
- )
-
- self.errmsg = "Expected " + self.name
- self.mayIndexError = False
- self.mayReturnEmpty = True
-
- def _generateDefaultName(self):
- if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type):
- return "string enclosed in {!r}".format(self.quoteChar)
-
- return "quoted string, starting with {} ending with {}".format(
- self.quoteChar, self.endQuoteChar
- )
-
- def parseImpl(self, instring, loc, doActions=True):
- result = (
- instring[loc] == self.firstQuoteChar
- and self.re_match(instring, loc)
- or None
- )
- if not result:
- raise ParseException(instring, loc, self.errmsg, self)
-
- loc = result.end()
- ret = result.group()
-
- if self.unquoteResults:
-
- # strip off quotes
- ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
-
- if isinstance(ret, str_type):
- # replace escaped whitespace
- if "\\" in ret and self.convertWhitespaceEscapes:
- for wslit, wschar in self.ws_map:
- ret = ret.replace(wslit, wschar)
-
- # replace escaped characters
- if self.escChar:
- ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
-
- # replace escaped quotes
- if self.escQuote:
- ret = ret.replace(self.escQuote, self.endQuoteChar)
-
- return loc, ret
-
-
-class CharsNotIn(Token):
- """Token for matching words composed of characters *not* in a given
- set (will include whitespace in matched characters if not listed in
- the provided exclusion set - see example). Defined with string
- containing all disallowed characters, and an optional minimum,
- maximum, and/or exact length. The default value for ``min`` is
- 1 (a minimum value < 1 is not valid); the default values for
- ``max`` and ``exact`` are 0, meaning no maximum or exact
- length restriction.
-
- Example::
-
- # define a comma-separated-value as anything that is not a ','
- csv_value = CharsNotIn(',')
- print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))
-
- prints::
-
- ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
- """
-
- def __init__(
- self,
- not_chars: str = "",
- min: int = 1,
- max: int = 0,
- exact: int = 0,
- *,
- notChars: str = "",
- ):
- super().__init__()
- self.skipWhitespace = False
- self.notChars = not_chars or notChars
- self.notCharsSet = set(self.notChars)
-
- if min < 1:
- raise ValueError(
- "cannot specify a minimum length < 1; use "
- "Opt(CharsNotIn()) if zero-length char group is permitted"
- )
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- self.errmsg = "Expected " + self.name
- self.mayReturnEmpty = self.minLen == 0
- self.mayIndexError = False
-
- def _generateDefaultName(self):
- not_chars_str = _collapse_string_to_ranges(self.notChars)
- if len(not_chars_str) > 16:
-            return "!W:({}...)".format(not_chars_str[: 16 - 3])
-        else:
-            return "!W:({})".format(not_chars_str)
-
- def parseImpl(self, instring, loc, doActions=True):
- notchars = self.notCharsSet
- if instring[loc] in notchars:
- raise ParseException(instring, loc, self.errmsg, self)
-
- start = loc
- loc += 1
- maxlen = min(start + self.maxLen, len(instring))
- while loc < maxlen and instring[loc] not in notchars:
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
-
-class White(Token):
- """Special matching class for matching whitespace. Normally,
- whitespace is ignored by pyparsing grammars. This class is included
- when some whitespace structures are significant. Define with
- a string containing the whitespace characters to be matched; default
- is ``" \\t\\r\\n"``. Also takes optional ``min``,
- ``max``, and ``exact`` arguments, as defined for the
- :class:`Word` class.
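-
-    Example (a minimal sketch; a tab made significant between two fields)::
-
-        tab = White("\\t")
-        row = Word(alphas) + tab + Word(nums)
-        row.parse_string("abc\\t123")  # -> ['abc', '\\t', '123']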
- """
-
- whiteStrs = {
- " ": "<SP>",
- "\t": "<TAB>",
- "\n": "<LF>",
- "\r": "<CR>",
- "\f": "<FF>",
- "\u00A0": "<NBSP>",
- "\u1680": "<OGHAM_SPACE_MARK>",
- "\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
- "\u2000": "<EN_QUAD>",
- "\u2001": "<EM_QUAD>",
- "\u2002": "<EN_SPACE>",
- "\u2003": "<EM_SPACE>",
- "\u2004": "<THREE-PER-EM_SPACE>",
- "\u2005": "<FOUR-PER-EM_SPACE>",
- "\u2006": "<SIX-PER-EM_SPACE>",
- "\u2007": "<FIGURE_SPACE>",
- "\u2008": "<PUNCTUATION_SPACE>",
- "\u2009": "<THIN_SPACE>",
- "\u200A": "<HAIR_SPACE>",
- "\u200B": "<ZERO_WIDTH_SPACE>",
- "\u202F": "<NNBSP>",
- "\u205F": "<MMSP>",
- "\u3000": "<IDEOGRAPHIC_SPACE>",
- }
-
- def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
- super().__init__()
- self.matchWhite = ws
- self.set_whitespace_chars(
- "".join(c for c in self.whiteStrs if c not in self.matchWhite),
- copy_defaults=True,
- )
- # self.leave_whitespace()
- self.mayReturnEmpty = True
- self.errmsg = "Expected " + self.name
-
- self.minLen = min
-
- if max > 0:
- self.maxLen = max
- else:
- self.maxLen = _MAX_INT
-
- if exact > 0:
- self.maxLen = exact
- self.minLen = exact
-
- def _generateDefaultName(self):
- return "".join(White.whiteStrs[c] for c in self.matchWhite)
-
- def parseImpl(self, instring, loc, doActions=True):
- if instring[loc] not in self.matchWhite:
- raise ParseException(instring, loc, self.errmsg, self)
- start = loc
- loc += 1
- maxloc = start + self.maxLen
- maxloc = min(maxloc, len(instring))
- while loc < maxloc and instring[loc] in self.matchWhite:
- loc += 1
-
- if loc - start < self.minLen:
- raise ParseException(instring, loc, self.errmsg, self)
-
- return loc, instring[start:loc]
-
-
-class PositionToken(Token):
- def __init__(self):
- super().__init__()
- self.mayReturnEmpty = True
- self.mayIndexError = False
-
-
-class GoToColumn(PositionToken):
- """Token to advance to a specific column of input text; useful for
- tabular report scraping.
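-
-    Example (an illustrative sketch; no output shown, since the exact
-    behavior depends on the surrounding grammar)::
-
-        # skip ahead to column 20, then parse the field found there
-        field_at_20 = GoToColumn(20) + Word(printables)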
- """
-
- def __init__(self, colno: int):
- super().__init__()
- self.col = colno
-
- def preParse(self, instring, loc):
- if col(loc, instring) != self.col:
- instrlen = len(instring)
- if self.ignoreExprs:
- loc = self._skipIgnorables(instring, loc)
- while (
- loc < instrlen
- and instring[loc].isspace()
- and col(loc, instring) != self.col
- ):
- loc += 1
- return loc
-
- def parseImpl(self, instring, loc, doActions=True):
- thiscol = col(loc, instring)
- if thiscol > self.col:
- raise ParseException(instring, loc, "Text not in expected column", self)
- newloc = loc + self.col - thiscol
- ret = instring[loc:newloc]
- return newloc, ret
-
-
-class LineStart(PositionToken):
- r"""Matches if current position is at the beginning of a line within
- the parse string
-
- Example::
-
- test = '''\
- AAA this line
- AAA and this line
- AAA but not this one
- B AAA and definitely not this one
- '''
-
- for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
- print(t)
-
- prints::
-
- ['AAA', ' this line']
- ['AAA', ' and this line']
-
- """
-
- def __init__(self):
- super().__init__()
- self.leave_whitespace()
- self.orig_whiteChars = set() | self.whiteChars
- self.whiteChars.discard("\n")
- self.skipper = Empty().set_whitespace_chars(self.whiteChars)
- self.errmsg = "Expected start of line"
-
- def preParse(self, instring, loc):
- if loc == 0:
- return loc
- else:
- ret = self.skipper.preParse(instring, loc)
- if "\n" in self.orig_whiteChars:
- while instring[ret : ret + 1] == "\n":
- ret = self.skipper.preParse(instring, ret + 1)
- return ret
-
- def parseImpl(self, instring, loc, doActions=True):
- if col(loc, instring) == 1:
- return loc, []
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class LineEnd(PositionToken):
- """Matches if current position is at the end of a line within the
- parse string
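-
-    Example (illustrative)::
-
-        # match a full line, including its terminating newline
-        line = LineStart() + restOfLine + LineEnd()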
- """
-
- def __init__(self):
- super().__init__()
- self.whiteChars.discard("\n")
- self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
- self.errmsg = "Expected end of line"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc < len(instring):
- if instring[loc] == "\n":
- return loc + 1, "\n"
- else:
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc + 1, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class StringStart(PositionToken):
- """Matches if current position is at the beginning of the parse
- string
- """
-
- def __init__(self):
- super().__init__()
- self.errmsg = "Expected start of text"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc != 0:
- # see if entire string up to here is just whitespace and ignoreables
- if loc != self.preParse(instring, 0):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-
-class StringEnd(PositionToken):
- """
- Matches if current position is at the end of the parse string
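-
-    Example (illustrative; forces the grammar to consume the entire input)::
-
-        full = Word(nums) + StringEnd()
-        full.parse_string("123")  # -> ['123']
-        full.parse_string("123 x")  # -> Exception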
- """
-
- def __init__(self):
- super().__init__()
- self.errmsg = "Expected end of text"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc < len(instring):
- raise ParseException(instring, loc, self.errmsg, self)
- elif loc == len(instring):
- return loc + 1, []
- elif loc > len(instring):
- return loc, []
- else:
- raise ParseException(instring, loc, self.errmsg, self)
-
-
-class WordStart(PositionToken):
-    r"""Matches if the current position is at the beginning of a
- :class:`Word`, and is not preceded by any character in a given
- set of ``word_chars`` (default= ``printables``). To emulate the
- ``\b`` behavior of regular expressions, use
- ``WordStart(alphanums)``. ``WordStart`` will also match at
- the beginning of the string being parsed, or at the beginning of
- a line.
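-
-    Example (illustrative)::
-
-        # match 'cat' only at the start of a word
-        expr = WordStart(alphas) + Literal("cat")
-        expr.search_string("cat concat catsup")  # -> [['cat'], ['cat']]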
- """
-
- def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
- wordChars = word_chars if wordChars == printables else wordChars
- super().__init__()
- self.wordChars = set(wordChars)
- self.errmsg = "Not at the start of a word"
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc != 0:
- if (
- instring[loc - 1] in self.wordChars
- or instring[loc] not in self.wordChars
- ):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-
-class WordEnd(PositionToken):
-    r"""Matches if the current position is at the end of a :class:`Word`,
- and is not followed by any character in a given set of ``word_chars``
- (default= ``printables``). To emulate the ``\b`` behavior of
- regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
- will also match at the end of the string being parsed, or at the end
- of a line.
- """
-
- def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
- wordChars = word_chars if wordChars == printables else wordChars
- super().__init__()
- self.wordChars = set(wordChars)
- self.skipWhitespace = False
- self.errmsg = "Not at the end of a word"
-
- def parseImpl(self, instring, loc, doActions=True):
- instrlen = len(instring)
- if instrlen > 0 and loc < instrlen:
- if (
- instring[loc] in self.wordChars
- or instring[loc - 1] not in self.wordChars
- ):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
-
-class ParseExpression(ParserElement):
- """Abstract subclass of ParserElement, for combining and
- post-processing parsed tokens.
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
- super().__init__(savelist)
- self.exprs: List[ParserElement]
- if isinstance(exprs, _generatorType):
- exprs = list(exprs)
-
- if isinstance(exprs, str_type):
- self.exprs = [self._literalStringClass(exprs)]
- elif isinstance(exprs, ParserElement):
- self.exprs = [exprs]
- elif isinstance(exprs, Iterable):
- exprs = list(exprs)
- # if sequence of strings provided, wrap with Literal
- if any(isinstance(expr, str_type) for expr in exprs):
- exprs = (
- self._literalStringClass(e) if isinstance(e, str_type) else e
- for e in exprs
- )
- self.exprs = list(exprs)
- else:
- try:
- self.exprs = list(exprs)
- except TypeError:
- self.exprs = [exprs]
- self.callPreparse = False
-
- def recurse(self) -> Sequence[ParserElement]:
- return self.exprs[:]
-
- def append(self, other) -> ParserElement:
- self.exprs.append(other)
- self._defaultName = None
- return self
-
- def leave_whitespace(self, recursive: bool = True) -> ParserElement:
- """
- Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
- all contained expressions.
- """
- super().leave_whitespace(recursive)
-
- if recursive:
- self.exprs = [e.copy() for e in self.exprs]
- for e in self.exprs:
- e.leave_whitespace(recursive)
- return self
-
- def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
- """
-        Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on
- all contained expressions.
- """
- super().ignore_whitespace(recursive)
- if recursive:
- self.exprs = [e.copy() for e in self.exprs]
- for e in self.exprs:
- e.ignore_whitespace(recursive)
- return self
-
- def ignore(self, other) -> ParserElement:
- if isinstance(other, Suppress):
- if other not in self.ignoreExprs:
- super().ignore(other)
- for e in self.exprs:
- e.ignore(self.ignoreExprs[-1])
- else:
- super().ignore(other)
- for e in self.exprs:
- e.ignore(self.ignoreExprs[-1])
- return self
-
- def _generateDefaultName(self):
- return "{}:({})".format(self.__class__.__name__, str(self.exprs))
-
- def streamline(self) -> ParserElement:
- if self.streamlined:
- return self
-
- super().streamline()
-
- for e in self.exprs:
- e.streamline()
-
- # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
- # but only if there are no parse actions or resultsNames on the nested And's
- # (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
- if len(self.exprs) == 2:
- other = self.exprs[0]
- if (
- isinstance(other, self.__class__)
- and not other.parseAction
- and other.resultsName is None
- and not other.debug
- ):
- self.exprs = other.exprs[:] + [self.exprs[1]]
- self._defaultName = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- other = self.exprs[-1]
- if (
- isinstance(other, self.__class__)
- and not other.parseAction
- and other.resultsName is None
- and not other.debug
- ):
- self.exprs = self.exprs[:-1] + other.exprs[:]
- self._defaultName = None
- self.mayReturnEmpty |= other.mayReturnEmpty
- self.mayIndexError |= other.mayIndexError
-
- self.errmsg = "Expected " + str(self)
-
- return self
-
- def validate(self, validateTrace=None) -> None:
- tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
- for e in self.exprs:
- e.validate(tmp)
- self._checkRecursion([])
-
- def copy(self) -> ParserElement:
- ret = super().copy()
- ret.exprs = [e.copy() for e in self.exprs]
- return ret
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_ungrouped_named_tokens_in_collection
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in self.suppress_warnings_
- ):
- for e in self.exprs:
- if (
- isinstance(e, ParserElement)
- and e.resultsName
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in e.suppress_warnings_
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "collides with {!r} on contained expression".format(
- "warn_ungrouped_named_tokens_in_collection",
- name,
- type(self).__name__,
- e.resultsName,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
-
-class And(ParseExpression):
- """
- Requires all given :class:`ParseExpression` s to be found in the given order.
- Expressions may be separated by whitespace.
- May be constructed using the ``'+'`` operator.
- May also be constructed using the ``'-'`` operator, which will
- suppress backtracking.
-
- Example::
-
- integer = Word(nums)
- name_expr = Word(alphas)[1, ...]
-
- expr = And([integer("id"), name_expr("name"), integer("age")])
- # more easily written as:
- expr = integer("id") + name_expr("name") + integer("age")
- """
-
- class _ErrorStop(Empty):
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- self.leave_whitespace()
-
- def _generateDefaultName(self):
- return "-"
-
- def __init__(
- self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True
- ):
- exprs: List[ParserElement] = list(exprs_arg)
- if exprs and Ellipsis in exprs:
- tmp = []
- for i, expr in enumerate(exprs):
- if expr is Ellipsis:
- if i < len(exprs) - 1:
- skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1]
- tmp.append(SkipTo(skipto_arg)("_skipped*"))
- else:
- raise Exception(
- "cannot construct And with sequence ending in ..."
- )
- else:
- tmp.append(expr)
- exprs[:] = tmp
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- if not isinstance(self.exprs[0], White):
- self.set_whitespace_chars(
- self.exprs[0].whiteChars,
- copy_defaults=self.exprs[0].copyDefaultWhiteChars,
- )
- self.skipWhitespace = self.exprs[0].skipWhitespace
- else:
- self.skipWhitespace = False
- else:
- self.mayReturnEmpty = True
- self.callPreparse = True
-
- def streamline(self) -> ParserElement:
- # collapse any _PendingSkip's
- if self.exprs:
- if any(
- isinstance(e, ParseExpression)
- and e.exprs
- and isinstance(e.exprs[-1], _PendingSkip)
- for e in self.exprs[:-1]
- ):
- for i, e in enumerate(self.exprs[:-1]):
- if e is None:
- continue
- if (
- isinstance(e, ParseExpression)
- and e.exprs
- and isinstance(e.exprs[-1], _PendingSkip)
- ):
- e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
- self.exprs[i + 1] = None
- self.exprs = [e for e in self.exprs if e is not None]
-
- super().streamline()
-
- # link any IndentedBlocks to the prior expression
- for prev, cur in zip(self.exprs, self.exprs[1:]):
- # traverse cur or any first embedded expr of cur looking for an IndentedBlock
- # (but watch out for recursive grammar)
- seen = set()
- while cur:
- if id(cur) in seen:
- break
- seen.add(id(cur))
- if isinstance(cur, IndentedBlock):
- prev.add_parse_action(
- lambda s, l, t, cur_=cur: setattr(
- cur_, "parent_anchor", col(l, s)
- )
- )
- break
- subs = cur.recurse()
- cur = next(iter(subs), None)
-
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- # pass False as callPreParse arg to _parse for first element, since we already
- # pre-parsed the string as part of our And pre-parsing
- loc, resultlist = self.exprs[0]._parse(
- instring, loc, doActions, callPreParse=False
- )
- errorStop = False
- for e in self.exprs[1:]:
- # if isinstance(e, And._ErrorStop):
- if type(e) is And._ErrorStop:
- errorStop = True
- continue
- if errorStop:
- try:
- loc, exprtokens = e._parse(instring, loc, doActions)
- except ParseSyntaxException:
- raise
- except ParseBaseException as pe:
- pe.__traceback__ = None
- raise ParseSyntaxException._from_exception(pe)
- except IndexError:
- raise ParseSyntaxException(
- instring, len(instring), self.errmsg, self
- )
- else:
- loc, exprtokens = e._parse(instring, loc, doActions)
- if exprtokens or exprtokens.haskeys():
- resultlist += exprtokens
- return loc, resultlist
-
- def __iadd__(self, other):
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- return self.append(other) # And([self, other])
-
- def _checkRecursion(self, parseElementList):
- subRecCheckList = parseElementList[:] + [self]
- for e in self.exprs:
- e._checkRecursion(subRecCheckList)
- if not e.mayReturnEmpty:
- break
-
- def _generateDefaultName(self):
- inner = " ".join(str(e) for e in self.exprs)
- # strip off redundant inner {}'s
- while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
- inner = inner[1:-1]
- return "{" + inner + "}"
-
-
-class Or(ParseExpression):
- """Requires that at least one :class:`ParseExpression` is found. If
- two expressions match, the expression that matches the longest
- string will be used. May be constructed using the ``'^'``
- operator.
-
- Example::
-
- # construct Or using '^' operator
-
- number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
- print(number.search_string("123 3.1416 789"))
-
- prints::
-
- [['123'], ['3.1416'], ['789']]
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def streamline(self) -> ParserElement:
- super().streamline()
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.saveAsList = any(e.saveAsList for e in self.exprs)
- self.skipWhitespace = all(
- e.skipWhitespace and not isinstance(e, White) for e in self.exprs
- )
- else:
- self.saveAsList = False
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- maxExcLoc = -1
- maxException = None
- matches = []
- fatals = []
- if all(e.callPreparse for e in self.exprs):
- loc = self.preParse(instring, loc)
- for e in self.exprs:
- try:
- loc2 = e.try_parse(instring, loc, raise_fatal=True)
- except ParseFatalException as pfe:
- pfe.__traceback__ = None
- pfe.parserElement = e
- fatals.append(pfe)
- maxException = None
- maxExcLoc = -1
- except ParseException as err:
- if not fatals:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(
- instring, len(instring), e.errmsg, self
- )
- maxExcLoc = len(instring)
- else:
- # save match among all matches, to retry longest to shortest
- matches.append((loc2, e))
-
- if matches:
- # re-evaluate all matches in descending order of length of match, in case attached actions
- # might change whether or how much they match of the input.
- matches.sort(key=itemgetter(0), reverse=True)
-
- if not doActions:
- # no further conditions or parse actions to change the selection of
- # alternative, so the first match will be the best match
- best_expr = matches[0][1]
- return best_expr._parse(instring, loc, doActions)
-
- longest = -1, None
- for loc1, expr1 in matches:
- if loc1 <= longest[0]:
- # already have a longer match than this one will deliver, we are done
- return longest
-
- try:
- loc2, toks = expr1._parse(instring, loc, doActions)
- except ParseException as err:
- err.__traceback__ = None
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- else:
- if loc2 >= loc1:
- return loc2, toks
- # didn't match as much as before
- elif loc2 > longest[0]:
- longest = loc2, toks
-
- if longest != (-1, None):
- return longest
-
- if fatals:
- if len(fatals) > 1:
- fatals.sort(key=lambda e: -e.loc)
- if fatals[0].loc == fatals[1].loc:
- fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
- max_fatal = fatals[0]
- raise max_fatal
-
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(
- instring, loc, "no defined alternatives to match", self
- )
-
- def __ixor__(self, other):
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- return self.append(other) # Or([self, other])
-
- def _generateDefaultName(self):
- return "{" + " ^ ".join(str(e) for e in self.exprs) + "}"
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_multiple_tokens_in_named_alternation
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in self.suppress_warnings_
- ):
- if any(
- isinstance(e, And)
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in e.suppress_warnings_
- for e in self.exprs
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "will return a list of all parsed tokens in an And alternative, "
- "in prior versions only the first token was returned; enclose "
- "contained argument in Group".format(
- "warn_multiple_tokens_in_named_alternation",
- name,
- type(self).__name__,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
-
-class MatchFirst(ParseExpression):
- """Requires that at least one :class:`ParseExpression` is found. If
- more than one expression matches, the first one listed is the one that will
- match. May be constructed using the ``'|'`` operator.
-
- Example::
-
- # construct MatchFirst using '|' operator
-
- # watch the order of expressions to match
- number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
- print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
-
- # put more selective expression first
- number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
- print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
- else:
- self.mayReturnEmpty = True
-
- def streamline(self) -> ParserElement:
- if self.streamlined:
- return self
-
- super().streamline()
- if self.exprs:
- self.saveAsList = any(e.saveAsList for e in self.exprs)
- self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
- self.skipWhitespace = all(
- e.skipWhitespace and not isinstance(e, White) for e in self.exprs
- )
- else:
- self.saveAsList = False
- self.mayReturnEmpty = True
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- maxExcLoc = -1
- maxException = None
-
- for e in self.exprs:
- try:
- return e._parse(
- instring,
- loc,
- doActions,
- )
- except ParseFatalException as pfe:
- pfe.__traceback__ = None
- pfe.parserElement = e
- raise
- except ParseException as err:
- if err.loc > maxExcLoc:
- maxException = err
- maxExcLoc = err.loc
- except IndexError:
- if len(instring) > maxExcLoc:
- maxException = ParseException(
- instring, len(instring), e.errmsg, self
- )
- maxExcLoc = len(instring)
-
- if maxException is not None:
- maxException.msg = self.errmsg
- raise maxException
- else:
- raise ParseException(
- instring, loc, "no defined alternatives to match", self
- )
-
- def __ior__(self, other):
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- return self.append(other) # MatchFirst([self, other])
-
- def _generateDefaultName(self):
- return "{" + " | ".join(str(e) for e in self.exprs) + "}"
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_multiple_tokens_in_named_alternation
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in self.suppress_warnings_
- ):
- if any(
- isinstance(e, And)
- and Diagnostics.warn_multiple_tokens_in_named_alternation
- not in e.suppress_warnings_
- for e in self.exprs
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "will return a list of all parsed tokens in an And alternative, "
- "in prior versions only the first token was returned; enclose "
- "contained argument in Group".format(
- "warn_multiple_tokens_in_named_alternation",
- name,
- type(self).__name__,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
-
-class Each(ParseExpression):
- """Requires all given :class:`ParseExpression` s to be found, but in
- any order. Expressions may be separated by whitespace.
-
- May be constructed using the ``'&'`` operator.
-
- Example::
-
- color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
- shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
- integer = Word(nums)
- shape_attr = "shape:" + shape_type("shape")
- posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
- color_attr = "color:" + color("color")
- size_attr = "size:" + integer("size")
-
- # use Each (using operator '&') to accept attributes in any order
- # (shape and posn are required, color and size are optional)
- shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)
-
- shape_spec.run_tests('''
- shape: SQUARE color: BLACK posn: 100, 120
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- color:GREEN size:20 shape:TRIANGLE posn:20,40
- '''
- )
-
- prints::
-
- shape: SQUARE color: BLACK posn: 100, 120
- ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- - color: BLACK
- - posn: ['100', ',', '120']
- - x: 100
- - y: 120
- - shape: SQUARE
-
-
- shape: CIRCLE size: 50 color: BLUE posn: 50,80
- ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- - color: BLUE
- - posn: ['50', ',', '80']
- - x: 50
- - y: 80
- - shape: CIRCLE
- - size: 50
-
-
- color: GREEN size: 20 shape: TRIANGLE posn: 20,40
- ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- - color: GREEN
- - posn: ['20', ',', '40']
- - x: 20
- - y: 40
- - shape: TRIANGLE
- - size: 20
- """
-
- def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True):
- super().__init__(exprs, savelist)
- if self.exprs:
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
- self.skipWhitespace = True
- self.initExprGroups = True
- self.saveAsList = True
-
- def streamline(self) -> ParserElement:
- super().streamline()
- if self.exprs:
- self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
- else:
- self.mayReturnEmpty = True
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
- if self.initExprGroups:
- self.opt1map = dict(
- (id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
- )
- opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
- opt2 = [
- e
- for e in self.exprs
- if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
- ]
- self.optionals = opt1 + opt2
- self.multioptionals = [
- e.expr.set_results_name(e.resultsName, list_all_matches=True)
- for e in self.exprs
- if isinstance(e, _MultipleMatch)
- ]
- self.multirequired = [
- e.expr.set_results_name(e.resultsName, list_all_matches=True)
- for e in self.exprs
- if isinstance(e, OneOrMore)
- ]
- self.required = [
- e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
- ]
- self.required += self.multirequired
- self.initExprGroups = False
-
- tmpLoc = loc
- tmpReqd = self.required[:]
- tmpOpt = self.optionals[:]
- multis = self.multioptionals[:]
- matchOrder = []
-
- keepMatching = True
- failed = []
- fatals = []
- while keepMatching:
- tmpExprs = tmpReqd + tmpOpt + multis
- failed.clear()
- fatals.clear()
- for e in tmpExprs:
- try:
- tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
- except ParseFatalException as pfe:
- pfe.__traceback__ = None
- pfe.parserElement = e
- fatals.append(pfe)
- failed.append(e)
- except ParseException:
- failed.append(e)
- else:
- matchOrder.append(self.opt1map.get(id(e), e))
- if e in tmpReqd:
- tmpReqd.remove(e)
- elif e in tmpOpt:
- tmpOpt.remove(e)
- if len(failed) == len(tmpExprs):
- keepMatching = False
-
- # look for any ParseFatalExceptions
- if fatals:
- if len(fatals) > 1:
- fatals.sort(key=lambda e: -e.loc)
- if fatals[0].loc == fatals[1].loc:
- fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
- max_fatal = fatals[0]
- raise max_fatal
-
- if tmpReqd:
- missing = ", ".join([str(e) for e in tmpReqd])
- raise ParseException(
- instring,
- loc,
- "Missing one or more required elements ({})".format(missing),
- )
-
- # add any unmatched Opts, in case they have default values defined
- matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]
-
- total_results = ParseResults([])
- for e in matchOrder:
- loc, results = e._parse(instring, loc, doActions)
- total_results += results
-
- return loc, total_results
-
- def _generateDefaultName(self):
- return "{" + " & ".join(str(e) for e in self.exprs) + "}"
-
-
-class ParseElementEnhance(ParserElement):
- """Abstract subclass of :class:`ParserElement`, for combining and
- post-processing parsed tokens.
- """
-
- def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
- super().__init__(savelist)
- if isinstance(expr, str_type):
- if issubclass(self._literalStringClass, Token):
- expr = self._literalStringClass(expr)
- elif issubclass(type(self), self._literalStringClass):
- expr = Literal(expr)
- else:
- expr = self._literalStringClass(Literal(expr))
- self.expr = expr
- if expr is not None:
- self.mayIndexError = expr.mayIndexError
- self.mayReturnEmpty = expr.mayReturnEmpty
- self.set_whitespace_chars(
- expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
- )
- self.skipWhitespace = expr.skipWhitespace
- self.saveAsList = expr.saveAsList
- self.callPreparse = expr.callPreparse
- self.ignoreExprs.extend(expr.ignoreExprs)
-
- def recurse(self) -> Sequence[ParserElement]:
- return [self.expr] if self.expr is not None else []
-
- def parseImpl(self, instring, loc, doActions=True):
- if self.expr is not None:
- return self.expr._parse(instring, loc, doActions, callPreParse=False)
- else:
- raise ParseException(instring, loc, "No expression defined", self)
-
- def leave_whitespace(self, recursive: bool = True) -> ParserElement:
- super().leave_whitespace(recursive)
-
- if recursive:
- self.expr = self.expr.copy()
- if self.expr is not None:
- self.expr.leave_whitespace(recursive)
- return self
-
- def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
- super().ignore_whitespace(recursive)
-
- if recursive:
- self.expr = self.expr.copy()
- if self.expr is not None:
- self.expr.ignore_whitespace(recursive)
- return self
-
- def ignore(self, other) -> ParserElement:
- if isinstance(other, Suppress):
- if other not in self.ignoreExprs:
- super().ignore(other)
- if self.expr is not None:
- self.expr.ignore(self.ignoreExprs[-1])
- else:
- super().ignore(other)
- if self.expr is not None:
- self.expr.ignore(self.ignoreExprs[-1])
- return self
-
- def streamline(self) -> ParserElement:
- super().streamline()
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def _checkRecursion(self, parseElementList):
- if self in parseElementList:
- raise RecursiveGrammarException(parseElementList + [self])
- subRecCheckList = parseElementList[:] + [self]
- if self.expr is not None:
- self.expr._checkRecursion(subRecCheckList)
-
- def validate(self, validateTrace=None) -> None:
- if validateTrace is None:
- validateTrace = []
- tmp = validateTrace[:] + [self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self._checkRecursion([])
-
- def _generateDefaultName(self):
- return "{}:({})".format(self.__class__.__name__, str(self.expr))
-
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
-
-class IndentedBlock(ParseElementEnhance):
- """
- Expression to match one or more expressions at a given indentation level.
- Useful for parsing text where structure is implied by indentation (like Python source code).
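-
-    Example (a minimal sketch, assuming the default ``grouped=True``)::
-
-        stmt = Word(alphas)
-        suite = IndentedBlock(stmt)
-        parser = stmt + ":" + suite
-
-        parser.parse_string("if:\n    x\n    y")
-        # -> ['if', ':', ['x', 'y']]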
- """
-
- class _Indent(Empty):
- def __init__(self, ref_col: int):
- super().__init__()
- self.errmsg = "expected indent at column {}".format(ref_col)
- self.add_condition(lambda s, l, t: col(l, s) == ref_col)
-
- class _IndentGreater(Empty):
- def __init__(self, ref_col: int):
- super().__init__()
- self.errmsg = "expected indent at column greater than {}".format(ref_col)
- self.add_condition(lambda s, l, t: col(l, s) > ref_col)
-
- def __init__(
- self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
- ):
- super().__init__(expr, savelist=True)
- # if recursive:
- # raise NotImplementedError("IndentedBlock with recursive is not implemented")
- self._recursive = recursive
- self._grouped = grouped
- self.parent_anchor = 1
-
- def parseImpl(self, instring, loc, doActions=True):
- # advance parse position to non-whitespace by using an Empty()
- # this should be the column to be used for all subsequent indented lines
- anchor_loc = Empty().preParse(instring, loc)
-
- # see if self.expr matches at the current location - if not it will raise an exception
- # and no further work is necessary
- self.expr.try_parse(instring, anchor_loc, doActions)
-
- indent_col = col(anchor_loc, instring)
- peer_detect_expr = self._Indent(indent_col)
-
- inner_expr = Empty() + peer_detect_expr + self.expr
- if self._recursive:
- sub_indent = self._IndentGreater(indent_col)
- nested_block = IndentedBlock(
- self.expr, recursive=self._recursive, grouped=self._grouped
- )
- nested_block.set_debug(self.debug)
- nested_block.parent_anchor = indent_col
- inner_expr += Opt(sub_indent + nested_block)
-
- inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
- block = OneOrMore(inner_expr)
-
- trailing_undent = self._Indent(self.parent_anchor) | StringEnd()
-
- if self._grouped:
- wrapper = Group
- else:
- wrapper = lambda expr: expr
- return (wrapper(block) + Optional(trailing_undent)).parseImpl(
- instring, anchor_loc, doActions
- )
-
-
-class AtStringStart(ParseElementEnhance):
- """Matches if expression matches at the beginning of the parse
- string::
-
- AtStringStart(Word(nums)).parse_string("123")
- # prints ["123"]
-
- AtStringStart(Word(nums)).parse_string(" 123")
- # raises ParseException
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- self.callPreparse = False
-
- def parseImpl(self, instring, loc, doActions=True):
- if loc != 0:
- raise ParseException(instring, loc, "not found at string start")
- return super().parseImpl(instring, loc, doActions)
-
-
-class AtLineStart(ParseElementEnhance):
- r"""Matches if an expression matches at the beginning of a line within
- the parse string
-
- Example::
-
- test = '''\
- AAA this line
- AAA and this line
- AAA but not this one
- B AAA and definitely not this one
- '''
-
- for t in (AtLineStart('AAA') + restOfLine).search_string(test):
- print(t)
-
- prints::
-
- ['AAA', ' this line']
- ['AAA', ' and this line']
-
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- self.callPreparse = False
-
- def parseImpl(self, instring, loc, doActions=True):
- if col(loc, instring) != 1:
- raise ParseException(instring, loc, "not found at line start")
- return super().parseImpl(instring, loc, doActions)
-
-
-class FollowedBy(ParseElementEnhance):
- """Lookahead matching of the given parse expression.
- ``FollowedBy`` does *not* advance the parsing position within
- the input string, it only verifies that the specified parse
- expression matches at the current position. ``FollowedBy``
- always returns a null token list. If any results names are defined
- in the lookahead expression, those *will* be returned for access by
- name.
-
- Example::
-
- # use FollowedBy to match a label only if it is followed by a ':'
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-
- attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
-
- prints::
-
- [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- self.mayReturnEmpty = True
-
- def parseImpl(self, instring, loc, doActions=True):
-        # by using self.expr._parse and deleting the contents of the returned ParseResults list
- # we keep any named results that were defined in the FollowedBy expression
- _, ret = self.expr._parse(instring, loc, doActions=doActions)
- del ret[:]
-
- return loc, ret
-
-
-class PrecededBy(ParseElementEnhance):
- """Lookbehind matching of the given parse expression.
- ``PrecededBy`` does not advance the parsing position within the
- input string, it only verifies that the specified parse expression
- matches prior to the current position. ``PrecededBy`` always
- returns a null token list, but if a results name is defined on the
- given expression, it is returned.
-
- Parameters:
-
- - expr - expression that must match prior to the current parse
- location
-    - retreat - (default= ``None``) - (int) maximum number of characters
-      to look back before the current parse location
-
- If the lookbehind expression is a string, :class:`Literal`,
- :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
- with a specified exact or maximum length, then the retreat
- parameter is not required. Otherwise, retreat must be specified to
- give a maximum number of characters to look back from
- the current parse position for a lookbehind match.
-
- Example::
-
- # VB-style variable names with type prefixes
- int_var = PrecededBy("#") + pyparsing_common.identifier
- str_var = PrecededBy("$") + pyparsing_common.identifier
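-
-        # a minimal check (illustrative) - PrecededBy is typically used with
-        # scan_string/search_string rather than parse_string, since there must
-        # be text before the match position:
-        str_var.search_string("$name = 'x'")  # -> [['name']]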
-
- """
-
- def __init__(
- self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
- ):
- super().__init__(expr)
- self.expr = self.expr().leave_whitespace()
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.exact = False
- if isinstance(expr, str_type):
- retreat = len(expr)
- self.exact = True
- elif isinstance(expr, (Literal, Keyword)):
- retreat = expr.matchLen
- self.exact = True
- elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
- retreat = expr.maxLen
- self.exact = True
- elif isinstance(expr, PositionToken):
- retreat = 0
- self.exact = True
- self.retreat = retreat
- self.errmsg = "not preceded by " + str(expr)
- self.skipWhitespace = False
- self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
-
- def parseImpl(self, instring, loc=0, doActions=True):
- if self.exact:
- if loc < self.retreat:
- raise ParseException(instring, loc, self.errmsg)
- start = loc - self.retreat
- _, ret = self.expr._parse(instring, start)
- else:
- # retreat specified a maximum lookbehind window, iterate
- test_expr = self.expr + StringEnd()
- instring_slice = instring[max(0, loc - self.retreat) : loc]
- last_expr = ParseException(instring, loc, self.errmsg)
- for offset in range(1, min(loc, self.retreat + 1) + 1):
- try:
- # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
- _, ret = test_expr._parse(
- instring_slice, len(instring_slice) - offset
- )
- except ParseBaseException as pbe:
- last_expr = pbe
- else:
- break
- else:
- raise last_expr
- return loc, ret
-
-
-class Located(ParseElementEnhance):
- """
- Decorates a returned token with its starting and ending
- locations in the input string.
-
- This helper adds the following results names:
-
- - ``locn_start`` - location where matched expression begins
- - ``locn_end`` - location where matched expression ends
- - ``value`` - the actual parsed results
-
- Be careful if the input text contains ``<TAB>`` characters, you
- may want to call :class:`ParserElement.parse_with_tabs`
-
- Example::
-
- wd = Word(alphas)
- for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
- print(match)
-
- prints::
-
- [0, ['ljsdf'], 5]
- [8, ['lksdjjf'], 15]
- [18, ['lkkjj'], 23]
-
- """
-
- def parseImpl(self, instring, loc, doActions=True):
- start = loc
- loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
- ret_tokens = ParseResults([start, tokens, loc])
- ret_tokens["locn_start"] = start
- ret_tokens["value"] = tokens
- ret_tokens["locn_end"] = loc
- if self.resultsName:
- # must return as a list, so that the name will be attached to the complete group
- return loc, [ret_tokens]
- else:
- return loc, ret_tokens
-
-
-class NotAny(ParseElementEnhance):
- """
- Lookahead to disallow matching with the given parse expression.
- ``NotAny`` does *not* advance the parsing position within the
- input string, it only verifies that the specified parse expression
- does *not* match at the current position. Also, ``NotAny`` does
- *not* skip over leading whitespace. ``NotAny`` always returns
- a null token list. May be constructed using the ``'~'`` operator.
-
- Example::
-
- AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
-
- # take care not to mistake keywords for identifiers
- ident = ~(AND | OR | NOT) + Word(alphas)
- boolean_term = Opt(NOT) + ident
-
- # very crude boolean expression - to support parenthesis groups and
- # operation hierarchy, use infix_notation
- boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...]
-
- # integers that are followed by "." are actually floats
- integer = Word(nums) + ~Char(".")
- """
-
- def __init__(self, expr: Union[ParserElement, str]):
- super().__init__(expr)
- # do NOT use self.leave_whitespace(), don't want to propagate to exprs
- # self.leave_whitespace()
- self.skipWhitespace = False
-
- self.mayReturnEmpty = True
- self.errmsg = "Found unwanted token, " + str(self.expr)
-
- def parseImpl(self, instring, loc, doActions=True):
- if self.expr.can_parse_next(instring, loc):
- raise ParseException(instring, loc, self.errmsg, self)
- return loc, []
-
- def _generateDefaultName(self):
- return "~{" + str(self.expr) + "}"
-
-
-class _MultipleMatch(ParseElementEnhance):
- def __init__(
- self,
- expr: ParserElement,
- stop_on: typing.Optional[Union[ParserElement, str]] = None,
- *,
- stopOn: typing.Optional[Union[ParserElement, str]] = None,
- ):
- super().__init__(expr)
- stopOn = stopOn or stop_on
- self.saveAsList = True
- ender = stopOn
- if isinstance(ender, str_type):
- ender = self._literalStringClass(ender)
- self.stopOn(ender)
-
- def stopOn(self, ender) -> ParserElement:
- if isinstance(ender, str_type):
- ender = self._literalStringClass(ender)
- self.not_ender = ~ender if ender is not None else None
- return self
-
- def parseImpl(self, instring, loc, doActions=True):
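-        # cache bound methods and flags in locals to speed up the repetition loop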
- self_expr_parse = self.expr._parse
- self_skip_ignorables = self._skipIgnorables
- check_ender = self.not_ender is not None
- if check_ender:
- try_not_ender = self.not_ender.tryParse
-
- # must be at least one (but first see if we are the stopOn sentinel;
- # if so, fail)
- if check_ender:
- try_not_ender(instring, loc)
- loc, tokens = self_expr_parse(instring, loc, doActions)
- try:
-            hasIgnoreExprs = bool(self.ignoreExprs)
- while 1:
- if check_ender:
- try_not_ender(instring, loc)
- if hasIgnoreExprs:
- preloc = self_skip_ignorables(instring, loc)
- else:
- preloc = loc
- loc, tmptokens = self_expr_parse(instring, preloc, doActions)
- if tmptokens or tmptokens.haskeys():
- tokens += tmptokens
- except (ParseException, IndexError):
- pass
-
- return loc, tokens
-
- def _setResultsName(self, name, listAllMatches=False):
- if (
- __diag__.warn_ungrouped_named_tokens_in_collection
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in self.suppress_warnings_
- ):
- for e in [self.expr] + self.expr.recurse():
- if (
- isinstance(e, ParserElement)
- and e.resultsName
- and Diagnostics.warn_ungrouped_named_tokens_in_collection
- not in e.suppress_warnings_
- ):
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "collides with {!r} on contained expression".format(
- "warn_ungrouped_named_tokens_in_collection",
- name,
- type(self).__name__,
- e.resultsName,
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, listAllMatches)
-
-
-class OneOrMore(_MultipleMatch):
- """
- Repetition of one or more of the given expression.
-
- Parameters:
-    - ``expr`` - expression that must match one or more times
-    - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel
-      (only required if the sentinel would ordinarily match the repetition
-      expression)
-
- Example::
-
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
-
- text = "shape: SQUARE posn: upper left color: BLACK"
- attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
- # use stop_on attribute for OneOrMore to avoid reading label string as part of the data
- attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
- OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-
- # could also be written as
- (attr_expr * (1,)).parse_string(text).pprint()
- """
-
- def _generateDefaultName(self):
- return "{" + str(self.expr) + "}..."
-
-
-class ZeroOrMore(_MultipleMatch):
- """
- Optional repetition of zero or more of the given expression.
-
- Parameters:
- - ``expr`` - expression that must match zero or more times
- - ``stop_on`` - expression for a terminating sentinel
- (only required if the sentinel would ordinarily match the repetition
- expression) - (default= ``None``)
-
- Example: similar to :class:`OneOrMore`
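-
-    A minimal sketch (illustrative)::
-
-        name_list = Word(alphas) + ZeroOrMore(Suppress(',') + Word(alphas))
-        print(name_list.parse_string("a, b, c"))  # -> ['a', 'b', 'c']
-        print(name_list.parse_string("a"))        # -> ['a']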
- """
-
- def __init__(
- self,
- expr: ParserElement,
- stop_on: typing.Optional[Union[ParserElement, str]] = None,
- *,
- stopOn: typing.Optional[Union[ParserElement, str]] = None,
- ):
- super().__init__(expr, stopOn=stopOn or stop_on)
- self.mayReturnEmpty = True
-
- def parseImpl(self, instring, loc, doActions=True):
- try:
- return super().parseImpl(instring, loc, doActions)
- except (ParseException, IndexError):
- return loc, ParseResults([], name=self.resultsName)
-
- def _generateDefaultName(self):
- return "[" + str(self.expr) + "]..."
-
-
-class _NullToken:
- def __bool__(self):
- return False
-
- def __str__(self):
- return ""
-
-
-class Opt(ParseElementEnhance):
- """
- Optional matching of the given expression.
-
- Parameters:
- - ``expr`` - expression that must match zero or more times
- - ``default`` (optional) - value to be returned if the optional expression is not found.
-
- Example::
-
- # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
- zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
- zip.run_tests('''
- # traditional ZIP code
- 12345
-
- # ZIP+4 form
- 12101-0001
-
- # invalid ZIP
- 98765-
- ''')
-
- prints::
-
- # traditional ZIP code
- 12345
- ['12345']
-
- # ZIP+4 form
- 12101-0001
- ['12101-0001']
-
- # invalid ZIP
- 98765-
- ^
- FAIL: Expected end of text (at char 5), (line:1, col:6)
- """
-
- __optionalNotMatched = _NullToken()
-
- def __init__(
- self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
- ):
- super().__init__(expr, savelist=False)
- self.saveAsList = self.expr.saveAsList
- self.defaultValue = default
- self.mayReturnEmpty = True
-
- def parseImpl(self, instring, loc, doActions=True):
- self_expr = self.expr
- try:
- loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False)
- except (ParseException, IndexError):
- default_value = self.defaultValue
- if default_value is not self.__optionalNotMatched:
- if self_expr.resultsName:
- tokens = ParseResults([default_value])
- tokens[self_expr.resultsName] = default_value
- else:
- tokens = [default_value]
- else:
- tokens = []
- return loc, tokens
-
- def _generateDefaultName(self):
- inner = str(self.expr)
- # strip off redundant inner {}'s
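-        # (inner[0 :: len(inner) - 1] picks out just the first and last characters)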
- while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
- inner = inner[1:-1]
- return "[" + inner + "]"
-
-
-Optional = Opt
-
-
-class SkipTo(ParseElementEnhance):
- """
- Token for skipping over all undefined text until the matched
- expression is found.
-
- Parameters:
- - ``expr`` - target expression marking the end of the data to be skipped
- - ``include`` - if ``True``, the target expression is also parsed
- (the skipped text and target expression are returned as a 2-element
- list) (default= ``False``).
- - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
- comments) that might contain false matches to the target expression
- - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
- included in the skipped test; if found before the target expression is found,
- the :class:`SkipTo` is not a match
-
- Example::
-
- report = '''
- Outstanding Issues Report - 1 Jan 2000
-
- # | Severity | Description | Days Open
- -----+----------+-------------------------------------------+-----------
- 101 | Critical | Intermittent system crash | 6
- 94 | Cosmetic | Spelling error on Login ('log|n') | 14
- 79 | Minor | System slow when running too many reports | 47
- '''
- integer = Word(nums)
- SEP = Suppress('|')
- # use SkipTo to simply match everything up until the next SEP
- # - ignore quoted strings, so that a '|' character inside a quoted string does not match
- # - parse action will call token.strip() for each matched token, i.e., the description body
- string_data = SkipTo(SEP, ignore=quoted_string)
- string_data.set_parse_action(token_map(str.strip))
- ticket_expr = (integer("issue_num") + SEP
- + string_data("sev") + SEP
- + string_data("desc") + SEP
- + integer("days_open"))
-
- for tkt in ticket_expr.search_string(report):
-            print(tkt.dump())
-
- prints::
-
- ['101', 'Critical', 'Intermittent system crash', '6']
- - days_open: '6'
- - desc: 'Intermittent system crash'
- - issue_num: '101'
- - sev: 'Critical'
- ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- - days_open: '14'
- - desc: "Spelling error on Login ('log|n')"
- - issue_num: '94'
- - sev: 'Cosmetic'
- ['79', 'Minor', 'System slow when running too many reports', '47']
- - days_open: '47'
- - desc: 'System slow when running too many reports'
- - issue_num: '79'
- - sev: 'Minor'
- """
-
- def __init__(
- self,
- other: Union[ParserElement, str],
- include: bool = False,
-        ignore: typing.Optional[ParserElement] = None,
- fail_on: typing.Optional[Union[ParserElement, str]] = None,
- *,
-        failOn: typing.Optional[Union[ParserElement, str]] = None,
- ):
- super().__init__(other)
- failOn = failOn or fail_on
- self.ignoreExpr = ignore
- self.mayReturnEmpty = True
- self.mayIndexError = False
- self.includeMatch = include
- self.saveAsList = False
- if isinstance(failOn, str_type):
- self.failOn = self._literalStringClass(failOn)
- else:
- self.failOn = failOn
- self.errmsg = "No match found for " + str(self.expr)
-
- def parseImpl(self, instring, loc, doActions=True):
- startloc = loc
- instrlen = len(instring)
- self_expr_parse = self.expr._parse
- self_failOn_canParseNext = (
- self.failOn.canParseNext if self.failOn is not None else None
- )
- self_ignoreExpr_tryParse = (
- self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
- )
-
- tmploc = loc
- while tmploc <= instrlen:
- if self_failOn_canParseNext is not None:
- # break if failOn expression matches
- if self_failOn_canParseNext(instring, tmploc):
- break
-
- if self_ignoreExpr_tryParse is not None:
- # advance past ignore expressions
- while 1:
- try:
- tmploc = self_ignoreExpr_tryParse(instring, tmploc)
- except ParseBaseException:
- break
-
- try:
- self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
- except (ParseException, IndexError):
- # no match, advance loc in string
- tmploc += 1
- else:
- # matched skipto expr, done
- break
-
- else:
- # ran off the end of the input string without matching skipto expr, fail
- raise ParseException(instring, loc, self.errmsg, self)
-
- # build up return values
- loc = tmploc
- skiptext = instring[startloc:loc]
- skipresult = ParseResults(skiptext)
-
- if self.includeMatch:
- loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False)
- skipresult += mat
-
- return loc, skipresult
-
-
-class Forward(ParseElementEnhance):
- """
- Forward declaration of an expression to be defined later -
- used for recursive grammars, such as algebraic infix notation.
- When the expression is known, it is assigned to the ``Forward``
- variable using the ``'<<'`` operator.
-
- Note: take care when assigning to ``Forward`` not to overlook
- precedence of operators.
-
- Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
-
- fwd_expr << a | b | c
-
- will actually be evaluated as::
-
- (fwd_expr << a) | b | c
-
- thereby leaving b and c out as parseable alternatives. It is recommended that you
- explicitly group the values inserted into the ``Forward``::
-
- fwd_expr << (a | b | c)
-
- Converting to use the ``'<<='`` operator instead will avoid this problem.
-
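-    A minimal recursive grammar (illustrative)::
-
-        num = Word(nums)
-        expr = Forward()
-        expr <<= num + Opt('+' + expr)
-
-        expr.parse_string("1+2+3")  # -> ['1', '+', '2', '+', '3']
-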
- See :class:`ParseResults.pprint` for an example of a recursive
- parser created using ``Forward``.
- """
-
- def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None):
- self.caller_frame = traceback.extract_stack(limit=2)[0]
- super().__init__(other, savelist=False)
- self.lshift_line = None
-
- def __lshift__(self, other):
- if hasattr(self, "caller_frame"):
- del self.caller_frame
- if isinstance(other, str_type):
- other = self._literalStringClass(other)
- self.expr = other
- self.mayIndexError = self.expr.mayIndexError
- self.mayReturnEmpty = self.expr.mayReturnEmpty
- self.set_whitespace_chars(
- self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
- )
- self.skipWhitespace = self.expr.skipWhitespace
- self.saveAsList = self.expr.saveAsList
- self.ignoreExprs.extend(self.expr.ignoreExprs)
- self.lshift_line = traceback.extract_stack(limit=2)[-2]
- return self
-
- def __ilshift__(self, other):
- return self << other
-
- def __or__(self, other):
- caller_line = traceback.extract_stack(limit=2)[-2]
- if (
- __diag__.warn_on_match_first_with_lshift_operator
- and caller_line == self.lshift_line
- and Diagnostics.warn_on_match_first_with_lshift_operator
- not in self.suppress_warnings_
- ):
- warnings.warn(
- "using '<<' operator with '|' is probably an error, use '<<='",
- stacklevel=2,
- )
- ret = super().__or__(other)
- return ret
-
- def __del__(self):
- # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
- if (
- self.expr is None
- and __diag__.warn_on_assignment_to_Forward
- and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
- ):
- warnings.warn_explicit(
- "Forward defined here but no expression attached later using '<<=' or '<<'",
- UserWarning,
- filename=self.caller_frame.filename,
- lineno=self.caller_frame.lineno,
- )
-
- def parseImpl(self, instring, loc, doActions=True):
- if (
- self.expr is None
- and __diag__.warn_on_parse_using_empty_Forward
- and Diagnostics.warn_on_parse_using_empty_Forward
- not in self.suppress_warnings_
- ):
- # walk stack until parse_string, scan_string, search_string, or transform_string is found
- parse_fns = [
- "parse_string",
- "scan_string",
- "search_string",
- "transform_string",
- ]
- tb = traceback.extract_stack(limit=200)
- for i, frm in enumerate(reversed(tb), start=1):
- if frm.name in parse_fns:
- stacklevel = i + 1
- break
- else:
- stacklevel = 2
- warnings.warn(
- "Forward expression was never assigned a value, will not parse any input",
- stacklevel=stacklevel,
- )
- if not ParserElement._left_recursion_enabled:
- return super().parseImpl(instring, loc, doActions)
- # ## Bounded Recursion algorithm ##
- # Recursion only needs to be processed at ``Forward`` elements, since they are
- # the only ones that can actually refer to themselves. The general idea is
- # to handle recursion stepwise: We start at no recursion, then recurse once,
- # recurse twice, ..., until more recursion offers no benefit (we hit the bound).
- #
- # The "trick" here is that each ``Forward`` gets evaluated in two contexts
- # - to *match* a specific recursion level, and
- # - to *search* the bounded recursion level
- # and the two run concurrently. The *search* must *match* each recursion level
- # to find the best possible match. This is handled by a memo table, which
- # provides the previous match to the next level match attempt.
- #
- # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
- #
- # There is a complication since we not only *parse* but also *transform* via
- # actions: We do not want to run the actions too often while expanding. Thus,
- # we expand using `doActions=False` and only run `doActions=True` if the next
- # recursion level is acceptable.
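-        #
-        # A hedged usage sketch (illustrative, not part of this module):
-        #
-        #     ParserElement.enable_left_recursion()
-        #     expr = Forward()
-        #     expr <<= expr + '+' + Word(nums) | Word(nums)
-        #     expr.parse_string("1+2+3")  # expands one level per pass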
- with ParserElement.recursion_lock:
- memo = ParserElement.recursion_memos
- try:
- # we are parsing at a specific recursion expansion - use it as-is
- prev_loc, prev_result = memo[loc, self, doActions]
- if isinstance(prev_result, Exception):
- raise prev_result
- return prev_loc, prev_result.copy()
- except KeyError:
- act_key = (loc, self, True)
- peek_key = (loc, self, False)
- # we are searching for the best recursion expansion - keep on improving
- # both `doActions` cases must be tracked separately here!
- prev_loc, prev_peek = memo[peek_key] = (
- loc - 1,
- ParseException(
- instring, loc, "Forward recursion without base case", self
- ),
- )
- if doActions:
- memo[act_key] = memo[peek_key]
- while True:
- try:
- new_loc, new_peek = super().parseImpl(instring, loc, False)
- except ParseException:
- # we failed before getting any match – do not hide the error
- if isinstance(prev_peek, Exception):
- raise
- new_loc, new_peek = prev_loc, prev_peek
- # the match did not get better: we are done
- if new_loc <= prev_loc:
- if doActions:
- # replace the match for doActions=False as well,
- # in case the action did backtrack
- prev_loc, prev_result = memo[peek_key] = memo[act_key]
- del memo[peek_key], memo[act_key]
- return prev_loc, prev_result.copy()
- del memo[peek_key]
- return prev_loc, prev_peek.copy()
- # the match did get better: see if we can improve further
- else:
- if doActions:
- try:
- memo[act_key] = super().parseImpl(instring, loc, True)
- except ParseException as e:
- memo[peek_key] = memo[act_key] = (new_loc, e)
- raise
- prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
-
- def leave_whitespace(self, recursive: bool = True) -> ParserElement:
- self.skipWhitespace = False
- return self
-
- def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
- self.skipWhitespace = True
- return self
-
- def streamline(self) -> ParserElement:
- if not self.streamlined:
- self.streamlined = True
- if self.expr is not None:
- self.expr.streamline()
- return self
-
- def validate(self, validateTrace=None) -> None:
- if validateTrace is None:
- validateTrace = []
-
- if self not in validateTrace:
- tmp = validateTrace[:] + [self]
- if self.expr is not None:
- self.expr.validate(tmp)
- self._checkRecursion([])
-
- def _generateDefaultName(self):
- # Avoid infinite recursion by setting a temporary _defaultName
- self._defaultName = ": ..."
-
- # Use the string representation of main expression.
- retString = "..."
- try:
- if self.expr is not None:
- retString = str(self.expr)[:1000]
- else:
- retString = "None"
- finally:
- return self.__class__.__name__ + ": " + retString
-
- def copy(self) -> ParserElement:
- if self.expr is not None:
- return super().copy()
- else:
- ret = Forward()
- ret <<= self
- return ret
-
- def _setResultsName(self, name, list_all_matches=False):
- if (
- __diag__.warn_name_set_on_empty_Forward
- and Diagnostics.warn_name_set_on_empty_Forward
- not in self.suppress_warnings_
- ):
- if self.expr is None:
- warnings.warn(
- "{}: setting results name {!r} on {} expression "
- "that has no contained expression".format(
- "warn_name_set_on_empty_Forward", name, type(self).__name__
- ),
- stacklevel=3,
- )
-
- return super()._setResultsName(name, list_all_matches)
-
- ignoreWhitespace = ignore_whitespace
- leaveWhitespace = leave_whitespace
-
-
-class TokenConverter(ParseElementEnhance):
- """
-    Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
- """
-
- def __init__(self, expr: Union[ParserElement, str], savelist=False):
- super().__init__(expr) # , savelist)
- self.saveAsList = False
-
-
-class Combine(TokenConverter):
- """Converter to concatenate all matching tokens to a single string.
- By default, the matching patterns must also be contiguous in the
- input string; this can be disabled by specifying
- ``'adjacent=False'`` in the constructor.
-
- Example::
-
- real = Word(nums) + '.' + Word(nums)
- print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
- # will also erroneously match the following
- print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
-
- real = Combine(Word(nums) + '.' + Word(nums))
- print(real.parse_string('3.1416')) # -> ['3.1416']
- # no match when there are internal spaces
- print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
- """
-
- def __init__(
- self,
- expr: ParserElement,
- join_string: str = "",
- adjacent: bool = True,
- *,
- joinString: typing.Optional[str] = None,
- ):
- super().__init__(expr)
- joinString = joinString if joinString is not None else join_string
- # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
- if adjacent:
- self.leave_whitespace()
- self.adjacent = adjacent
- self.skipWhitespace = True
- self.joinString = joinString
- self.callPreparse = True
-
- def ignore(self, other) -> ParserElement:
- if self.adjacent:
- ParserElement.ignore(self, other)
- else:
- super().ignore(other)
- return self
-
- def postParse(self, instring, loc, tokenlist):
- retToks = tokenlist.copy()
- del retToks[:]
- retToks += ParseResults(
- ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
- )
-
- if self.resultsName and retToks.haskeys():
- return [retToks]
- else:
- return retToks
-
-
-class Group(TokenConverter):
- """Converter to return the matched tokens as a list - useful for
- returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
-
- The optional ``aslist`` argument when set to True will return the
- parsed tokens as a Python list instead of a pyparsing ParseResults.
-
- Example::
-
- ident = Word(alphas)
- num = Word(nums)
- term = ident | num
- func = ident + Opt(delimited_list(term))
- print(func.parse_string("fn a, b, 100"))
- # -> ['fn', 'a', 'b', '100']
-
- func = ident + Group(Opt(delimited_list(term)))
- print(func.parse_string("fn a, b, 100"))
- # -> ['fn', ['a', 'b', '100']]
- """
-
- def __init__(self, expr: ParserElement, aslist: bool = False):
- super().__init__(expr)
- self.saveAsList = True
- self._asPythonList = aslist
-
- def postParse(self, instring, loc, tokenlist):
- if self._asPythonList:
- return ParseResults.List(
- tokenlist.asList()
- if isinstance(tokenlist, ParseResults)
- else list(tokenlist)
- )
- else:
- return [tokenlist]
-
-
-class Dict(TokenConverter):
- """Converter to return a repetitive expression as a list, but also
- as a dictionary. Each element can also be referenced using the first
- token in the expression as its key. Useful for tabular report
-    scraping when the first column can be used as an item key.
-
- The optional ``asdict`` argument when set to True will return the
- parsed tokens as a Python dict instead of a pyparsing ParseResults.
-
- Example::
-
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
-
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
-
- # print attributes as plain groups
- print(attr_expr[1, ...].parse_string(text).dump())
-
- # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
- result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
- print(result.dump())
-
- # access named fields as dict entries, or output as dict
- print(result['shape'])
- print(result.as_dict())
-
- prints::
-
- ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: 'light blue'
- - posn: 'upper left'
- - shape: 'SQUARE'
- - texture: 'burlap'
- SQUARE
- {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-
- See more examples at :class:`ParseResults` of accessing fields by results name.
- """
-
- def __init__(self, expr: ParserElement, asdict: bool = False):
- super().__init__(expr)
- self.saveAsList = True
- self._asPythonDict = asdict
-
- def postParse(self, instring, loc, tokenlist):
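-        # each contained group contributes a (key, value) pair: tok[0] is the
-        # key; the remainder of the group (if any) becomes the value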
- for i, tok in enumerate(tokenlist):
- if len(tok) == 0:
- continue
-
- ikey = tok[0]
- if isinstance(ikey, int):
- ikey = str(ikey).strip()
-
- if len(tok) == 1:
- tokenlist[ikey] = _ParseResultsWithOffset("", i)
-
- elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
- tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
-
- else:
- try:
- dictvalue = tok.copy() # ParseResults(i)
- except Exception:
- exc = TypeError(
- "could not extract dict values from parsed results"
- " - Dict expression must contain Grouped expressions"
- )
- raise exc from None
-
- del dictvalue[0]
-
- if len(dictvalue) != 1 or (
- isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
- ):
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
- else:
- tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
-
- if self._asPythonDict:
- return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
- else:
- return [tokenlist] if self.resultsName else tokenlist
-
-
-class Suppress(TokenConverter):
- """Converter for ignoring the results of a parsed expression.
-
- Example::
-
- source = "a, b, c,d"
- wd = Word(alphas)
- wd_list1 = wd + (',' + wd)[...]
- print(wd_list1.parse_string(source))
-
- # often, delimiters that are useful during parsing are just in the
- # way afterward - use Suppress to keep them out of the parsed output
- wd_list2 = wd + (Suppress(',') + wd)[...]
- print(wd_list2.parse_string(source))
-
- # Skipped text (using '...') can be suppressed as well
- source = "lead in START relevant text END trailing text"
- start_marker = Keyword("START")
- end_marker = Keyword("END")
- find_body = Suppress(...) + start_marker + ... + end_marker
-        print(find_body.parse_string(source))
-
- prints::
-
- ['a', ',', 'b', ',', 'c', ',', 'd']
- ['a', 'b', 'c', 'd']
- ['START', 'relevant text ', 'END']
-
- (See also :class:`delimited_list`.)
- """
-
- def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
- if expr is ...:
- expr = _PendingSkip(NoMatch())
- super().__init__(expr)
-
- def __add__(self, other) -> "ParserElement":
- if isinstance(self.expr, _PendingSkip):
- return Suppress(SkipTo(other)) + other
- else:
- return super().__add__(other)
-
- def __sub__(self, other) -> "ParserElement":
- if isinstance(self.expr, _PendingSkip):
- return Suppress(SkipTo(other)) - other
- else:
- return super().__sub__(other)
-
- def postParse(self, instring, loc, tokenlist):
- return []
-
- def suppress(self) -> ParserElement:
- return self
-
-
-def trace_parse_action(f: ParseAction) -> ParseAction:
- """Decorator for debugging parse actions.
-
- When the parse action is called, this decorator will print
- ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
- When the parse action completes, the decorator will print
- ``"<<"`` followed by the returned value, or any exception that the parse action raised.
-
- Example::
-
- wd = Word(alphas)
-
- @trace_parse_action
- def remove_duplicate_chars(tokens):
- return ''.join(sorted(set(''.join(tokens))))
-
- wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
- print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
-
- prints::
-
- >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
- <<leaving remove_duplicate_chars (ret: 'dfjkls')
- ['dfjkls']
- """
- f = _trim_arity(f)
-
- def z(*paArgs):
- thisFunc = f.__name__
- s, l, t = paArgs[-3:]
- if len(paArgs) > 3:
- thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
- sys.stderr.write(
- ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
- )
- try:
- ret = f(*paArgs)
- except Exception as exc:
- sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
- raise
- sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
- return ret
-
- z.__name__ = f.__name__
- return z
-
-
-# convenience constants for positional expressions
-empty = Empty().set_name("empty")
-line_start = LineStart().set_name("line_start")
-line_end = LineEnd().set_name("line_end")
-string_start = StringStart().set_name("string_start")
-string_end = StringEnd().set_name("string_end")
-
-_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
- lambda s, l, t: t[0][1]
-)
-_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
- lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
-)
-_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
- lambda s, l, t: chr(int(t[0][1:], 8))
-)
-_singleChar = (
- _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
-)
-_charRange = Group(_singleChar + Suppress("-") + _singleChar)
-_reBracketExpr = (
- Literal("[")
- + Opt("^").set_results_name("negate")
- + Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
- + "]"
-)
-
-
-def srange(s: str) -> str:
- r"""Helper to easily define string ranges for use in :class:`Word`
- construction. Borrows syntax from regexp ``'[]'`` string range
- definitions::
-
- srange("[0-9]") -> "0123456789"
- srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
- srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
-
- The input string must be enclosed in []'s, and the returned string
- is the expanded character set joined into a single string. The
- values enclosed in the []'s may be:
-
- - a single character
- - an escaped character with a leading backslash (such as ``\-``
- or ``\]``)
- - an escaped hex character with a leading ``'\x'``
- (``\x21``, which is a ``'!'`` character) (``\0x##``
- is also supported for backwards compatibility)
- - an escaped octal character with a leading ``'\0'``
- (``\041``, which is a ``'!'`` character)
- - a range of any of the above, separated by a dash (``'a-z'``,
- etc.)
- - any combination of the above (``'aeiouy'``,
- ``'a-zA-Z0-9_$'``, etc.)
- """
- _expanded = (
- lambda p: p
- if not isinstance(p, ParseResults)
- else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
- )
- try:
- return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
- except Exception:
- return ""
-
-
-def token_map(func, *args) -> ParseAction:
- """Helper to define a parse action by mapping a function to all
- elements of a :class:`ParseResults` list. If any additional args are passed,
- they are forwarded to the given function as additional arguments
- after the token, as in
- ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
- which will convert the parsed data to an integer using base 16.
-
-    Example (compare the last example to the one in :class:`ParserElement.transform_string`)::
-
- hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16))
- hex_ints.run_tests('''
- 00 11 22 aa FF 0a 0d 1a
- ''')
-
- upperword = Word(alphas).set_parse_action(token_map(str.upper))
- upperword[1, ...].run_tests('''
- my kingdom for a horse
- ''')
-
- wd = Word(alphas).set_parse_action(token_map(str.title))
- wd[1, ...].set_parse_action(' '.join).run_tests('''
- now is the winter of our discontent made glorious summer by this sun of york
- ''')
-
- prints::
-
- 00 11 22 aa FF 0a 0d 1a
- [0, 17, 34, 170, 255, 10, 13, 26]
-
- my kingdom for a horse
- ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
- now is the winter of our discontent made glorious summer by this sun of york
- ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
- """
-
- def pa(s, l, t):
- return [func(tokn, *args) for tokn in t]
-
- func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
- pa.__name__ = func_name
-
- return pa
-
-
-def autoname_elements() -> None:
- """
- Utility to simplify mass-naming of parser elements, for
-    generating railroad diagrams with named subdiagrams.
- """
- for name, var in sys._getframe().f_back.f_locals.items():
- if isinstance(var, ParserElement) and not var.customName:
- var.set_name(name)
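-
-# A minimal usage sketch for autoname_elements (illustrative): call it at
-# module level after defining the grammar, so each element is named after
-# the variable that holds it:
-#
-#     integer = Word(nums)
-#     ident = Word(alphas)
-#     autoname_elements()  # integer/ident now render as "integer"/"ident"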
-
-
-dbl_quoted_string = Combine(
- Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
-).set_name("string enclosed in double quotes")
-
-sgl_quoted_string = Combine(
- Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
-).set_name("string enclosed in single quotes")
-
-quoted_string = Combine(
- Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
- | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
-).set_name("quotedString using single or double quotes")
-
-unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")
-
-
-alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
-punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
-
-# build list of built-in expressions, for future reference if a global default value
-# gets updated
-_builtin_exprs: List[ParserElement] = [
- v for v in vars().values() if isinstance(v, ParserElement)
-]
-
-# backward compatibility names
-tokenMap = token_map
-conditionAsParseAction = condition_as_parse_action
-nullDebugAction = null_debug_action
-sglQuotedString = sgl_quoted_string
-dblQuotedString = dbl_quoted_string
-quotedString = quoted_string
-unicodeString = unicode_string
-lineStart = line_start
-lineEnd = line_end
-stringStart = string_start
-stringEnd = string_end
-traceParseAction = trace_parse_action
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py
deleted file mode 100644
index 8986447..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__init__.py
+++ /dev/null
@@ -1,642 +0,0 @@
-import railroad
-import pyparsing
-import typing
-from typing import (
- List,
- NamedTuple,
- Generic,
- TypeVar,
- Dict,
- Callable,
- Set,
- Iterable,
-)
-from jinja2 import Template
-from io import StringIO
-import inspect
-
-
-jinja2_template_source = """\
-<!DOCTYPE html>
-<html>
-<head>
- {% if not head %}
- <style type="text/css">
- .railroad-heading {
- font-family: monospace;
- }
- </style>
- {% else %}
- {{ head | safe }}
- {% endif %}
-</head>
-<body>
-{{ body | safe }}
-{% for diagram in diagrams %}
- <div class="railroad-group">
- <h1 class="railroad-heading">{{ diagram.title }}</h1>
- <div class="railroad-description">{{ diagram.text }}</div>
- <div class="railroad-svg">
- {{ diagram.svg }}
- </div>
- </div>
-{% endfor %}
-</body>
-</html>
-"""
-
-template = Template(jinja2_template_source)
-
-# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
-NamedDiagram = NamedTuple(
- "NamedDiagram",
- [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
-)
-"""
-A simple structure for associating a name with a railroad diagram
-"""
-
-T = TypeVar("T")
-
-
-class EachItem(railroad.Group):
- """
- Custom railroad item to compose a:
- - Group containing a
- - OneOrMore containing a
- - Choice of the elements in the Each
- with the group label indicating that all must be matched
- """
-
- all_label = "[ALL]"
-
- def __init__(self, *items):
- choice_item = railroad.Choice(len(items) - 1, *items)
- one_or_more_item = railroad.OneOrMore(item=choice_item)
- super().__init__(one_or_more_item, label=self.all_label)
-
-
-class AnnotatedItem(railroad.Group):
- """
- Simple subclass of Group that creates an annotation label
- """
-
- def __init__(self, label: str, item):
- super().__init__(item=item, label="[{}]".format(label) if label else label)
-
-
-class EditablePartial(Generic[T]):
- """
- Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been
- constructed.
- """
-
- # We need this here because the railroad constructors actually transform the data, so can't be called until the
- # entire tree is assembled
-
- def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
- self.func = func
- self.args = args
- self.kwargs = kwargs
-
- @classmethod
- def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
- """
- If you call this function in the same way that you would call the constructor, it will store the arguments
- as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
- """
- return EditablePartial(func=func, args=list(args), kwargs=kwargs)
-
- @property
- def name(self):
- return self.kwargs["name"]
-
- def __call__(self) -> T:
- """
- Evaluate the partial and return the result
- """
- args = self.args.copy()
- kwargs = self.kwargs.copy()
-
- # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
- # args=['list', 'of', 'things'])
- arg_spec = inspect.getfullargspec(self.func)
- if arg_spec.varargs in self.kwargs:
- args += kwargs.pop(arg_spec.varargs)
-
- return self.func(*args, **kwargs)
-
-
-def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
- """
- Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
- :params kwargs: kwargs to be passed in to the template
- """
- data = []
- for diagram in diagrams:
- if diagram.diagram is None:
- continue
- io = StringIO()
- diagram.diagram.writeSvg(io.write)
- title = diagram.name
- if diagram.index == 0:
- title += " (root)"
- data.append({"title": title, "text": "", "svg": io.getvalue()})
-
- return template.render(diagrams=data, **kwargs)
-
-
-def resolve_partial(partial: "EditablePartial[T]") -> T:
- """
- Recursively resolves a collection of Partials into whatever type they are
- """
- if isinstance(partial, EditablePartial):
- partial.args = resolve_partial(partial.args)
- partial.kwargs = resolve_partial(partial.kwargs)
- return partial()
- elif isinstance(partial, list):
- return [resolve_partial(x) for x in partial]
- elif isinstance(partial, dict):
- return {key: resolve_partial(x) for key, x in partial.items()}
- else:
- return partial
-
-
-def to_railroad(
- element: pyparsing.ParserElement,
- diagram_kwargs: typing.Optional[dict] = None,
- vertical: int = 3,
- show_results_names: bool = False,
- show_groups: bool = False,
-) -> List[NamedDiagram]:
- """
- Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
- creation if you want to access the Railroad tree before it is converted to HTML
- :param element: base element of the parser being diagrammed
- :param diagram_kwargs: kwargs to pass to the Diagram() constructor
-    :param vertical: (optional) int - limit at which number of alternatives should be
-       shown vertically instead of horizontally
-    :param show_results_names: bool to indicate whether results name annotations should be
-       included in the diagram
-    :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
-       surrounding box
- """
- # Convert the whole tree underneath the root
- lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
- _to_diagram_element(
- element,
- lookup=lookup,
- parent=None,
- vertical=vertical,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- root_id = id(element)
- # Convert the root if it hasn't been already
- if root_id in lookup:
- if not element.customName:
- lookup[root_id].name = ""
- lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
-
- # Now that we're finished, we can convert from intermediate structures into Railroad elements
- diags = list(lookup.diagrams.values())
- if len(diags) > 1:
- # collapse out duplicate diags with the same name
- seen = set()
- deduped_diags = []
- for d in diags:
- # don't extract SkipTo elements, they are uninformative as subdiagrams
- if d.name == "...":
- continue
- if d.name is not None and d.name not in seen:
- seen.add(d.name)
- deduped_diags.append(d)
- resolved = [resolve_partial(partial) for partial in deduped_diags]
- else:
- # special case - if just one diagram, always display it, even if
- # it has no name
- resolved = [resolve_partial(partial) for partial in diags]
- return sorted(resolved, key=lambda diag: diag.index)
-
-
-def _should_vertical(
- specification: int, exprs: Iterable[pyparsing.ParserElement]
-) -> bool:
- """
- Returns true if we should return a vertical list of elements
- """
- if specification is None:
- return False
- else:
- return len(_visible_exprs(exprs)) >= specification
-
-
-class ElementState:
- """
- State recorded for an individual pyparsing Element
- """
-
- # Note: this should be a dataclass, but we have to support Python 3.5
- def __init__(
- self,
- element: pyparsing.ParserElement,
- converted: EditablePartial,
- parent: EditablePartial,
- number: int,
- name: str = None,
- parent_index: typing.Optional[int] = None,
- ):
- #: The pyparsing element that this represents
- self.element: pyparsing.ParserElement = element
- #: The name of the element
- self.name: typing.Optional[str] = name
- #: The output Railroad element in an unconverted state
- self.converted: EditablePartial = converted
- #: The parent Railroad element, which we store so that we can extract this if it's duplicated
- self.parent: EditablePartial = parent
- #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
- self.number: int = number
- #: The index of this inside its parent
- self.parent_index: typing.Optional[int] = parent_index
- #: If true, we should extract this out into a subdiagram
- self.extract: bool = False
- #: If true, all of this element's children have been filled out
- self.complete: bool = False
-
- def mark_for_extraction(
- self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
- ):
- """
- Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
- :param el_id: id of the element
- :param state: element/diagram state tracker
- :param name: name to use for this element's text
- :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
- root element when we know we're finished
- """
- self.extract = True
-
- # Set the name
- if not self.name:
- if name:
- # Allow forcing a custom name
- self.name = name
- elif self.element.customName:
- self.name = self.element.customName
- else:
- self.name = ""
-
- # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
- # to be added
- # Also, if this is just a string literal etc, don't bother extracting it
- if force or (self.complete and _worth_extracting(self.element)):
- state.extract_into_diagram(el_id)
-
-
-class ConverterState:
- """
- Stores some state that persists between recursions into the element tree
- """
-
- def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
- #: A dictionary mapping ParserElements to state relating to them
- self._element_diagram_states: Dict[int, ElementState] = {}
- #: A dictionary mapping ParserElement IDs to subdiagrams generated from them
- self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
- #: The index of the next unnamed element
- self.unnamed_index: int = 1
- #: The index of the next element. This is used for sorting
- self.index: int = 0
- #: Shared kwargs that are used to customize the construction of diagrams
- self.diagram_kwargs: dict = diagram_kwargs or {}
- self.extracted_diagram_names: Set[str] = set()
-
- def __setitem__(self, key: int, value: ElementState):
- self._element_diagram_states[key] = value
-
- def __getitem__(self, key: int) -> ElementState:
- return self._element_diagram_states[key]
-
- def __delitem__(self, key: int):
- del self._element_diagram_states[key]
-
- def __contains__(self, key: int):
- return key in self._element_diagram_states
-
- def generate_unnamed(self) -> int:
- """
- Generate a number used in the name of an otherwise unnamed diagram
- """
- self.unnamed_index += 1
- return self.unnamed_index
-
- def generate_index(self) -> int:
- """
- Generate a number used to index a diagram
- """
- self.index += 1
- return self.index
-
- def extract_into_diagram(self, el_id: int):
- """
- Used when we encounter the same token twice in the same tree. When this
- happens, we replace all instances of that token with a terminal, and
- create a new subdiagram for the token
- """
- position = self[el_id]
-
- # Replace the original definition of this element with a regular block
- if position.parent:
- ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
- if "item" in position.parent.kwargs:
- position.parent.kwargs["item"] = ret
- elif "items" in position.parent.kwargs:
- position.parent.kwargs["items"][position.parent_index] = ret
-
- # If the element we're extracting is a group, skip to its content but keep the title
- if position.converted.func == railroad.Group:
- content = position.converted.kwargs["item"]
- else:
- content = position.converted
-
- self.diagrams[el_id] = EditablePartial.from_call(
- NamedDiagram,
- name=position.name,
- diagram=EditablePartial.from_call(
- railroad.Diagram, content, **self.diagram_kwargs
- ),
- index=position.number,
- )
-
- del self[el_id]
-
-
-def _worth_extracting(element: pyparsing.ParserElement) -> bool:
- """
- Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
-    themselves have children, then it's complex enough to extract
- """
- children = element.recurse()
- return any(child.recurse() for child in children)
-
-
-def _apply_diagram_item_enhancements(fn):
- """
- decorator to ensure enhancements to a diagram item (such as results name annotations)
- get applied on return from _to_diagram_element (we do this since there are several
- returns in _to_diagram_element)
- """
-
- def _inner(
- element: pyparsing.ParserElement,
- parent: typing.Optional[EditablePartial],
- lookup: ConverterState = None,
- vertical: int = None,
- index: int = 0,
- name_hint: str = None,
- show_results_names: bool = False,
- show_groups: bool = False,
- ) -> typing.Optional[EditablePartial]:
-
- ret = fn(
- element,
- parent,
- lookup,
- vertical,
- index,
- name_hint,
- show_results_names,
- show_groups,
- )
-
- # apply annotation for results name, if present
- if show_results_names and ret is not None:
- element_results_name = element.resultsName
- if element_results_name:
- # add "*" to indicate if this is a "list all results" name
- element_results_name += "" if element.modalResults else "*"
- ret = EditablePartial.from_call(
- railroad.Group, item=ret, label=element_results_name
- )
-
- return ret
-
- return _inner
-
-
-def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
- non_diagramming_exprs = (
- pyparsing.ParseElementEnhance,
- pyparsing.PositionToken,
- pyparsing.And._ErrorStop,
- )
- return [
- e
- for e in exprs
- if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
- ]
-
-
-@_apply_diagram_item_enhancements
-def _to_diagram_element(
- element: pyparsing.ParserElement,
- parent: typing.Optional[EditablePartial],
- lookup: ConverterState = None,
- vertical: int = None,
- index: int = 0,
- name_hint: str = None,
- show_results_names: bool = False,
- show_groups: bool = False,
-) -> typing.Optional[EditablePartial]:
- """
- Recursively converts a PyParsing Element to a railroad Element
- :param lookup: The shared converter state that keeps track of useful things
- :param index: The index of this element within the parent
- :param parent: The parent of this element in the output tree
- :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
- it sets the threshold of the number of items before we go vertical. If True, always go vertical, if False, never
- do so
- :param name_hint: If provided, this will override the generated name
- :param show_results_names: bool flag indicating whether to add annotations for results names
- :param show_groups: bool flag indicating whether to show groups using a bounding box
- :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
- """
- exprs = element.recurse()
- name = name_hint or element.customName or element.__class__.__name__
-
- # Python's id() is used to provide a unique identifier for elements
- el_id = id(element)
-
- element_results_name = element.resultsName
-
- # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
- if not element.customName:
- if isinstance(
- element,
- (
- # pyparsing.TokenConverter,
- # pyparsing.Forward,
- pyparsing.Located,
- ),
- ):
- # If the child does not have its own custom name, pass our name hint on to the child
- if exprs:
- if not exprs[0].customName:
- propagated_name = name
- else:
- propagated_name = None
-
- return _to_diagram_element(
- element.expr,
- parent=parent,
- lookup=lookup,
- vertical=vertical,
- index=index,
- name_hint=propagated_name,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- # If the element isn't worth extracting, we always treat it as the first time we see it
- if _worth_extracting(element):
- if el_id in lookup:
- # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
- # so we have to extract it into a new diagram.
- looked_up = lookup[el_id]
- looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
- ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
- return ret
-
- elif el_id in lookup.diagrams:
- # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
- # just put in a marker element that refers to the sub-diagram
- ret = EditablePartial.from_call(
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
- )
- return ret
-
- # Recursively convert child elements
- # Here we find the most relevant Railroad element for matching pyparsing Element
- # We use ``items=[]`` here to hold a place where the child elements will go once created
- if isinstance(element, pyparsing.And):
- # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
- # (all will have the same name, and resultsName)
- if not exprs:
- return None
- if len(set((e.name, e.resultsName) for e in exprs)) == 1:
- ret = EditablePartial.from_call(
- railroad.OneOrMore, item="", repeat=str(len(exprs))
- )
- elif _should_vertical(vertical, exprs):
- ret = EditablePartial.from_call(railroad.Stack, items=[])
- else:
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
- elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
- if not exprs:
- return None
- if _should_vertical(vertical, exprs):
- ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
- else:
- ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
- elif isinstance(element, pyparsing.Each):
- if not exprs:
- return None
- ret = EditablePartial.from_call(EachItem, items=[])
- elif isinstance(element, pyparsing.NotAny):
- ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
- elif isinstance(element, pyparsing.FollowedBy):
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
- elif isinstance(element, pyparsing.PrecededBy):
- ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
- elif isinstance(element, pyparsing.Group):
- if show_groups:
- ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
- else:
- ret = EditablePartial.from_call(railroad.Group, label="", item="")
- elif isinstance(element, pyparsing.TokenConverter):
- ret = EditablePartial.from_call(
- AnnotatedItem, label=type(element).__name__.lower(), item=""
- )
- elif isinstance(element, pyparsing.Opt):
- ret = EditablePartial.from_call(railroad.Optional, item="")
- elif isinstance(element, pyparsing.OneOrMore):
- ret = EditablePartial.from_call(railroad.OneOrMore, item="")
- elif isinstance(element, pyparsing.ZeroOrMore):
- ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
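- # NOTE: this Group branch is shadowed by the identical isinstance check in the
- # elif chain above, so it is effectively unreachable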
- elif isinstance(element, pyparsing.Group):
- ret = EditablePartial.from_call(
- railroad.Group, item=None, label=element_results_name
- )
- elif isinstance(element, pyparsing.Empty) and not element.customName:
- # Skip unnamed "Empty" elements
- ret = None
- elif len(exprs) > 1:
- ret = EditablePartial.from_call(railroad.Sequence, items=[])
- elif len(exprs) > 0 and not element_results_name:
- ret = EditablePartial.from_call(railroad.Group, item="", label=name)
- else:
- terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
- ret = terminal
-
- if ret is None:
- return
-
- # Indicate this element's position in the tree so we can extract it if necessary
- lookup[el_id] = ElementState(
- element=element,
- converted=ret,
- parent=parent,
- parent_index=index,
- number=lookup.generate_index(),
- )
- if element.customName:
- lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
-
- i = 0
- for expr in exprs:
- # Add a placeholder index in case we have to extract the child before we even add it to the parent
- if "items" in ret.kwargs:
- ret.kwargs["items"].insert(i, None)
-
- item = _to_diagram_element(
- expr,
- parent=ret,
- lookup=lookup,
- vertical=vertical,
- index=i,
- show_results_names=show_results_names,
- show_groups=show_groups,
- )
-
- # Some elements don't need to be shown in the diagram
- if item is not None:
- if "item" in ret.kwargs:
- ret.kwargs["item"] = item
- elif "items" in ret.kwargs:
- # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
- ret.kwargs["items"][i] = item
- i += 1
- elif "items" in ret.kwargs:
- # If we're supposed to skip this element, remove it from the parent
- del ret.kwargs["items"][i]
-
- # If all of this item's children are None, skip this item
- if ret and (
- ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
- or ("item" in ret.kwargs and ret.kwargs["item"] is None)
- ):
- ret = EditablePartial.from_call(railroad.Terminal, name)
-
- # Mark this element as "complete", i.e. it has all of its children
- if el_id in lookup:
- lookup[el_id].complete = True
-
- if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
- lookup.extract_into_diagram(el_id)
- if ret is not None:
- ret = EditablePartial.from_call(
- railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
- )
-
- return ret
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 0e2cbb7..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/diagram/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/exceptions.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/exceptions.py
deleted file mode 100644
index a38447b..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/exceptions.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# exceptions.py
-
-import re
-import sys
-import typing
-
-from .util import col, line, lineno, _collapse_string_to_ranges
-from .unicode import pyparsing_unicode as ppu
-
-
-class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
- pass
-
-
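-# used by ParseBaseException.__str__ to pull out the next word (up to 16 chars) at the error location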
-_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
-_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
-
-
-class ParseBaseException(Exception):
- """base exception class for all parsing runtime exceptions"""
-
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__(
- self,
- pstr: str,
- loc: int = 0,
- msg: typing.Optional[str] = None,
- elem=None,
- ):
- self.loc = loc
- if msg is None:
- self.msg = pstr
- self.pstr = ""
- else:
- self.msg = msg
- self.pstr = pstr
- self.parser_element = self.parserElement = elem
- self.args = (pstr, loc, msg)
-
- @staticmethod
- def explain_exception(exc, depth=16):
- """
- Method to take an exception and translate the Python internal traceback into a list
- of the pyparsing expressions that caused the exception to be raised.
-
- Parameters:
-
- - exc - exception raised during parsing (need not be a ParseException, in support
- of Python exceptions that might be raised in a parse action)
- - depth (default=16) - number of levels back in the stack trace to list expression
- and function names; if None, the full stack trace names will be listed; if 0, only
- the failing input line, marker, and exception string will be shown
-
- Returns a multi-line string listing the ParserElements and/or function names in the
- exception's stack trace.
- """
- import inspect
- from .core import ParserElement
-
- if depth is None:
- depth = sys.getrecursionlimit()
- ret = []
- if isinstance(exc, ParseBaseException):
- ret.append(exc.line)
- ret.append(" " * (exc.column - 1) + "^")
- ret.append("{}: {}".format(type(exc).__name__, exc))
-
- if depth > 0:
- callers = inspect.getinnerframes(exc.__traceback__, context=depth)
- seen = set()
- for i, ff in enumerate(callers[-depth:]):
- frm = ff[0]
-
- f_self = frm.f_locals.get("self", None)
- if isinstance(f_self, ParserElement):
- if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
- continue
- if id(f_self) in seen:
- continue
- seen.add(id(f_self))
-
- self_type = type(f_self)
- ret.append(
- "{}.{} - {}".format(
- self_type.__module__, self_type.__name__, f_self
- )
- )
-
- elif f_self is not None:
- self_type = type(f_self)
- ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
-
- else:
- code = frm.f_code
- if code.co_name in ("wrapper", "<module>"):
- continue
-
- ret.append("{}".format(code.co_name))
-
- depth -= 1
- if not depth:
- break
-
- return "\n".join(ret)
-
- @classmethod
- def _from_exception(cls, pe):
- """
- internal factory method to simplify creating one type of ParseException
- from another - avoids having __init__ signature conflicts among subclasses
- """
- return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
-
- @property
- def line(self) -> str:
- """
- Return the line of text where the exception occurred.
- """
- return line(self.loc, self.pstr)
-
- @property
- def lineno(self) -> int:
- """
- Return the 1-based line number of text where the exception occurred.
- """
- return lineno(self.loc, self.pstr)
-
- @property
- def col(self) -> int:
- """
- Return the 1-based column on the line of text where the exception occurred.
- """
- return col(self.loc, self.pstr)
-
- @property
- def column(self) -> int:
- """
- Return the 1-based column on the line of text where the exception occurred.
- """
- return col(self.loc, self.pstr)
-
- def __str__(self) -> str:
- if self.pstr:
- if self.loc >= len(self.pstr):
- foundstr = ", found end of text"
- else:
- # pull out next word at error location
- found_match = _exception_word_extractor.match(self.pstr, self.loc)
- if found_match is not None:
- found = found_match.group(0)
- else:
- found = self.pstr[self.loc : self.loc + 1]
- foundstr = (", found %r" % found).replace(r"\\", "\\")
- else:
- foundstr = ""
- return "{}{} (at char {}), (line:{}, col:{})".format(
- self.msg, foundstr, self.loc, self.lineno, self.column
- )
-
- def __repr__(self):
- return str(self)
-
- def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
- """
- Extracts the exception line from the input string, and marks
- the location of the exception with a special symbol.
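-
- Example (a minimal sketch)::
-
- try:
- Word(nums).parse_string("abc")
- except ParseException as pe:
- print(pe.mark_input_line())
-
- prints::
-
- >!<abc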
- """
- markerString = marker_string if marker_string is not None else markerString
- line_str = self.line
- line_column = self.column - 1
- if markerString:
- line_str = "".join(
- (line_str[:line_column], markerString, line_str[line_column:])
- )
- return line_str.strip()
-
- def explain(self, depth=16) -> str:
- """
- Method to translate the Python internal traceback into a list
- of the pyparsing expressions that caused the exception to be raised.
-
- Parameters:
-
- - depth (default=16) - number of levels back in the stack trace to list expression
- and function names; if None, the full stack trace names will be listed; if 0, only
- the failing input line, marker, and exception string will be shown
-
- Returns a multi-line string listing the ParserElements and/or function names in the
- exception's stack trace.
-
- Example::
-
- expr = pp.Word(pp.nums) * 3
- try:
- expr.parse_string("123 456 A789")
- except pp.ParseException as pe:
- print(pe.explain(depth=0))
-
- prints::
-
- 123 456 A789
- ^
- ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
-
- Note: the diagnostic output will include string representations of the expressions
- that failed to parse. These representations will be more helpful if you use `set_name` to
- give identifiable names to your expressions. Otherwise they will use the default string
- forms, which may be cryptic to read.
-
- Note: pyparsing's default truncation of exception tracebacks may also truncate the
- stack of expressions that are displayed in the ``explain`` output. To get the full listing
- of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
- """
- return self.explain_exception(self, depth)
-
- markInputline = mark_input_line
-
-
-class ParseException(ParseBaseException):
- """
- Exception thrown when a parse expression doesn't match the input string
-
- Example::
-
- try:
- Word(nums).set_name("integer").parse_string("ABC")
- except ParseException as pe:
- print(pe)
- print("column: {}".format(pe.column))
-
- prints::
-
- Expected integer (at char 0), (line:1, col:1)
- column: 1
-
- """
-
-
-class ParseFatalException(ParseBaseException):
- """
- User-throwable exception thrown when inconsistent parse content
- is found; stops all parsing immediately
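-
- Example (a minimal sketch, raising from within a parse action)::
-
- def must_be_even(s, l, t):
- if int(t[0]) % 2:
- raise ParseFatalException(s, l, "expected an even number")
- even_number = Word(nums).add_parse_action(must_be_even)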
- """
-
-
-class ParseSyntaxException(ParseFatalException):
- """
- Just like :class:`ParseFatalException`, but thrown internally
- when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
- that parsing is to stop immediately because an unbacktrackable
- syntax error has been found.
- """
-
-
-class RecursiveGrammarException(Exception):
- """
- Exception thrown by :class:`ParserElement.validate` if the
- grammar could be left-recursive; parser may need to enable
- left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
- """
-
- def __init__(self, parseElementList):
- self.parseElementTrace = parseElementList
-
- def __str__(self) -> str:
- return "RecursiveGrammarException: {}".format(self.parseElementTrace)
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/helpers.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/helpers.py
deleted file mode 100644
index 9588b3b..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/helpers.py
+++ /dev/null
@@ -1,1088 +0,0 @@
-# helpers.py
-import html.entities
-import re
-import typing
-
-from . import __diag__
-from .core import *
-from .util import _bslash, _flatten, _escape_regex_range_chars
-
-
-#
-# global helpers
-#
-def delimited_list(
- expr: Union[str, ParserElement],
- delim: Union[str, ParserElement] = ",",
- combine: bool = False,
- min: typing.Optional[int] = None,
- max: typing.Optional[int] = None,
- *,
- allow_trailing_delim: bool = False,
-) -> ParserElement:
- """Helper to define a delimited list of expressions - the delimiter
- defaults to ','. By default, the list elements and delimiters can
- have intervening whitespace, and comments, but this can be
- overridden by passing ``combine=True`` in the constructor. If
- ``combine`` is set to ``True``, the matching tokens are
- returned as a single token string, with the delimiters included;
- otherwise, the matching tokens are returned as a list of tokens,
- with the delimiters suppressed.
-
- If ``allow_trailing_delim`` is set to True, then the list may end with
- a delimiter.
-
- Example::
-
- delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
- delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
- """
- if isinstance(expr, str_type):
- expr = ParserElement._literalStringClass(expr)
-
- dlName = "{expr} [{delim} {expr}]...{end}".format(
- expr=str(expr.copy().streamline()),
- delim=str(delim),
- end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
- )
-
- if not combine:
- delim = Suppress(delim)
-
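- # the list is built as expr + (delim + expr)[min, max]; the first expr is
- # matched outside the repetition, so min/max are shifted down by one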
- if min is not None:
- if min < 1:
- raise ValueError("min must be greater than 0")
- min -= 1
- if max is not None:
- if min is not None and max <= min:
- raise ValueError("max must be greater than, or equal to min")
- max -= 1
- delimited_list_expr = expr + (delim + expr)[min, max]
-
- if allow_trailing_delim:
- delimited_list_expr += Opt(delim)
-
- if combine:
- return Combine(delimited_list_expr).set_name(dlName)
- else:
- return delimited_list_expr.set_name(dlName)
-
-
-def counted_array(
- expr: ParserElement,
- int_expr: typing.Optional[ParserElement] = None,
- *,
- intExpr: typing.Optional[ParserElement] = None,
-) -> ParserElement:
- """Helper to define a counted list of expressions.
-
- This helper defines a pattern of the form::
-
- integer expr expr expr...
-
- where the leading integer tells how many expr expressions follow.
- The matched tokens returns the array of expr tokens as a list - the
- leading count token is suppressed.
-
- If ``int_expr`` is specified, it should be a pyparsing expression
- that produces an integer value.
-
- Example::
-
- counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
-
- # in this parser, the leading integer value is given in binary,
- # '10' indicating that 2 values are in the array
- binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
- counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
-
- # if other fields must be parsed after the count but before the
- # list items, give the fields results names and they will
- # be preserved in the returned ParseResults:
- integer = Word(nums).set_parse_action(lambda t: int(t[0]))
- count_with_metadata = integer + Word(alphas)("type")
- typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
- result = typed_array.parse_string("3 bool True True False")
- print(result.dump())
-
- # prints
- # ['True', 'True', 'False']
- # - items: ['True', 'True', 'False']
- # - type: 'bool'
- """
- intExpr = intExpr or int_expr
- array_expr = Forward()
-
- def count_field_parse_action(s, l, t):
- nonlocal array_expr
- n = t[0]
- array_expr <<= (expr * n) if n else Empty()
- # clear list contents, but keep any named results
- del t[:]
-
- if intExpr is None:
- intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
- else:
- intExpr = intExpr.copy()
- intExpr.set_name("arrayLen")
- intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
- return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
-
-
-def match_previous_literal(expr: ParserElement) -> ParserElement:
- """Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks for
- a 'repeat' of a previous expression. For example::
-
- first = Word(nums)
- second = match_previous_literal(first)
- match_expr = first + ":" + second
-
- will match ``"1:1"``, but not ``"1:2"``. Because this
- matches a previous literal, will also match the leading
- ``"1:1"`` in ``"1:10"``. If this is not desired, use
- :class:`match_previous_expr`. Do *not* use with packrat parsing
- enabled.
- """
- rep = Forward()
-
- def copy_token_to_repeater(s, l, t):
- if t:
- if len(t) == 1:
- rep << t[0]
- else:
- # flatten t tokens
- tflat = _flatten(t.as_list())
- rep << And(Literal(tt) for tt in tflat)
- else:
- rep << Empty()
-
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
- rep.set_name("(prev) " + str(expr))
- return rep
-
-
-def match_previous_expr(expr: ParserElement) -> ParserElement:
- """Helper to define an expression that is indirectly defined from
- the tokens matched in a previous expression, that is, it looks for
- a 'repeat' of a previous expression. For example::
-
- first = Word(nums)
- second = match_previous_expr(first)
- match_expr = first + ":" + second
-
- will match ``"1:1"``, but not ``"1:2"``. Because this
- matches by expressions, it will *not* match the leading ``"1:1"``
- in ``"1:10"``; the expressions are evaluated first, and then
- compared, so ``"1"`` is compared with ``"10"``. Do *not* use
- with packrat parsing enabled.
- """
- rep = Forward()
- e2 = expr.copy()
- rep <<= e2
-
- def copy_token_to_repeater(s, l, t):
- matchTokens = _flatten(t.as_list())
-
- def must_match_these_tokens(s, l, t):
- theseTokens = _flatten(t.as_list())
- if theseTokens != matchTokens:
- raise ParseException(
- s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
- )
-
- rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
-
- expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
- rep.set_name("(prev) " + str(expr))
- return rep
-
-
-def one_of(
- strs: Union[typing.Iterable[str], str],
- caseless: bool = False,
- use_regex: bool = True,
- as_keyword: bool = False,
- *,
- useRegex: bool = True,
- asKeyword: bool = False,
-) -> ParserElement:
- """Helper to quickly define a set of alternative :class:`Literal` s,
- and makes sure to do longest-first testing when there is a conflict,
- regardless of the input order, but returns
- a :class:`MatchFirst` for best performance.
-
- Parameters:
-
- - ``strs`` - a string of space-delimited literals, or a collection of
- string literals
- - ``caseless`` - treat all literals as caseless - (default= ``False``)
- - ``use_regex`` - as an optimization, will
- generate a :class:`Regex` object; otherwise, will generate
- a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
- creating a :class:`Regex` raises an exception) - (default= ``True``)
- - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
- generated expressions - (default= ``False``)
- - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
- but will be removed in a future release
-
- Example::
-
- comp_oper = one_of("< = > <= >= !=")
- var = Word(alphas)
- number = Word(nums)
- term = var | number
- comparison_expr = term + comp_oper + term
- print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
-
- prints::
-
- [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
- """
- asKeyword = asKeyword or as_keyword
- useRegex = useRegex and use_regex
-
- if (
- isinstance(caseless, str_type)
- and __diag__.warn_on_multiple_string_args_to_oneof
- ):
- warnings.warn(
- "More than one string argument passed to one_of, pass"
- " choices as a list or space-delimited string",
- stacklevel=2,
- )
-
- if caseless:
- isequal = lambda a, b: a.upper() == b.upper()
- masks = lambda a, b: b.upper().startswith(a.upper())
- parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
- else:
- isequal = lambda a, b: a == b
- masks = lambda a, b: b.startswith(a)
- parseElementClass = Keyword if asKeyword else Literal
-
- symbols: List[str] = []
- if isinstance(strs, str_type):
- symbols = strs.split()
- elif isinstance(strs, Iterable):
- symbols = list(strs)
- else:
- raise TypeError("Invalid argument to one_of, expected string or iterable")
- if not symbols:
- return NoMatch()
-
- # reorder given symbols to take care to avoid masking longer choices with shorter ones
- # (but only if the given symbols are not just single characters)
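- # e.g. one_of("> >= <") must try ">=" before ">", so ">=" gets moved ahead of ">"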
- if any(len(sym) > 1 for sym in symbols):
- i = 0
- while i < len(symbols) - 1:
- cur = symbols[i]
- for j, other in enumerate(symbols[i + 1 :]):
- if isequal(other, cur):
- del symbols[i + j + 1]
- break
- elif masks(cur, other):
- del symbols[i + j + 1]
- symbols.insert(i, other)
- break
- else:
- i += 1
-
- if useRegex:
- re_flags: int = re.IGNORECASE if caseless else 0
-
- try:
- if all(len(sym) == 1 for sym in symbols):
- # symbols are just single characters, create range regex pattern
- patt = "[{}]".format(
- "".join(_escape_regex_range_chars(sym) for sym in symbols)
- )
- else:
- patt = "|".join(re.escape(sym) for sym in symbols)
-
- # wrap with \b word break markers if defining as keywords
- if asKeyword:
- patt = r"\b(?:{})\b".format(patt)
-
- ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
-
- if caseless:
- # add parse action to return symbols as specified, not in random
- # casing as found in input string
- symbol_map = {sym.lower(): sym for sym in symbols}
- ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
-
- return ret
-
- except re.error:
- warnings.warn(
- "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
- )
-
- # last resort, just use MatchFirst
- return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
- " | ".join(symbols)
- )
-
-
-def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
- """Helper to easily and clearly define a dictionary by specifying
- the respective patterns for the key and value. Takes care of
- defining the :class:`Dict`, :class:`ZeroOrMore`, and
- :class:`Group` tokens in the proper order. The key pattern
- can include delimiting markers or punctuation, as long as they are
- suppressed, thereby leaving the significant key text. The value
- pattern can include named results, so that the :class:`Dict` results
- can include named token fields.
-
- Example::
-
- text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
- data_word = Word(alphas)
- label = data_word + FollowedBy(':')
- attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
- print(attr_expr[1, ...].parse_string(text).dump())
-
- attr_label = label
- attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
-
- # similar to Dict, but simpler call format
- result = dict_of(attr_label, attr_value).parse_string(text)
- print(result.dump())
- print(result['shape'])
- print(result.shape) # object attribute access works too
- print(result.as_dict())
-
- prints::
-
- [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- - color: 'light blue'
- - posn: 'upper left'
- - shape: 'SQUARE'
- - texture: 'burlap'
- SQUARE
- SQUARE
- {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
- """
- return Dict(OneOrMore(Group(key + value)))
-
-
-def original_text_for(
- expr: ParserElement, as_string: bool = True, *, asString: bool = True
-) -> ParserElement:
- """Helper to return the original, untokenized text for a given
- expression. Useful to restore the parsed fields of an HTML start
- tag into the raw tag text itself, or to revert separate tokens with
- intervening whitespace back to the original matching input text. By
- default, returns a string containing the original parsed text.
-
- If the optional ``as_string`` argument is passed as
- ``False``, then the return value is
- a :class:`ParseResults` containing any results names that
- were originally matched, and a single token containing the original
- matched text from the input string. So if the expression passed to
- :class:`original_text_for` contains expressions with defined
- results names, you must set ``as_string`` to ``False`` if you
- want to preserve those results name values.
-
- The ``asString`` pre-PEP8 argument is retained for compatibility,
- but will be removed in a future release.
-
- Example::
-
- src = "this is test <b> bold <i>text</i> </b> normal text "
- for tag in ("b", "i"):
- opener, closer = make_html_tags(tag)
- patt = original_text_for(opener + SkipTo(closer) + closer)
- print(patt.search_string(src)[0])
-
- prints::
-
- ['<b> bold <i>text</i> </b>']
- ['<i>text</i>']
- """
- asString = asString and as_string
-
- locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
- endlocMarker = locMarker.copy()
- endlocMarker.callPreparse = False
- matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
- if asString:
- extractText = lambda s, l, t: s[t._original_start : t._original_end]
- else:
-
- def extractText(s, l, t):
- t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
-
- matchExpr.set_parse_action(extractText)
- matchExpr.ignoreExprs = expr.ignoreExprs
- matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
- return matchExpr
-
-
-def ungroup(expr: ParserElement) -> ParserElement:
- """Helper to undo pyparsing's default grouping of And expressions,
- even if all but one are non-empty.
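-
- Example (a minimal sketch)::
-
- grouped = Group(Word(alphas) + Word(nums))
- print(grouped.parse_string("abc 123")) # -> [['abc', '123']]
- print(ungroup(grouped).parse_string("abc 123")) # -> ['abc', '123']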
- """
- return TokenConverter(expr).add_parse_action(lambda t: t[0])
-
-
-def locatedExpr(expr: ParserElement) -> ParserElement:
- """
- (DEPRECATED - future code should use the Located class)
- Helper to decorate a returned token with its starting and ending
- locations in the input string.
-
- This helper adds the following results names:
-
- - ``locn_start`` - location where matched expression begins
- - ``locn_end`` - location where matched expression ends
- - ``value`` - the actual parsed results
-
- Be careful if the input text contains ``<TAB>`` characters, you
- may want to call :class:`ParserElement.parseWithTabs`
-
- Example::
-
- wd = Word(alphas)
- for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
- print(match)
-
- prints::
-
- [[0, 'ljsdf', 5]]
- [[8, 'lksdjjf', 15]]
- [[18, 'lkkjj', 23]]
- """
- locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
- return Group(
- locator("locn_start")
- + expr("value")
- + locator.copy().leaveWhitespace()("locn_end")
- )
-
-
-def nested_expr(
- opener: Union[str, ParserElement] = "(",
- closer: Union[str, ParserElement] = ")",
- content: typing.Optional[ParserElement] = None,
- ignore_expr: ParserElement = quoted_string(),
- *,
- ignoreExpr: ParserElement = quoted_string(),
-) -> ParserElement:
- """Helper method for defining nested lists enclosed in opening and
- closing delimiters (``"("`` and ``")"`` are the default).
-
- Parameters:
- - ``opener`` - opening character for a nested list
- (default= ``"("``); can also be a pyparsing expression
- - ``closer`` - closing character for a nested list
- (default= ``")"``); can also be a pyparsing expression
- - ``content`` - expression for items within the nested lists
- (default= ``None``)
- - ``ignore_expr`` - expression for ignoring opening and closing delimiters
- (default= :class:`quoted_string`)
- - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
- but will be removed in a future release
-
- If an expression is not provided for the content argument, the
- nested expression will capture all whitespace-delimited content
- between delimiters as a list of separate values.
-
- Use the ``ignore_expr`` argument to define expressions that may
- contain opening or closing characters that should not be treated as
- opening or closing characters for nesting, such as quoted_string or
- a comment expression. Specify multiple expressions using an
- :class:`Or` or :class:`MatchFirst`. The default is
- :class:`quoted_string`, but if no expressions are to be ignored, then
- pass ``None`` for this argument.
-
- Example::
-
- data_type = one_of("void int short long char float double")
- decl_data_type = Combine(data_type + Opt(Word('*')))
- ident = Word(alphas+'_', alphanums+'_')
- number = pyparsing_common.number
- arg = Group(decl_data_type + ident)
- LPAR, RPAR = map(Suppress, "()")
-
- code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
-
- c_function = (decl_data_type("type")
- + ident("name")
- + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
- + code_body("body"))
- c_function.ignore(c_style_comment)
-
- source_code = '''
- int is_odd(int x) {
- return (x%2);
- }
-
- int dec_to_hex(char hchar) {
- if (hchar >= '0' && hchar <= '9') {
- return (ord(hchar)-ord('0'));
- } else {
- return (10+ord(hchar)-ord('A'));
- }
- }
- '''
- for func in c_function.search_string(source_code):
- print("%(name)s (%(type)s) args: %(args)s" % func)
-
-
- prints::
-
- is_odd (int) args: [['int', 'x']]
- dec_to_hex (int) args: [['char', 'hchar']]
- """
- if ignoreExpr != ignore_expr:
- ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
- if opener == closer:
- raise ValueError("opening and closing strings cannot be the same")
- if content is None:
- if isinstance(opener, str_type) and isinstance(closer, str_type):
- if len(opener) == 1 and len(closer) == 1:
- if ignoreExpr is not None:
- content = Combine(
- OneOrMore(
- ~ignoreExpr
- + CharsNotIn(
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
- exact=1,
- )
- )
- ).set_parse_action(lambda t: t[0].strip())
- else:
- content = empty.copy() + CharsNotIn(
- opener + closer + ParserElement.DEFAULT_WHITE_CHARS
- ).set_parse_action(lambda t: t[0].strip())
- else:
- if ignoreExpr is not None:
- content = Combine(
- OneOrMore(
- ~ignoreExpr
- + ~Literal(opener)
- + ~Literal(closer)
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
- )
- ).set_parse_action(lambda t: t[0].strip())
- else:
- content = Combine(
- OneOrMore(
- ~Literal(opener)
- + ~Literal(closer)
- + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
- )
- ).set_parse_action(lambda t: t[0].strip())
- else:
- raise ValueError(
- "opening and closing arguments must be strings if no content expression is given"
- )
- ret = Forward()
- if ignoreExpr is not None:
- ret <<= Group(
- Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
- )
- else:
- ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
- ret.set_name("nested %s%s expression" % (opener, closer))
- return ret
-
-
-def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
- """Internal helper to construct opening and closing tag expressions, given a tag name"""
- if isinstance(tagStr, str_type):
- resname = tagStr
- tagStr = Keyword(tagStr, caseless=not xml)
- else:
- resname = tagStr.name
-
- tagAttrName = Word(alphas, alphanums + "_-:")
- if xml:
- tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
- openTag = (
- suppress_LT
- + tagStr("tag")
- + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
- + Opt("/", default=[False])("empty").set_parse_action(
- lambda s, l, t: t[0] == "/"
- )
- + suppress_GT
- )
- else:
- tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
- printables, exclude_chars=">"
- )
- openTag = (
- suppress_LT
- + tagStr("tag")
- + Dict(
- ZeroOrMore(
- Group(
- tagAttrName.set_parse_action(lambda t: t[0].lower())
- + Opt(Suppress("=") + tagAttrValue)
- )
- )
- )
- + Opt("/", default=[False])("empty").set_parse_action(
- lambda s, l, t: t[0] == "/"
- )
- + suppress_GT
- )
- closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
-
- openTag.set_name("<%s>" % resname)
- # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
- openTag.add_parse_action(
- lambda t: t.__setitem__(
- "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
- )
- )
- closeTag = closeTag(
- "end" + "".join(resname.replace(":", " ").title().split())
- ).set_name("</%s>" % resname)
- openTag.tag = resname
- closeTag.tag = resname
- openTag.tag_body = SkipTo(closeTag())
- return openTag, closeTag
-
-
-def make_html_tags(
- tag_str: Union[str, ParserElement]
-) -> Tuple[ParserElement, ParserElement]:
- """Helper to construct opening and closing tag expressions for HTML,
- given a tag name. Matches tags in either upper or lower case,
- attributes with namespaces and with quoted or unquoted values.
-
- Example::
-
- text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
- # make_html_tags returns pyparsing expressions for the opening and
- # closing tags as a 2-tuple
- a, a_end = make_html_tags("A")
- link_expr = a + SkipTo(a_end)("link_text") + a_end
-
- for link in link_expr.search_string(text):
- # attributes in the <A> tag (like "href" shown here) are
- # also accessible as named results
- print(link.link_text, '->', link.href)
-
- prints::
-
- pyparsing -> https://github.com/pyparsing/pyparsing/wiki
- """
- return _makeTags(tag_str, False)
-
-
-def make_xml_tags(
- tag_str: Union[str, ParserElement]
-) -> Tuple[ParserElement, ParserElement]:
- """Helper to construct opening and closing tag expressions for XML,
- given a tag name. Matches tags only in the given upper/lower case.
-
- Example: similar to :class:`make_html_tags`
- """
- return _makeTags(tag_str, True)
-
-
-any_open_tag: ParserElement
-any_close_tag: ParserElement
-any_open_tag, any_close_tag = make_html_tags(
- Word(alphas, alphanums + "_:").set_name("any tag")
-)
-
-_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
-common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
- "common HTML entity"
-)
-
-
-def replace_html_entity(t):
- """Helper parser action to replace common HTML entities with their special characters"""
- return _htmlEntityMap.get(t.entity)
-
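-# a typical usage sketch: attach replace_html_entity as a parse action, so that
-# matched entities are converted to their literal characters, e.g.
-# transformer = common_html_entity.copy().set_parse_action(replace_html_entity)
-# transformer.transform_string("&lt;hello&gt;") # -> "<hello>"
-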
-
-class OpAssoc(Enum):
- LEFT = 1
- RIGHT = 2
-
-
-InfixNotationOperatorArgType = Union[
- ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
-]
-InfixNotationOperatorSpec = Union[
- Tuple[
- InfixNotationOperatorArgType,
- int,
- OpAssoc,
- typing.Optional[ParseAction],
- ],
- Tuple[
- InfixNotationOperatorArgType,
- int,
- OpAssoc,
- ],
-]
-
-
-def infix_notation(
- base_expr: ParserElement,
- op_list: List[InfixNotationOperatorSpec],
- lpar: Union[str, ParserElement] = Suppress("("),
- rpar: Union[str, ParserElement] = Suppress(")"),
-) -> ParserElement:
- """Helper method for constructing grammars of expressions made up of
- operators working in a precedence hierarchy. Operators may be unary
- or binary, left- or right-associative. Parse actions can also be
- attached to operator expressions. The generated parser will also
- recognize the use of parentheses to override operator precedences
- (see example below).
-
- Note: if you define a deep operator list, you may see performance
- issues when using infix_notation. See
- :class:`ParserElement.enable_packrat` for a mechanism to potentially
- improve your parser performance.
-
- Parameters:
- - ``base_expr`` - expression representing the most basic operand to
- be used in the expression
- - ``op_list`` - list of tuples, one for each operator precedence level
- in the expression grammar; each tuple is of the form ``(op_expr,
- num_operands, right_left_assoc, (optional)parse_action)``, where:
-
- - ``op_expr`` is the pyparsing expression for the operator; may also
- be a string, which will be converted to a Literal; if ``num_operands``
- is 3, ``op_expr`` is a tuple of two expressions, for the two
- operators separating the 3 terms
- - ``num_operands`` is the number of terms for this operator (must be 1,
- 2, or 3)
- - ``right_left_assoc`` is the indicator whether the operator is right
- or left associative, using the pyparsing-defined constants
- ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
- - ``parse_action`` is the parse action to be associated with
- expressions matching this operator expression (the parse action
- tuple member may be omitted); if the parse action is passed
- a tuple or list of functions, this is equivalent to calling
- ``set_parse_action(*fn)``
- (:class:`ParserElement.set_parse_action`)
- - ``lpar`` - expression for matching left-parentheses; if passed as a
- str, then will be parsed as Suppress(lpar). If lpar is passed as
- an expression (such as ``Literal('(')``), then it will be kept in
- the parsed results, and grouped with them. (default= ``Suppress('(')``)
- - ``rpar`` - expression for matching right-parentheses; if passed as a
- str, then will be parsed as Suppress(rpar). If rpar is passed as
- an expression (such as ``Literal(')')``), then it will be kept in
- the parsed results, and grouped with them. (default= ``Suppress(')')``)
-
- Example::
-
- # simple example of four-function arithmetic with ints and
- # variable names
- integer = pyparsing_common.signed_integer
- varname = pyparsing_common.identifier
-
- arith_expr = infix_notation(integer | varname,
- [
- ('-', 1, OpAssoc.RIGHT),
- (one_of('* /'), 2, OpAssoc.LEFT),
- (one_of('+ -'), 2, OpAssoc.LEFT),
- ])
-
- arith_expr.run_tests('''
- 5+3*6
- (5+3)*6
- -2--11
- ''', full_dump=False)
-
- prints::
-
- 5+3*6
- [[5, '+', [3, '*', 6]]]
-
- (5+3)*6
- [[[5, '+', 3], '*', 6]]
-
- -2--11
- [[['-', 2], '-', ['-', 11]]]
- """
- # captive version of FollowedBy that does not do parse actions or capture results names
- class _FB(FollowedBy):
- def parseImpl(self, instring, loc, doActions=True):
- self.expr.try_parse(instring, loc)
- return loc, []
-
- _FB.__name__ = "FollowedBy>"
-
- ret = Forward()
- if isinstance(lpar, str):
- lpar = Suppress(lpar)
- if isinstance(rpar, str):
- rpar = Suppress(rpar)
-
- # if lpar and rpar are not suppressed, wrap in group
- if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
- lastExpr = base_expr | Group(lpar + ret + rpar)
- else:
- lastExpr = base_expr | (lpar + ret + rpar)
-
- for i, operDef in enumerate(op_list):
- opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
- if isinstance(opExpr, str_type):
- opExpr = ParserElement._literalStringClass(opExpr)
- if arity == 3:
- if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
- raise ValueError(
- "if numterms=3, opExpr must be a tuple or list of two expressions"
- )
- opExpr1, opExpr2 = opExpr
- term_name = "{}{} term".format(opExpr1, opExpr2)
- else:
- term_name = "{} term".format(opExpr)
-
- if not 1 <= arity <= 3:
- raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
-
- if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
- raise ValueError("operator must indicate right or left associativity")
-
- thisExpr: Forward = Forward().set_name(term_name)
- if rightLeftAssoc is OpAssoc.LEFT:
- if arity == 1:
- matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
- elif arity == 2:
- if opExpr is not None:
- matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
- lastExpr + (opExpr + lastExpr)[1, ...]
- )
- else:
- matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
- elif arity == 3:
- matchExpr = _FB(
- lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
- ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
- elif rightLeftAssoc is OpAssoc.RIGHT:
- if arity == 1:
- # try to avoid LR with this extra test
- if not isinstance(opExpr, Opt):
- opExpr = Opt(opExpr)
- matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
- elif arity == 2:
- if opExpr is not None:
- matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
- lastExpr + (opExpr + thisExpr)[1, ...]
- )
- else:
- matchExpr = _FB(lastExpr + thisExpr) + Group(
- lastExpr + thisExpr[1, ...]
- )
- elif arity == 3:
- matchExpr = _FB(
- lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
- ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
- if pa:
- if isinstance(pa, (tuple, list)):
- matchExpr.set_parse_action(*pa)
- else:
- matchExpr.set_parse_action(pa)
- thisExpr <<= (matchExpr | lastExpr).setName(term_name)
- lastExpr = thisExpr
- ret <<= lastExpr
- return ret
-
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
- """
- (DEPRECATED - use IndentedBlock class instead)
- Helper method for defining space-delimited indentation blocks,
- such as those used to define block statements in Python source code.
-
- Parameters:
-
- - ``blockStatementExpr`` - expression defining syntax of statement that
- is repeated within the indented block
- - ``indentStack`` - list created by caller to manage indentation stack
- (multiple ``statementWithIndentedBlock`` expressions within a single
- grammar should share a common ``indentStack``)
- - ``indent`` - boolean indicating whether block must be indented beyond
- the current level; set to ``False`` for block of left-most statements
- (default= ``True``)
-
- A valid block must contain at least one ``blockStatement``.
-
- (Note that indentedBlock uses internal parse actions which make it
- incompatible with packrat parsing.)
-
- Example::
-
- data = '''
- def A(z):
- A1
- B = 100
- G = A2
- A2
- A3
- B
- def BB(a,b,c):
- BB1
- def BBA():
- bba1
- bba2
- bba3
- C
- D
- def spam(x,y):
- def eggs(z):
- pass
- '''
-
-
- indentStack = [1]
- stmt = Forward()
-
- identifier = Word(alphas, alphanums)
- funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
- func_body = indentedBlock(stmt, indentStack)
- funcDef = Group(funcDecl + func_body)
-
- rvalue = Forward()
- funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
- rvalue << (funcCall | identifier | Word(nums))
- assignment = Group(identifier + "=" + rvalue)
- stmt << (funcDef | assignment | identifier)
-
- module_body = stmt[1, ...]
-
- parseTree = module_body.parseString(data)
- parseTree.pprint()
-
- prints::
-
- [['def',
- 'A',
- ['(', 'z', ')'],
- ':',
- [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
- 'B',
- ['def',
- 'BB',
- ['(', 'a', 'b', 'c', ')'],
- ':',
- [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
- 'C',
- 'D',
- ['def',
- 'spam',
- ['(', 'x', 'y', ')'],
- ':',
- [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
- """
- backup_stacks.append(indentStack[:])
-
- def reset_stack():
- indentStack[:] = backup_stacks[-1]
-
- def checkPeerIndent(s, l, t):
- if l >= len(s):
- return
- curCol = col(l, s)
- if curCol != indentStack[-1]:
- if curCol > indentStack[-1]:
- raise ParseException(s, l, "illegal nesting")
- raise ParseException(s, l, "not a peer entry")
-
- def checkSubIndent(s, l, t):
- curCol = col(l, s)
- if curCol > indentStack[-1]:
- indentStack.append(curCol)
- else:
- raise ParseException(s, l, "not a subentry")
-
- def checkUnindent(s, l, t):
- if l >= len(s):
- return
- curCol = col(l, s)
- if not (indentStack and curCol in indentStack):
- raise ParseException(s, l, "not an unindent")
- if curCol < indentStack[-1]:
- indentStack.pop()
-
- NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
- INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
- PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
- UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
- if indent:
- smExpr = Group(
- Opt(NL)
- + INDENT
- + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
- + UNDENT
- )
- else:
- smExpr = Group(
- Opt(NL)
- + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
- + Opt(UNDENT)
- )
-
- # add a parse action to remove backup_stack from list of backups
- smExpr.add_parse_action(
- lambda: backup_stacks.pop(-1) and None if backup_stacks else None
- )
- smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
- blockStatementExpr.ignore(_bslash + LineEnd())
- return smExpr.set_name("indented block")
-
-
-# it's easy to get these comment structures wrong - they're very common, so may as well make them available
-c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
- "C style comment"
-)
-"Comment of the form ``/* ... */``"
-
-html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
-"Comment of the form ``<!-- ... -->``"
-
-rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
-dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
-"Comment of the form ``// ... (to end of line)``"
-
-cpp_style_comment = Combine(
- Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
-).set_name("C++ style comment")
-"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
-
-java_style_comment = cpp_style_comment
-"Same as :class:`cpp_style_comment`"
-
-python_style_comment = Regex(r"#.*").set_name("Python style comment")
-"Comment of the form ``# ... (to end of line)``"
-
-
-# build list of built-in expressions, for future reference if a global default value
-# gets updated
-_builtin_exprs: List[ParserElement] = [
- v for v in vars().values() if isinstance(v, ParserElement)
-]
-
-
-# pre-PEP8 compatible names
-delimitedList = delimited_list
-countedArray = counted_array
-matchPreviousLiteral = match_previous_literal
-matchPreviousExpr = match_previous_expr
-oneOf = one_of
-dictOf = dict_of
-originalTextFor = original_text_for
-nestedExpr = nested_expr
-makeHTMLTags = make_html_tags
-makeXMLTags = make_xml_tags
-anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
-commonHTMLEntity = common_html_entity
-replaceHTMLEntity = replace_html_entity
-opAssoc = OpAssoc
-infixNotation = infix_notation
-cStyleComment = c_style_comment
-htmlComment = html_comment
-restOfLine = rest_of_line
-dblSlashComment = dbl_slash_comment
-cppStyleComment = cpp_style_comment
-javaStyleComment = java_style_comment
-pythonStyleComment = python_style_comment
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/results.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/results.py
deleted file mode 100644
index 00c9421..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/results.py
+++ /dev/null
@@ -1,760 +0,0 @@
-# results.py
-from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
-import pprint
-from weakref import ref as wkref
-from typing import Tuple, Any
-
-str_type: Tuple[type, ...] = (str, bytes)
-_generator_type = type((_ for _ in ()))
-
-
-class _ParseResultsWithOffset:
- __slots__ = ["tup"]
-
- def __init__(self, p1, p2):
- self.tup = (p1, p2)
-
- def __getitem__(self, i):
- return self.tup[i]
-
- def __getstate__(self):
- return self.tup
-
- def __setstate__(self, *args):
- self.tup = args[0]
-
-
-class ParseResults:
- """Structured parse results, to provide multiple means of access to
- the parsed data:
-
- - as a list (``len(results)``)
- - by list index (``results[0], results[1]``, etc.)
- - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
-
- Example::
-
- integer = Word(nums)
- date_str = (integer.set_results_name("year") + '/'
- + integer.set_results_name("month") + '/'
- + integer.set_results_name("day"))
- # equivalent form:
- # date_str = (integer("year") + '/'
- # + integer("month") + '/'
- # + integer("day"))
-
- # parse_string returns a ParseResults object
- result = date_str.parse_string("1999/12/31")
-
- def test(s, fn=repr):
- print("{} -> {}".format(s, fn(eval(s))))
- test("list(result)")
- test("result[0]")
- test("result['month']")
- test("result.day")
- test("'month' in result")
- test("'minutes' in result")
- test("result.dump()", str)
-
- prints::
-
- list(result) -> ['1999', '/', '12', '/', '31']
- result[0] -> '1999'
- result['month'] -> '12'
- result.day -> '31'
- 'month' in result -> True
- 'minutes' in result -> False
- result.dump() -> ['1999', '/', '12', '/', '31']
- - day: '31'
- - month: '12'
- - year: '1999'
- """
-
- _null_values: Tuple[Any, ...] = (None, [], "", ())
-
- __slots__ = [
- "_name",
- "_parent",
- "_all_names",
- "_modal",
- "_toklist",
- "_tokdict",
- "__weakref__",
- ]
-
- class List(list):
- """
- Simple wrapper class to distinguish parsed list results that should be preserved
- as actual Python lists, instead of being converted to :class:`ParseResults`:
-
- LBRACK, RBRACK = map(pp.Suppress, "[]")
- element = pp.Forward()
- item = ppc.integer
- element_list = LBRACK + pp.delimited_list(element) + RBRACK
-
- # add parse actions to convert from ParseResults to actual Python collection types
- def as_python_list(t):
- return pp.ParseResults.List(t.as_list())
- element_list.add_parse_action(as_python_list)
-
- element <<= item | element_list
-
- element.run_tests('''
- 100
- [2,3,4]
- [[2, 1],3,4]
- [(2, 1),3,4]
- (2,3,4)
- ''', post_parse=lambda s, r: (r[0], type(r[0])))
-
- prints:
-
- 100
- (100, <class 'int'>)
-
- [2,3,4]
- ([2, 3, 4], <class 'list'>)
-
- [[2, 1],3,4]
- ([[2, 1], 3, 4], <class 'list'>)
-
- (Used internally by :class:`Group` when `aslist=True`.)
- """
-
- def __new__(cls, contained=None):
- if contained is None:
- contained = []
-
- if not isinstance(contained, list):
- raise TypeError(
- "{} may only be constructed with a list,"
- " not {}".format(cls.__name__, type(contained).__name__)
- )
-
- return list.__new__(cls)
-
- def __new__(cls, toklist=None, name=None, **kwargs):
- if isinstance(toklist, ParseResults):
- return toklist
- self = object.__new__(cls)
- self._name = None
- self._parent = None
- self._all_names = set()
-
- if toklist is None:
- self._toklist = []
- elif isinstance(toklist, (list, _generator_type)):
- self._toklist = (
- [toklist[:]]
- if isinstance(toklist, ParseResults.List)
- else list(toklist)
- )
- else:
- self._toklist = [toklist]
- self._tokdict = dict()
- return self
-
- # Performance tuning: we construct a *lot* of these, so keep this
- # constructor as small and fast as possible
- def __init__(
- self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
- ):
- self._modal = modal
- if name is not None and name != "":
- if isinstance(name, int):
- name = str(name)
- if not modal:
- self._all_names = {name}
- self._name = name
- if toklist not in self._null_values:
- if isinstance(toklist, (str_type, type)):
- toklist = [toklist]
- if asList:
- if isinstance(toklist, ParseResults):
- self[name] = _ParseResultsWithOffset(
- ParseResults(toklist._toklist), 0
- )
- else:
- self[name] = _ParseResultsWithOffset(
- ParseResults(toklist[0]), 0
- )
- self[name]._name = name
- else:
- try:
- self[name] = toklist[0]
- except (KeyError, TypeError, IndexError):
- if toklist is not self:
- self[name] = toklist
- else:
- self._name = name
-
- def __getitem__(self, i):
- if isinstance(i, (int, slice)):
- return self._toklist[i]
- else:
- if i not in self._all_names:
- return self._tokdict[i][-1][0]
- else:
- return ParseResults([v[0] for v in self._tokdict[i]])
-
- def __setitem__(self, k, v, isinstance=isinstance):
- if isinstance(v, _ParseResultsWithOffset):
- self._tokdict[k] = self._tokdict.get(k, list()) + [v]
- sub = v[0]
- elif isinstance(k, (int, slice)):
- self._toklist[k] = v
- sub = v
- else:
- self._tokdict[k] = self._tokdict.get(k, list()) + [
- _ParseResultsWithOffset(v, 0)
- ]
- sub = v
- if isinstance(sub, ParseResults):
- sub._parent = wkref(self)
-
- def __delitem__(self, i):
- if isinstance(i, (int, slice)):
- mylen = len(self._toklist)
- del self._toklist[i]
-
- # convert int to slice
- if isinstance(i, int):
- if i < 0:
- i += mylen
- i = slice(i, i + 1)
- # get removed indices
- removed = list(range(*i.indices(mylen)))
- removed.reverse()
- # fixup indices in token dictionary
- for name, occurrences in self._tokdict.items():
- for j in removed:
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(
- value, position - (position > j)
- )
- else:
- del self._tokdict[i]
-
- def __contains__(self, k) -> bool:
- return k in self._tokdict
-
- def __len__(self) -> int:
- return len(self._toklist)
-
- def __bool__(self) -> bool:
- return not not (self._toklist or self._tokdict)
-
- def __iter__(self) -> Iterator:
- return iter(self._toklist)
-
- def __reversed__(self) -> Iterator:
- return iter(self._toklist[::-1])
-
- def keys(self):
- return iter(self._tokdict)
-
- def values(self):
- return (self[k] for k in self.keys())
-
- def items(self):
- return ((k, self[k]) for k in self.keys())
-
- def haskeys(self) -> bool:
- """
- Since ``keys()`` returns an iterator, this method provides a quick
- way to test whether any results names have been defined."""
- return bool(self._tokdict)
-
- def pop(self, *args, **kwargs):
- """
- Removes and returns item at specified index (default= ``last``).
- Supports both ``list`` and ``dict`` semantics for ``pop()``. If
- passed no argument or an integer argument, it will use ``list``
- semantics and pop tokens from the list of parsed tokens. If passed
- a non-integer argument (most likely a string), it will use ``dict``
- semantics and pop the corresponding value from any defined results
- names. A second default return value argument is supported, just as in
- ``dict.pop()``.
-
- Example::
-
- numlist = Word(nums)[...]
- print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
- def remove_first(tokens):
- tokens.pop(0)
- numlist.add_parse_action(remove_first)
- print(numlist.parse_string("0 123 321")) # -> ['123', '321']
-
- label = Word(alphas)
- patt = label("LABEL") + Word(nums)[1, ...]
- print(patt.parse_string("AAB 123 321").dump())
-
- # Use pop() in a parse action to remove named result (note that corresponding value is not
- # removed from list form of results)
- def remove_LABEL(tokens):
- tokens.pop("LABEL")
- return tokens
- patt.add_parse_action(remove_LABEL)
- print(patt.parse_string("AAB 123 321").dump())
-
- prints::
-
- ['AAB', '123', '321']
- - LABEL: 'AAB'
-
- ['AAB', '123', '321']
- """
- if not args:
- args = [-1]
- for k, v in kwargs.items():
- if k == "default":
- args = (args[0], v)
- else:
- raise TypeError(
- "pop() got an unexpected keyword argument {!r}".format(k)
- )
- if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
- index = args[0]
- ret = self[index]
- del self[index]
- return ret
- else:
- defaultvalue = args[1]
- return defaultvalue
-
- def get(self, key, default_value=None):
- """
- Returns the named result matching the given key. If there is no
- such name, returns the given ``default_value``, or ``None`` if no
- ``default_value`` is specified.
-
- Similar to ``dict.get()``.
-
- Example::
-
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parse_string("1999/12/31")
- print(result.get("year")) # -> '1999'
- print(result.get("hour", "not specified")) # -> 'not specified'
- print(result.get("hour")) # -> None
- """
- if key in self:
- return self[key]
- else:
- return default_value
-
- def insert(self, index, ins_string):
- """
- Inserts new element at location index in the list of parsed tokens.
-
- Similar to ``list.insert()``.
-
- Example::
-
- numlist = Word(nums)[...]
- print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to insert the parse location in the front of the parsed results
- def insert_locn(locn, tokens):
- tokens.insert(0, locn)
- numlist.add_parse_action(insert_locn)
- print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
- """
- self._toklist.insert(index, ins_string)
- # fixup indices in token dictionary
- for name, occurrences in self._tokdict.items():
- for k, (value, position) in enumerate(occurrences):
- occurrences[k] = _ParseResultsWithOffset(
- value, position + (position > index)
- )
-
- def append(self, item):
- """
- Add single element to end of ``ParseResults`` list of elements.
-
- Example::
-
- numlist = Word(nums)[...]
- print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
-
- # use a parse action to compute the sum of the parsed integers, and add it to the end
- def append_sum(tokens):
- tokens.append(sum(map(int, tokens)))
- numlist.add_parse_action(append_sum)
- print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
- """
- self._toklist.append(item)
-
- def extend(self, itemseq):
- """
- Add sequence of elements to end of ``ParseResults`` list of elements.
-
- Example::
-
- patt = Word(alphas)[1, ...]
-
- # use a parse action to append the reverse of the matched strings, to make a palindrome
- def make_palindrome(tokens):
- tokens.extend(reversed([t[::-1] for t in tokens]))
- return ''.join(tokens)
- patt.add_parse_action(make_palindrome)
- print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
- """
- if isinstance(itemseq, ParseResults):
- self.__iadd__(itemseq)
- else:
- self._toklist.extend(itemseq)
-
- def clear(self):
- """
- Clear all elements and results names.
- """
- del self._toklist[:]
- self._tokdict.clear()
-
- def __getattr__(self, name):
- try:
- return self[name]
- except KeyError:
- if name.startswith("__"):
- raise AttributeError(name)
- return ""
-
- def __add__(self, other) -> "ParseResults":
- ret = self.copy()
- ret += other
- return ret
-
- def __iadd__(self, other) -> "ParseResults":
- if other._tokdict:
- offset = len(self._toklist)
- addoffset = lambda a: offset if a < 0 else a + offset
- otheritems = other._tokdict.items()
- otherdictitems = [
- (k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
- for k, vlist in otheritems
- for v in vlist
- ]
- for k, v in otherdictitems:
- self[k] = v
- if isinstance(v[0], ParseResults):
- v[0]._parent = wkref(self)
-
- self._toklist += other._toklist
- self._all_names |= other._all_names
- return self
-
- def __radd__(self, other) -> "ParseResults":
- if isinstance(other, int) and other == 0:
- # useful for merging many ParseResults using sum() builtin
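- # (illustrative sketch, not in the original source: sum([res_a, res_b])
- #  starts its reduction at 0, which this branch turns into res_a.copy())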
- return self.copy()
- else:
- # this may raise a TypeError - so be it
- return other + self
-
- def __repr__(self) -> str:
- return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict())
-
- def __str__(self) -> str:
- return (
- "["
- + ", ".join(
- [
- str(i) if isinstance(i, ParseResults) else repr(i)
- for i in self._toklist
- ]
- )
- + "]"
- )
-
- def _asStringList(self, sep=""):
- out = []
- for item in self._toklist:
- if out and sep:
- out.append(sep)
- if isinstance(item, ParseResults):
- out += item._asStringList()
- else:
- out.append(str(item))
- return out
-
- def as_list(self) -> list:
- """
- Returns the parse results as a nested list of matching tokens, all converted to strings.
-
- Example::
-
- patt = Word(alphas)[1, ...]
- result = patt.parse_string("sldkj lsdkj sldkj")
- # even though the result prints in string-like form, it is actually a pyparsing ParseResults
- print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
-
- # Use as_list() to create an actual list
- result_list = result.as_list()
- print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
- """
- return [
- res.as_list() if isinstance(res, ParseResults) else res
- for res in self._toklist
- ]
-
- def as_dict(self) -> dict:
- """
- Returns the named parse results as a nested dictionary.
-
- Example::
-
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parse_string('12/31/1999')
- print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
-
- result_dict = result.as_dict()
- print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
-
- # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
- import json
- print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
- print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
- """
-
- def to_item(obj):
- if isinstance(obj, ParseResults):
- return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
- else:
- return obj
-
- return dict((k, to_item(v)) for k, v in self.items())
-
- def copy(self) -> "ParseResults":
- """
- Returns a new copy of a :class:`ParseResults` object.
- """
- ret = ParseResults(self._toklist)
- ret._tokdict = self._tokdict.copy()
- ret._parent = self._parent
- ret._all_names |= self._all_names
- ret._name = self._name
- return ret
-
- def get_name(self):
- r"""
- Returns the results name for this token expression. Useful when several
- different expressions might match at a particular location.
-
- Example::
-
- integer = Word(nums)
- ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
- house_number_expr = Suppress('#') + Word(nums, alphanums)
- user_data = (Group(house_number_expr)("house_number")
- | Group(ssn_expr)("ssn")
- | Group(integer)("age"))
- user_info = user_data[1, ...]
-
- result = user_info.parse_string("22 111-22-3333 #221B")
- for item in result:
- print(item.get_name(), ':', item[0])
-
- prints::
-
- age : 22
- ssn : 111-22-3333
- house_number : 221B
- """
- if self._name:
- return self._name
- elif self._parent:
- par = self._parent()
-
- def find_in_parent(sub):
- return next(
- (
- k
- for k, vlist in par._tokdict.items()
- for v, loc in vlist
- if sub is v
- ),
- None,
- )
-
- return find_in_parent(self) if par else None
- elif (
- len(self) == 1
- and len(self._tokdict) == 1
- and next(iter(self._tokdict.values()))[0][1] in (0, -1)
- ):
- return next(iter(self._tokdict.keys()))
- else:
- return None
-
- def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
- """
- Diagnostic method for listing out the contents of
- a :class:`ParseResults`. Accepts an optional ``indent`` argument so
- that this string can be embedded in a nested display of other data.
-
- Example::
-
- integer = Word(nums)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
-
- result = date_str.parse_string('1999/12/31')
- print(result.dump())
-
- prints::
-
- ['1999', '/', '12', '/', '31']
- - day: '31'
- - month: '12'
- - year: '1999'
- """
- out = []
- NL = "\n"
- out.append(indent + str(self.as_list()) if include_list else "")
-
- if full:
- if self.haskeys():
- items = sorted((str(k), v) for k, v in self.items())
- for k, v in items:
- if out:
- out.append(NL)
- out.append("{}{}- {}: ".format(indent, (" " * _depth), k))
- if isinstance(v, ParseResults):
- if v:
- out.append(
- v.dump(
- indent=indent,
- full=full,
- include_list=include_list,
- _depth=_depth + 1,
- )
- )
- else:
- out.append(str(v))
- else:
- out.append(repr(v))
- if any(isinstance(vv, ParseResults) for vv in self):
- v = self
- for i, vv in enumerate(v):
- if isinstance(vv, ParseResults):
- out.append(
- "\n{}{}[{}]:\n{}{}{}".format(
- indent,
- (" " * (_depth)),
- i,
- indent,
- (" " * (_depth + 1)),
- vv.dump(
- indent=indent,
- full=full,
- include_list=include_list,
- _depth=_depth + 1,
- ),
- )
- )
- else:
- out.append(
- "\n%s%s[%d]:\n%s%s%s"
- % (
- indent,
- (" " * (_depth)),
- i,
- indent,
- (" " * (_depth + 1)),
- str(vv),
- )
- )
-
- return "".join(out)
-
- def pprint(self, *args, **kwargs):
- """
- Pretty-printer for parsed results as a list, using the
- `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
- Accepts additional positional or keyword args as defined for
- `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
-
- Example::
-
- ident = Word(alphas, alphanums)
- num = Word(nums)
- func = Forward()
- term = ident | num | Group('(' + func + ')')
- func <<= ident + Group(Optional(delimited_list(term)))
- result = func.parse_string("fna a,b,(fnb c,d,200),100")
- result.pprint(width=40)
-
- prints::
-
- ['fna',
- ['a',
- 'b',
- ['(', 'fnb', ['c', 'd', '200'], ')'],
- '100']]
- """
- pprint.pprint(self.as_list(), *args, **kwargs)
-
- # add support for pickle protocol
- def __getstate__(self):
- return (
- self._toklist,
- (
- self._tokdict.copy(),
- self._parent is not None and self._parent() or None,
- self._all_names,
- self._name,
- ),
- )
-
- def __setstate__(self, state):
- self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
- self._all_names = set(inAccumNames)
- if par is not None:
- self._parent = wkref(par)
- else:
- self._parent = None
-
- def __getnewargs__(self):
- return self._toklist, self._name
-
- def __dir__(self):
- return dir(type(self)) + list(self.keys())
-
- @classmethod
- def from_dict(cls, other, name=None) -> "ParseResults":
- """
- Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
- name-value relations as results names. If an optional ``name`` argument is
- given, a nested ``ParseResults`` will be returned.
- """
-
- def is_iterable(obj):
- try:
- iter(obj)
- except Exception:
- return False
- else:
- return not isinstance(obj, str_type)
-
- ret = cls([])
- for k, v in other.items():
- if isinstance(v, Mapping):
- ret += cls.from_dict(v, name=k)
- else:
- ret += cls([v], name=k, asList=is_iterable(v))
- if name is not None:
- ret = cls([ret], name=name)
- return ret
-
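- # Illustrative sketch (not in the original source): from_dict round-trips
- # plain nested mappings through named results, e.g.
- #   ParseResults.from_dict({"a": {"b": "1"}}).as_dict() == {"a": {"b": "1"}}
-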
- asList = as_list
- asDict = as_dict
- getName = get_name
-
-
-MutableMapping.register(ParseResults)
-MutableSequence.register(ParseResults)
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py
deleted file mode 100644
index 84a0ef1..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/testing.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# testing.py
-
-from contextlib import contextmanager
-import typing
-
-from .core import (
- ParserElement,
- ParseException,
- Keyword,
- __diag__,
- __compat__,
-)
-
-
-class pyparsing_test:
- """
- namespace class for classes useful in writing unit tests
- """
-
- class reset_pyparsing_context:
- """
- Context manager to be used when writing unit tests that modify pyparsing config values:
- - packrat parsing
- - bounded recursion parsing
- - default whitespace characters.
- - default keyword characters
- - literal string auto-conversion class
- - __diag__ settings
-
- Example::
-
- with reset_pyparsing_context():
- # test that literals used to construct a grammar are automatically suppressed
- ParserElement.inlineLiteralsUsing(Suppress)
-
- term = Word(alphas) | Word(nums)
- group = Group('(' + term[...] + ')')
-
- # assert that the '()' characters are not included in the parsed tokens
- self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
-
- # after exiting context manager, literals are converted to Literal expressions again
- """
-
- def __init__(self):
- self._save_context = {}
-
- def save(self):
- self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
- self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
-
- self._save_context[
- "literal_string_class"
- ] = ParserElement._literalStringClass
-
- self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
-
- self._save_context["packrat_enabled"] = ParserElement._packratEnabled
- if ParserElement._packratEnabled:
- self._save_context[
- "packrat_cache_size"
- ] = ParserElement.packrat_cache.size
- else:
- self._save_context["packrat_cache_size"] = None
- self._save_context["packrat_parse"] = ParserElement._parse
- self._save_context[
- "recursion_enabled"
- ] = ParserElement._left_recursion_enabled
-
- self._save_context["__diag__"] = {
- name: getattr(__diag__, name) for name in __diag__._all_names
- }
-
- self._save_context["__compat__"] = {
- "collect_all_And_tokens": __compat__.collect_all_And_tokens
- }
-
- return self
-
- def restore(self):
- # reset pyparsing global state
- if (
- ParserElement.DEFAULT_WHITE_CHARS
- != self._save_context["default_whitespace"]
- ):
- ParserElement.set_default_whitespace_chars(
- self._save_context["default_whitespace"]
- )
-
- ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
-
- Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
- ParserElement.inlineLiteralsUsing(
- self._save_context["literal_string_class"]
- )
-
- for name, value in self._save_context["__diag__"].items():
- (__diag__.enable if value else __diag__.disable)(name)
-
- ParserElement._packratEnabled = False
- if self._save_context["packrat_enabled"]:
- ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
- else:
- ParserElement._parse = self._save_context["packrat_parse"]
- ParserElement._left_recursion_enabled = self._save_context[
- "recursion_enabled"
- ]
-
- __compat__.collect_all_And_tokens = self._save_context["__compat__"]["collect_all_And_tokens"]
-
- return self
-
- def copy(self):
- ret = type(self)()
- ret._save_context.update(self._save_context)
- return ret
-
- def __enter__(self):
- return self.save()
-
- def __exit__(self, *args):
- self.restore()
-
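- # Illustrative sketch (not in the original source): the save()/restore()
- # pair also works outside the with-statement form, e.g. as unittest
- # fixtures:
- #   def setUp(self): self.ctx = reset_pyparsing_context().save()
- #   def tearDown(self): self.ctx.restore()
-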
- class TestParseResultsAsserts:
- """
- A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
- """
-
- def assertParseResultsEquals(
- self, result, expected_list=None, expected_dict=None, msg=None
- ):
- """
- Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
- and compare any defined results names with an optional ``expected_dict``.
- """
- if expected_list is not None:
- self.assertEqual(expected_list, result.as_list(), msg=msg)
- if expected_dict is not None:
- self.assertEqual(expected_dict, result.as_dict(), msg=msg)
-
- def assertParseAndCheckList(
- self, expr, test_string, expected_list, msg=None, verbose=True
- ):
- """
- Convenience wrapper assert to test a parser element and input string, and assert that
- the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
- """
- result = expr.parse_string(test_string, parse_all=True)
- if verbose:
- print(result.dump())
- else:
- print(result.as_list())
- self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
-
- def assertParseAndCheckDict(
- self, expr, test_string, expected_dict, msg=None, verbose=True
- ):
- """
- Convenience wrapper assert to test a parser element and input string, and assert that
- the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
- """
- result = expr.parse_string(test_string, parse_all=True)
- if verbose:
- print(result.dump())
- else:
- print(result.as_list())
- self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
-
- def assertRunTestResults(
- self, run_tests_report, expected_parse_results=None, msg=None
- ):
- """
- Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
- list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
- with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
- Finally, asserts that the overall ``runTests()`` success value is ``True``.
-
- :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
- :param expected_parse_results: (optional) [tuple(str, list, dict, Exception)]
- """
- run_test_success, run_test_results = run_tests_report
-
- if expected_parse_results is not None:
- merged = [
- (*rpt, expected)
- for rpt, expected in zip(run_test_results, expected_parse_results)
- ]
- for test_string, result, expected in merged:
- # expected should be a tuple containing a list and/or a dict or an exception,
- # and optional failure message string
- # an empty tuple will skip any result validation
- fail_msg = next(
- (exp for exp in expected if isinstance(exp, str)), None
- )
- expected_exception = next(
- (
- exp
- for exp in expected
- if isinstance(exp, type) and issubclass(exp, Exception)
- ),
- None,
- )
- if expected_exception is not None:
- with self.assertRaises(
- expected_exception=expected_exception, msg=fail_msg or msg
- ):
- if isinstance(result, Exception):
- raise result
- else:
- expected_list = next(
- (exp for exp in expected if isinstance(exp, list)), None
- )
- expected_dict = next(
- (exp for exp in expected if isinstance(exp, dict)), None
- )
- if (expected_list, expected_dict) != (None, None):
- self.assertParseResultsEquals(
- result,
- expected_list=expected_list,
- expected_dict=expected_dict,
- msg=fail_msg or msg,
- )
- else:
- # warning here maybe?
- print("no validation for {!r}".format(test_string))
-
- # do this last, in case some specific test results can be reported instead
- self.assertTrue(
- run_test_success, msg=msg if msg is not None else "failed runTests"
- )
-
- @contextmanager
- def assertRaisesParseException(self, exc_type=ParseException, msg=None):
- with self.assertRaises(exc_type, msg=msg):
- yield
-
- @staticmethod
- def with_line_numbers(
- s: str,
- start_line: typing.Optional[int] = None,
- end_line: typing.Optional[int] = None,
- expand_tabs: bool = True,
- eol_mark: str = "|",
- mark_spaces: typing.Optional[str] = None,
- mark_control: typing.Optional[str] = None,
- ) -> str:
- """
- Helpful method for debugging a parser - renders a string with line and column numbers.
- (Line and column numbers are 1-based.)
-
- :param s: str - string to be printed with line and column numbers
- :param start_line: int - (optional) starting line number in s to print (default=1)
- :param end_line: int - (optional) ending line number in s to print (default=len(s))
- :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
- :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
- :param mark_spaces: str - (optional) special character to display in place of spaces
- :param mark_control: str - (optional) convert non-printing control characters to a placeholding
- character; valid values:
- - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
- - any single character string - replace control characters with given string
- - None (default) - string is displayed as-is
-
- :return: str - input string with leading line numbers and column number headers
- """
- if expand_tabs:
- s = s.expandtabs()
- if mark_control is not None:
- if mark_control == "unicode":
- tbl = str.maketrans(
- {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
- | {127: 0x2421}
- )
- eol_mark = ""
- else:
- tbl = str.maketrans(
- {c: mark_control for c in list(range(0, 32)) + [127]}
- )
- s = s.translate(tbl)
- if mark_spaces is not None and mark_spaces != " ":
- if mark_spaces == "unicode":
- tbl = str.maketrans({9: 0x2409, 32: 0x2423})
- s = s.translate(tbl)
- else:
- s = s.replace(" ", mark_spaces)
- if start_line is None:
- start_line = 1
- if end_line is None:
- end_line = len(s)
- end_line = min(end_line, len(s))
- start_line = min(max(1, start_line), end_line)
-
- if mark_control != "unicode":
- s_lines = s.splitlines()[start_line - 1 : end_line]
- else:
- s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
- if not s_lines:
- return ""
-
- lineno_width = len(str(end_line))
- max_line_len = max(len(line) for line in s_lines)
- lead = " " * (lineno_width + 1)
- if max_line_len >= 99:
- header0 = (
- lead
- + "".join(
- "{}{}".format(" " * 99, (i + 1) % 100)
- for i in range(max(max_line_len // 100, 1))
- )
- + "\n"
- )
- else:
- header0 = ""
- header1 = (
- header0
- + lead
- + "".join(
- " {}".format((i + 1) % 10)
- for i in range(-(-max_line_len // 10))
- )
- + "\n"
- )
- header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
- return (
- header1
- + header2
- + "\n".join(
- "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
- for i, line in enumerate(s_lines, start=start_line)
- )
- + "\n"
- )
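-
-# Illustrative sketch (not in the original source):
-#   print(pyparsing_test.with_line_numbers("abc\ndef"))
-# prints column-number header rows, then:
-#   1:abc|
-#   2:def|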
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/unicode.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/unicode.py
deleted file mode 100644
index 0652620..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/unicode.py
+++ /dev/null
@@ -1,352 +0,0 @@
-# unicode.py
-
-import sys
-from itertools import filterfalse
-from typing import List, Tuple, Union
-
-
-class _lazyclassproperty:
- def __init__(self, fn):
- self.fn = fn
- self.__doc__ = fn.__doc__
- self.__name__ = fn.__name__
-
- def __get__(self, obj, cls):
- if cls is None:
- cls = type(obj)
- if not hasattr(cls, "_intern") or any(
- cls._intern is getattr(superclass, "_intern", [])
- for superclass in cls.__mro__[1:]
- ):
- cls._intern = {}
- attrname = self.fn.__name__
- if attrname not in cls._intern:
- cls._intern[attrname] = self.fn(cls)
- return cls._intern[attrname]
-
-
-UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
-
-
-class unicode_set:
- """
- A set of Unicode characters, for language-specific strings for
- ``alphas``, ``nums``, ``alphanums``, and ``printables``.
- A unicode_set is defined by a list of ranges in the Unicode character
- set, in a class attribute ``_ranges``. Ranges can be specified using
- 2-tuples or a 1-tuple, such as::
-
- _ranges = [
- (0x0020, 0x007e),
- (0x00a0, 0x00ff),
- (0x0100,),
- ]
-
- Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
-
- A unicode set can also be defined using multiple inheritance of other unicode sets::
-
- class CJK(Chinese, Japanese, Korean):
- pass
- """
-
- _ranges: UnicodeRangeList = []
-
- @_lazyclassproperty
- def _chars_for_ranges(cls):
- ret = []
- for cc in cls.__mro__:
- if cc is unicode_set:
- break
- for rr in getattr(cc, "_ranges", ()):
- ret.extend(range(rr[0], rr[-1] + 1))
- return [chr(c) for c in sorted(set(ret))]
-
- @_lazyclassproperty
- def printables(cls):
- "all non-whitespace characters in this range"
- return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def alphas(cls):
- "all alphabetic characters in this range"
- return "".join(filter(str.isalpha, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def nums(cls):
- "all numeric digit characters in this range"
- return "".join(filter(str.isdigit, cls._chars_for_ranges))
-
- @_lazyclassproperty
- def alphanums(cls):
- "all alphanumeric characters in this range"
- return cls.alphas + cls.nums
-
- @_lazyclassproperty
- def identchars(cls):
- "all characters in this range that are valid identifier characters, plus underscore '_'"
- return "".join(
- sorted(
- set(
- "".join(filter(str.isidentifier, cls._chars_for_ranges))
- + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
- + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
- + "_"
- )
- )
- )
-
- @_lazyclassproperty
- def identbodychars(cls):
- """
- all characters in this range that are valid identifier body characters,
- plus the digits 0-9
- """
- return "".join(
- sorted(
- set(
- cls.identchars
- + "0123456789"
- + "".join(
- [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
- )
- )
- )
- )
-
-
-class pyparsing_unicode(unicode_set):
- """
- A namespace class for defining common language unicode_sets.
- """
-
- # fmt: off
-
- # define ranges in language character sets
- _ranges: UnicodeRangeList = [
- (0x0020, sys.maxunicode),
- ]
-
- class BasicMultilingualPlane(unicode_set):
- "Unicode set for the Basic Multilingual Plane"
- _ranges: UnicodeRangeList = [
- (0x0020, 0xFFFF),
- ]
-
- class Latin1(unicode_set):
- "Unicode set for Latin-1 Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0020, 0x007E),
- (0x00A0, 0x00FF),
- ]
-
- class LatinA(unicode_set):
- "Unicode set for Latin-A Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0100, 0x017F),
- ]
-
- class LatinB(unicode_set):
- "Unicode set for Latin-B Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0180, 0x024F),
- ]
-
- class Greek(unicode_set):
- "Unicode set for Greek Unicode Character Ranges"
- _ranges: UnicodeRangeList = [
- (0x0342, 0x0345),
- (0x0370, 0x0377),
- (0x037A, 0x037F),
- (0x0384, 0x038A),
- (0x038C,),
- (0x038E, 0x03A1),
- (0x03A3, 0x03E1),
- (0x03F0, 0x03FF),
- (0x1D26, 0x1D2A),
- (0x1D5E,),
- (0x1D60,),
- (0x1D66, 0x1D6A),
- (0x1F00, 0x1F15),
- (0x1F18, 0x1F1D),
- (0x1F20, 0x1F45),
- (0x1F48, 0x1F4D),
- (0x1F50, 0x1F57),
- (0x1F59,),
- (0x1F5B,),
- (0x1F5D,),
- (0x1F5F, 0x1F7D),
- (0x1F80, 0x1FB4),
- (0x1FB6, 0x1FC4),
- (0x1FC6, 0x1FD3),
- (0x1FD6, 0x1FDB),
- (0x1FDD, 0x1FEF),
- (0x1FF2, 0x1FF4),
- (0x1FF6, 0x1FFE),
- (0x2129,),
- (0x2719, 0x271A),
- (0xAB65,),
- (0x10140, 0x1018D),
- (0x101A0,),
- (0x1D200, 0x1D245),
- (0x1F7A1, 0x1F7A7),
- ]
-
- class Cyrillic(unicode_set):
- "Unicode set for Cyrillic Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0400, 0x052F),
- (0x1C80, 0x1C88),
- (0x1D2B,),
- (0x1D78,),
- (0x2DE0, 0x2DFF),
- (0xA640, 0xA672),
- (0xA674, 0xA69F),
- (0xFE2E, 0xFE2F),
- ]
-
- class Chinese(unicode_set):
- "Unicode set for Chinese Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x2E80, 0x2E99),
- (0x2E9B, 0x2EF3),
- (0x31C0, 0x31E3),
- (0x3400, 0x4DB5),
- (0x4E00, 0x9FEF),
- (0xA700, 0xA707),
- (0xF900, 0xFA6D),
- (0xFA70, 0xFAD9),
- (0x16FE2, 0x16FE3),
- (0x1F210, 0x1F212),
- (0x1F214, 0x1F23B),
- (0x1F240, 0x1F248),
- (0x20000, 0x2A6D6),
- (0x2A700, 0x2B734),
- (0x2B740, 0x2B81D),
- (0x2B820, 0x2CEA1),
- (0x2CEB0, 0x2EBE0),
- (0x2F800, 0x2FA1D),
- ]
-
- class Japanese(unicode_set):
- "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
- _ranges: UnicodeRangeList = []
-
- class Kanji(unicode_set):
- "Unicode set for Kanji Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x4E00, 0x9FBF),
- (0x3000, 0x303F),
- ]
-
- class Hiragana(unicode_set):
- "Unicode set for Hiragana Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x3041, 0x3096),
- (0x3099, 0x30A0),
- (0x30FC,),
- (0xFF70,),
- (0x1B001,),
- (0x1B150, 0x1B152),
- (0x1F200,),
- ]
-
- class Katakana(unicode_set):
- "Unicode set for Katakana Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x3099, 0x309C),
- (0x30A0, 0x30FF),
- (0x31F0, 0x31FF),
- (0x32D0, 0x32FE),
- (0xFF65, 0xFF9F),
- (0x1B000,),
- (0x1B164, 0x1B167),
- (0x1F201, 0x1F202),
- (0x1F213,),
- ]
-
- class Hangul(unicode_set):
- "Unicode set for Hangul (Korean) Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x1100, 0x11FF),
- (0x302E, 0x302F),
- (0x3131, 0x318E),
- (0x3200, 0x321C),
- (0x3260, 0x327B),
- (0x327E,),
- (0xA960, 0xA97C),
- (0xAC00, 0xD7A3),
- (0xD7B0, 0xD7C6),
- (0xD7CB, 0xD7FB),
- (0xFFA0, 0xFFBE),
- (0xFFC2, 0xFFC7),
- (0xFFCA, 0xFFCF),
- (0xFFD2, 0xFFD7),
- (0xFFDA, 0xFFDC),
- ]
-
- Korean = Hangul
-
- class CJK(Chinese, Japanese, Hangul):
- "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
-
- class Thai(unicode_set):
- "Unicode set for Thai Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0E01, 0x0E3A),
- (0x0E3F, 0x0E5B)
- ]
-
- class Arabic(unicode_set):
- "Unicode set for Arabic Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0600, 0x061B),
- (0x061E, 0x06FF),
- (0x0700, 0x077F),
- ]
-
- class Hebrew(unicode_set):
- "Unicode set for Hebrew Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0591, 0x05C7),
- (0x05D0, 0x05EA),
- (0x05EF, 0x05F4),
- (0xFB1D, 0xFB36),
- (0xFB38, 0xFB3C),
- (0xFB3E,),
- (0xFB40, 0xFB41),
- (0xFB43, 0xFB44),
- (0xFB46, 0xFB4F),
- ]
-
- class Devanagari(unicode_set):
- "Unicode set for Devanagari Unicode Character Range"
- _ranges: UnicodeRangeList = [
- (0x0900, 0x097F),
- (0xA8E0, 0xA8FF)
- ]
-
- # fmt: on
-
-
-pyparsing_unicode.Japanese._ranges = (
- pyparsing_unicode.Japanese.Kanji._ranges
- + pyparsing_unicode.Japanese.Hiragana._ranges
- + pyparsing_unicode.Japanese.Katakana._ranges
-)
-
-pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
-
-# add language identifiers using language Unicode
-pyparsing_unicode.العربية = pyparsing_unicode.Arabic
-pyparsing_unicode.中文 = pyparsing_unicode.Chinese
-pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
-pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
-pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
-pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
-pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
-pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
-pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
-pyparsing_unicode.한국어 = pyparsing_unicode.Korean
-pyparsing_unicode.ไทย = pyparsing_unicode.Thai
-pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/util.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/util.py
deleted file mode 100644
index 34ce092..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/pyparsing/util.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# util.py
-import warnings
-import types
-import collections
-import itertools
-from functools import lru_cache
-from typing import List, Union, Iterable
-
-_bslash = chr(92)
-
-
-class __config_flags:
- """Internal class for defining compatibility and debugging flags"""
-
- _all_names: List[str] = []
- _fixed_names: List[str] = []
- _type_desc = "configuration"
-
- @classmethod
- def _set(cls, dname, value):
- if dname in cls._fixed_names:
- warnings.warn(
- "{}.{} {} is {} and cannot be overridden".format(
- cls.__name__,
- dname,
- cls._type_desc,
- str(getattr(cls, dname)).upper(),
- )
- )
- return
- if dname in cls._all_names:
- setattr(cls, dname, value)
- else:
- raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
-
- enable = classmethod(lambda cls, name: cls._set(name, True))
- disable = classmethod(lambda cls, name: cls._set(name, False))
-
-
-@lru_cache(maxsize=128)
-def col(loc: int, strg: str) -> int:
- """
- Returns current column within a string, counting newlines as line separators.
- The first column is number 1.
-
- Note: the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See
- :class:`ParserElement.parseString` for more
- information on parsing strings containing ``<TAB>`` s, and suggested
- methods to maintain a consistent view of the parsed string, the parse
- location, and line and column positions within the parsed string.
- """
- s = strg
- return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
-
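-# Illustrative sketch (not in the original source):
-#   col(4, "ab\ncd") -> 2   # loc 4 ('d') is in column 2 of its line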
-
-@lru_cache(maxsize=128)
-def lineno(loc: int, strg: str) -> int:
- """Returns current line number within a string, counting newlines as line separators.
- The first line is number 1.
-
- Note - the default parsing behavior is to expand tabs in the input string
- before starting the parsing process. See :class:`ParserElement.parseString`
- for more information on parsing strings containing ``<TAB>`` s, and
- suggested methods to maintain a consistent view of the parsed string, the
- parse location, and line and column positions within the parsed string.
- """
- return strg.count("\n", 0, loc) + 1
-
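-# Illustrative sketch (not in the original source):
-#   lineno(4, "ab\ncd") -> 2   # loc 4 ('d') is on line 2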
-
-@lru_cache(maxsize=128)
-def line(loc: int, strg: str) -> str:
- """
- Returns the line of text containing loc within a string, counting newlines as line separators.
- """
- last_cr = strg.rfind("\n", 0, loc)
- next_cr = strg.find("\n", loc)
- return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
-
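-# Illustrative sketch (not in the original source):
-#   line(4, "ab\ncd") -> 'cd'   # the full line of text containing loc 4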
-
-class _UnboundedCache:
- def __init__(self):
- cache = {}
- cache_get = cache.get
- self.not_in_cache = not_in_cache = object()
-
- def get(_, key):
- return cache_get(key, not_in_cache)
-
- def set_(_, key, value):
- cache[key] = value
-
- def clear(_):
- cache.clear()
-
- self.size = None
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set_, self)
- self.clear = types.MethodType(clear, self)
-
-
-class _FifoCache:
- def __init__(self, size):
- self.not_in_cache = not_in_cache = object()
- cache = collections.OrderedDict()
- cache_get = cache.get
-
- def get(_, key):
- return cache_get(key, not_in_cache)
-
- def set_(_, key, value):
- cache[key] = value
- while len(cache) > size:
- cache.popitem(last=False)
-
- def clear(_):
- cache.clear()
-
- self.size = size
- self.get = types.MethodType(get, self)
- self.set = types.MethodType(set_, self)
- self.clear = types.MethodType(clear, self)
-
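-# Illustrative sketch (not in the original source): entries are evicted
-# first-in first-out once the cache grows past its size, e.g.
-#   c = _FifoCache(size=2)
-#   c.set("a", 1); c.set("b", 2); c.set("c", 3)
-#   c.get("a") is c.not_in_cache   # -> True; "a" was evicted first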
-
-class LRUMemo:
- """
- A memoizing mapping that retains `capacity` deleted items
-
- The memo tracks retained items by their access order; once `capacity` items
- are retained, the least recently used item is discarded.
- """
-
- def __init__(self, capacity):
- self._capacity = capacity
- self._active = {}
- self._memory = collections.OrderedDict()
-
- def __getitem__(self, key):
- try:
- return self._active[key]
- except KeyError:
- self._memory.move_to_end(key)
- return self._memory[key]
-
- def __setitem__(self, key, value):
- self._memory.pop(key, None)
- self._active[key] = value
-
- def __delitem__(self, key):
- try:
- value = self._active.pop(key)
- except KeyError:
- pass
- else:
- while len(self._memory) >= self._capacity:
- self._memory.popitem(last=False)
- self._memory[key] = value
-
- def clear(self):
- self._active.clear()
- self._memory.clear()
-
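-# Illustrative sketch (not in the original source):
-#   memo = LRUMemo(capacity=2)
-#   memo["k"] = 1
-#   del memo["k"]   # "k" moves into the retained LRU memory
-#   memo["k"]       # -> 1, still recallable until evicted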
-
-class UnboundedMemo(dict):
- """
- A memoizing mapping that retains all deleted items
- """
-
- def __delitem__(self, key):
- pass
-
-
-def _escape_regex_range_chars(s: str) -> str:
- # escape these chars: ^-[]
- for c in r"\^-[]":
- s = s.replace(c, _bslash + c)
- s = s.replace("\n", r"\n")
- s = s.replace("\t", r"\t")
- return str(s)
-
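-# Illustrative sketch (not in the original source):
-#   _escape_regex_range_chars("a-z") -> 'a\\-z'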
-
-def _collapse_string_to_ranges(
- s: Union[str, Iterable[str]], re_escape: bool = True
-) -> str:
- def is_consecutive(c):
- c_int = ord(c)
- is_consecutive.prev, prev = c_int, is_consecutive.prev
- if c_int - prev > 1:
- is_consecutive.value = next(is_consecutive.counter)
- return is_consecutive.value
-
- is_consecutive.prev = 0
- is_consecutive.counter = itertools.count()
- is_consecutive.value = -1
-
- def escape_re_range_char(c):
- return "\\" + c if c in r"\^-][" else c
-
- def no_escape_re_range_char(c):
- return c
-
- if not re_escape:
- escape_re_range_char = no_escape_re_range_char
-
- ret = []
- s = "".join(sorted(set(s)))
- if len(s) > 3:
- for _, chars in itertools.groupby(s, key=is_consecutive):
- first = last = next(chars)
- last = collections.deque(
- itertools.chain(iter([last]), chars), maxlen=1
- ).pop()
- if first == last:
- ret.append(escape_re_range_char(first))
- else:
- sep = "" if ord(last) == ord(first) + 1 else "-"
- ret.append(
- "{}{}{}".format(
- escape_re_range_char(first), sep, escape_re_range_char(last)
- )
- )
- else:
- ret = [escape_re_range_char(c) for c in s]
-
- return "".join(ret)
-
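-# Illustrative sketch (not in the original source): runs of consecutive
-# characters collapse into regex-style ranges, e.g.
-#   _collapse_string_to_ranges("abcdxy") -> 'a-dxy'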
-
-def _flatten(ll: list) -> list:
- ret = []
- for i in ll:
- if isinstance(i, list):
- ret.extend(_flatten(i))
- else:
- ret.append(i)
- return ret
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__init__.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__init__.py
deleted file mode 100644
index 4c6ec97..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: MIT
-# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
-# Licensed to PSF under a Contributor Agreement.
-
-__all__ = ("loads", "load", "TOMLDecodeError")
-__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
-
-from ._parser import TOMLDecodeError, load, loads
-
-# Pretend this exception was created here.
-TOMLDecodeError.__module__ = __name__
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/__init__.cpython-311.pyc
deleted file mode 100644
index 1a956bb..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/__init__.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_parser.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_parser.cpython-311.pyc
deleted file mode 100644
index 543507b..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_parser.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_re.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_re.cpython-311.pyc
deleted file mode 100644
index 6066cbf..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_re.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_types.cpython-311.pyc b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_types.cpython-311.pyc
deleted file mode 100644
index 7da4524..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/__pycache__/_types.cpython-311.pyc
+++ /dev/null
Binary files differ
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_parser.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_parser.py
deleted file mode 100644
index f1bb0aa..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_parser.py
+++ /dev/null
@@ -1,691 +0,0 @@
-# SPDX-License-Identifier: MIT
-# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
-# Licensed to PSF under a Contributor Agreement.
-
-from __future__ import annotations
-
-from collections.abc import Iterable
-import string
-from types import MappingProxyType
-from typing import Any, BinaryIO, NamedTuple
-
-from ._re import (
- RE_DATETIME,
- RE_LOCALTIME,
- RE_NUMBER,
- match_to_datetime,
- match_to_localtime,
- match_to_number,
-)
-from ._types import Key, ParseFloat, Pos
-
-ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
-
-# Neither of these sets include quotation mark or backslash. They are
-# currently handled as separate cases in the parser functions.
-ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
-ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
-
-ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
-ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS
-
-ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
-
-TOML_WS = frozenset(" \t")
-TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
-BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
-KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
-HEXDIGIT_CHARS = frozenset(string.hexdigits)
-
-BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
- {
- "\\b": "\u0008", # backspace
- "\\t": "\u0009", # tab
- "\\n": "\u000A", # linefeed
- "\\f": "\u000C", # form feed
- "\\r": "\u000D", # carriage return
- '\\"': "\u0022", # quote
- "\\\\": "\u005C", # backslash
- }
-)
-
-
-class TOMLDecodeError(ValueError):
- """An error raised if a document is not valid TOML."""
-
-
-def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
- """Parse TOML from a binary file object."""
- b = __fp.read()
- try:
- s = b.decode()
- except AttributeError:
- raise TypeError(
- "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
- ) from None
- return loads(s, parse_float=parse_float)
-
-
-def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
- """Parse TOML from a string."""
-
- # The spec allows converting "\r\n" to "\n", even in string
- # literals. Let's do so to simplify parsing.
- src = __s.replace("\r\n", "\n")
- pos = 0
- out = Output(NestedDict(), Flags())
- header: Key = ()
- parse_float = make_safe_parse_float(parse_float)
-
- # Parse one statement at a time
- # (typically means one line in TOML source)
- while True:
- # 1. Skip line leading whitespace
- pos = skip_chars(src, pos, TOML_WS)
-
- # 2. Parse rules. Expect one of the following:
- # - end of file
- # - end of line
- # - comment
- # - key/value pair
- # - append dict to list (and move to its namespace)
- # - create dict (and move to its namespace)
- # Skip trailing whitespace when applicable.
- try:
- char = src[pos]
- except IndexError:
- break
- if char == "\n":
- pos += 1
- continue
- if char in KEY_INITIAL_CHARS:
- pos = key_value_rule(src, pos, out, header, parse_float)
- pos = skip_chars(src, pos, TOML_WS)
- elif char == "[":
- try:
- second_char: str | None = src[pos + 1]
- except IndexError:
- second_char = None
- out.flags.finalize_pending()
- if second_char == "[":
- pos, header = create_list_rule(src, pos, out)
- else:
- pos, header = create_dict_rule(src, pos, out)
- pos = skip_chars(src, pos, TOML_WS)
- elif char != "#":
- raise suffixed_err(src, pos, "Invalid statement")
-
- # 3. Skip comment
- pos = skip_comment(src, pos)
-
- # 4. Expect end of line or end of file
- try:
- char = src[pos]
- except IndexError:
- break
- if char != "\n":
- raise suffixed_err(
- src, pos, "Expected newline or end of document after a statement"
- )
- pos += 1
-
- return out.data.dict
-
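-# Illustrative sketch (not in the original source):
-#   loads('x = 1\n[table]\ny = "z"') -> {'x': 1, 'table': {'y': 'z'}}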
-
-class Flags:
- """Flags that map to parsed keys/namespaces."""
-
- # Marks an immutable namespace (inline array or inline table).
- FROZEN = 0
- # Marks a nest that has been explicitly created and can no longer
- # be opened using the "[table]" syntax.
- EXPLICIT_NEST = 1
-
- def __init__(self) -> None:
- self._flags: dict[str, dict] = {}
- self._pending_flags: set[tuple[Key, int]] = set()
-
- def add_pending(self, key: Key, flag: int) -> None:
- self._pending_flags.add((key, flag))
-
- def finalize_pending(self) -> None:
- for key, flag in self._pending_flags:
- self.set(key, flag, recursive=False)
- self._pending_flags.clear()
-
- def unset_all(self, key: Key) -> None:
- cont = self._flags
- for k in key[:-1]:
- if k not in cont:
- return
- cont = cont[k]["nested"]
- cont.pop(key[-1], None)
-
- def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
- cont = self._flags
- key_parent, key_stem = key[:-1], key[-1]
- for k in key_parent:
- if k not in cont:
- cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
- cont = cont[k]["nested"]
- if key_stem not in cont:
- cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
- cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
-
- def is_(self, key: Key, flag: int) -> bool:
- if not key:
- return False # document root has no flags
- cont = self._flags
- for k in key[:-1]:
- if k not in cont:
- return False
- inner_cont = cont[k]
- if flag in inner_cont["recursive_flags"]:
- return True
- cont = inner_cont["nested"]
- key_stem = key[-1]
- if key_stem in cont:
- cont = cont[key_stem]
- return flag in cont["flags"] or flag in cont["recursive_flags"]
- return False
-
-
-class NestedDict:
- def __init__(self) -> None:
- # The parsed content of the TOML document
- self.dict: dict[str, Any] = {}
-
- def get_or_create_nest(
- self,
- key: Key,
- *,
- access_lists: bool = True,
- ) -> dict:
- cont: Any = self.dict
- for k in key:
- if k not in cont:
- cont[k] = {}
- cont = cont[k]
- if access_lists and isinstance(cont, list):
- cont = cont[-1]
- if not isinstance(cont, dict):
- raise KeyError("There is no nest behind this key")
- return cont
-
- def append_nest_to_list(self, key: Key) -> None:
- cont = self.get_or_create_nest(key[:-1])
- last_key = key[-1]
- if last_key in cont:
- list_ = cont[last_key]
- if not isinstance(list_, list):
- raise KeyError("An object other than list found behind this key")
- list_.append({})
- else:
- cont[last_key] = [{}]
-
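-# Illustrative sketch (not in the original source): appending twice under the
-# same key builds a TOML array-of-tables, e.g.
-#   d = NestedDict()
-#   d.append_nest_to_list(("a", "b")); d.append_nest_to_list(("a", "b"))
-#   d.dict == {"a": {"b": [{}, {}]}}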
-
-class Output(NamedTuple):
- data: NestedDict
- flags: Flags
-
-
-def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
- try:
- while src[pos] in chars:
- pos += 1
- except IndexError:
- pass
- return pos
-
-
-def skip_until(
- src: str,
- pos: Pos,
- expect: str,
- *,
- error_on: frozenset[str],
- error_on_eof: bool,
-) -> Pos:
- try:
- new_pos = src.index(expect, pos)
- except ValueError:
- new_pos = len(src)
- if error_on_eof:
- raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None
-
- if not error_on.isdisjoint(src[pos:new_pos]):
- while src[pos] not in error_on:
- pos += 1
- raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
- return new_pos
-
-
-def skip_comment(src: str, pos: Pos) -> Pos:
- try:
- char: str | None = src[pos]
- except IndexError:
- char = None
- if char == "#":
- return skip_until(
- src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
- )
- return pos
-
-
-def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
- while True:
- pos_before_skip = pos
- pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
- pos = skip_comment(src, pos)
- if pos == pos_before_skip:
- return pos
-
-
-def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
- pos += 1 # Skip "["
- pos = skip_chars(src, pos, TOML_WS)
- pos, key = parse_key(src, pos)
-
- if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
- raise suffixed_err(src, pos, f"Cannot declare {key} twice")
- out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
- try:
- out.data.get_or_create_nest(key)
- except KeyError:
- raise suffixed_err(src, pos, "Cannot overwrite a value") from None
-
- if not src.startswith("]", pos):
- raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration")
- return pos + 1, key
-
-
-def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
- pos += 2 # Skip "[["
- pos = skip_chars(src, pos, TOML_WS)
- pos, key = parse_key(src, pos)
-
- if out.flags.is_(key, Flags.FROZEN):
- raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
- # Free the namespace now that it points to another empty list item...
- out.flags.unset_all(key)
- # ...but this key precisely is still prohibited from table declaration
- out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
- try:
- out.data.append_nest_to_list(key)
- except KeyError:
- raise suffixed_err(src, pos, "Cannot overwrite a value") from None
-
- if not src.startswith("]]", pos):
- raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration")
- return pos + 2, key
-
-
-def key_value_rule(
- src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
-) -> Pos:
- pos, key, value = parse_key_value_pair(src, pos, parse_float)
- key_parent, key_stem = key[:-1], key[-1]
- abs_key_parent = header + key_parent
-
- relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
- for cont_key in relative_path_cont_keys:
- # Check that dotted key syntax does not redefine an existing table
- if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
- raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}")
- # Containers in the relative path can't be opened with the table syntax or
- # dotted key/value syntax in following table sections.
- out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)
-
- if out.flags.is_(abs_key_parent, Flags.FROZEN):
- raise suffixed_err(
- src, pos, f"Cannot mutate immutable namespace {abs_key_parent}"
- )
-
- try:
- nest = out.data.get_or_create_nest(abs_key_parent)
- except KeyError:
- raise suffixed_err(src, pos, "Cannot overwrite a value") from None
- if key_stem in nest:
- raise suffixed_err(src, pos, "Cannot overwrite a value")
- # Mark inline table and array namespaces recursively immutable
- if isinstance(value, (dict, list)):
- out.flags.set(header + key, Flags.FROZEN, recursive=True)
- nest[key_stem] = value
- return pos
-
-
-def parse_key_value_pair(
- src: str, pos: Pos, parse_float: ParseFloat
-) -> tuple[Pos, Key, Any]:
- pos, key = parse_key(src, pos)
- try:
- char: str | None = src[pos]
- except IndexError:
- char = None
- if char != "=":
- raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair")
- pos += 1
- pos = skip_chars(src, pos, TOML_WS)
- pos, value = parse_value(src, pos, parse_float)
- return pos, key, value
-
-
-def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
- pos, key_part = parse_key_part(src, pos)
- key: Key = (key_part,)
- pos = skip_chars(src, pos, TOML_WS)
- while True:
- try:
- char: str | None = src[pos]
- except IndexError:
- char = None
- if char != ".":
- return pos, key
- pos += 1
- pos = skip_chars(src, pos, TOML_WS)
- pos, key_part = parse_key_part(src, pos)
- key += (key_part,)
- pos = skip_chars(src, pos, TOML_WS)
-
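-# Illustrative sketch (not in the original source): dotted keys parse into
-# key tuples, e.g.
-#   parse_key('a.b = 1', 0) -> (4, ('a', 'b'))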
-
-def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
- try:
- char: str | None = src[pos]
- except IndexError:
- char = None
- if char in BARE_KEY_CHARS:
- start_pos = pos
- pos = skip_chars(src, pos, BARE_KEY_CHARS)
- return pos, src[start_pos:pos]
- if char == "'":
- return parse_literal_str(src, pos)
- if char == '"':
- return parse_one_line_basic_str(src, pos)
- raise suffixed_err(src, pos, "Invalid initial character for a key part")
-
-
-def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
- pos += 1
- return parse_basic_str(src, pos, multiline=False)
-
-
-def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]:
- pos += 1
- array: list = []
-
- pos = skip_comments_and_array_ws(src, pos)
- if src.startswith("]", pos):
- return pos + 1, array
- while True:
- pos, val = parse_value(src, pos, parse_float)
- array.append(val)
- pos = skip_comments_and_array_ws(src, pos)
-
- c = src[pos : pos + 1]
- if c == "]":
- return pos + 1, array
- if c != ",":
- raise suffixed_err(src, pos, "Unclosed array")
- pos += 1
-
- pos = skip_comments_and_array_ws(src, pos)
- if src.startswith("]", pos):
- return pos + 1, array
-
-
-def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]:
- pos += 1
- nested_dict = NestedDict()
- flags = Flags()
-
- pos = skip_chars(src, pos, TOML_WS)
- if src.startswith("}", pos):
- return pos + 1, nested_dict.dict
- while True:
- pos, key, value = parse_key_value_pair(src, pos, parse_float)
- key_parent, key_stem = key[:-1], key[-1]
- if flags.is_(key, Flags.FROZEN):
- raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
- try:
- nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
- except KeyError:
- raise suffixed_err(src, pos, "Cannot overwrite a value") from None
- if key_stem in nest:
- raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
- nest[key_stem] = value
- pos = skip_chars(src, pos, TOML_WS)
- c = src[pos : pos + 1]
- if c == "}":
- return pos + 1, nested_dict.dict
- if c != ",":
- raise suffixed_err(src, pos, "Unclosed inline table")
- if isinstance(value, (dict, list)):
- flags.set(key, Flags.FROZEN, recursive=True)
- pos += 1
- pos = skip_chars(src, pos, TOML_WS)
-
-
-def parse_basic_str_escape(
- src: str, pos: Pos, *, multiline: bool = False
-) -> tuple[Pos, str]:
- escape_id = src[pos : pos + 2]
- pos += 2
- if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
- # Skip whitespace until next non-whitespace character or end of
- # the doc. Error if non-whitespace is found before newline.
- if escape_id != "\\\n":
- pos = skip_chars(src, pos, TOML_WS)
- try:
- char = src[pos]
- except IndexError:
- return pos, ""
- if char != "\n":
- raise suffixed_err(src, pos, "Unescaped '\\' in a string")
- pos += 1
- pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
- return pos, ""
- if escape_id == "\\u":
- return parse_hex_char(src, pos, 4)
- if escape_id == "\\U":
- return parse_hex_char(src, pos, 8)
- try:
- return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
- except KeyError:
- raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None
-
-
-def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
- return parse_basic_str_escape(src, pos, multiline=True)
-
-
-def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
- hex_str = src[pos : pos + hex_len]
- if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
- raise suffixed_err(src, pos, "Invalid hex value")
- pos += hex_len
- hex_int = int(hex_str, 16)
- if not is_unicode_scalar_value(hex_int):
- raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
- return pos, chr(hex_int)
-
-
-def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
- pos += 1 # Skip starting apostrophe
- start_pos = pos
- pos = skip_until(
- src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
- )
- return pos + 1, src[start_pos:pos] # Skip ending apostrophe
-
-
-def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
- pos += 3
- if src.startswith("\n", pos):
- pos += 1
-
- if literal:
- delim = "'"
- end_pos = skip_until(
- src,
- pos,
- "'''",
- error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
- error_on_eof=True,
- )
- result = src[pos:end_pos]
- pos = end_pos + 3
- else:
- delim = '"'
- pos, result = parse_basic_str(src, pos, multiline=True)
-
- # Add at maximum two extra apostrophes/quotes if the end sequence
- # is 4 or 5 chars long instead of just 3.
- if not src.startswith(delim, pos):
- return pos, result
- pos += 1
- if not src.startswith(delim, pos):
- return pos, result + delim
- pos += 1
- return pos, result + (delim * 2)
-
-
-def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
- if multiline:
- error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
- parse_escapes = parse_basic_str_escape_multiline
- else:
- error_on = ILLEGAL_BASIC_STR_CHARS
- parse_escapes = parse_basic_str_escape
- result = ""
- start_pos = pos
- while True:
- try:
- char = src[pos]
- except IndexError:
- raise suffixed_err(src, pos, "Unterminated string") from None
- if char == '"':
- if not multiline:
- return pos + 1, result + src[start_pos:pos]
- if src.startswith('"""', pos):
- return pos + 3, result + src[start_pos:pos]
- pos += 1
- continue
- if char == "\\":
- result += src[start_pos:pos]
- pos, parsed_escape = parse_escapes(src, pos)
- result += parsed_escape
- start_pos = pos
- continue
- if char in error_on:
- raise suffixed_err(src, pos, f"Illegal character {char!r}")
- pos += 1
-
-
-def parse_value( # noqa: C901
- src: str, pos: Pos, parse_float: ParseFloat
-) -> tuple[Pos, Any]:
- try:
- char: str | None = src[pos]
- except IndexError:
- char = None
-
- # IMPORTANT: order conditions based on speed of checking and likelihood
-
- # Basic strings
- if char == '"':
- if src.startswith('"""', pos):
- return parse_multiline_str(src, pos, literal=False)
- return parse_one_line_basic_str(src, pos)
-
- # Literal strings
- if char == "'":
- if src.startswith("'''", pos):
- return parse_multiline_str(src, pos, literal=True)
- return parse_literal_str(src, pos)
-
- # Booleans
- if char == "t":
- if src.startswith("true", pos):
- return pos + 4, True
- if char == "f":
- if src.startswith("false", pos):
- return pos + 5, False
-
- # Arrays
- if char == "[":
- return parse_array(src, pos, parse_float)
-
- # Inline tables
- if char == "{":
- return parse_inline_table(src, pos, parse_float)
-
- # Dates and times
- datetime_match = RE_DATETIME.match(src, pos)
- if datetime_match:
- try:
- datetime_obj = match_to_datetime(datetime_match)
- except ValueError as e:
- raise suffixed_err(src, pos, "Invalid date or datetime") from e
- return datetime_match.end(), datetime_obj
- localtime_match = RE_LOCALTIME.match(src, pos)
- if localtime_match:
- return localtime_match.end(), match_to_localtime(localtime_match)
-
- # Integers and "normal" floats.
-    # The regex will greedily match any type starting with a decimal
-    # char, so it needs to be located after handling of dates and times.
- number_match = RE_NUMBER.match(src, pos)
- if number_match:
- return number_match.end(), match_to_number(number_match, parse_float)
-
- # Special floats
- first_three = src[pos : pos + 3]
- if first_three in {"inf", "nan"}:
- return pos + 3, parse_float(first_three)
- first_four = src[pos : pos + 4]
- if first_four in {"-inf", "+inf", "-nan", "+nan"}:
- return pos + 4, parse_float(first_four)
-
- raise suffixed_err(src, pos, "Invalid value")
-
-
-def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
-    """Return a `TOMLDecodeError` whose error message is suffixed with
-    coordinates in the source."""
-
- def coord_repr(src: str, pos: Pos) -> str:
- if pos >= len(src):
- return "end of document"
- line = src.count("\n", 0, pos) + 1
- if line == 1:
- column = pos + 1
- else:
- column = pos - src.rindex("\n", 0, pos)
- return f"line {line}, column {column}"
-
- return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
-
-
-def is_unicode_scalar_value(codepoint: int) -> bool:
- return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
-
-
-def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
- """A decorator to make `parse_float` safe.
-
- `parse_float` must not return dicts or lists, because these types
- would be mixed with parsed TOML tables and arrays, thus confusing
- the parser. The returned decorated callable raises `ValueError`
- instead of returning illegal types.
- """
- # The default `float` callable never returns illegal types. Optimize it.
- if parse_float is float: # type: ignore[comparison-overlap]
- return float
-
- def safe_parse_float(float_str: str) -> Any:
- float_value = parse_float(float_str)
- if isinstance(float_value, (dict, list)):
- raise ValueError("parse_float must not return dicts or lists")
- return float_value
-
- return safe_parse_float
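
# Illustrative sketch, not part of the deleted file: the guard above in
# action through tomli's public entry point, tomli.loads(s, *, parse_float=...).
# A custom parse_float must not return dicts or lists; the wrapper raises
# ValueError for those, since they would be indistinguishable from parsed
# TOML tables and arrays.
from decimal import Decimal

import tomli

data = tomli.loads("pi = 3.14159", parse_float=Decimal)
assert data["pi"] == Decimal("3.14159")
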
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_re.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_re.py
deleted file mode 100644
index 994bb74..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_re.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# SPDX-License-Identifier: MIT
-# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
-# Licensed to PSF under a Contributor Agreement.
-
-from __future__ import annotations
-
-from datetime import date, datetime, time, timedelta, timezone, tzinfo
-from functools import lru_cache
-import re
-from typing import Any
-
-from ._types import ParseFloat
-
-# E.g.
-# - 00:32:00.999999
-# - 00:32:00
-_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
-
-RE_NUMBER = re.compile(
- r"""
-0
-(?:
- x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
- |
- b[01](?:_?[01])* # bin
- |
- o[0-7](?:_?[0-7])* # oct
-)
-|
-[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
-(?P<floatpart>
- (?:\.[0-9](?:_?[0-9])*)? # optional fractional part
- (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
-)
-""",
- flags=re.VERBOSE,
-)
-RE_LOCALTIME = re.compile(_TIME_RE_STR)
-RE_DATETIME = re.compile(
- rf"""
-([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
-(?:
- [Tt ]
- {_TIME_RE_STR}
- (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
-)?
-""",
- flags=re.VERBOSE,
-)
-
-
-def match_to_datetime(match: re.Match) -> datetime | date:
- """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
-
- Raises ValueError if the match does not correspond to a valid date
- or datetime.
- """
- (
- year_str,
- month_str,
- day_str,
- hour_str,
- minute_str,
- sec_str,
- micros_str,
- zulu_time,
- offset_sign_str,
- offset_hour_str,
- offset_minute_str,
- ) = match.groups()
- year, month, day = int(year_str), int(month_str), int(day_str)
- if hour_str is None:
- return date(year, month, day)
- hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
- micros = int(micros_str.ljust(6, "0")) if micros_str else 0
- if offset_sign_str:
- tz: tzinfo | None = cached_tz(
- offset_hour_str, offset_minute_str, offset_sign_str
- )
- elif zulu_time:
- tz = timezone.utc
- else: # local date-time
- tz = None
- return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
-
-
-@lru_cache(maxsize=None)
-def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
- sign = 1 if sign_str == "+" else -1
- return timezone(
- timedelta(
- hours=sign * int(hour_str),
- minutes=sign * int(minute_str),
- )
- )
-
-
-def match_to_localtime(match: re.Match) -> time:
- hour_str, minute_str, sec_str, micros_str = match.groups()
- micros = int(micros_str.ljust(6, "0")) if micros_str else 0
- return time(int(hour_str), int(minute_str), int(sec_str), micros)
-
-
-def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
- if match.group("floatpart"):
- return parse_float(match.group())
- return int(match.group(), 0)
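
# Illustrative sketch, not part of the deleted file: exercising the helpers
# above via tomli's private module layout (an implementation detail, so the
# import path is an assumption).
from tomli._re import RE_DATETIME, match_to_datetime

m = RE_DATETIME.match("1988-10-27T20:30:00Z")
assert m is not None
print(match_to_datetime(m))  # 1988-10-27 20:30:00+00:00

# match_to_number relies on int(s, 0) to auto-detect the 0x/0o/0b prefixes
# matched by RE_NUMBER; anything with a "floatpart" group goes to parse_float.
assert int("0xDEAD_BEEF", 0) == 3735928559
assert int("0o777", 0) == 511
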
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_types.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_types.py
deleted file mode 100644
index d949412..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/tomli/_types.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: MIT
-# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
-# Licensed to PSF under a Contributor Agreement.
-
-from typing import Any, Callable, Tuple
-
-# Type annotations
-ParseFloat = Callable[[str], Any]
-Key = Tuple[str, ...]
-Pos = int
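
# Illustrative sketch, not part of the deleted file: how these aliases read
# in the parser's signatures. Pos is a plain int index into the source, and
# parsing helpers return (new_pos, value) pairs.
Pos = int

def skip_spaces(src: str, pos: Pos) -> Pos:
    # Advance past spaces; the real parser uses skip_chars with frozensets.
    while pos < len(src) and src[pos] == " ":
        pos += 1
    return pos

assert skip_spaces("   x", 0) == 3
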
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/typing_extensions.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/typing_extensions.py
deleted file mode 100644
index 9f1c7aa..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/typing_extensions.py
+++ /dev/null
@@ -1,2296 +0,0 @@
-import abc
-import collections
-import collections.abc
-import operator
-import sys
-import typing
-
-# After PEP 560, internal typing API was substantially reworked.
-# This is especially important for Protocol class which uses internal APIs
-# quite extensively.
-PEP_560 = sys.version_info[:3] >= (3, 7, 0)
-
-if PEP_560:
- GenericMeta = type
-else:
- # 3.6
- from typing import GenericMeta, _type_vars # noqa
-
-# The two functions below are copies of typing internal helpers.
-# They are needed by _ProtocolMeta
-
-
-def _no_slots_copy(dct):
- dict_copy = dict(dct)
- if '__slots__' in dict_copy:
- for slot in dict_copy['__slots__']:
- dict_copy.pop(slot, None)
- return dict_copy
-
-
-def _check_generic(cls, parameters):
- if not cls.__parameters__:
- raise TypeError(f"{cls} is not a generic class")
- alen = len(parameters)
- elen = len(cls.__parameters__)
- if alen != elen:
- raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};"
- f" actual {alen}, expected {elen}")
-
-
-# Please keep __all__ alphabetized within each category.
-__all__ = [
- # Super-special typing primitives.
- 'ClassVar',
- 'Concatenate',
- 'Final',
- 'ParamSpec',
- 'Self',
- 'Type',
-
- # ABCs (from collections.abc).
- 'Awaitable',
- 'AsyncIterator',
- 'AsyncIterable',
- 'Coroutine',
- 'AsyncGenerator',
- 'AsyncContextManager',
- 'ChainMap',
-
- # Concrete collection types.
- 'ContextManager',
- 'Counter',
- 'Deque',
- 'DefaultDict',
- 'OrderedDict',
- 'TypedDict',
-
- # Structural checks, a.k.a. protocols.
- 'SupportsIndex',
-
- # One-off things.
- 'Annotated',
- 'final',
- 'IntVar',
- 'Literal',
- 'NewType',
- 'overload',
- 'Protocol',
- 'runtime',
- 'runtime_checkable',
- 'Text',
- 'TypeAlias',
- 'TypeGuard',
- 'TYPE_CHECKING',
-]
-
-if PEP_560:
- __all__.extend(["get_args", "get_origin", "get_type_hints"])
-
-# 3.6.2+
-if hasattr(typing, 'NoReturn'):
- NoReturn = typing.NoReturn
-# 3.6.0-3.6.1
-else:
- class _NoReturn(typing._FinalTypingBase, _root=True):
- """Special type indicating functions that never return.
- Example::
-
- from typing import NoReturn
-
- def stop() -> NoReturn:
- raise Exception('no way')
-
- This type is invalid in other positions, e.g., ``List[NoReturn]``
- will fail in static type checkers.
- """
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("NoReturn cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("NoReturn cannot be used with issubclass().")
-
- NoReturn = _NoReturn(_root=True)
-
-# Some unconstrained type variables. These are used by the container types.
-# (These are not for export.)
-T = typing.TypeVar('T') # Any type.
-KT = typing.TypeVar('KT') # Key type.
-VT = typing.TypeVar('VT') # Value type.
-T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
-T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
-
-ClassVar = typing.ClassVar
-
-# On older versions of typing there is an internal class named "Final".
-# 3.8+
-if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
- Final = typing.Final
-# 3.7
-elif sys.version_info[:2] >= (3, 7):
- class _FinalForm(typing._SpecialForm, _root=True):
-
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- item = typing._type_check(parameters,
- f'{self._name} accepts only single type')
- return typing._GenericAlias(self, (item,))
-
- Final = _FinalForm('Final',
- doc="""A special typing construct to indicate that a name
- cannot be re-assigned or overridden in a subclass.
- For example:
-
- MAX_SIZE: Final = 9000
- MAX_SIZE += 1 # Error reported by type checker
-
- class Connection:
- TIMEOUT: Final[int] = 10
- class FastConnector(Connection):
- TIMEOUT = 1 # Error reported by type checker
-
- There is no runtime checking of these properties.""")
-# 3.6
-else:
- class _Final(typing._FinalTypingBase, _root=True):
- """A special typing construct to indicate that a name
- cannot be re-assigned or overridden in a subclass.
- For example:
-
- MAX_SIZE: Final = 9000
- MAX_SIZE += 1 # Error reported by type checker
-
- class Connection:
- TIMEOUT: Final[int] = 10
- class FastConnector(Connection):
- TIMEOUT = 1 # Error reported by type checker
-
- There is no runtime checking of these properties.
- """
-
- __slots__ = ('__type__',)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(typing._type_check(item,
- f'{cls.__name__[1:]} accepts only single type.'),
- _root=True)
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += f'[{typing._type_repr(self.__type__)}]'
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, _Final):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- Final = _Final(_root=True)
-
-
-# 3.8+
-if hasattr(typing, 'final'):
- final = typing.final
-# 3.6-3.7
-else:
- def final(f):
- """This decorator can be used to indicate to type checkers that
- the decorated method cannot be overridden, and decorated class
- cannot be subclassed. For example:
-
- class Base:
- @final
- def done(self) -> None:
- ...
- class Sub(Base):
- def done(self) -> None: # Error reported by type checker
- ...
- @final
- class Leaf:
- ...
- class Other(Leaf): # Error reported by type checker
- ...
-
- There is no runtime checking of these properties.
- """
- return f
-
-
-def IntVar(name):
- return typing.TypeVar(name)
-
-
-# 3.8+:
-if hasattr(typing, 'Literal'):
- Literal = typing.Literal
-# 3.7:
-elif sys.version_info[:2] >= (3, 7):
- class _LiteralForm(typing._SpecialForm, _root=True):
-
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- return typing._GenericAlias(self, parameters)
-
- Literal = _LiteralForm('Literal',
- doc="""A type that can be used to indicate to type checkers
- that the corresponding value has a value literally equivalent
- to the provided parameter. For example:
-
- var: Literal[4] = 4
-
- The type checker understands that 'var' is literally equal to
- the value 4 and no other value.
-
- Literal[...] cannot be subclassed. There is no runtime
- checking verifying that the parameter is actually a value
- instead of a type.""")
-# 3.6:
-else:
- class _Literal(typing._FinalTypingBase, _root=True):
- """A type that can be used to indicate to type checkers that the
- corresponding value has a value literally equivalent to the
- provided parameter. For example:
-
- var: Literal[4] = 4
-
- The type checker understands that 'var' is literally equal to the
- value 4 and no other value.
-
- Literal[...] cannot be subclassed. There is no runtime checking
- verifying that the parameter is actually a value instead of a type.
- """
-
- __slots__ = ('__values__',)
-
- def __init__(self, values=None, **kwds):
- self.__values__ = values
-
- def __getitem__(self, values):
- cls = type(self)
- if self.__values__ is None:
- if not isinstance(values, tuple):
- values = (values,)
- return cls(values, _root=True)
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
-
- def _eval_type(self, globalns, localns):
- return self
-
- def __repr__(self):
- r = super().__repr__()
- if self.__values__ is not None:
- r += f'[{", ".join(map(typing._type_repr, self.__values__))}]'
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__values__))
-
- def __eq__(self, other):
- if not isinstance(other, _Literal):
- return NotImplemented
- if self.__values__ is not None:
- return self.__values__ == other.__values__
- return self is other
-
- Literal = _Literal(_root=True)
-
-
-_overload_dummy = typing._overload_dummy # noqa
-overload = typing.overload
-
-
-# This is not a real generic class. Don't use outside annotations.
-Type = typing.Type
-
-# Various ABCs mimicking those in collections.abc.
-# A few are simply re-exported for completeness.
-
-
-class _ExtensionsGenericMeta(GenericMeta):
- def __subclasscheck__(self, subclass):
- """This mimics a more modern GenericMeta.__subclasscheck__() logic
- (that does not have problems with recursion) to work around interactions
- between collections, typing, and typing_extensions on older
- versions of Python, see https://github.com/python/typing/issues/501.
- """
- if self.__origin__ is not None:
- if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
- raise TypeError("Parameterized generics cannot be used with class "
- "or instance checks")
- return False
- if not self.__extra__:
- return super().__subclasscheck__(subclass)
- res = self.__extra__.__subclasshook__(subclass)
- if res is not NotImplemented:
- return res
- if self.__extra__ in subclass.__mro__:
- return True
- for scls in self.__extra__.__subclasses__():
- if isinstance(scls, GenericMeta):
- continue
- if issubclass(subclass, scls):
- return True
- return False
-
-
-Awaitable = typing.Awaitable
-Coroutine = typing.Coroutine
-AsyncIterable = typing.AsyncIterable
-AsyncIterator = typing.AsyncIterator
-
-# 3.6.1+
-if hasattr(typing, 'Deque'):
- Deque = typing.Deque
-# 3.6.0
-else:
- class Deque(collections.deque, typing.MutableSequence[T],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.deque):
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is Deque:
- return collections.deque(*args, **kwds)
- return typing._generic_new(collections.deque, cls, *args, **kwds)
-
-ContextManager = typing.ContextManager
-# 3.6.2+
-if hasattr(typing, 'AsyncContextManager'):
- AsyncContextManager = typing.AsyncContextManager
-# 3.6.0-3.6.1
-else:
- from _collections_abc import _check_methods as _check_methods_in_mro # noqa
-
- class AsyncContextManager(typing.Generic[T_co]):
- __slots__ = ()
-
- async def __aenter__(self):
- return self
-
- @abc.abstractmethod
- async def __aexit__(self, exc_type, exc_value, traceback):
- return None
-
- @classmethod
- def __subclasshook__(cls, C):
- if cls is AsyncContextManager:
- return _check_methods_in_mro(C, "__aenter__", "__aexit__")
- return NotImplemented
-
-DefaultDict = typing.DefaultDict
-
-# 3.7.2+
-if hasattr(typing, 'OrderedDict'):
- OrderedDict = typing.OrderedDict
-# 3.7.0-3.7.2
-elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
- OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
-# 3.6
-else:
- class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.OrderedDict):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is OrderedDict:
- return collections.OrderedDict(*args, **kwds)
- return typing._generic_new(collections.OrderedDict, cls, *args, **kwds)
-
-# 3.6.2+
-if hasattr(typing, 'Counter'):
- Counter = typing.Counter
-# 3.6.0-3.6.1
-else:
- class Counter(collections.Counter,
- typing.Dict[T, int],
- metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is Counter:
- return collections.Counter(*args, **kwds)
- return typing._generic_new(collections.Counter, cls, *args, **kwds)
-
-# 3.6.1+
-if hasattr(typing, 'ChainMap'):
- ChainMap = typing.ChainMap
-elif hasattr(collections, 'ChainMap'):
- class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.ChainMap):
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwds):
- if cls._gorg is ChainMap:
- return collections.ChainMap(*args, **kwds)
- return typing._generic_new(collections.ChainMap, cls, *args, **kwds)
-
-# 3.6.1+
-if hasattr(typing, 'AsyncGenerator'):
- AsyncGenerator = typing.AsyncGenerator
-# 3.6.0
-else:
- class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
- metaclass=_ExtensionsGenericMeta,
- extra=collections.abc.AsyncGenerator):
- __slots__ = ()
-
-NewType = typing.NewType
-Text = typing.Text
-TYPE_CHECKING = typing.TYPE_CHECKING
-
-
-def _gorg(cls):
- """This function exists for compatibility with old typing versions."""
- assert isinstance(cls, GenericMeta)
- if hasattr(cls, '_gorg'):
- return cls._gorg
- while cls.__origin__ is not None:
- cls = cls.__origin__
- return cls
-
-
-_PROTO_WHITELIST = ['Callable', 'Awaitable',
- 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
- 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
- 'ContextManager', 'AsyncContextManager']
-
-
-def _get_protocol_attrs(cls):
- attrs = set()
- for base in cls.__mro__[:-1]: # without object
- if base.__name__ in ('Protocol', 'Generic'):
- continue
- annotations = getattr(base, '__annotations__', {})
- for attr in list(base.__dict__.keys()) + list(annotations.keys()):
- if (not attr.startswith('_abc_') and attr not in (
- '__abstractmethods__', '__annotations__', '__weakref__',
- '_is_protocol', '_is_runtime_protocol', '__dict__',
- '__args__', '__slots__',
- '__next_in_mro__', '__parameters__', '__origin__',
- '__orig_bases__', '__extra__', '__tree_hash__',
- '__doc__', '__subclasshook__', '__init__', '__new__',
- '__module__', '_MutableMapping__marker', '_gorg')):
- attrs.add(attr)
- return attrs
-
-
-def _is_callable_members_only(cls):
- return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
-
-
-# 3.8+
-if hasattr(typing, 'Protocol'):
- Protocol = typing.Protocol
-# 3.7
-elif PEP_560:
- from typing import _collect_type_vars # noqa
-
- def _no_init(self, *args, **kwargs):
- if type(self)._is_protocol:
- raise TypeError('Protocols cannot be instantiated')
-
- class _ProtocolMeta(abc.ABCMeta):
- # This metaclass is a bit unfortunate and exists only because of the lack
- # of __instancehook__.
- def __instancecheck__(cls, instance):
- # We need this method for situations where attributes are
- # assigned in __init__.
- if ((not getattr(cls, '_is_protocol', False) or
- _is_callable_members_only(cls)) and
- issubclass(instance.__class__, cls)):
- return True
- if cls._is_protocol:
- if all(hasattr(instance, attr) and
- (not callable(getattr(cls, attr, None)) or
- getattr(instance, attr) is not None)
- for attr in _get_protocol_attrs(cls)):
- return True
- return super().__instancecheck__(instance)
-
- class Protocol(metaclass=_ProtocolMeta):
- # There is quite a lot of overlapping code with typing.Generic.
- # Unfortunately it is hard to avoid this while these live in two different
- # modules. The duplicated code will be removed when Protocol is moved to typing.
- """Base class for protocol classes. Protocol classes are defined as::
-
- class Proto(Protocol):
- def meth(self) -> int:
- ...
-
- Such classes are primarily used with static type checkers that recognize
- structural subtyping (static duck-typing), for example::
-
- class C:
- def meth(self) -> int:
- return 0
-
- def func(x: Proto) -> int:
- return x.meth()
-
- func(C()) # Passes static type check
-
- See PEP 544 for details. Protocol classes decorated with
-        @typing_extensions.runtime act as simple-minded runtime protocols that check
- only the presence of given attributes, ignoring their type signatures.
-
- Protocol classes can be generic, they are defined as::
-
- class GenProto(Protocol[T]):
- def meth(self) -> T:
- ...
- """
- __slots__ = ()
- _is_protocol = True
-
- def __new__(cls, *args, **kwds):
- if cls is Protocol:
- raise TypeError("Type Protocol cannot be instantiated; "
- "it can only be used as a base class")
- return super().__new__(cls)
-
- @typing._tp_cache
- def __class_getitem__(cls, params):
- if not isinstance(params, tuple):
- params = (params,)
- if not params and cls is not typing.Tuple:
- raise TypeError(
- f"Parameter list to {cls.__qualname__}[...] cannot be empty")
- msg = "Parameters to generic types must be types."
- params = tuple(typing._type_check(p, msg) for p in params) # noqa
- if cls is Protocol:
- # Generic can only be subscripted with unique type variables.
- if not all(isinstance(p, typing.TypeVar) for p in params):
- i = 0
- while isinstance(params[i], typing.TypeVar):
- i += 1
- raise TypeError(
- "Parameters to Protocol[...] must all be type variables."
- f" Parameter {i + 1} is {params[i]}")
- if len(set(params)) != len(params):
- raise TypeError(
- "Parameters to Protocol[...] must all be unique")
- else:
- # Subscripting a regular Generic subclass.
- _check_generic(cls, params)
- return typing._GenericAlias(cls, params)
-
- def __init_subclass__(cls, *args, **kwargs):
- tvars = []
- if '__orig_bases__' in cls.__dict__:
- error = typing.Generic in cls.__orig_bases__
- else:
- error = typing.Generic in cls.__bases__
- if error:
- raise TypeError("Cannot inherit from plain Generic")
- if '__orig_bases__' in cls.__dict__:
- tvars = _collect_type_vars(cls.__orig_bases__)
- # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
- # If found, tvars must be a subset of it.
- # If not found, tvars is it.
- # Also check for and reject plain Generic,
- # and reject multiple Generic[...] and/or Protocol[...].
- gvars = None
- for base in cls.__orig_bases__:
- if (isinstance(base, typing._GenericAlias) and
- base.__origin__ in (typing.Generic, Protocol)):
- # for error messages
- the_base = base.__origin__.__name__
- if gvars is not None:
- raise TypeError(
- "Cannot inherit from Generic[...]"
-                                " and/or Protocol[...] multiple times.")
- gvars = base.__parameters__
- if gvars is None:
- gvars = tvars
- else:
- tvarset = set(tvars)
- gvarset = set(gvars)
- if not tvarset <= gvarset:
- s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
- s_args = ', '.join(str(g) for g in gvars)
- raise TypeError(f"Some type variables ({s_vars}) are"
- f" not listed in {the_base}[{s_args}]")
- tvars = gvars
- cls.__parameters__ = tuple(tvars)
-
- # Determine if this is a protocol or a concrete subclass.
- if not cls.__dict__.get('_is_protocol', None):
- cls._is_protocol = any(b is Protocol for b in cls.__bases__)
-
- # Set (or override) the protocol subclass hook.
- def _proto_hook(other):
- if not cls.__dict__.get('_is_protocol', None):
- return NotImplemented
- if not getattr(cls, '_is_runtime_protocol', False):
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
- return NotImplemented
- raise TypeError("Instance and class checks can only be used with"
- " @runtime protocols")
- if not _is_callable_members_only(cls):
- if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
- return NotImplemented
- raise TypeError("Protocols with non-method members"
- " don't support issubclass()")
- if not isinstance(other, type):
- # Same error as for issubclass(1, int)
- raise TypeError('issubclass() arg 1 must be a class')
- for attr in _get_protocol_attrs(cls):
- for base in other.__mro__:
- if attr in base.__dict__:
- if base.__dict__[attr] is None:
- return NotImplemented
- break
- annotations = getattr(base, '__annotations__', {})
- if (isinstance(annotations, typing.Mapping) and
- attr in annotations and
- isinstance(other, _ProtocolMeta) and
- other._is_protocol):
- break
- else:
- return NotImplemented
- return True
- if '__subclasshook__' not in cls.__dict__:
- cls.__subclasshook__ = _proto_hook
-
- # We have nothing more to do for non-protocols.
- if not cls._is_protocol:
- return
-
- # Check consistency of bases.
- for base in cls.__bases__:
- if not (base in (object, typing.Generic) or
- base.__module__ == 'collections.abc' and
- base.__name__ in _PROTO_WHITELIST or
- isinstance(base, _ProtocolMeta) and base._is_protocol):
- raise TypeError('Protocols can only inherit from other'
- f' protocols, got {repr(base)}')
- cls.__init__ = _no_init
-# 3.6
-else:
- from typing import _next_in_mro, _type_check # noqa
-
- def _no_init(self, *args, **kwargs):
- if type(self)._is_protocol:
- raise TypeError('Protocols cannot be instantiated')
-
- class _ProtocolMeta(GenericMeta):
- """Internal metaclass for Protocol.
-
- This exists so Protocol classes can be generic without deriving
- from Generic.
- """
- def __new__(cls, name, bases, namespace,
- tvars=None, args=None, origin=None, extra=None, orig_bases=None):
- # This is just a version copied from GenericMeta.__new__ that
- # includes "Protocol" special treatment. (Comments removed for brevity.)
- assert extra is None # Protocols should not have extra
- if tvars is not None:
- assert origin is not None
- assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars
- else:
- tvars = _type_vars(bases)
- gvars = None
- for base in bases:
- if base is typing.Generic:
- raise TypeError("Cannot inherit from plain Generic")
- if (isinstance(base, GenericMeta) and
- base.__origin__ in (typing.Generic, Protocol)):
- if gvars is not None:
- raise TypeError(
- "Cannot inherit from Generic[...] or"
- " Protocol[...] multiple times.")
- gvars = base.__parameters__
- if gvars is None:
- gvars = tvars
- else:
- tvarset = set(tvars)
- gvarset = set(gvars)
- if not tvarset <= gvarset:
- s_vars = ", ".join(str(t) for t in tvars if t not in gvarset)
- s_args = ", ".join(str(g) for g in gvars)
- cls_name = "Generic" if any(b.__origin__ is typing.Generic
- for b in bases) else "Protocol"
- raise TypeError(f"Some type variables ({s_vars}) are"
- f" not listed in {cls_name}[{s_args}]")
- tvars = gvars
-
- initial_bases = bases
- if (extra is not None and type(extra) is abc.ABCMeta and
- extra not in bases):
- bases = (extra,) + bases
- bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
- for b in bases)
- if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases):
- bases = tuple(b for b in bases if b is not typing.Generic)
- namespace.update({'__origin__': origin, '__extra__': extra})
- self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
- _root=True)
- super(GenericMeta, self).__setattr__('_gorg',
- self if not origin else
- _gorg(origin))
- self.__parameters__ = tvars
- self.__args__ = tuple(... if a is typing._TypingEllipsis else
- () if a is typing._TypingEmpty else
- a for a in args) if args else None
- self.__next_in_mro__ = _next_in_mro(self)
- if orig_bases is None:
- self.__orig_bases__ = initial_bases
- elif origin is not None:
- self._abc_registry = origin._abc_registry
- self._abc_cache = origin._abc_cache
- if hasattr(self, '_subs_tree'):
- self.__tree_hash__ = (hash(self._subs_tree()) if origin else
- super(GenericMeta, self).__hash__())
- return self
-
- def __init__(cls, *args, **kwargs):
- super().__init__(*args, **kwargs)
- if not cls.__dict__.get('_is_protocol', None):
- cls._is_protocol = any(b is Protocol or
- isinstance(b, _ProtocolMeta) and
- b.__origin__ is Protocol
- for b in cls.__bases__)
- if cls._is_protocol:
- for base in cls.__mro__[1:]:
- if not (base in (object, typing.Generic) or
- base.__module__ == 'collections.abc' and
- base.__name__ in _PROTO_WHITELIST or
- isinstance(base, typing.TypingMeta) and base._is_protocol or
- isinstance(base, GenericMeta) and
- base.__origin__ is typing.Generic):
- raise TypeError(f'Protocols can only inherit from other'
- f' protocols, got {repr(base)}')
-
- cls.__init__ = _no_init
-
- def _proto_hook(other):
- if not cls.__dict__.get('_is_protocol', None):
- return NotImplemented
- if not isinstance(other, type):
- # Same error as for issubclass(1, int)
- raise TypeError('issubclass() arg 1 must be a class')
- for attr in _get_protocol_attrs(cls):
- for base in other.__mro__:
- if attr in base.__dict__:
- if base.__dict__[attr] is None:
- return NotImplemented
- break
- annotations = getattr(base, '__annotations__', {})
- if (isinstance(annotations, typing.Mapping) and
- attr in annotations and
- isinstance(other, _ProtocolMeta) and
- other._is_protocol):
- break
- else:
- return NotImplemented
- return True
- if '__subclasshook__' not in cls.__dict__:
- cls.__subclasshook__ = _proto_hook
-
- def __instancecheck__(self, instance):
- # We need this method for situations where attributes are
- # assigned in __init__.
- if ((not getattr(self, '_is_protocol', False) or
- _is_callable_members_only(self)) and
- issubclass(instance.__class__, self)):
- return True
- if self._is_protocol:
- if all(hasattr(instance, attr) and
- (not callable(getattr(self, attr, None)) or
- getattr(instance, attr) is not None)
- for attr in _get_protocol_attrs(self)):
- return True
- return super(GenericMeta, self).__instancecheck__(instance)
-
- def __subclasscheck__(self, cls):
- if self.__origin__ is not None:
- if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
- raise TypeError("Parameterized generics cannot be used with class "
- "or instance checks")
- return False
- if (self.__dict__.get('_is_protocol', None) and
- not self.__dict__.get('_is_runtime_protocol', None)):
- if sys._getframe(1).f_globals['__name__'] in ['abc',
- 'functools',
- 'typing']:
- return False
- raise TypeError("Instance and class checks can only be used with"
- " @runtime protocols")
- if (self.__dict__.get('_is_runtime_protocol', None) and
- not _is_callable_members_only(self)):
- if sys._getframe(1).f_globals['__name__'] in ['abc',
- 'functools',
- 'typing']:
- return super(GenericMeta, self).__subclasscheck__(cls)
- raise TypeError("Protocols with non-method members"
- " don't support issubclass()")
- return super(GenericMeta, self).__subclasscheck__(cls)
-
- @typing._tp_cache
- def __getitem__(self, params):
- # We also need to copy this from GenericMeta.__getitem__ to get
- # special treatment of "Protocol". (Comments removed for brevity.)
- if not isinstance(params, tuple):
- params = (params,)
- if not params and _gorg(self) is not typing.Tuple:
- raise TypeError(
- f"Parameter list to {self.__qualname__}[...] cannot be empty")
- msg = "Parameters to generic types must be types."
- params = tuple(_type_check(p, msg) for p in params)
- if self in (typing.Generic, Protocol):
- if not all(isinstance(p, typing.TypeVar) for p in params):
- raise TypeError(
- f"Parameters to {repr(self)}[...] must all be type variables")
- if len(set(params)) != len(params):
- raise TypeError(
- f"Parameters to {repr(self)}[...] must all be unique")
- tvars = params
- args = params
- elif self in (typing.Tuple, typing.Callable):
- tvars = _type_vars(params)
- args = params
- elif self.__origin__ in (typing.Generic, Protocol):
- raise TypeError(f"Cannot subscript already-subscripted {repr(self)}")
- else:
- _check_generic(self, params)
- tvars = _type_vars(params)
- args = params
-
- prepend = (self,) if self.__origin__ is None else ()
- return self.__class__(self.__name__,
- prepend + self.__bases__,
- _no_slots_copy(self.__dict__),
- tvars=tvars,
- args=args,
- origin=self,
- extra=self.__extra__,
- orig_bases=self.__orig_bases__)
-
- class Protocol(metaclass=_ProtocolMeta):
- """Base class for protocol classes. Protocol classes are defined as::
-
- class Proto(Protocol):
- def meth(self) -> int:
- ...
-
- Such classes are primarily used with static type checkers that recognize
- structural subtyping (static duck-typing), for example::
-
- class C:
- def meth(self) -> int:
- return 0
-
- def func(x: Proto) -> int:
- return x.meth()
-
- func(C()) # Passes static type check
-
- See PEP 544 for details. Protocol classes decorated with
-        @typing_extensions.runtime act as simple-minded runtime protocols that check
- only the presence of given attributes, ignoring their type signatures.
-
- Protocol classes can be generic, they are defined as::
-
- class GenProto(Protocol[T]):
- def meth(self) -> T:
- ...
- """
- __slots__ = ()
- _is_protocol = True
-
- def __new__(cls, *args, **kwds):
- if _gorg(cls) is Protocol:
- raise TypeError("Type Protocol cannot be instantiated; "
- "it can be used only as a base class")
- return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds)
-
-
-# 3.8+
-if hasattr(typing, 'runtime_checkable'):
- runtime_checkable = typing.runtime_checkable
-# 3.6-3.7
-else:
- def runtime_checkable(cls):
- """Mark a protocol class as a runtime protocol, so that it
- can be used with isinstance() and issubclass(). Raise TypeError
- if applied to a non-protocol class.
-
- This allows a simple-minded structural check very similar to the
- one-offs in collections.abc such as Hashable.
- """
- if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
-            raise TypeError('@runtime_checkable can only be applied to protocol classes,'
- f' got {cls!r}')
- cls._is_runtime_protocol = True
- return cls
-
-
-# Exists for backwards compatibility.
-runtime = runtime_checkable
-
-
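
# Illustrative sketch, not part of the deleted file: a structural
# isinstance() check using the Protocol/runtime_checkable pair defined (or
# re-exported) above.
import io

@runtime_checkable
class HasClose(Protocol):
    def close(self) -> None: ...

assert isinstance(io.BytesIO(), HasClose)
assert not isinstance(42, HasClose)
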
-# 3.8+
-if hasattr(typing, 'SupportsIndex'):
- SupportsIndex = typing.SupportsIndex
-# 3.6-3.7
-else:
- @runtime_checkable
- class SupportsIndex(Protocol):
- __slots__ = ()
-
- @abc.abstractmethod
- def __index__(self) -> int:
- pass
-
-
-if sys.version_info >= (3, 9, 2):
- # The standard library TypedDict in Python 3.8 does not store runtime information
- # about which (if any) keys are optional. See https://bugs.python.org/issue38834
- # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
- # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
- TypedDict = typing.TypedDict
-else:
- def _check_fails(cls, other):
- try:
- if sys._getframe(1).f_globals['__name__'] not in ['abc',
- 'functools',
- 'typing']:
- # Typed dicts are only for static structural subtyping.
- raise TypeError('TypedDict does not support instance and class checks')
- except (AttributeError, ValueError):
- pass
- return False
-
- def _dict_new(*args, **kwargs):
- if not args:
- raise TypeError('TypedDict.__new__(): not enough arguments')
-        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
- return dict(*args, **kwargs)
-
- _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
-
- def _typeddict_new(*args, total=True, **kwargs):
- if not args:
- raise TypeError('TypedDict.__new__(): not enough arguments')
-        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
- if args:
-            typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
- elif '_typename' in kwargs:
- typename = kwargs.pop('_typename')
- import warnings
- warnings.warn("Passing '_typename' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- raise TypeError("TypedDict.__new__() missing 1 required positional "
- "argument: '_typename'")
- if args:
- try:
-                fields, = args  # allow the "_fields" keyword to be passed
- except ValueError:
- raise TypeError('TypedDict.__new__() takes from 2 to 3 '
- f'positional arguments but {len(args) + 2} '
- 'were given')
- elif '_fields' in kwargs and len(kwargs) == 1:
- fields = kwargs.pop('_fields')
- import warnings
- warnings.warn("Passing '_fields' as keyword argument is deprecated",
- DeprecationWarning, stacklevel=2)
- else:
- fields = None
-
- if fields is None:
- fields = kwargs
- elif kwargs:
- raise TypeError("TypedDict takes either a dict or keyword arguments,"
- " but not both")
-
- ns = {'__annotations__': dict(fields)}
- try:
- # Setting correct module is necessary to make typed dict classes pickleable.
- ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
- except (AttributeError, ValueError):
- pass
-
- return _TypedDictMeta(typename, (), ns, total=total)
-
- _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
- ' /, *, total=True, **kwargs)')
-
- class _TypedDictMeta(type):
- def __init__(cls, name, bases, ns, total=True):
- super().__init__(name, bases, ns)
-
- def __new__(cls, name, bases, ns, total=True):
- # Create new typed dict class object.
- # This method is called directly when TypedDict is subclassed,
- # or via _typeddict_new when TypedDict is instantiated. This way
- # TypedDict supports all three syntaxes described in its docstring.
- # Subclasses and instances of TypedDict return actual dictionaries
- # via _dict_new.
- ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
- tp_dict = super().__new__(cls, name, (dict,), ns)
-
- annotations = {}
- own_annotations = ns.get('__annotations__', {})
- own_annotation_keys = set(own_annotations.keys())
- msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
- own_annotations = {
- n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
- }
- required_keys = set()
- optional_keys = set()
-
- for base in bases:
- annotations.update(base.__dict__.get('__annotations__', {}))
- required_keys.update(base.__dict__.get('__required_keys__', ()))
- optional_keys.update(base.__dict__.get('__optional_keys__', ()))
-
- annotations.update(own_annotations)
- if total:
- required_keys.update(own_annotation_keys)
- else:
- optional_keys.update(own_annotation_keys)
-
- tp_dict.__annotations__ = annotations
- tp_dict.__required_keys__ = frozenset(required_keys)
- tp_dict.__optional_keys__ = frozenset(optional_keys)
- if not hasattr(tp_dict, '__total__'):
- tp_dict.__total__ = total
- return tp_dict
-
- __instancecheck__ = __subclasscheck__ = _check_fails
-
- TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
- TypedDict.__module__ = __name__
- TypedDict.__doc__ = \
-        """A simple typed namespace. At runtime it is equivalent to a plain dict.
-
- TypedDict creates a dictionary type that expects all of its
- instances to have a certain set of keys, with each key
- associated with a value of a consistent type. This expectation
- is not checked at runtime but is only enforced by type checkers.
- Usage::
-
- class Point2D(TypedDict):
- x: int
- y: int
- label: str
-
- a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
- b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
-
- assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
-
- The type info can be accessed via the Point2D.__annotations__ dict, and
- the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
- TypedDict supports two additional equivalent forms::
-
- Point2D = TypedDict('Point2D', x=int, y=int, label=str)
- Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
-
-        The class syntax is only supported in Python 3.6+, while the two other
-        syntax forms work for Python 2.7 and 3.2+.
- """
-
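
# Illustrative sketch, not part of the deleted file: at runtime a TypedDict
# subclass constructs plain dicts, while the metaclass above records
# totality in __required_keys__ / __optional_keys__.
class Point2D(TypedDict, total=False):
    x: int
    y: int

p = Point2D(x=1)
assert p == {"x": 1} and type(p) is dict
assert Point2D.__optional_keys__ == frozenset({"x", "y"})
assert Point2D.__required_keys__ == frozenset()
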
-
-# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
-if hasattr(typing, 'Annotated'):
- Annotated = typing.Annotated
- get_type_hints = typing.get_type_hints
- # Not exported and not a public API, but needed for get_origin() and get_args()
- # to work.
- _AnnotatedAlias = typing._AnnotatedAlias
-# 3.7-3.8
-elif PEP_560:
- class _AnnotatedAlias(typing._GenericAlias, _root=True):
- """Runtime representation of an annotated type.
-
- At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
-        with extra annotations. The alias behaves like a normal typing alias:
-        instantiating it is the same as instantiating the underlying type, and
-        binding it to types is also the same.
- """
- def __init__(self, origin, metadata):
- if isinstance(origin, _AnnotatedAlias):
- metadata = origin.__metadata__ + metadata
- origin = origin.__origin__
- super().__init__(origin, origin)
- self.__metadata__ = metadata
-
- def copy_with(self, params):
- assert len(params) == 1
- new_type = params[0]
- return _AnnotatedAlias(new_type, self.__metadata__)
-
- def __repr__(self):
- return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
- f"{', '.join(repr(a) for a in self.__metadata__)}]")
-
- def __reduce__(self):
- return operator.getitem, (
- Annotated, (self.__origin__,) + self.__metadata__
- )
-
- def __eq__(self, other):
- if not isinstance(other, _AnnotatedAlias):
- return NotImplemented
- if self.__origin__ != other.__origin__:
- return False
- return self.__metadata__ == other.__metadata__
-
- def __hash__(self):
- return hash((self.__origin__, self.__metadata__))
-
- class Annotated:
- """Add context specific metadata to a type.
-
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
- hypothetical runtime_check module that this type is an unsigned int.
- Every other consumer of this type can ignore this metadata and treat
- this type as int.
-
- The first argument to Annotated must be a valid type (and will be in
- the __origin__ field), the remaining arguments are kept as a tuple in
-        the __metadata__ field.
-
- Details:
-
- - It's an error to call `Annotated` with less than two arguments.
- - Nested Annotated are flattened::
-
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
-
- - Instantiating an annotated type is equivalent to instantiating the
- underlying type::
-
- Annotated[C, Ann1](5) == C(5)
-
- - Annotated can be used as a generic type alias::
-
- Optimized = Annotated[T, runtime.Optimize()]
- Optimized[int] == Annotated[int, runtime.Optimize()]
-
- OptimizedList = Annotated[List[T], runtime.Optimize()]
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
- """
-
- __slots__ = ()
-
- def __new__(cls, *args, **kwargs):
- raise TypeError("Type Annotated cannot be instantiated.")
-
- @typing._tp_cache
- def __class_getitem__(cls, params):
- if not isinstance(params, tuple) or len(params) < 2:
- raise TypeError("Annotated[...] should be used "
- "with at least two arguments (a type and an "
- "annotation).")
- msg = "Annotated[t, ...]: t must be a type."
- origin = typing._type_check(params[0], msg)
- metadata = tuple(params[1:])
- return _AnnotatedAlias(origin, metadata)
-
- def __init_subclass__(cls, *args, **kwargs):
- raise TypeError(
- f"Cannot subclass {cls.__module__}.Annotated"
- )
-
- def _strip_annotations(t):
- """Strips the annotations from a given type.
- """
- if isinstance(t, _AnnotatedAlias):
- return _strip_annotations(t.__origin__)
- if isinstance(t, typing._GenericAlias):
- stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
- if stripped_args == t.__args__:
- return t
- res = t.copy_with(stripped_args)
- res._special = t._special
- return res
- return t
-
- def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
- """Return type hints for an object.
-
- This is often the same as obj.__annotations__, but it handles
- forward references encoded as string literals, adds Optional[t] if a
- default value equal to None is set and recursively replaces all
- 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
-
- The argument may be a module, class, method, or function. The annotations
- are returned as a dictionary. For classes, annotations include also
- inherited members.
-
- TypeError is raised if the argument is not of a type that can contain
- annotations, and an empty dictionary is returned if no annotations are
- present.
-
- BEWARE -- the behavior of globalns and localns is counterintuitive
- (unless you are familiar with how eval() and exec() work). The
- search order is locals first, then globals.
-
- - If no dict arguments are passed, an attempt is made to use the
- globals from obj (or the respective module's globals for classes),
- and these are also used as the locals. If the object does not appear
- to have globals, an empty dictionary is used.
-
- - If one dict argument is passed, it is used for both globals and
- locals.
-
- - If two dict arguments are passed, they specify globals and
- locals, respectively.
- """
- hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
- if include_extras:
- return hint
- return {k: _strip_annotations(t) for k, t in hint.items()}
-# 3.6
-else:
-
- def _is_dunder(name):
- """Returns True if name is a __dunder_variable_name__."""
- return len(name) > 4 and name.startswith('__') and name.endswith('__')
-
-    # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
-    # checks, argument expansion, etc. are done on the _subs_tree. As a result we
-    # can't provide a get_type_hints function that strips out annotations.
-
- class AnnotatedMeta(typing.GenericMeta):
- """Metaclass for Annotated"""
-
- def __new__(cls, name, bases, namespace, **kwargs):
- if any(b is not object for b in bases):
- raise TypeError("Cannot subclass " + str(Annotated))
- return super().__new__(cls, name, bases, namespace, **kwargs)
-
- @property
- def __metadata__(self):
- return self._subs_tree()[2]
-
- def _tree_repr(self, tree):
- cls, origin, metadata = tree
- if not isinstance(origin, tuple):
- tp_repr = typing._type_repr(origin)
- else:
- tp_repr = origin[0]._tree_repr(origin)
- metadata_reprs = ", ".join(repr(arg) for arg in metadata)
- return f'{cls}[{tp_repr}, {metadata_reprs}]'
-
- def _subs_tree(self, tvars=None, args=None): # noqa
- if self is Annotated:
- return Annotated
- res = super()._subs_tree(tvars=tvars, args=args)
- # Flatten nested Annotated
- if isinstance(res[1], tuple) and res[1][0] is Annotated:
- sub_tp = res[1][1]
- sub_annot = res[1][2]
- return (Annotated, sub_tp, sub_annot + res[2])
- return res
-
- def _get_cons(self):
- """Return the class used to create instance of this type."""
- if self.__origin__ is None:
- raise TypeError("Cannot get the underlying type of a "
- "non-specialized Annotated type.")
- tree = self._subs_tree()
- while isinstance(tree, tuple) and tree[0] is Annotated:
- tree = tree[1]
- if isinstance(tree, tuple):
- return tree[0]
- else:
- return tree
-
- @typing._tp_cache
- def __getitem__(self, params):
- if not isinstance(params, tuple):
- params = (params,)
- if self.__origin__ is not None: # specializing an instantiated type
- return super().__getitem__(params)
- elif not isinstance(params, tuple) or len(params) < 2:
- raise TypeError("Annotated[...] should be instantiated "
- "with at least two arguments (a type and an "
- "annotation).")
- else:
- msg = "Annotated[t, ...]: t must be a type."
- tp = typing._type_check(params[0], msg)
- metadata = tuple(params[1:])
- return self.__class__(
- self.__name__,
- self.__bases__,
- _no_slots_copy(self.__dict__),
- tvars=_type_vars((tp,)),
- # Metadata is a tuple so it won't be touched by _replace_args et al.
- args=(tp, metadata),
- origin=self,
- )
-
- def __call__(self, *args, **kwargs):
- cons = self._get_cons()
- result = cons(*args, **kwargs)
- try:
- result.__orig_class__ = self
- except AttributeError:
- pass
- return result
-
- def __getattr__(self, attr):
- # For simplicity we just don't relay all dunder names
- if self.__origin__ is not None and not _is_dunder(attr):
- return getattr(self._get_cons(), attr)
- raise AttributeError(attr)
-
- def __setattr__(self, attr, value):
- if _is_dunder(attr) or attr.startswith('_abc_'):
- super().__setattr__(attr, value)
- elif self.__origin__ is None:
- raise AttributeError(attr)
- else:
- setattr(self._get_cons(), attr, value)
-
- def __instancecheck__(self, obj):
- raise TypeError("Annotated cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("Annotated cannot be used with issubclass().")
-
- class Annotated(metaclass=AnnotatedMeta):
- """Add context specific metadata to a type.
-
- Example: Annotated[int, runtime_check.Unsigned] indicates to the
- hypothetical runtime_check module that this type is an unsigned int.
- Every other consumer of this type can ignore this metadata and treat
- this type as int.
-
- The first argument to Annotated must be a valid type, the remaining
- arguments are kept as a tuple in the __metadata__ field.
-
- Details:
-
- - It's an error to call `Annotated` with less than two arguments.
- - Nested Annotated are flattened::
-
- Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
-
- - Instantiating an annotated type is equivalent to instantiating the
- underlying type::
-
- Annotated[C, Ann1](5) == C(5)
-
- - Annotated can be used as a generic type alias::
-
- Optimized = Annotated[T, runtime.Optimize()]
- Optimized[int] == Annotated[int, runtime.Optimize()]
-
- OptimizedList = Annotated[List[T], runtime.Optimize()]
- OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
- """
-
-# Python 3.8 has get_origin() and get_args() but those implementations aren't
-# Annotated-aware, so we can't use those. Python 3.9's versions don't support
-# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
-if sys.version_info[:2] >= (3, 10):
- get_origin = typing.get_origin
- get_args = typing.get_args
-# 3.7-3.9
-elif PEP_560:
- try:
- # 3.9+
- from typing import _BaseGenericAlias
- except ImportError:
- _BaseGenericAlias = typing._GenericAlias
- try:
- # 3.9+
- from typing import GenericAlias
- except ImportError:
- GenericAlias = typing._GenericAlias
-
- def get_origin(tp):
- """Get the unsubscripted version of a type.
-
- This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
- and Annotated. Return None for unsupported types. Examples::
-
- get_origin(Literal[42]) is Literal
- get_origin(int) is None
- get_origin(ClassVar[int]) is ClassVar
- get_origin(Generic) is Generic
- get_origin(Generic[T]) is Generic
- get_origin(Union[T, int]) is Union
- get_origin(List[Tuple[T, T]][int]) == list
- get_origin(P.args) is P
- """
- if isinstance(tp, _AnnotatedAlias):
- return Annotated
- if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias,
- ParamSpecArgs, ParamSpecKwargs)):
- return tp.__origin__
- if tp is typing.Generic:
- return typing.Generic
- return None
-
- def get_args(tp):
- """Get type arguments with all substitutions performed.
-
- For unions, basic simplifications used by Union constructor are performed.
- Examples::
- get_args(Dict[str, int]) == (str, int)
- get_args(int) == ()
- get_args(Union[int, Union[T, int], str][int]) == (int, str)
- get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
- get_args(Callable[[], T][int]) == ([], int)
- """
- if isinstance(tp, _AnnotatedAlias):
- return (tp.__origin__,) + tp.__metadata__
- if isinstance(tp, (typing._GenericAlias, GenericAlias)):
- if getattr(tp, "_special", False):
- return ()
- res = tp.__args__
- if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
- res = (list(res[:-1]), res[-1])
- return res
- return ()
-
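
# Illustrative sketch, not part of the deleted file: unlike the 3.8/3.9
# stdlib versions, the helpers above are Annotated-aware.
from typing import List, Union

assert get_origin(Annotated[int, "meta"]) is Annotated
assert get_args(Annotated[int, "meta"]) == (int, "meta")
assert get_origin(List[int]) is list
assert get_args(Union[int, str]) == (int, str)
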
-
-# 3.10+
-if hasattr(typing, 'TypeAlias'):
- TypeAlias = typing.TypeAlias
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
- class _TypeAliasForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- @_TypeAliasForm
- def TypeAlias(self, parameters):
- """Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example above.
- """
- raise TypeError(f"{self} is not subscriptable")
-# 3.7-3.8
-elif sys.version_info[:2] >= (3, 7):
- class _TypeAliasForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- TypeAlias = _TypeAliasForm('TypeAlias',
- doc="""Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example
- above.""")
-# 3.6
-else:
- class _TypeAliasMeta(typing.TypingMeta):
- """Metaclass for TypeAlias"""
-
- def __repr__(self):
- return 'typing_extensions.TypeAlias'
-
- class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
- """Special marker indicating that an assignment should
- be recognized as a proper type alias definition by type
- checkers.
-
- For example::
-
- Predicate: TypeAlias = Callable[..., bool]
-
- It's invalid when used anywhere except as in the example above.
- """
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("TypeAlias cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("TypeAlias cannot be used with issubclass().")
-
- def __repr__(self):
- return 'typing_extensions.TypeAlias'
-
- TypeAlias = _TypeAliasBase(_root=True)
-
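-# Illustrative TypeAlias usage (editorial sketch; assumes this file is
-# importable as typing_extensions, and ``accepts`` is a hypothetical helper):
-#
-#     from typing import Callable
-#     from typing_extensions import TypeAlias
-#
-#     Predicate: TypeAlias = Callable[..., bool]
-#
-#     def accepts(p: Predicate, value: object) -> bool:
-#         return p(value)
-#
-#     assert accepts(callable, print) is True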
-
-# Python 3.10+ has PEP 612
-if hasattr(typing, 'ParamSpecArgs'):
- ParamSpecArgs = typing.ParamSpecArgs
- ParamSpecKwargs = typing.ParamSpecKwargs
-# 3.6-3.9
-else:
- class _Immutable:
- """Mixin to indicate that object should not be copied."""
- __slots__ = ()
-
- def __copy__(self):
- return self
-
- def __deepcopy__(self, memo):
- return self
-
- class ParamSpecArgs(_Immutable):
- """The args for a ParamSpec object.
-
- Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
-
- ParamSpecArgs objects have a reference back to their ParamSpec:
-
- P.args.__origin__ is P
-
- This type is meant for runtime introspection and has no special meaning to
- static type checkers.
- """
- def __init__(self, origin):
- self.__origin__ = origin
-
- def __repr__(self):
- return f"{self.__origin__.__name__}.args"
-
- class ParamSpecKwargs(_Immutable):
- """The kwargs for a ParamSpec object.
-
- Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
-
- ParamSpecKwargs objects have a reference back to their ParamSpec:
-
- P.kwargs.__origin__ is P
-
- This type is meant for runtime introspection and has no special meaning to
- static type checkers.
- """
- def __init__(self, origin):
- self.__origin__ = origin
-
- def __repr__(self):
- return f"{self.__origin__.__name__}.kwargs"
-
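-# Runtime-introspection sketch for the two classes above (editorial; assumes
-# this file is importable as typing_extensions):
-#
-#     from typing_extensions import ParamSpec
-#
-#     P = ParamSpec('P')
-#     assert P.args.__origin__ is P      # ParamSpecArgs points back at its ParamSpec
-#     assert P.kwargs.__origin__ is P    # and so does ParamSpecKwargs
-#     assert repr(P.args) == 'P.args' and repr(P.kwargs) == 'P.kwargs'
-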
-# 3.10+
-if hasattr(typing, 'ParamSpec'):
- ParamSpec = typing.ParamSpec
-# 3.6-3.9
-else:
-
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
- class ParamSpec(list):
- """Parameter specification variable.
-
- Usage::
-
- P = ParamSpec('P')
-
- Parameter specification variables exist primarily for the benefit of static
- type checkers. They are used to forward the parameter types of one
- callable to another callable, a pattern commonly found in higher order
- functions and decorators. They are only valid when used in ``Concatenate``,
- or as the first argument to ``Callable``. In Python 3.10 and higher,
- they are also supported in user-defined Generics at runtime.
- See class Generic for more information on generic types. An
- example for annotating a decorator::
-
- T = TypeVar('T')
- P = ParamSpec('P')
-
- def add_logging(f: Callable[P, T]) -> Callable[P, T]:
- '''A type-safe decorator to add logging to a function.'''
- def inner(*args: P.args, **kwargs: P.kwargs) -> T:
- logging.info(f'{f.__name__} was called')
- return f(*args, **kwargs)
- return inner
-
- @add_logging
- def add_two(x: float, y: float) -> float:
- '''Add two numbers together.'''
- return x + y
-
- Parameter specification variables defined with covariant=True or
- contravariant=True can be used to declare covariant or contravariant
- generic types. These keyword arguments are valid, but their actual semantics
- are yet to be decided. See PEP 612 for details.
-
- Parameter specification variables can be introspected. e.g.:
-
- P.__name__ == 'P'
- P.__bound__ == None
- P.__covariant__ == False
- P.__contravariant__ == False
-
- Note that only parameter specification variables defined in global scope can
- be pickled.
- """
-
- # Trick Generic into collecting this in __parameters__ as if it were a TypeVar.
- __class__ = typing.TypeVar
-
- @property
- def args(self):
- return ParamSpecArgs(self)
-
- @property
- def kwargs(self):
- return ParamSpecKwargs(self)
-
- def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
- super().__init__([self])
- self.__name__ = name
- self.__covariant__ = bool(covariant)
- self.__contravariant__ = bool(contravariant)
- if bound:
- self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
- else:
- self.__bound__ = None
-
- # for pickling:
- try:
- def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
- except (AttributeError, ValueError):
- def_mod = None
- if def_mod != 'typing_extensions':
- self.__module__ = def_mod
-
- def __repr__(self):
- if self.__covariant__:
- prefix = '+'
- elif self.__contravariant__:
- prefix = '-'
- else:
- prefix = '~'
- return prefix + self.__name__
-
- def __hash__(self):
- return object.__hash__(self)
-
- def __eq__(self, other):
- return self is other
-
- def __reduce__(self):
- return self.__name__
-
- # Hack to get typing._type_check to pass.
- def __call__(self, *args, **kwargs):
- pass
-
- if not PEP_560:
- # Only needed in 3.6.
- def _get_type_vars(self, tvars):
- if self not in tvars:
- tvars.append(self)
-
-
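-# Self-contained, runnable version of the decorator example from the docstring
-# above (editorial sketch; assumes this file is importable as typing_extensions):
-#
-#     import logging
-#     from typing import Callable, TypeVar
-#     from typing_extensions import ParamSpec
-#
-#     T = TypeVar('T')
-#     P = ParamSpec('P')
-#
-#     def add_logging(f: Callable[P, T]) -> Callable[P, T]:
-#         def inner(*args: P.args, **kwargs: P.kwargs) -> T:
-#             logging.info('%s was called', f.__name__)
-#             return f(*args, **kwargs)
-#         return inner
-#
-#     @add_logging
-#     def add_two(x: float, y: float) -> float:
-#         return x + y
-#
-#     assert add_two(1.0, 2.0) == 3.0
-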
-# 3.6-3.9
-if not hasattr(typing, 'Concatenate'):
- # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
- class _ConcatenateGenericAlias(list):
-
- # Trick Generic into looking into this for __parameters__.
- if PEP_560:
- __class__ = typing._GenericAlias
- else:
- __class__ = typing._TypingBase
-
- # Flag in 3.8.
- _special = False
- # Attribute in 3.6 and earlier.
- _gorg = typing.Generic
-
- def __init__(self, origin, args):
- super().__init__(args)
- self.__origin__ = origin
- self.__args__ = args
-
- def __repr__(self):
- _type_repr = typing._type_repr
- return (f'{_type_repr(self.__origin__)}'
- f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
-
- def __hash__(self):
- return hash((self.__origin__, self.__args__))
-
- # Hack to get typing._type_check to pass in Generic.
- def __call__(self, *args, **kwargs):
- pass
-
- @property
- def __parameters__(self):
- return tuple(
- tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
- )
-
- if not PEP_560:
- # Only required in 3.6.
- def _get_type_vars(self, tvars):
- if self.__origin__ and self.__parameters__:
- typing._get_type_vars(self.__parameters__, tvars)
-
-
-# 3.6-3.9
-@typing._tp_cache
-def _concatenate_getitem(self, parameters):
- if parameters == ():
- raise TypeError("Cannot take a Concatenate of no types.")
- if not isinstance(parameters, tuple):
- parameters = (parameters,)
- if not isinstance(parameters[-1], ParamSpec):
- raise TypeError("The last parameter to Concatenate should be a "
- "ParamSpec variable.")
- msg = "Concatenate[arg, ...]: each arg must be a type."
- parameters = tuple(typing._type_check(p, msg) for p in parameters)
- return _ConcatenateGenericAlias(self, parameters)
-
-
-# 3.10+
-if hasattr(typing, 'Concatenate'):
- Concatenate = typing.Concatenate
- _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
- @_TypeAliasForm
- def Concatenate(self, parameters):
- """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
- higher order function which adds, removes or transforms parameters of a
- callable.
-
- For example::
-
- Callable[Concatenate[int, P], int]
-
- See PEP 612 for detailed information.
- """
- return _concatenate_getitem(self, parameters)
-# 3.7-3.8
-elif sys.version_info[:2] >= (3, 7):
- class _ConcatenateForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- return _concatenate_getitem(self, parameters)
-
- Concatenate = _ConcatenateForm(
- 'Concatenate',
- doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
- higher order function which adds, removes or transforms parameters of a
- callable.
-
- For example::
-
- Callable[Concatenate[int, P], int]
-
- See PEP 612 for detailed information.
- """)
-# 3.6
-else:
- class _ConcatenateAliasMeta(typing.TypingMeta):
- """Metaclass for Concatenate."""
-
- def __repr__(self):
- return 'typing_extensions.Concatenate'
-
- class _ConcatenateAliasBase(typing._FinalTypingBase,
- metaclass=_ConcatenateAliasMeta,
- _root=True):
- """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
- higher order function which adds, removes or transforms parameters of a
- callable.
-
- For example::
-
- Callable[Concatenate[int, P], int]
-
- See PEP 612 for detailed information.
- """
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError("Concatenate cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError("Concatenate cannot be used with issubclass().")
-
- def __repr__(self):
- return 'typing_extensions.Concatenate'
-
- def __getitem__(self, parameters):
- return _concatenate_getitem(self, parameters)
-
- Concatenate = _ConcatenateAliasBase(_root=True)
-
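-# Illustrative Concatenate usage (editorial sketch; ``Connection`` and
-# ``with_connection`` are hypothetical names, not part of this module):
-#
-#     from typing import Callable, TypeVar
-#     from typing_extensions import Concatenate, ParamSpec
-#
-#     P = ParamSpec('P')
-#     R = TypeVar('R')
-#
-#     class Connection:
-#         pass
-#
-#     # The decorator supplies the leading Connection argument itself, so the
-#     # returned callable no longer takes one.
-#     def with_connection(f: Callable[Concatenate[Connection, P], R]) -> Callable[P, R]:
-#         def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
-#             return f(Connection(), *args, **kwargs)
-#         return wrapper
-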
-# 3.10+
-if hasattr(typing, 'TypeGuard'):
- TypeGuard = typing.TypeGuard
-# 3.9
-elif sys.version_info[:2] >= (3, 9):
- class _TypeGuardForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- @_TypeGuardForm
- def TypeGuard(self, parameters):
- """Special typing form used to annotate the return type of a user-defined
- type guard function. ``TypeGuard`` only accepts a single type argument.
- At runtime, functions marked this way should return a boolean.
-
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
- type checkers to determine a more precise type of an expression within a
- program's code flow. Usually type narrowing is done by analyzing
- conditional code flow and applying the narrowing to a block of code. The
- conditional expression here is sometimes referred to as a "type guard".
-
- Sometimes it would be convenient to use a user-defined boolean function
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
- return type to alert static type checkers to this intention.
-
- Using ``-> TypeGuard`` tells the static type checker that for a given
- function:
-
- 1. The return value is a boolean.
- 2. If the return value is ``True``, the type of its argument
- is the type inside ``TypeGuard``.
-
- For example::
-
- def is_str(val: Union[str, float]):
- # "isinstance" type guard
- if isinstance(val, str):
- # Type of ``val`` is narrowed to ``str``
- ...
- else:
- # Else, type of ``val`` is narrowed to ``float``.
- ...
-
- Strict type narrowing is not enforced -- the type inside ``TypeGuard``
- need not be a narrower form of the argument's declared type (it can even
- be a wider form) and this may lead to
- type-unsafe results. The main reason is to allow for things like
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
- a subtype of the former, since ``List`` is invariant. The responsibility of
- writing type-safe type guards is left to the user.
-
- ``TypeGuard`` also works with type variables. For more information, see
- PEP 647 (User-Defined Type Guards).
- """
- item = typing._type_check(parameters, f'{self} accepts only a single type.')
- return typing._GenericAlias(self, (item,))
-# 3.7-3.8
-elif sys.version_info[:2] >= (3, 7):
- class _TypeGuardForm(typing._SpecialForm, _root=True):
-
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- item = typing._type_check(parameters,
- f'{self._name} accepts only a single type')
- return typing._GenericAlias(self, (item,))
-
- TypeGuard = _TypeGuardForm(
- 'TypeGuard',
- doc="""Special typing form used to annotate the return type of a user-defined
- type guard function. ``TypeGuard`` only accepts a single type argument.
- At runtime, functions marked this way should return a boolean.
-
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
- type checkers to determine a more precise type of an expression within a
- program's code flow. Usually type narrowing is done by analyzing
- conditional code flow and applying the narrowing to a block of code. The
- conditional expression here is sometimes referred to as a "type guard".
-
- Sometimes it would be convenient to use a user-defined boolean function
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
- return type to alert static type checkers to this intention.
-
- Using ``-> TypeGuard`` tells the static type checker that for a given
- function:
-
- 1. The return value is a boolean.
- 2. If the return value is ``True``, the type of its argument
- is the type inside ``TypeGuard``.
-
- For example::
-
- def is_str(val: Union[str, float]):
- # "isinstance" type guard
- if isinstance(val, str):
- # Type of ``val`` is narrowed to ``str``
- ...
- else:
- # Else, type of ``val`` is narrowed to ``float``.
- ...
-
- Strict type narrowing is not enforced -- the type inside ``TypeGuard``
- need not be a narrower form of the argument's declared type (it can even
- be a wider form) and this may lead to
- type-unsafe results. The main reason is to allow for things like
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
- a subtype of the former, since ``List`` is invariant. The responsibility of
- writing type-safe type guards is left to the user.
-
- ``TypeGuard`` also works with type variables. For more information, see
- PEP 647 (User-Defined Type Guards).
- """)
-# 3.6
-else:
- class _TypeGuard(typing._FinalTypingBase, _root=True):
- """Special typing form used to annotate the return type of a user-defined
- type guard function. ``TypeGuard`` only accepts a single type argument.
- At runtime, functions marked this way should return a boolean.
-
- ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
- type checkers to determine a more precise type of an expression within a
- program's code flow. Usually type narrowing is done by analyzing
- conditional code flow and applying the narrowing to a block of code. The
- conditional expression here is sometimes referred to as a "type guard".
-
- Sometimes it would be convenient to use a user-defined boolean function
- as a type guard. Such a function should use ``TypeGuard[...]`` as its
- return type to alert static type checkers to this intention.
-
- Using ``-> TypeGuard`` tells the static type checker that for a given
- function:
-
- 1. The return value is a boolean.
- 2. If the return value is ``True``, the type of its argument
- is the type inside ``TypeGuard``.
-
- For example::
-
- def is_str(val: Union[str, float]):
- # "isinstance" type guard
- if isinstance(val, str):
- # Type of ``val`` is narrowed to ``str``
- ...
- else:
- # Else, type of ``val`` is narrowed to ``float``.
- ...
-
- Strict type narrowing is not enforced -- the type inside ``TypeGuard``
- need not be a narrower form of the argument's declared type (it can even
- be a wider form) and this may lead to
- type-unsafe results. The main reason is to allow for things like
- narrowing ``List[object]`` to ``List[str]`` even though the latter is not
- a subtype of the former, since ``List`` is invariant. The responsibility of
- writing type-safe type guards is left to the user.
-
- ``TypeGuard`` also works with type variables. For more information, see
- PEP 647 (User-Defined Type Guards).
- """
-
- __slots__ = ('__type__',)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(typing._type_check(item,
- f'{cls.__name__[1:]} accepts only a single type.'),
- _root=True)
- raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted')
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += f'[{typing._type_repr(self.__type__)}]'
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, _TypeGuard):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- TypeGuard = _TypeGuard(_root=True)
-
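-# A runnable user-defined type guard in the style of PEP 647 (editorial
-# sketch; assumes this file is importable as typing_extensions):
-#
-#     from typing import List
-#     from typing_extensions import TypeGuard
-#
-#     def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
-#         """Narrow ``List[object]`` to ``List[str]`` if every element is a str."""
-#         return all(isinstance(x, str) for x in val)
-#
-#     items: List[object] = ['a', 'b']
-#     if is_str_list(items):
-#         # Static checkers treat ``items`` as List[str] in this branch.
-#         assert ', '.join(items) == 'a, b'
-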
-if hasattr(typing, "Self"):
- Self = typing.Self
-elif sys.version_info[:2] >= (3, 7):
- # Vendored from cpython typing._SpecialFrom
- class _SpecialForm(typing._Final, _root=True):
- __slots__ = ('_name', '__doc__', '_getitem')
-
- def __init__(self, getitem):
- self._getitem = getitem
- self._name = getitem.__name__
- self.__doc__ = getitem.__doc__
-
- def __getattr__(self, item):
- if item in {'__name__', '__qualname__'}:
- return self._name
-
- raise AttributeError(item)
-
- def __mro_entries__(self, bases):
- raise TypeError(f"Cannot subclass {self!r}")
-
- def __repr__(self):
- return f'typing_extensions.{self._name}'
-
- def __reduce__(self):
- return self._name
-
- def __call__(self, *args, **kwds):
- raise TypeError(f"Cannot instantiate {self!r}")
-
- def __or__(self, other):
- return typing.Union[self, other]
-
- def __ror__(self, other):
- return typing.Union[other, self]
-
- def __instancecheck__(self, obj):
- raise TypeError(f"{self} cannot be used with isinstance()")
-
- def __subclasscheck__(self, cls):
- raise TypeError(f"{self} cannot be used with issubclass()")
-
- @typing._tp_cache
- def __getitem__(self, parameters):
- return self._getitem(self, parameters)
-
- @_SpecialForm
- def Self(self, params):
- """Used to spell the type of "self" in classes.
-
- Example::
-
- from typing_extensions import Self
-
- class ReturnsSelf:
- def parse(self, data: bytes) -> Self:
- ...
- return self
-
- """
-
- raise TypeError(f"{self} is not subscriptable")
-else:
- class _Self(typing._FinalTypingBase, _root=True):
- """Used to spell the type of "self" in classes.
-
- Example::
-
- from typing_extensions import Self
-
- class ReturnsSelf:
- def parse(self, data: bytes) -> Self:
- ...
- return self
-
- """
-
- __slots__ = ()
-
- def __instancecheck__(self, obj):
- raise TypeError(f"{self} cannot be used with isinstance().")
-
- def __subclasscheck__(self, cls):
- raise TypeError(f"{self} cannot be used with issubclass().")
-
- Self = _Self(_root=True)
-
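-# Illustrative Self usage, per PEP 673 (editorial sketch; ``Shape`` and
-# ``Circle`` are hypothetical classes):
-#
-#     from typing_extensions import Self
-#
-#     class Shape:
-#         def set_scale(self, scale: float) -> Self:
-#             self.scale = scale
-#             return self
-#
-#     class Circle(Shape):
-#         pass
-#
-#     # Because of Self, the chained call types as Circle rather than Shape.
-#     c: Circle = Circle().set_scale(2.0)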
-
-if hasattr(typing, 'Required'):
- Required = typing.Required
- NotRequired = typing.NotRequired
-elif sys.version_info[:2] >= (3, 9):
- class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- @_ExtensionsSpecialForm
- def Required(self, parameters):
- """A special typing construct to mark a key of a total=False TypedDict
- as required. For example:
-
- class Movie(TypedDict, total=False):
- title: Required[str]
- year: int
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
-
- There is no runtime checking that a required key is actually provided
- when instantiating a related TypedDict.
- """
- item = typing._type_check(parameters, f'{self._name} accepts only a single type')
- return typing._GenericAlias(self, (item,))
-
- @_ExtensionsSpecialForm
- def NotRequired(self, parameters):
- """A special typing construct to mark a key of a TypedDict as
- potentially missing. For example:
-
- class Movie(TypedDict):
- title: str
- year: NotRequired[int]
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
- """
- item = typing._type_check(parameters, f'{self._name} accepts only a single type')
- return typing._GenericAlias(self, (item,))
-
-elif sys.version_info[:2] >= (3, 7):
- class _RequiredForm(typing._SpecialForm, _root=True):
- def __repr__(self):
- return 'typing_extensions.' + self._name
-
- def __getitem__(self, parameters):
- item = typing._type_check(parameters,
- '{} accepts only a single type'.format(self._name))
- return typing._GenericAlias(self, (item,))
-
- Required = _RequiredForm(
- 'Required',
- doc="""A special typing construct to mark a key of a total=False TypedDict
- as required. For example:
-
- class Movie(TypedDict, total=False):
- title: Required[str]
- year: int
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
-
- There is no runtime checking that a required key is actually provided
- when instantiating a related TypedDict.
- """)
- NotRequired = _RequiredForm(
- 'NotRequired',
- doc="""A special typing construct to mark a key of a TypedDict as
- potentially missing. For example:
-
- class Movie(TypedDict):
- title: str
- year: NotRequired[int]
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
- """)
-else:
-# NOTE: Modeled after _Final's implementation when _FinalTypingBase is available
- class _MaybeRequired(typing._FinalTypingBase, _root=True):
- __slots__ = ('__type__',)
-
- def __init__(self, tp=None, **kwds):
- self.__type__ = tp
-
- def __getitem__(self, item):
- cls = type(self)
- if self.__type__ is None:
- return cls(typing._type_check(item,
- '{} accepts only a single type.'.format(cls.__name__[1:])),
- _root=True)
- raise TypeError('{} cannot be further subscripted'
- .format(cls.__name__[1:]))
-
- def _eval_type(self, globalns, localns):
- new_tp = typing._eval_type(self.__type__, globalns, localns)
- if new_tp == self.__type__:
- return self
- return type(self)(new_tp, _root=True)
-
- def __repr__(self):
- r = super().__repr__()
- if self.__type__ is not None:
- r += '[{}]'.format(typing._type_repr(self.__type__))
- return r
-
- def __hash__(self):
- return hash((type(self).__name__, self.__type__))
-
- def __eq__(self, other):
- if not isinstance(other, type(self)):
- return NotImplemented
- if self.__type__ is not None:
- return self.__type__ == other.__type__
- return self is other
-
- class _Required(_MaybeRequired, _root=True):
- """A special typing construct to mark a key of a total=False TypedDict
- as required. For example:
-
- class Movie(TypedDict, total=False):
- title: Required[str]
- year: int
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
-
- There is no runtime checking that a required key is actually provided
- when instantiating a related TypedDict.
- """
-
- class _NotRequired(_MaybeRequired, _root=True):
- """A special typing construct to mark a key of a TypedDict as
- potentially missing. For example:
-
- class Movie(TypedDict):
- title: str
- year: NotRequired[int]
-
- m = Movie(
- title='The Matrix', # typechecker error if key is omitted
- year=1999,
- )
- """
-
- Required = _Required(_root=True)
- NotRequired = _NotRequired(_root=True)
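-
-# Illustrative Required/NotRequired usage (editorial sketch; assumes this file
-# is importable as typing_extensions):
-#
-#     from typing_extensions import Required, TypedDict
-#
-#     class Movie(TypedDict, total=False):
-#         title: Required[str]  # must be present even though total=False
-#         year: int             # may be omitted
-#
-#     m: Movie = {'title': 'The Matrix'}  # OK at runtime and for checkers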
diff --git a/venv/lib/python3.11/site-packages/setuptools/_vendor/zipp.py b/venv/lib/python3.11/site-packages/setuptools/_vendor/zipp.py
deleted file mode 100644
index 26b723c..0000000
--- a/venv/lib/python3.11/site-packages/setuptools/_vendor/zipp.py
+++ /dev/null
@@ -1,329 +0,0 @@
-import io
-import posixpath
-import zipfile
-import itertools
-import contextlib
-import sys
-import pathlib
-
-if sys.version_info < (3, 7):
- from collections import OrderedDict
-else:
- OrderedDict = dict
-
-
-__all__ = ['Path']
-
-
-def _parents(path):
- """
- Given a path with elements separated by
- posixpath.sep, generate all parents of that path.
-
- >>> list(_parents('b/d'))
- ['b']
- >>> list(_parents('/b/d/'))
- ['/b']
- >>> list(_parents('b/d/f/'))
- ['b/d', 'b']
- >>> list(_parents('b'))
- []
- >>> list(_parents(''))
- []
- """
- return itertools.islice(_ancestry(path), 1, None)
-
-
-def _ancestry(path):
- """
- Given a path with elements separated by
- posixpath.sep, generate all elements of that path
-
- >>> list(_ancestry('b/d'))
- ['b/d', 'b']
- >>> list(_ancestry('/b/d/'))
- ['/b/d', '/b']
- >>> list(_ancestry('b/d/f/'))
- ['b/d/f', 'b/d', 'b']
- >>> list(_ancestry('b'))
- ['b']
- >>> list(_ancestry(''))
- []
- """
- path = path.rstrip(posixpath.sep)
- while path and path != posixpath.sep:
- yield path
- path, tail = posixpath.split(path)
-
-
-_dedupe = OrderedDict.fromkeys
-"""Deduplicate an iterable in original order"""
-
-
-def _difference(minuend, subtrahend):
- """
- Return items in minuend not in subtrahend, retaining order
- with O(1) lookup.
- """
- return itertools.filterfalse(set(subtrahend).__contains__, minuend)
-
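-# Quick sketch of the two helpers above:
-#
-#     >>> list(_dedupe(['a', 'b', 'a']))
-#     ['a', 'b']
-#     >>> list(_difference(['a', 'b', 'c'], ['b']))
-#     ['a', 'c']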
-
-class CompleteDirs(zipfile.ZipFile):
- """
- A ZipFile subclass that ensures that implied directories
- are always included in the namelist.
- """
-
- @staticmethod
- def _implied_dirs(names):
- parents = itertools.chain.from_iterable(map(_parents, names))
- as_dirs = (p + posixpath.sep for p in parents)
- return _dedupe(_difference(as_dirs, names))
-
- def namelist(self):
- names = super(CompleteDirs, self).namelist()
- return names + list(self._implied_dirs(names))
-
- def _name_set(self):
- return set(self.namelist())
-
- def resolve_dir(self, name):
- """
- If the name represents a directory, return that name
- as a directory (with the trailing slash).
- """
- names = self._name_set()
- dirname = name + '/'
- dir_match = name not in names and dirname in names
- return dirname if dir_match else name
-
- @classmethod
- def make(cls, source):
- """
- Given a source (filename or zipfile), return an
- appropriate CompleteDirs subclass.
- """
- if isinstance(source, CompleteDirs):
- return source
-
- if not isinstance(source, zipfile.ZipFile):
- return cls(_pathlib_compat(source))
-
- # Only allow for FastLookup when supplied zipfile is read-only
- if 'r' not in source.mode:
- cls = CompleteDirs
-
- source.__class__ = cls
- return source
-
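-# Behavior sketch for CompleteDirs.make (editorial, not part of the module):
-#
-#     import io
-#     import zipfile
-#
-#     buf = io.BytesIO()
-#     with zipfile.ZipFile(buf, 'w') as zf:
-#         zf.writestr('b/c.txt', 'content')
-#     names = CompleteDirs.make(zipfile.ZipFile(buf)).namelist()
-#     # The implied directory 'b/' is listed even though it was never added.
-#     assert 'b/' in names and 'b/c.txt' in names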
-
-class FastLookup(CompleteDirs):
- """
- ZipFile subclass to ensure implicit
- dirs exist and are resolved rapidly.
- """
-
- def namelist(self):
- with contextlib.suppress(AttributeError):
- return self.__names
- self.__names = super(FastLookup, self).namelist()
- return self.__names
-
- def _name_set(self):
- with contextlib.suppress(AttributeError):
- return self.__lookup
- self.__lookup = super(FastLookup, self)._name_set()
- return self.__lookup
-
-
-def _pathlib_compat(path):
- """
- For path-like objects, convert to a filename for compatibility
- on Python 3.6.1 and earlier.
- """
- try:
- return path.__fspath__()
- except AttributeError:
- return str(path)
-
-
-class Path:
- """
- A pathlib-compatible interface for zip files.
-
- Consider a zip file with this structure::
-
- .
- ├── a.txt
- └── b
- ├── c.txt
- └── d
- └── e.txt
-
- >>> data = io.BytesIO()
- >>> zf = zipfile.ZipFile(data, 'w')
- >>> zf.writestr('a.txt', 'content of a')
- >>> zf.writestr('b/c.txt', 'content of c')
- >>> zf.writestr('b/d/e.txt', 'content of e')
- >>> zf.filename = 'mem/abcde.zip'
-
- Path accepts the zipfile object itself or a filename
-
- >>> root = Path(zf)
-
- From there, several path operations are available.
-
- Directory iteration (including the zip file itself):
-
- >>> a, b = root.iterdir()
- >>> a
- Path('mem/abcde.zip', 'a.txt')
- >>> b
- Path('mem/abcde.zip', 'b/')
-
- name property:
-
- >>> b.name
- 'b'
-
- join with divide operator:
-
- >>> c = b / 'c.txt'
- >>> c
- Path('mem/abcde.zip', 'b/c.txt')
- >>> c.name
- 'c.txt'
-
- Read text:
-
- >>> c.read_text()
- 'content of c'
-
- existence:
-
- >>> c.exists()
- True
- >>> (b / 'missing.txt').exists()
- False
-
- Coercion to string:
-
- >>> import os
- >>> str(c).replace(os.sep, posixpath.sep)
- 'mem/abcde.zip/b/c.txt'
-
- At the root, ``name``, ``filename``, and ``parent``
- resolve to the zipfile. Note these attributes are not
- valid and will raise a ``ValueError`` if the zipfile
- has no filename.
-
- >>> root.name
- 'abcde.zip'
- >>> str(root.filename).replace(os.sep, posixpath.sep)
- 'mem/abcde.zip'
- >>> str(root.parent)
- 'mem'
- """
-
- __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
-
- def __init__(self, root, at=""):
- """
- Construct a Path from a ZipFile or filename.
-
- Note: When the source is an existing ZipFile object,
- its type (__class__) will be mutated to a
- specialized type. If the caller wishes to retain the
- original type, the caller should either create a
- separate ZipFile object or pass a filename.
- """
- self.root = FastLookup.make(root)
- self.at = at
-
- def open(self, mode='r', *args, pwd=None, **kwargs):
- """
- Open this entry as text or binary following the semantics
- of ``pathlib.Path.open()`` by passing arguments through
- to io.TextIOWrapper().
- """
- if self.is_dir():
- raise IsADirectoryError(self)
- zip_mode = mode[0]
- if not self.exists() and zip_mode == 'r':
- raise FileNotFoundError(self)
- stream = self.root.open(self.at, zip_mode, pwd=pwd)
- if 'b' in mode:
- if args or kwargs:
- raise ValueError("encoding args invalid for binary operation")
- return stream
- return io.TextIOWrapper(stream, *args, **kwargs)
-
- @property
- def name(self):
- return pathlib.Path(self.at).name or self.filename.name
-
- @property
- def suffix(self):
- return pathlib.Path(self.at).suffix or self.filename.suffix
-
- @property
- def suffixes(self):
- return pathlib.Path(self.at).suffixes or self.filename.suffixes
-
- @property
- def stem(self):
- return pathlib.Path(self.at).stem or self.filename.stem
-
- @property
- def filename(self):
- return pathlib.Path(self.root.filename).joinpath(self.at)
-
- def read_text(self, *args, **kwargs):
- with self.open('r', *args, **kwargs) as strm:
- return strm.read()
-
- def read_bytes(self):
- with self.open('rb') as strm:
- return strm.read()
-
- def _is_child(self, path):
- return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
-
- def _next(self, at):
- return self.__class__(self.root, at)
-
- def is_dir(self):
- return not self.at or self.at.endswith("/")
-
- def is_file(self):
- return self.exists() and not self.is_dir()
-
- def exists(self):
- return self.at in self.root._name_set()
-
- def iterdir(self):
- if not self.is_dir():
- raise ValueError("Can't listdir a file")
- subs = map(self._next, self.root.namelist())
- return filter(self._is_child, subs)
-
- def __str__(self):
- return posixpath.join(self.root.filename, self.at)
-
- def __repr__(self):
- return self.__repr.format(self=self)
-
- def joinpath(self, *other):
- next = posixpath.join(self.at, *map(_pathlib_compat, other))
- return self._next(self.root.resolve_dir(next))
-
- __truediv__ = joinpath
-
- @property
- def parent(self):
- if not self.at:
- return self.filename.parent
- parent_at = posixpath.dirname(self.at.rstrip('/'))
- if parent_at:
- parent_at += '/'
- return self._next(parent_at)