Diffstat (limited to 'venv/lib/python3.11/site-packages/pip/_vendor/pygments')
70 files changed, 10384 insertions, 0 deletions
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py
new file mode 100644
index 0000000..39c84aa
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__init__.py
@@ -0,0 +1,82 @@
+"""
+    Pygments
+    ~~~~~~~~
+
+    Pygments is a syntax highlighting package written in Python.
+
+    It is a generic syntax highlighter for general use in all kinds of software
+    such as forum systems, wikis or other applications that need to prettify
+    source code. Highlights are:
+
+    * a wide range of common languages and markup formats is supported
+    * special attention is paid to details, increasing quality by a fair amount
+    * support for new languages and formats are added easily
+    * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
+      formats that PIL supports, and ANSI sequences
+    * it is usable as a command-line tool and as a library
+    * ... and it highlights even Brainfuck!
+
+    The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
+
+    .. _Pygments master branch:
+       https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+from io import StringIO, BytesIO
+
+__version__ = '2.15.1'
+__docformat__ = 'restructuredtext'
+
+__all__ = ['lex', 'format', 'highlight']
+
+
+def lex(code, lexer):
+    """
+    Lex `code` with the `lexer` (must be a `Lexer` instance)
+    and return an iterable of tokens. Currently, this only calls
+    `lexer.get_tokens()`.
+    """
+    try:
+        return lexer.get_tokens(code)
+    except TypeError:
+        # Heuristic to catch a common mistake.
+        from pip._vendor.pygments.lexer import RegexLexer
+        if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
+            raise TypeError('lex() argument must be a lexer instance, '
+                            'not a class')
+        raise
+
+
+def format(tokens, formatter, outfile=None):  # pylint: disable=redefined-builtin
+    """
+    Format ``tokens`` (an iterable of tokens) with the formatter ``formatter``
+    (a `Formatter` instance).
+
+    If ``outfile`` is given and a valid file object (an object with a
+    ``write`` method), the result will be written to it, otherwise it
+    is returned as a string.
+    """
+    try:
+        if not outfile:
+            realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
+            formatter.format(tokens, realoutfile)
+            return realoutfile.getvalue()
+        else:
+            formatter.format(tokens, outfile)
+    except TypeError:
+        # Heuristic to catch a common mistake.
+        from pip._vendor.pygments.formatter import Formatter
+        if isinstance(formatter, type) and issubclass(formatter, Formatter):
+            raise TypeError('format() argument must be a formatter instance, '
+                            'not a class')
+        raise
+
+
+def highlight(code, lexer, formatter, outfile=None):
+    """
+    This is the most high-level highlighting function. It combines `lex` and
+    `format` in one function.
+    """
+    return format(lex(code, lexer), formatter, outfile)
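A note on the module above: lex(), format() and highlight() are the package's entire top-level API. A minimal usage sketch against this vendored copy (assuming the trimmed lexer/formatter set that pip ships still registers the "python" and "terminal" aliases; with the standalone package, drop the pip._vendor prefix):

    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import get_lexer_by_name
    from pip._vendor.pygments.formatters import get_formatter_by_name

    lexer = get_lexer_by_name('python')
    formatter = get_formatter_by_name('terminal')
    # With outfile omitted, format() buffers into StringIO (or BytesIO if the
    # formatter has an encoding) and returns the result, so this prints
    # ANSI-colored source.
    print(highlight('print("hello")', lexer, formatter))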
+""" + +import sys +from pip._vendor.pygments.cmdline import main + +try: + sys.exit(main(sys.argv)) +except KeyboardInterrupt: + sys.exit(1) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..0474d7c --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/__init__.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..def03ae --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/__main__.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/cmdline.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/cmdline.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..fbb5345 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/cmdline.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..be6fa1f --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/console.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..c1d35cc --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/filter.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..85797c9 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/formatter.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..82db94c --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/lexer.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..b249d6a --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/modeline.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..219e371 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/plugin.cpython-311.pyc diff --git 
a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..50086e2 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/regexopt.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..7f06ea7 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/scanner.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..ef6d989 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/sphinxext.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..236113c --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/style.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..a4ca012 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/token.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..82b34f7 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/unistring.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..994e8c2 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/__pycache__/util.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/cmdline.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/cmdline.py new file mode 100644 index 0000000..eec1775 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/cmdline.py @@ -0,0 +1,668 @@ +""" + pygments.cmdline + ~~~~~~~~~~~~~~~~ + + Command line interface. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +import os +import sys +import shutil +import argparse +from textwrap import dedent + +from pip._vendor.pygments import __version__, highlight +from pip._vendor.pygments.util import ClassNotFound, OptionError, docstring_headline, \ + guess_decode, guess_decode_from_terminal, terminal_encoding, \ + UnclosingTextIOWrapper +from pip._vendor.pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \ + load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename +from pip._vendor.pygments.lexers.special import TextLexer +from pip._vendor.pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter +from pip._vendor.pygments.formatters import get_all_formatters, get_formatter_by_name, \ + load_formatter_from_file, get_formatter_for_filename, find_formatter_class +from pip._vendor.pygments.formatters.terminal import TerminalFormatter +from pip._vendor.pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter +from pip._vendor.pygments.filters import get_all_filters, find_filter_class +from pip._vendor.pygments.styles import get_all_styles, get_style_by_name + + +def _parse_options(o_strs): + opts = {} + if not o_strs: + return opts + for o_str in o_strs: + if not o_str.strip(): + continue + o_args = o_str.split(',') + for o_arg in o_args: + o_arg = o_arg.strip() + try: + o_key, o_val = o_arg.split('=', 1) + o_key = o_key.strip() + o_val = o_val.strip() + except ValueError: + opts[o_arg] = True + else: + opts[o_key] = o_val + return opts + + +def _parse_filters(f_strs): + filters = [] + if not f_strs: + return filters + for f_str in f_strs: + if ':' in f_str: + fname, fopts = f_str.split(':', 1) + filters.append((fname, _parse_options([fopts]))) + else: + filters.append((f_str, {})) + return filters + + +def _print_help(what, name): + try: + if what == 'lexer': + cls = get_lexer_by_name(name) + print("Help on the %s lexer:" % cls.name) + print(dedent(cls.__doc__)) + elif what == 'formatter': + cls = find_formatter_class(name) + print("Help on the %s formatter:" % cls.name) + print(dedent(cls.__doc__)) + elif what == 'filter': + cls = find_filter_class(name) + print("Help on the %s filter:" % name) + print(dedent(cls.__doc__)) + return 0 + except (AttributeError, ValueError): + print("%s not found!" 
% what, file=sys.stderr) + return 1 + + +def _print_list(what): + if what == 'lexer': + print() + print("Lexers:") + print("~~~~~~~") + + info = [] + for fullname, names, exts, _ in get_all_lexers(): + tup = (', '.join(names)+':', fullname, + exts and '(filenames ' + ', '.join(exts) + ')' or '') + info.append(tup) + info.sort() + for i in info: + print(('* %s\n %s %s') % i) + + elif what == 'formatter': + print() + print("Formatters:") + print("~~~~~~~~~~~") + + info = [] + for cls in get_all_formatters(): + doc = docstring_headline(cls) + tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and + '(filenames ' + ', '.join(cls.filenames) + ')' or '') + info.append(tup) + info.sort() + for i in info: + print(('* %s\n %s %s') % i) + + elif what == 'filter': + print() + print("Filters:") + print("~~~~~~~~") + + for name in get_all_filters(): + cls = find_filter_class(name) + print("* " + name + ':') + print(" %s" % docstring_headline(cls)) + + elif what == 'style': + print() + print("Styles:") + print("~~~~~~~") + + for name in get_all_styles(): + cls = get_style_by_name(name) + print("* " + name + ':') + print(" %s" % docstring_headline(cls)) + + +def _print_list_as_json(requested_items): + import json + result = {} + if 'lexer' in requested_items: + info = {} + for fullname, names, filenames, mimetypes in get_all_lexers(): + info[fullname] = { + 'aliases': names, + 'filenames': filenames, + 'mimetypes': mimetypes + } + result['lexers'] = info + + if 'formatter' in requested_items: + info = {} + for cls in get_all_formatters(): + doc = docstring_headline(cls) + info[cls.name] = { + 'aliases': cls.aliases, + 'filenames': cls.filenames, + 'doc': doc + } + result['formatters'] = info + + if 'filter' in requested_items: + info = {} + for name in get_all_filters(): + cls = find_filter_class(name) + info[name] = { + 'doc': docstring_headline(cls) + } + result['filters'] = info + + if 'style' in requested_items: + info = {} + for name in get_all_styles(): + cls = get_style_by_name(name) + info[name] = { + 'doc': docstring_headline(cls) + } + result['styles'] = info + + json.dump(result, sys.stdout) + +def main_inner(parser, argns): + if argns.help: + parser.print_help() + return 0 + + if argns.V: + print('Pygments version %s, (c) 2006-2023 by Georg Brandl, Matthäus ' + 'Chajdas and contributors.' 
% __version__) + return 0 + + def is_only_option(opt): + return not any(v for (k, v) in vars(argns).items() if k != opt) + + # handle ``pygmentize -L`` + if argns.L is not None: + arg_set = set() + for k, v in vars(argns).items(): + if v: + arg_set.add(k) + + arg_set.discard('L') + arg_set.discard('json') + + if arg_set: + parser.print_help(sys.stderr) + return 2 + + # print version + if not argns.json: + main(['', '-V']) + allowed_types = {'lexer', 'formatter', 'filter', 'style'} + largs = [arg.rstrip('s') for arg in argns.L] + if any(arg not in allowed_types for arg in largs): + parser.print_help(sys.stderr) + return 0 + if not largs: + largs = allowed_types + if not argns.json: + for arg in largs: + _print_list(arg) + else: + _print_list_as_json(largs) + return 0 + + # handle ``pygmentize -H`` + if argns.H: + if not is_only_option('H'): + parser.print_help(sys.stderr) + return 2 + what, name = argns.H + if what not in ('lexer', 'formatter', 'filter'): + parser.print_help(sys.stderr) + return 2 + return _print_help(what, name) + + # parse -O options + parsed_opts = _parse_options(argns.O or []) + + # parse -P options + for p_opt in argns.P or []: + try: + name, value = p_opt.split('=', 1) + except ValueError: + parsed_opts[p_opt] = True + else: + parsed_opts[name] = value + + # encodings + inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding')) + outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding')) + + # handle ``pygmentize -N`` + if argns.N: + lexer = find_lexer_class_for_filename(argns.N) + if lexer is None: + lexer = TextLexer + + print(lexer.aliases[0]) + return 0 + + # handle ``pygmentize -C`` + if argns.C: + inp = sys.stdin.buffer.read() + try: + lexer = guess_lexer(inp, inencoding=inencoding) + except ClassNotFound: + lexer = TextLexer + + print(lexer.aliases[0]) + return 0 + + # handle ``pygmentize -S`` + S_opt = argns.S + a_opt = argns.a + if S_opt is not None: + f_opt = argns.f + if not f_opt: + parser.print_help(sys.stderr) + return 2 + if argns.l or argns.INPUTFILE: + parser.print_help(sys.stderr) + return 2 + + try: + parsed_opts['style'] = S_opt + fmter = get_formatter_by_name(f_opt, **parsed_opts) + except ClassNotFound as err: + print(err, file=sys.stderr) + return 1 + + print(fmter.get_style_defs(a_opt or '')) + return 0 + + # if no -S is given, -a is not allowed + if argns.a is not None: + parser.print_help(sys.stderr) + return 2 + + # parse -F options + F_opts = _parse_filters(argns.F or []) + + # -x: allow custom (eXternal) lexers and formatters + allow_custom_lexer_formatter = bool(argns.x) + + # select lexer + lexer = None + + # given by name? 
+ lexername = argns.l + if lexername: + # custom lexer, located relative to user's cwd + if allow_custom_lexer_formatter and '.py' in lexername: + try: + filename = None + name = None + if ':' in lexername: + filename, name = lexername.rsplit(':', 1) + + if '.py' in name: + # This can happen on Windows: If the lexername is + # C:\lexer.py -- return to normal load path in that case + name = None + + if filename and name: + lexer = load_lexer_from_file(filename, name, + **parsed_opts) + else: + lexer = load_lexer_from_file(lexername, **parsed_opts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + else: + try: + lexer = get_lexer_by_name(lexername, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + + # read input code + code = None + + if argns.INPUTFILE: + if argns.s: + print('Error: -s option not usable when input file specified', + file=sys.stderr) + return 2 + + infn = argns.INPUTFILE + try: + with open(infn, 'rb') as infp: + code = infp.read() + except Exception as err: + print('Error: cannot read infile:', err, file=sys.stderr) + return 1 + if not inencoding: + code, inencoding = guess_decode(code) + + # do we have to guess the lexer? + if not lexer: + try: + lexer = get_lexer_for_filename(infn, code, **parsed_opts) + except ClassNotFound as err: + if argns.g: + try: + lexer = guess_lexer(code, **parsed_opts) + except ClassNotFound: + lexer = TextLexer(**parsed_opts) + else: + print('Error:', err, file=sys.stderr) + return 1 + except OptionError as err: + print('Error:', err, file=sys.stderr) + return 1 + + elif not argns.s: # treat stdin as full file (-s support is later) + # read code from terminal, always in binary mode since we want to + # decode ourselves and be tolerant with it + code = sys.stdin.buffer.read() # use .buffer to get a binary stream + if not inencoding: + code, inencoding = guess_decode_from_terminal(code, sys.stdin) + # else the lexer will do the decoding + if not lexer: + try: + lexer = guess_lexer(code, **parsed_opts) + except ClassNotFound: + lexer = TextLexer(**parsed_opts) + + else: # -s option needs a lexer with -l + if not lexer: + print('Error: when using -s a lexer has to be selected with -l', + file=sys.stderr) + return 2 + + # process filters + for fname, fopts in F_opts: + try: + lexer.add_filter(fname, **fopts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + + # select formatter + outfn = argns.o + fmter = argns.f + if fmter: + # custom formatter, located relative to user's cwd + if allow_custom_lexer_formatter and '.py' in fmter: + try: + filename = None + name = None + if ':' in fmter: + # Same logic as above for custom lexer + filename, name = fmter.rsplit(':', 1) + + if '.py' in name: + name = None + + if filename and name: + fmter = load_formatter_from_file(filename, name, + **parsed_opts) + else: + fmter = load_formatter_from_file(fmter, **parsed_opts) + except ClassNotFound as err: + print('Error:', err, file=sys.stderr) + return 1 + else: + try: + fmter = get_formatter_by_name(fmter, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + + if outfn: + if not fmter: + try: + fmter = get_formatter_for_filename(outfn, **parsed_opts) + except (OptionError, ClassNotFound) as err: + print('Error:', err, file=sys.stderr) + return 1 + try: + outfile = open(outfn, 'wb') + except Exception as err: + print('Error: cannot open outfile:', err, file=sys.stderr) + return 
1 + else: + if not fmter: + if os.environ.get('COLORTERM','') in ('truecolor', '24bit'): + fmter = TerminalTrueColorFormatter(**parsed_opts) + elif '256' in os.environ.get('TERM', ''): + fmter = Terminal256Formatter(**parsed_opts) + else: + fmter = TerminalFormatter(**parsed_opts) + outfile = sys.stdout.buffer + + # determine output encoding if not explicitly selected + if not outencoding: + if outfn: + # output file? use lexer encoding for now (can still be None) + fmter.encoding = inencoding + else: + # else use terminal encoding + fmter.encoding = terminal_encoding(sys.stdout) + + # provide coloring under Windows, if possible + if not outfn and sys.platform in ('win32', 'cygwin') and \ + fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover + # unfortunately colorama doesn't support binary streams on Py3 + outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding) + fmter.encoding = None + try: + import pip._vendor.colorama.initialise as colorama_initialise + except ImportError: + pass + else: + outfile = colorama_initialise.wrap_stream( + outfile, convert=None, strip=None, autoreset=False, wrap=True) + + # When using the LaTeX formatter and the option `escapeinside` is + # specified, we need a special lexer which collects escaped text + # before running the chosen language lexer. + escapeinside = parsed_opts.get('escapeinside', '') + if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter): + left = escapeinside[0] + right = escapeinside[1] + lexer = LatexEmbeddedLexer(left, right, lexer) + + # ... and do it! + if not argns.s: + # process whole input as per normal... + try: + highlight(code, lexer, fmter, outfile) + finally: + if outfn: + outfile.close() + return 0 + else: + # line by line processing of stdin (eg: for 'tail -f')... + try: + while 1: + line = sys.stdin.buffer.readline() + if not line: + break + if not inencoding: + line = guess_decode_from_terminal(line, sys.stdin)[0] + highlight(line, lexer, fmter, outfile) + if hasattr(outfile, 'flush'): + outfile.flush() + return 0 + except KeyboardInterrupt: # pragma: no cover + return 0 + finally: + if outfn: + outfile.close() + + +class HelpFormatter(argparse.HelpFormatter): + def __init__(self, prog, indent_increment=2, max_help_position=16, width=None): + if width is None: + try: + width = shutil.get_terminal_size().columns - 2 + except Exception: + pass + argparse.HelpFormatter.__init__(self, prog, indent_increment, + max_help_position, width) + + +def main(args=sys.argv): + """ + Main command line entry point. + """ + desc = "Highlight an input file and write the result to an output file." + parser = argparse.ArgumentParser(description=desc, add_help=False, + formatter_class=HelpFormatter) + + operation = parser.add_argument_group('Main operation') + lexersel = operation.add_mutually_exclusive_group() + lexersel.add_argument( + '-l', metavar='LEXER', + help='Specify the lexer to use. (Query names with -L.) If not ' + 'given and -g is not present, the lexer is guessed from the filename.') + lexersel.add_argument( + '-g', action='store_true', + help='Guess the lexer from the file contents, or pass through ' + 'as plain text if nothing can be guessed.') + operation.add_argument( + '-F', metavar='FILTER[:options]', action='append', + help='Add a filter to the token stream. (Query names with -L.) ' + 'Filter options are given after a colon if necessary.') + operation.add_argument( + '-f', metavar='FORMATTER', + help='Specify the formatter to use. (Query names with -L.) 
' + 'If not given, the formatter is guessed from the output filename, ' + 'and defaults to the terminal formatter if the output is to the ' + 'terminal or an unknown file extension.') + operation.add_argument( + '-O', metavar='OPTION=value[,OPTION=value,...]', action='append', + help='Give options to the lexer and formatter as a comma-separated ' + 'list of key-value pairs. ' + 'Example: `-O bg=light,python=cool`.') + operation.add_argument( + '-P', metavar='OPTION=value', action='append', + help='Give a single option to the lexer and formatter - with this ' + 'you can pass options whose value contains commas and equal signs. ' + 'Example: `-P "heading=Pygments, the Python highlighter"`.') + operation.add_argument( + '-o', metavar='OUTPUTFILE', + help='Where to write the output. Defaults to standard output.') + + operation.add_argument( + 'INPUTFILE', nargs='?', + help='Where to read the input. Defaults to standard input.') + + flags = parser.add_argument_group('Operation flags') + flags.add_argument( + '-v', action='store_true', + help='Print a detailed traceback on unhandled exceptions, which ' + 'is useful for debugging and bug reports.') + flags.add_argument( + '-s', action='store_true', + help='Process lines one at a time until EOF, rather than waiting to ' + 'process the entire file. This only works for stdin, only for lexers ' + 'with no line-spanning constructs, and is intended for streaming ' + 'input such as you get from `tail -f`. ' + 'Example usage: `tail -f sql.log | pygmentize -s -l sql`.') + flags.add_argument( + '-x', action='store_true', + help='Allow custom lexers and formatters to be loaded from a .py file ' + 'relative to the current working directory. For example, ' + '`-l ./customlexer.py -x`. By default, this option expects a file ' + 'with a class named CustomLexer or CustomFormatter; you can also ' + 'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). ' + 'Users should be very careful not to use this option with untrusted ' + 'files, because it will import and run them.') + flags.add_argument('--json', help='Output as JSON. This can ' + 'be only used in conjunction with -L.', + default=False, + action='store_true') + + special_modes_group = parser.add_argument_group( + 'Special modes - do not do any highlighting') + special_modes = special_modes_group.add_mutually_exclusive_group() + special_modes.add_argument( + '-S', metavar='STYLE -f formatter', + help='Print style definitions for STYLE for a formatter ' + 'given with -f. The argument given by -a is formatter ' + 'dependent.') + special_modes.add_argument( + '-L', nargs='*', metavar='WHAT', + help='List lexers, formatters, styles or filters -- ' + 'give additional arguments for the thing(s) you want to list ' + '(e.g. "styles"), or omit them to list everything.') + special_modes.add_argument( + '-N', metavar='FILENAME', + help='Guess and print out a lexer name based solely on the given ' + 'filename. Does not take input or highlight anything. 
If no specific ' + 'lexer can be determined, "text" is printed.') + special_modes.add_argument( + '-C', action='store_true', + help='Like -N, but print out a lexer name based solely on ' + 'a given content from standard input.') + special_modes.add_argument( + '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'), + help='Print detailed help for the object <name> of type <type>, ' + 'where <type> is one of "lexer", "formatter" or "filter".') + special_modes.add_argument( + '-V', action='store_true', + help='Print the package version.') + special_modes.add_argument( + '-h', '--help', action='store_true', + help='Print this help.') + special_modes_group.add_argument( + '-a', metavar='ARG', + help='Formatter-specific additional argument for the -S (print ' + 'style sheet) mode.') + + argns = parser.parse_args(args[1:]) + + try: + return main_inner(parser, argns) + except BrokenPipeError: + # someone closed our stdout, e.g. by quitting a pager. + return 0 + except Exception: + if argns.v: + print(file=sys.stderr) + print('*' * 65, file=sys.stderr) + print('An unhandled exception occurred while highlighting.', + file=sys.stderr) + print('Please report the whole traceback to the issue tracker at', + file=sys.stderr) + print('<https://github.com/pygments/pygments/issues>.', + file=sys.stderr) + print('*' * 65, file=sys.stderr) + print(file=sys.stderr) + raise + import traceback + info = traceback.format_exception(*sys.exc_info()) + msg = info[-1].strip() + if len(info) >= 3: + # extract relevant file and position info + msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:] + print(file=sys.stderr) + print('*** Error while highlighting:', file=sys.stderr) + print(msg, file=sys.stderr) + print('*** If this is a bug you want to report, please rerun with -v.', + file=sys.stderr) + return 1 diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py new file mode 100644 index 0000000..deb4937 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/console.py @@ -0,0 +1,70 @@ +""" + pygments.console + ~~~~~~~~~~~~~~~~ + + Format colored console output. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +esc = "\x1b[" + +codes = {} +codes[""] = "" +codes["reset"] = esc + "39;49;00m" + +codes["bold"] = esc + "01m" +codes["faint"] = esc + "02m" +codes["standout"] = esc + "03m" +codes["underline"] = esc + "04m" +codes["blink"] = esc + "05m" +codes["overline"] = esc + "06m" + +dark_colors = ["black", "red", "green", "yellow", "blue", + "magenta", "cyan", "gray"] +light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue", + "brightmagenta", "brightcyan", "white"] + +x = 30 +for d, l in zip(dark_colors, light_colors): + codes[d] = esc + "%im" % x + codes[l] = esc + "%im" % (60 + x) + x += 1 + +del d, l, x + +codes["white"] = codes["bold"] + + +def reset_color(): + return codes["reset"] + + +def colorize(color_key, text): + return codes[color_key] + text + codes["reset"] + + +def ansiformat(attr, text): + """ + Format ``text`` with a color and/or some attributes:: + + color normal color + *color* bold color + _color_ underlined color + +color+ blinking color + """ + result = [] + if attr[:1] == attr[-1:] == '+': + result.append(codes['blink']) + attr = attr[1:-1] + if attr[:1] == attr[-1:] == '*': + result.append(codes['bold']) + attr = attr[1:-1] + if attr[:1] == attr[-1:] == '_': + result.append(codes['underline']) + attr = attr[1:-1] + result.append(codes[attr]) + result.append(text) + result.append(codes['reset']) + return ''.join(result) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filter.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filter.py new file mode 100644 index 0000000..dafa08d --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filter.py @@ -0,0 +1,71 @@ +""" + pygments.filter + ~~~~~~~~~~~~~~~ + + Module that implements the default filter. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + + +def apply_filters(stream, filters, lexer=None): + """ + Use this method to apply an iterable of filters to + a stream. If lexer is given it's forwarded to the + filter, otherwise the filter receives `None`. + """ + def _apply(filter_, stream): + yield from filter_.filter(lexer, stream) + for filter_ in filters: + stream = _apply(filter_, stream) + return stream + + +def simplefilter(f): + """ + Decorator that converts a function into a filter:: + + @simplefilter + def lowercase(self, lexer, stream, options): + for ttype, value in stream: + yield ttype, value.lower() + """ + return type(f.__name__, (FunctionFilter,), { + '__module__': getattr(f, '__module__'), + '__doc__': f.__doc__, + 'function': f, + }) + + +class Filter: + """ + Default filter. Subclass this class or use the `simplefilter` + decorator to create own filters. + """ + + def __init__(self, **options): + self.options = options + + def filter(self, lexer, stream): + raise NotImplementedError() + + +class FunctionFilter(Filter): + """ + Abstract class used by `simplefilter` to create simple + function filters on the fly. The `simplefilter` decorator + automatically creates subclasses of this class for + functions passed to it. 
+ """ + function = None + + def __init__(self, **options): + if not hasattr(self, 'function'): + raise TypeError('%r used without bound function' % + self.__class__.__name__) + Filter.__init__(self, **options) + + def filter(self, lexer, stream): + # pylint: disable=not-callable + yield from self.function(lexer, stream, self.options) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py new file mode 100644 index 0000000..5aa9ecb --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__init__.py @@ -0,0 +1,940 @@ +""" + pygments.filters + ~~~~~~~~~~~~~~~~ + + Module containing filter lookup functions and default + filters. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \ + string_to_tokentype +from pip._vendor.pygments.filter import Filter +from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \ + get_choice_opt, ClassNotFound, OptionError +from pip._vendor.pygments.plugin import find_plugin_filters + + +def find_filter_class(filtername): + """Lookup a filter by name. Return None if not found.""" + if filtername in FILTERS: + return FILTERS[filtername] + for name, cls in find_plugin_filters(): + if name == filtername: + return cls + return None + + +def get_filter_by_name(filtername, **options): + """Return an instantiated filter. + + Options are passed to the filter initializer if wanted. + Raise a ClassNotFound if not found. + """ + cls = find_filter_class(filtername) + if cls: + return cls(**options) + else: + raise ClassNotFound('filter %r not found' % filtername) + + +def get_all_filters(): + """Return a generator of all filter names.""" + yield from FILTERS + for name, _ in find_plugin_filters(): + yield name + + +def _replace_special(ttype, value, regex, specialttype, + replacefunc=lambda x: x): + last = 0 + for match in regex.finditer(value): + start, end = match.start(), match.end() + if start != last: + yield ttype, value[last:start] + yield specialttype, replacefunc(value[start:end]) + last = end + if last != len(value): + yield ttype, value[last:] + + +class CodeTagFilter(Filter): + """Highlight special code tags in comments and docstrings. + + Options accepted: + + `codetags` : list of strings + A list of strings that are flagged as code tags. The default is to + highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``. + + .. versionchanged:: 2.13 + Now recognizes ``FIXME`` by default. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + tags = get_list_opt(options, 'codetags', + ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE']) + self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([ + re.escape(tag) for tag in tags if tag + ])) + + def filter(self, lexer, stream): + regex = self.tag_re + for ttype, value in stream: + if ttype in String.Doc or \ + ttype in Comment and \ + ttype not in Comment.Preproc: + yield from _replace_special(ttype, value, regex, Comment.Special) + else: + yield ttype, value + + +class SymbolFilter(Filter): + """Convert mathematical symbols such as \\<longrightarrow> in Isabelle + or \\longrightarrow in LaTeX into Unicode characters. + + This is mostly useful for HTML or console output when you want to + approximate the source rendering you'd see in an IDE. 
+ + Options accepted: + + `lang` : string + The symbol language. Must be one of ``'isabelle'`` or + ``'latex'``. The default is ``'isabelle'``. + """ + + latex_symbols = { + '\\alpha' : '\U000003b1', + '\\beta' : '\U000003b2', + '\\gamma' : '\U000003b3', + '\\delta' : '\U000003b4', + '\\varepsilon' : '\U000003b5', + '\\zeta' : '\U000003b6', + '\\eta' : '\U000003b7', + '\\vartheta' : '\U000003b8', + '\\iota' : '\U000003b9', + '\\kappa' : '\U000003ba', + '\\lambda' : '\U000003bb', + '\\mu' : '\U000003bc', + '\\nu' : '\U000003bd', + '\\xi' : '\U000003be', + '\\pi' : '\U000003c0', + '\\varrho' : '\U000003c1', + '\\sigma' : '\U000003c3', + '\\tau' : '\U000003c4', + '\\upsilon' : '\U000003c5', + '\\varphi' : '\U000003c6', + '\\chi' : '\U000003c7', + '\\psi' : '\U000003c8', + '\\omega' : '\U000003c9', + '\\Gamma' : '\U00000393', + '\\Delta' : '\U00000394', + '\\Theta' : '\U00000398', + '\\Lambda' : '\U0000039b', + '\\Xi' : '\U0000039e', + '\\Pi' : '\U000003a0', + '\\Sigma' : '\U000003a3', + '\\Upsilon' : '\U000003a5', + '\\Phi' : '\U000003a6', + '\\Psi' : '\U000003a8', + '\\Omega' : '\U000003a9', + '\\leftarrow' : '\U00002190', + '\\longleftarrow' : '\U000027f5', + '\\rightarrow' : '\U00002192', + '\\longrightarrow' : '\U000027f6', + '\\Leftarrow' : '\U000021d0', + '\\Longleftarrow' : '\U000027f8', + '\\Rightarrow' : '\U000021d2', + '\\Longrightarrow' : '\U000027f9', + '\\leftrightarrow' : '\U00002194', + '\\longleftrightarrow' : '\U000027f7', + '\\Leftrightarrow' : '\U000021d4', + '\\Longleftrightarrow' : '\U000027fa', + '\\mapsto' : '\U000021a6', + '\\longmapsto' : '\U000027fc', + '\\relbar' : '\U00002500', + '\\Relbar' : '\U00002550', + '\\hookleftarrow' : '\U000021a9', + '\\hookrightarrow' : '\U000021aa', + '\\leftharpoondown' : '\U000021bd', + '\\rightharpoondown' : '\U000021c1', + '\\leftharpoonup' : '\U000021bc', + '\\rightharpoonup' : '\U000021c0', + '\\rightleftharpoons' : '\U000021cc', + '\\leadsto' : '\U0000219d', + '\\downharpoonleft' : '\U000021c3', + '\\downharpoonright' : '\U000021c2', + '\\upharpoonleft' : '\U000021bf', + '\\upharpoonright' : '\U000021be', + '\\restriction' : '\U000021be', + '\\uparrow' : '\U00002191', + '\\Uparrow' : '\U000021d1', + '\\downarrow' : '\U00002193', + '\\Downarrow' : '\U000021d3', + '\\updownarrow' : '\U00002195', + '\\Updownarrow' : '\U000021d5', + '\\langle' : '\U000027e8', + '\\rangle' : '\U000027e9', + '\\lceil' : '\U00002308', + '\\rceil' : '\U00002309', + '\\lfloor' : '\U0000230a', + '\\rfloor' : '\U0000230b', + '\\flqq' : '\U000000ab', + '\\frqq' : '\U000000bb', + '\\bot' : '\U000022a5', + '\\top' : '\U000022a4', + '\\wedge' : '\U00002227', + '\\bigwedge' : '\U000022c0', + '\\vee' : '\U00002228', + '\\bigvee' : '\U000022c1', + '\\forall' : '\U00002200', + '\\exists' : '\U00002203', + '\\nexists' : '\U00002204', + '\\neg' : '\U000000ac', + '\\Box' : '\U000025a1', + '\\Diamond' : '\U000025c7', + '\\vdash' : '\U000022a2', + '\\models' : '\U000022a8', + '\\dashv' : '\U000022a3', + '\\surd' : '\U0000221a', + '\\le' : '\U00002264', + '\\ge' : '\U00002265', + '\\ll' : '\U0000226a', + '\\gg' : '\U0000226b', + '\\lesssim' : '\U00002272', + '\\gtrsim' : '\U00002273', + '\\lessapprox' : '\U00002a85', + '\\gtrapprox' : '\U00002a86', + '\\in' : '\U00002208', + '\\notin' : '\U00002209', + '\\subset' : '\U00002282', + '\\supset' : '\U00002283', + '\\subseteq' : '\U00002286', + '\\supseteq' : '\U00002287', + '\\sqsubset' : '\U0000228f', + '\\sqsupset' : '\U00002290', + '\\sqsubseteq' : '\U00002291', + '\\sqsupseteq' : '\U00002292', + '\\cap' : '\U00002229', + 
'\\bigcap' : '\U000022c2', + '\\cup' : '\U0000222a', + '\\bigcup' : '\U000022c3', + '\\sqcup' : '\U00002294', + '\\bigsqcup' : '\U00002a06', + '\\sqcap' : '\U00002293', + '\\Bigsqcap' : '\U00002a05', + '\\setminus' : '\U00002216', + '\\propto' : '\U0000221d', + '\\uplus' : '\U0000228e', + '\\bigplus' : '\U00002a04', + '\\sim' : '\U0000223c', + '\\doteq' : '\U00002250', + '\\simeq' : '\U00002243', + '\\approx' : '\U00002248', + '\\asymp' : '\U0000224d', + '\\cong' : '\U00002245', + '\\equiv' : '\U00002261', + '\\Join' : '\U000022c8', + '\\bowtie' : '\U00002a1d', + '\\prec' : '\U0000227a', + '\\succ' : '\U0000227b', + '\\preceq' : '\U0000227c', + '\\succeq' : '\U0000227d', + '\\parallel' : '\U00002225', + '\\mid' : '\U000000a6', + '\\pm' : '\U000000b1', + '\\mp' : '\U00002213', + '\\times' : '\U000000d7', + '\\div' : '\U000000f7', + '\\cdot' : '\U000022c5', + '\\star' : '\U000022c6', + '\\circ' : '\U00002218', + '\\dagger' : '\U00002020', + '\\ddagger' : '\U00002021', + '\\lhd' : '\U000022b2', + '\\rhd' : '\U000022b3', + '\\unlhd' : '\U000022b4', + '\\unrhd' : '\U000022b5', + '\\triangleleft' : '\U000025c3', + '\\triangleright' : '\U000025b9', + '\\triangle' : '\U000025b3', + '\\triangleq' : '\U0000225c', + '\\oplus' : '\U00002295', + '\\bigoplus' : '\U00002a01', + '\\otimes' : '\U00002297', + '\\bigotimes' : '\U00002a02', + '\\odot' : '\U00002299', + '\\bigodot' : '\U00002a00', + '\\ominus' : '\U00002296', + '\\oslash' : '\U00002298', + '\\dots' : '\U00002026', + '\\cdots' : '\U000022ef', + '\\sum' : '\U00002211', + '\\prod' : '\U0000220f', + '\\coprod' : '\U00002210', + '\\infty' : '\U0000221e', + '\\int' : '\U0000222b', + '\\oint' : '\U0000222e', + '\\clubsuit' : '\U00002663', + '\\diamondsuit' : '\U00002662', + '\\heartsuit' : '\U00002661', + '\\spadesuit' : '\U00002660', + '\\aleph' : '\U00002135', + '\\emptyset' : '\U00002205', + '\\nabla' : '\U00002207', + '\\partial' : '\U00002202', + '\\flat' : '\U0000266d', + '\\natural' : '\U0000266e', + '\\sharp' : '\U0000266f', + '\\angle' : '\U00002220', + '\\copyright' : '\U000000a9', + '\\textregistered' : '\U000000ae', + '\\textonequarter' : '\U000000bc', + '\\textonehalf' : '\U000000bd', + '\\textthreequarters' : '\U000000be', + '\\textordfeminine' : '\U000000aa', + '\\textordmasculine' : '\U000000ba', + '\\euro' : '\U000020ac', + '\\pounds' : '\U000000a3', + '\\yen' : '\U000000a5', + '\\textcent' : '\U000000a2', + '\\textcurrency' : '\U000000a4', + '\\textdegree' : '\U000000b0', + } + + isabelle_symbols = { + '\\<zero>' : '\U0001d7ec', + '\\<one>' : '\U0001d7ed', + '\\<two>' : '\U0001d7ee', + '\\<three>' : '\U0001d7ef', + '\\<four>' : '\U0001d7f0', + '\\<five>' : '\U0001d7f1', + '\\<six>' : '\U0001d7f2', + '\\<seven>' : '\U0001d7f3', + '\\<eight>' : '\U0001d7f4', + '\\<nine>' : '\U0001d7f5', + '\\<A>' : '\U0001d49c', + '\\<B>' : '\U0000212c', + '\\<C>' : '\U0001d49e', + '\\<D>' : '\U0001d49f', + '\\<E>' : '\U00002130', + '\\<F>' : '\U00002131', + '\\<G>' : '\U0001d4a2', + '\\<H>' : '\U0000210b', + '\\<I>' : '\U00002110', + '\\<J>' : '\U0001d4a5', + '\\<K>' : '\U0001d4a6', + '\\<L>' : '\U00002112', + '\\<M>' : '\U00002133', + '\\<N>' : '\U0001d4a9', + '\\<O>' : '\U0001d4aa', + '\\<P>' : '\U0001d4ab', + '\\<Q>' : '\U0001d4ac', + '\\<R>' : '\U0000211b', + '\\<S>' : '\U0001d4ae', + '\\<T>' : '\U0001d4af', + '\\<U>' : '\U0001d4b0', + '\\<V>' : '\U0001d4b1', + '\\<W>' : '\U0001d4b2', + '\\<X>' : '\U0001d4b3', + '\\<Y>' : '\U0001d4b4', + '\\<Z>' : '\U0001d4b5', + '\\<a>' : '\U0001d5ba', + '\\<b>' : '\U0001d5bb', + '\\<c>' : '\U0001d5bc', + 
'\\<d>' : '\U0001d5bd', + '\\<e>' : '\U0001d5be', + '\\<f>' : '\U0001d5bf', + '\\<g>' : '\U0001d5c0', + '\\<h>' : '\U0001d5c1', + '\\<i>' : '\U0001d5c2', + '\\<j>' : '\U0001d5c3', + '\\<k>' : '\U0001d5c4', + '\\<l>' : '\U0001d5c5', + '\\<m>' : '\U0001d5c6', + '\\<n>' : '\U0001d5c7', + '\\<o>' : '\U0001d5c8', + '\\<p>' : '\U0001d5c9', + '\\<q>' : '\U0001d5ca', + '\\<r>' : '\U0001d5cb', + '\\<s>' : '\U0001d5cc', + '\\<t>' : '\U0001d5cd', + '\\<u>' : '\U0001d5ce', + '\\<v>' : '\U0001d5cf', + '\\<w>' : '\U0001d5d0', + '\\<x>' : '\U0001d5d1', + '\\<y>' : '\U0001d5d2', + '\\<z>' : '\U0001d5d3', + '\\<AA>' : '\U0001d504', + '\\<BB>' : '\U0001d505', + '\\<CC>' : '\U0000212d', + '\\<DD>' : '\U0001d507', + '\\<EE>' : '\U0001d508', + '\\<FF>' : '\U0001d509', + '\\<GG>' : '\U0001d50a', + '\\<HH>' : '\U0000210c', + '\\<II>' : '\U00002111', + '\\<JJ>' : '\U0001d50d', + '\\<KK>' : '\U0001d50e', + '\\<LL>' : '\U0001d50f', + '\\<MM>' : '\U0001d510', + '\\<NN>' : '\U0001d511', + '\\<OO>' : '\U0001d512', + '\\<PP>' : '\U0001d513', + '\\<QQ>' : '\U0001d514', + '\\<RR>' : '\U0000211c', + '\\<SS>' : '\U0001d516', + '\\<TT>' : '\U0001d517', + '\\<UU>' : '\U0001d518', + '\\<VV>' : '\U0001d519', + '\\<WW>' : '\U0001d51a', + '\\<XX>' : '\U0001d51b', + '\\<YY>' : '\U0001d51c', + '\\<ZZ>' : '\U00002128', + '\\<aa>' : '\U0001d51e', + '\\<bb>' : '\U0001d51f', + '\\<cc>' : '\U0001d520', + '\\<dd>' : '\U0001d521', + '\\<ee>' : '\U0001d522', + '\\<ff>' : '\U0001d523', + '\\<gg>' : '\U0001d524', + '\\<hh>' : '\U0001d525', + '\\<ii>' : '\U0001d526', + '\\<jj>' : '\U0001d527', + '\\<kk>' : '\U0001d528', + '\\<ll>' : '\U0001d529', + '\\<mm>' : '\U0001d52a', + '\\<nn>' : '\U0001d52b', + '\\<oo>' : '\U0001d52c', + '\\<pp>' : '\U0001d52d', + '\\<qq>' : '\U0001d52e', + '\\<rr>' : '\U0001d52f', + '\\<ss>' : '\U0001d530', + '\\<tt>' : '\U0001d531', + '\\<uu>' : '\U0001d532', + '\\<vv>' : '\U0001d533', + '\\<ww>' : '\U0001d534', + '\\<xx>' : '\U0001d535', + '\\<yy>' : '\U0001d536', + '\\<zz>' : '\U0001d537', + '\\<alpha>' : '\U000003b1', + '\\<beta>' : '\U000003b2', + '\\<gamma>' : '\U000003b3', + '\\<delta>' : '\U000003b4', + '\\<epsilon>' : '\U000003b5', + '\\<zeta>' : '\U000003b6', + '\\<eta>' : '\U000003b7', + '\\<theta>' : '\U000003b8', + '\\<iota>' : '\U000003b9', + '\\<kappa>' : '\U000003ba', + '\\<lambda>' : '\U000003bb', + '\\<mu>' : '\U000003bc', + '\\<nu>' : '\U000003bd', + '\\<xi>' : '\U000003be', + '\\<pi>' : '\U000003c0', + '\\<rho>' : '\U000003c1', + '\\<sigma>' : '\U000003c3', + '\\<tau>' : '\U000003c4', + '\\<upsilon>' : '\U000003c5', + '\\<phi>' : '\U000003c6', + '\\<chi>' : '\U000003c7', + '\\<psi>' : '\U000003c8', + '\\<omega>' : '\U000003c9', + '\\<Gamma>' : '\U00000393', + '\\<Delta>' : '\U00000394', + '\\<Theta>' : '\U00000398', + '\\<Lambda>' : '\U0000039b', + '\\<Xi>' : '\U0000039e', + '\\<Pi>' : '\U000003a0', + '\\<Sigma>' : '\U000003a3', + '\\<Upsilon>' : '\U000003a5', + '\\<Phi>' : '\U000003a6', + '\\<Psi>' : '\U000003a8', + '\\<Omega>' : '\U000003a9', + '\\<bool>' : '\U0001d539', + '\\<complex>' : '\U00002102', + '\\<nat>' : '\U00002115', + '\\<rat>' : '\U0000211a', + '\\<real>' : '\U0000211d', + '\\<int>' : '\U00002124', + '\\<leftarrow>' : '\U00002190', + '\\<longleftarrow>' : '\U000027f5', + '\\<rightarrow>' : '\U00002192', + '\\<longrightarrow>' : '\U000027f6', + '\\<Leftarrow>' : '\U000021d0', + '\\<Longleftarrow>' : '\U000027f8', + '\\<Rightarrow>' : '\U000021d2', + '\\<Longrightarrow>' : '\U000027f9', + '\\<leftrightarrow>' : '\U00002194', + '\\<longleftrightarrow>' : '\U000027f7', + 
'\\<Leftrightarrow>' : '\U000021d4', + '\\<Longleftrightarrow>' : '\U000027fa', + '\\<mapsto>' : '\U000021a6', + '\\<longmapsto>' : '\U000027fc', + '\\<midarrow>' : '\U00002500', + '\\<Midarrow>' : '\U00002550', + '\\<hookleftarrow>' : '\U000021a9', + '\\<hookrightarrow>' : '\U000021aa', + '\\<leftharpoondown>' : '\U000021bd', + '\\<rightharpoondown>' : '\U000021c1', + '\\<leftharpoonup>' : '\U000021bc', + '\\<rightharpoonup>' : '\U000021c0', + '\\<rightleftharpoons>' : '\U000021cc', + '\\<leadsto>' : '\U0000219d', + '\\<downharpoonleft>' : '\U000021c3', + '\\<downharpoonright>' : '\U000021c2', + '\\<upharpoonleft>' : '\U000021bf', + '\\<upharpoonright>' : '\U000021be', + '\\<restriction>' : '\U000021be', + '\\<Colon>' : '\U00002237', + '\\<up>' : '\U00002191', + '\\<Up>' : '\U000021d1', + '\\<down>' : '\U00002193', + '\\<Down>' : '\U000021d3', + '\\<updown>' : '\U00002195', + '\\<Updown>' : '\U000021d5', + '\\<langle>' : '\U000027e8', + '\\<rangle>' : '\U000027e9', + '\\<lceil>' : '\U00002308', + '\\<rceil>' : '\U00002309', + '\\<lfloor>' : '\U0000230a', + '\\<rfloor>' : '\U0000230b', + '\\<lparr>' : '\U00002987', + '\\<rparr>' : '\U00002988', + '\\<lbrakk>' : '\U000027e6', + '\\<rbrakk>' : '\U000027e7', + '\\<lbrace>' : '\U00002983', + '\\<rbrace>' : '\U00002984', + '\\<guillemotleft>' : '\U000000ab', + '\\<guillemotright>' : '\U000000bb', + '\\<bottom>' : '\U000022a5', + '\\<top>' : '\U000022a4', + '\\<and>' : '\U00002227', + '\\<And>' : '\U000022c0', + '\\<or>' : '\U00002228', + '\\<Or>' : '\U000022c1', + '\\<forall>' : '\U00002200', + '\\<exists>' : '\U00002203', + '\\<nexists>' : '\U00002204', + '\\<not>' : '\U000000ac', + '\\<box>' : '\U000025a1', + '\\<diamond>' : '\U000025c7', + '\\<turnstile>' : '\U000022a2', + '\\<Turnstile>' : '\U000022a8', + '\\<tturnstile>' : '\U000022a9', + '\\<TTurnstile>' : '\U000022ab', + '\\<stileturn>' : '\U000022a3', + '\\<surd>' : '\U0000221a', + '\\<le>' : '\U00002264', + '\\<ge>' : '\U00002265', + '\\<lless>' : '\U0000226a', + '\\<ggreater>' : '\U0000226b', + '\\<lesssim>' : '\U00002272', + '\\<greatersim>' : '\U00002273', + '\\<lessapprox>' : '\U00002a85', + '\\<greaterapprox>' : '\U00002a86', + '\\<in>' : '\U00002208', + '\\<notin>' : '\U00002209', + '\\<subset>' : '\U00002282', + '\\<supset>' : '\U00002283', + '\\<subseteq>' : '\U00002286', + '\\<supseteq>' : '\U00002287', + '\\<sqsubset>' : '\U0000228f', + '\\<sqsupset>' : '\U00002290', + '\\<sqsubseteq>' : '\U00002291', + '\\<sqsupseteq>' : '\U00002292', + '\\<inter>' : '\U00002229', + '\\<Inter>' : '\U000022c2', + '\\<union>' : '\U0000222a', + '\\<Union>' : '\U000022c3', + '\\<squnion>' : '\U00002294', + '\\<Squnion>' : '\U00002a06', + '\\<sqinter>' : '\U00002293', + '\\<Sqinter>' : '\U00002a05', + '\\<setminus>' : '\U00002216', + '\\<propto>' : '\U0000221d', + '\\<uplus>' : '\U0000228e', + '\\<Uplus>' : '\U00002a04', + '\\<noteq>' : '\U00002260', + '\\<sim>' : '\U0000223c', + '\\<doteq>' : '\U00002250', + '\\<simeq>' : '\U00002243', + '\\<approx>' : '\U00002248', + '\\<asymp>' : '\U0000224d', + '\\<cong>' : '\U00002245', + '\\<smile>' : '\U00002323', + '\\<equiv>' : '\U00002261', + '\\<frown>' : '\U00002322', + '\\<Join>' : '\U000022c8', + '\\<bowtie>' : '\U00002a1d', + '\\<prec>' : '\U0000227a', + '\\<succ>' : '\U0000227b', + '\\<preceq>' : '\U0000227c', + '\\<succeq>' : '\U0000227d', + '\\<parallel>' : '\U00002225', + '\\<bar>' : '\U000000a6', + '\\<plusminus>' : '\U000000b1', + '\\<minusplus>' : '\U00002213', + '\\<times>' : '\U000000d7', + '\\<div>' : '\U000000f7', + '\\<cdot>' : 
'\U000022c5', + '\\<star>' : '\U000022c6', + '\\<bullet>' : '\U00002219', + '\\<circ>' : '\U00002218', + '\\<dagger>' : '\U00002020', + '\\<ddagger>' : '\U00002021', + '\\<lhd>' : '\U000022b2', + '\\<rhd>' : '\U000022b3', + '\\<unlhd>' : '\U000022b4', + '\\<unrhd>' : '\U000022b5', + '\\<triangleleft>' : '\U000025c3', + '\\<triangleright>' : '\U000025b9', + '\\<triangle>' : '\U000025b3', + '\\<triangleq>' : '\U0000225c', + '\\<oplus>' : '\U00002295', + '\\<Oplus>' : '\U00002a01', + '\\<otimes>' : '\U00002297', + '\\<Otimes>' : '\U00002a02', + '\\<odot>' : '\U00002299', + '\\<Odot>' : '\U00002a00', + '\\<ominus>' : '\U00002296', + '\\<oslash>' : '\U00002298', + '\\<dots>' : '\U00002026', + '\\<cdots>' : '\U000022ef', + '\\<Sum>' : '\U00002211', + '\\<Prod>' : '\U0000220f', + '\\<Coprod>' : '\U00002210', + '\\<infinity>' : '\U0000221e', + '\\<integral>' : '\U0000222b', + '\\<ointegral>' : '\U0000222e', + '\\<clubsuit>' : '\U00002663', + '\\<diamondsuit>' : '\U00002662', + '\\<heartsuit>' : '\U00002661', + '\\<spadesuit>' : '\U00002660', + '\\<aleph>' : '\U00002135', + '\\<emptyset>' : '\U00002205', + '\\<nabla>' : '\U00002207', + '\\<partial>' : '\U00002202', + '\\<flat>' : '\U0000266d', + '\\<natural>' : '\U0000266e', + '\\<sharp>' : '\U0000266f', + '\\<angle>' : '\U00002220', + '\\<copyright>' : '\U000000a9', + '\\<registered>' : '\U000000ae', + '\\<hyphen>' : '\U000000ad', + '\\<inverse>' : '\U000000af', + '\\<onequarter>' : '\U000000bc', + '\\<onehalf>' : '\U000000bd', + '\\<threequarters>' : '\U000000be', + '\\<ordfeminine>' : '\U000000aa', + '\\<ordmasculine>' : '\U000000ba', + '\\<section>' : '\U000000a7', + '\\<paragraph>' : '\U000000b6', + '\\<exclamdown>' : '\U000000a1', + '\\<questiondown>' : '\U000000bf', + '\\<euro>' : '\U000020ac', + '\\<pounds>' : '\U000000a3', + '\\<yen>' : '\U000000a5', + '\\<cent>' : '\U000000a2', + '\\<currency>' : '\U000000a4', + '\\<degree>' : '\U000000b0', + '\\<amalg>' : '\U00002a3f', + '\\<mho>' : '\U00002127', + '\\<lozenge>' : '\U000025ca', + '\\<wp>' : '\U00002118', + '\\<wrong>' : '\U00002240', + '\\<struct>' : '\U000022c4', + '\\<acute>' : '\U000000b4', + '\\<index>' : '\U00000131', + '\\<dieresis>' : '\U000000a8', + '\\<cedilla>' : '\U000000b8', + '\\<hungarumlaut>' : '\U000002dd', + '\\<some>' : '\U000003f5', + '\\<newline>' : '\U000023ce', + '\\<open>' : '\U00002039', + '\\<close>' : '\U0000203a', + '\\<here>' : '\U00002302', + '\\<^sub>' : '\U000021e9', + '\\<^sup>' : '\U000021e7', + '\\<^bold>' : '\U00002759', + '\\<^bsub>' : '\U000021d8', + '\\<^esub>' : '\U000021d9', + '\\<^bsup>' : '\U000021d7', + '\\<^esup>' : '\U000021d6', + } + + lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols} + + def __init__(self, **options): + Filter.__init__(self, **options) + lang = get_choice_opt(options, 'lang', + ['isabelle', 'latex'], 'isabelle') + self.symbols = self.lang_map[lang] + + def filter(self, lexer, stream): + for ttype, value in stream: + if value in self.symbols: + yield ttype, self.symbols[value] + else: + yield ttype, value + + +class KeywordCaseFilter(Filter): + """Convert keywords to lowercase or uppercase or capitalize them, which + means first letter uppercase, rest lowercase. + + This can be useful e.g. if you highlight Pascal code and want to adapt the + code to your styleguide. + + Options accepted: + + `case` : string + The casing to convert keywords to. Must be one of ``'lower'``, + ``'upper'`` or ``'capitalize'``. The default is ``'lower'``. 
+ """ + + def __init__(self, **options): + Filter.__init__(self, **options) + case = get_choice_opt(options, 'case', + ['lower', 'upper', 'capitalize'], 'lower') + self.convert = getattr(str, case) + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype in Keyword: + yield ttype, self.convert(value) + else: + yield ttype, value + + +class NameHighlightFilter(Filter): + """Highlight a normal Name (and Name.*) token with a different token type. + + Example:: + + filter = NameHighlightFilter( + names=['foo', 'bar', 'baz'], + tokentype=Name.Function, + ) + + This would highlight the names "foo", "bar" and "baz" + as functions. `Name.Function` is the default token type. + + Options accepted: + + `names` : list of strings + A list of names that should be given the different token type. + There is no default. + `tokentype` : TokenType or string + A token type or a string containing a token type name that is + used for highlighting the strings in `names`. The default is + `Name.Function`. + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + self.names = set(get_list_opt(options, 'names', [])) + tokentype = options.get('tokentype') + if tokentype: + self.tokentype = string_to_tokentype(tokentype) + else: + self.tokentype = Name.Function + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype in Name and value in self.names: + yield self.tokentype, value + else: + yield ttype, value + + +class ErrorToken(Exception): + pass + + +class RaiseOnErrorTokenFilter(Filter): + """Raise an exception when the lexer generates an error token. + + Options accepted: + + `excclass` : Exception class + The exception class to raise. + The default is `pygments.filters.ErrorToken`. + + .. versionadded:: 0.8 + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + self.exception = options.get('excclass', ErrorToken) + try: + # issubclass() will raise TypeError if first argument is not a class + if not issubclass(self.exception, Exception): + raise TypeError + except TypeError: + raise OptionError('excclass option is not an exception class') + + def filter(self, lexer, stream): + for ttype, value in stream: + if ttype is Error: + raise self.exception(value) + yield ttype, value + + +class VisibleWhitespaceFilter(Filter): + """Convert tabs, newlines and/or spaces to visible characters. + + Options accepted: + + `spaces` : string or bool + If this is a one-character string, spaces will be replaces by this string. + If it is another true value, spaces will be replaced by ``·`` (unicode + MIDDLE DOT). If it is a false value, spaces will not be replaced. The + default is ``False``. + `tabs` : string or bool + The same as for `spaces`, but the default replacement character is ``»`` + (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value + is ``False``. Note: this will not work if the `tabsize` option for the + lexer is nonzero, as tabs will already have been expanded then. + `tabsize` : int + If tabs are to be replaced by this filter (see the `tabs` option), this + is the total number of characters that a tab should be expanded to. + The default is ``8``. + `newlines` : string or bool + The same as for `spaces`, but the default replacement character is ``¶`` + (unicode PILCROW SIGN). The default value is ``False``. + `wstokentype` : bool + If true, give whitespace the special `Whitespace` token type. This allows + styling the visible whitespace differently (e.g. greyed out), but it can + disrupt background colors. 
The default is ``True``. + + .. versionadded:: 0.8 + """ + + def __init__(self, **options): + Filter.__init__(self, **options) + for name, default in [('spaces', '·'), + ('tabs', '»'), + ('newlines', '¶')]: + opt = options.get(name, False) + if isinstance(opt, str) and len(opt) == 1: + setattr(self, name, opt) + else: + setattr(self, name, (opt and default or '')) + tabsize = get_int_opt(options, 'tabsize', 8) + if self.tabs: + self.tabs += ' ' * (tabsize - 1) + if self.newlines: + self.newlines += '\n' + self.wstt = get_bool_opt(options, 'wstokentype', True) + + def filter(self, lexer, stream): + if self.wstt: + spaces = self.spaces or ' ' + tabs = self.tabs or '\t' + newlines = self.newlines or '\n' + regex = re.compile(r'\s') + + def replacefunc(wschar): + if wschar == ' ': + return spaces + elif wschar == '\t': + return tabs + elif wschar == '\n': + return newlines + return wschar + + for ttype, value in stream: + yield from _replace_special(ttype, value, regex, Whitespace, + replacefunc) + else: + spaces, tabs, newlines = self.spaces, self.tabs, self.newlines + # simpler processing + for ttype, value in stream: + if spaces: + value = value.replace(' ', spaces) + if tabs: + value = value.replace('\t', tabs) + if newlines: + value = value.replace('\n', newlines) + yield ttype, value + + +class GobbleFilter(Filter): + """Gobbles source code lines (eats initial characters). + + This filter drops the first ``n`` characters off every line of code. This + may be useful when the source code fed to the lexer is indented by a fixed + amount of space that isn't desired in the output. + + Options accepted: + + `n` : int + The number of characters to gobble. + + .. versionadded:: 1.2 + """ + def __init__(self, **options): + Filter.__init__(self, **options) + self.n = get_int_opt(options, 'n', 0) + + def gobble(self, value, left): + if left < len(value): + return value[left:], 0 + else: + return '', left - len(value) + + def filter(self, lexer, stream): + n = self.n + left = n # How many characters left to gobble. + for ttype, value in stream: + # Remove ``left`` tokens from first line, ``n`` from all others. + parts = value.split('\n') + (parts[0], left) = self.gobble(parts[0], left) + for i in range(1, len(parts)): + (parts[i], left) = self.gobble(parts[i], n) + value = '\n'.join(parts) + + if value != '': + yield ttype, value + + +class TokenMergeFilter(Filter): + """Merges consecutive tokens with the same token type in the output + stream of a lexer. + + .. 
versionadded:: 1.2 + """ + def __init__(self, **options): + Filter.__init__(self, **options) + + def filter(self, lexer, stream): + current_type = None + current_value = None + for ttype, value in stream: + if ttype is current_type: + current_value += value + else: + if current_type is not None: + yield current_type, current_value + current_type = ttype + current_value = value + if current_type is not None: + yield current_type, current_value + + +FILTERS = { + 'codetagify': CodeTagFilter, + 'keywordcase': KeywordCaseFilter, + 'highlight': NameHighlightFilter, + 'raiseonerror': RaiseOnErrorTokenFilter, + 'whitespace': VisibleWhitespaceFilter, + 'gobble': GobbleFilter, + 'tokenmerge': TokenMergeFilter, + 'symbols': SymbolFilter, +} diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..2745141 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/filters/__pycache__/__init__.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py new file mode 100644 index 0000000..3ca4892 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatter.py @@ -0,0 +1,124 @@ +""" + pygments.formatter + ~~~~~~~~~~~~~~~~~~ + + Base formatter class. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import codecs + +from pip._vendor.pygments.util import get_bool_opt +from pip._vendor.pygments.styles import get_style_by_name + +__all__ = ['Formatter'] + + +def _lookup_style(style): + if isinstance(style, str): + return get_style_by_name(style) + return style + + +class Formatter: + """ + Converts a token stream to text. + + Formatters should have attributes to help selecting them. These + are similar to the corresponding :class:`~pygments.lexer.Lexer` + attributes. + + .. autoattribute:: name + :no-value: + + .. autoattribute:: aliases + :no-value: + + .. autoattribute:: filenames + :no-value: + + You can pass options as keyword arguments to the constructor. + All formatters accept these basic options: + + ``style`` + The style to use, can be a string or a Style subclass + (default: "default"). Not used by e.g. the + TerminalFormatter. + ``full`` + Tells the formatter to output a "full" document, i.e. + a complete self-contained document. This doesn't have + any effect for some formatters (default: false). + ``title`` + If ``full`` is true, the title that should be used to + caption the document (default: ''). + ``encoding`` + If given, must be an encoding name. This will be used to + convert the Unicode token strings to byte strings in the + output. If it is "" or None, Unicode strings will be written + to the output file, which most file-like objects do not + support (default: None). + ``outencoding`` + Overrides ``encoding`` if given. + + """ + + #: Full name for the formatter, in human-readable form. + name = None + + #: A list of short, unique identifiers that can be used to lookup + #: the formatter from a list, e.g. using :func:`.get_formatter_by_name()`. + aliases = [] + + #: A list of fnmatch patterns that match filenames for which this + #: formatter can produce output. The patterns in this list should be unique + #: among all formatters.
+ filenames = [] + + #: If True, this formatter outputs Unicode strings when no encoding + #: option is given. + unicodeoutput = True + + def __init__(self, **options): + """ + As with lexers, this constructor takes arbitrary optional arguments, + and if you override it, you should first process your own options, then + call the base class implementation. + """ + self.style = _lookup_style(options.get('style', 'default')) + self.full = get_bool_opt(options, 'full', False) + self.title = options.get('title', '') + self.encoding = options.get('encoding', None) or None + if self.encoding in ('guess', 'chardet'): + # can happen for e.g. pygmentize -O encoding=guess + self.encoding = 'utf-8' + self.encoding = options.get('outencoding') or self.encoding + self.options = options + + def get_style_defs(self, arg=''): + """ + This method must return statements or declarations suitable to define + the current style for subsequent highlighted text (e.g. CSS classes + in the `HTMLFormatter`). + + The optional argument `arg` can be used to modify the generation and + is formatter dependent (it is standardized because it can be given on + the command line). + + This method is called by the ``-S`` :doc:`command-line option <cmdline>`, + the `arg` is then given by the ``-a`` option. + """ + return '' + + def format(self, tokensource, outfile): + """ + This method must format the tokens from the `tokensource` iterable and + write the formatted version to the file object `outfile`. + + Formatter options can control how exactly the tokens are converted. + """ + if self.encoding: + # wrap the outfile in a StreamWriter + outfile = codecs.lookup(self.encoding)[3](outfile) + return self.format_unencoded(tokensource, outfile) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py new file mode 100644 index 0000000..39db842 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__init__.py @@ -0,0 +1,158 @@ +""" + pygments.formatters + ~~~~~~~~~~~~~~~~~~~ + + Pygments formatters. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +import sys +import types +import fnmatch +from os.path import basename + +from pip._vendor.pygments.formatters._mapping import FORMATTERS +from pip._vendor.pygments.plugin import find_plugin_formatters +from pip._vendor.pygments.util import ClassNotFound + +__all__ = ['get_formatter_by_name', 'get_formatter_for_filename', + 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS) + +_formatter_cache = {} # classes by name +_pattern_cache = {} + + +def _fn_matches(fn, glob): + """Return whether the supplied file name fn matches pattern filename.""" + if glob not in _pattern_cache: + pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob)) + return pattern.match(fn) + return _pattern_cache[glob].match(fn) + + +def _load_formatters(module_name): + """Load a formatter (and all others in the module too).""" + mod = __import__(module_name, None, None, ['__all__']) + for formatter_name in mod.__all__: + cls = getattr(mod, formatter_name) + _formatter_cache[cls.name] = cls + + +def get_all_formatters(): + """Return a generator for all formatter classes.""" + # NB: this returns formatter classes, not info like get_all_lexers(). 
+ for info in FORMATTERS.values(): + if info[1] not in _formatter_cache: + _load_formatters(info[0]) + yield _formatter_cache[info[1]] + for _, formatter in find_plugin_formatters(): + yield formatter + + +def find_formatter_class(alias): + """Lookup a formatter by alias. + + Returns None if not found. + """ + for module_name, name, aliases, _, _ in FORMATTERS.values(): + if alias in aliases: + if name not in _formatter_cache: + _load_formatters(module_name) + return _formatter_cache[name] + for _, cls in find_plugin_formatters(): + if alias in cls.aliases: + return cls + + +def get_formatter_by_name(_alias, **options): + """ + Return an instance of a :class:`.Formatter` subclass that has `alias` in its + aliases list. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter with that + alias is found. + """ + cls = find_formatter_class(_alias) + if cls is None: + raise ClassNotFound("no formatter found for name %r" % _alias) + return cls(**options) + + +def load_formatter_from_file(filename, formattername="CustomFormatter", **options): + """ + Return a `Formatter` subclass instance loaded from the provided file, relative + to the current directory. + + The file is expected to contain a Formatter class named ``formattername`` + (by default, CustomFormatter). Users should be very careful with the input, because + this method is equivalent to running ``eval()`` on the input file. The formatter is + given the `options` at its instantiation. + + :exc:`pygments.util.ClassNotFound` is raised if there are any errors loading + the formatter. + + .. versionadded:: 2.2 + """ + try: + # This empty dict will contain the namespace for the exec'd file + custom_namespace = {} + with open(filename, 'rb') as f: + exec(f.read(), custom_namespace) + # Retrieve the class `formattername` from that namespace + if formattername not in custom_namespace: + raise ClassNotFound('no valid %s class found in %s' % + (formattername, filename)) + formatter_class = custom_namespace[formattername] + # And finally instantiate it with the options + return formatter_class(**options) + except OSError as err: + raise ClassNotFound('cannot read %s: %s' % (filename, err)) + except ClassNotFound: + raise + except Exception as err: + raise ClassNotFound('error when loading custom formatter: %s' % err) + + +def get_formatter_for_filename(fn, **options): + """ + Return a :class:`.Formatter` subclass instance that has a filename pattern + matching `fn`. The formatter is given the `options` at its instantiation. + + Will raise :exc:`pygments.util.ClassNotFound` if no formatter for that filename + is found. 
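For example (a sketch; it relies only on the HTML and LaTeX formatters
registered in the ``FORMATTERS`` mapping)::

    >>> get_formatter_for_filename('page.html').name
    'HTML'
    >>> get_formatter_for_filename('paper.tex', style='colorful').name
    'LaTeX'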
+ """ + fn = basename(fn) + for modname, name, _, filenames, _ in FORMATTERS.values(): + for filename in filenames: + if _fn_matches(fn, filename): + if name not in _formatter_cache: + _load_formatters(modname) + return _formatter_cache[name](**options) + for cls in find_plugin_formatters(): + for filename in cls.filenames: + if _fn_matches(fn, filename): + return cls(**options) + raise ClassNotFound("no formatter found for file name %r" % fn) + + +class _automodule(types.ModuleType): + """Automatically import formatters.""" + + def __getattr__(self, name): + info = FORMATTERS.get(name) + if info: + _load_formatters(info[0]) + cls = _formatter_cache[info[1]] + setattr(self, name, cls) + return cls + raise AttributeError(name) + + +oldmod = sys.modules[__name__] +newmod = _automodule(__name__) +newmod.__dict__.update(oldmod.__dict__) +sys.modules[__name__] = newmod +del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..09ad310 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/__init__.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..58e5651 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/_mapping.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..54122d9 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/bbcode.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/groff.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/groff.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..df140ea --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/groff.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/html.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/html.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..35e5f33 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/html.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/img.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/img.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..0585426 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/img.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/irc.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/irc.cpython-311.pyc Binary 
files differ new file mode 100644 index 0000000..54f0ff1 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/irc.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/latex.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/latex.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..a1ad73c --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/latex.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/other.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/other.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..a86645e --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/other.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..6a9968d --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/pangomarkup.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/rtf.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/rtf.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..8dbc539 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/rtf.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/svg.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/svg.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..82d789e --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/svg.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/terminal.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/terminal.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..7d0ea79 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/terminal.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-311.pyc Binary files differ new file mode 100644 index 0000000..3ff54b3 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/__pycache__/terminal256.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/_mapping.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/_mapping.py new file mode 100644 index 0000000..72ca840 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/_mapping.py @@ -0,0 +1,23 @@ +# Automatically generated by scripts/gen_mapfiles.py. +# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
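# The shape of each entry below, illustrated on one element (a descriptive
# sketch only; nothing in this comment is consumed at runtime):
#
#   'HtmlFormatter': (module to load the class from,
#                     human-readable name,
#                     tuple of aliases for lookup by name,
#                     tuple of filename globs,
#                     one-line docstring)
#
# pygments.formatters.find_formatter_class() matches against the aliases,
# while get_formatter_for_filename() matches against the filename globs.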
+ +FORMATTERS = { + 'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your sourcecode with pygments before posting it there.'), + 'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'), + 'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). The ``<div>``'s CSS class can be set by the `cssclass` option."), + 'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'), + 'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'), + 'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'), + 'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'), + 'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'), + 'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'), + 'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'), + 'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'), + 'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'), + 'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. 
Color sequences are terminated at newlines, so that paging the output works correctly.'), + 'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'), + 'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'), +} diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/bbcode.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/bbcode.py new file mode 100644 index 0000000..c4db8f4 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/bbcode.py @@ -0,0 +1,108 @@ +""" + pygments.formatters.bbcode + ~~~~~~~~~~~~~~~~~~~~~~~~~~ + + BBcode formatter. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + + +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.util import get_bool_opt + +__all__ = ['BBCodeFormatter'] + + +class BBCodeFormatter(Formatter): + """ + Format tokens with BBcodes. These formatting codes are used by many + bulletin boards, so you can highlight your sourcecode with pygments before + posting it there. + + This formatter has no support for background colors and borders, as there + are no common BBcode tags for that. + + Some board systems (e.g. phpBB) don't support colors in their [code] tag, + so you can't use the highlighting together with that tag. + Text in a [code] tag usually is shown with a monospace font (which this + formatter can do with the ``monofont`` option), and spaces (which you + need for indentation) are not removed. + + Additional options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `codetag` + If set to true, put the output into ``[code]`` tags (default: + ``false``) + + `monofont` + If set to true, add a tag to show the code with a monospace font + (default: ``false``).
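A short usage sketch (the Python snippet is illustrative, and the imports
assume the standalone ``pygments`` distribution)::

    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import BBCodeFormatter

    # wrap the output in [code] tags and request a monospace font
    fmt = BBCodeFormatter(codetag=True, monofont=True)
    print(highlight('def answer():\n    return 42\n', PythonLexer(), fmt))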
+ """ + name = 'BBCode' + aliases = ['bbcode', 'bb'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + self._code = get_bool_opt(options, 'codetag', False) + self._mono = get_bool_opt(options, 'monofont', False) + + self.styles = {} + self._make_styles() + + def _make_styles(self): + for ttype, ndef in self.style: + start = end = '' + if ndef['color']: + start += '[color=#%s]' % ndef['color'] + end = '[/color]' + end + if ndef['bold']: + start += '[b]' + end = '[/b]' + end + if ndef['italic']: + start += '[i]' + end = '[/i]' + end + if ndef['underline']: + start += '[u]' + end = '[/u]' + end + # there are no common BBcodes for background-color and border + + self.styles[ttype] = start, end + + def format_unencoded(self, tokensource, outfile): + if self._code: + outfile.write('[code]') + if self._mono: + outfile.write('[font=monospace]') + + lastval = '' + lasttype = None + + for ttype, value in tokensource: + while ttype not in self.styles: + ttype = ttype.parent + if ttype == lasttype: + lastval += value + else: + if lastval: + start, end = self.styles[lasttype] + outfile.write(''.join((start, lastval, end))) + lastval = value + lasttype = ttype + + if lastval: + start, end = self.styles[lasttype] + outfile.write(''.join((start, lastval, end))) + + if self._mono: + outfile.write('[/font]') + if self._code: + outfile.write('[/code]') + if self._code or self._mono: + outfile.write('\n') diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py new file mode 100644 index 0000000..30a528e --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/groff.py @@ -0,0 +1,170 @@ +""" + pygments.formatters.groff + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for groff output. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import math +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.util import get_bool_opt, get_int_opt + +__all__ = ['GroffFormatter'] + + +class GroffFormatter(Formatter): + """ + Format tokens with groff escapes to change their color and font style. + + .. versionadded:: 2.11 + + Additional options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `monospaced` + If set to true, monospace font will be used (default: ``true``). + + `linenos` + If set to true, print the line numbers (default: ``false``). + + `wrap` + Wrap lines to the specified number of characters. Disabled if set to 0 + (default: ``0``). 
+ """ + + name = 'groff' + aliases = ['groff','troff','roff'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + + self.monospaced = get_bool_opt(options, 'monospaced', True) + self.linenos = get_bool_opt(options, 'linenos', False) + self._lineno = 0 + self.wrap = get_int_opt(options, 'wrap', 0) + self._linelen = 0 + + self.styles = {} + self._make_styles() + + + def _make_styles(self): + regular = '\\f[CR]' if self.monospaced else '\\f[R]' + bold = '\\f[CB]' if self.monospaced else '\\f[B]' + italic = '\\f[CI]' if self.monospaced else '\\f[I]' + + for ttype, ndef in self.style: + start = end = '' + if ndef['color']: + start += '\\m[%s]' % ndef['color'] + end = '\\m[]' + end + if ndef['bold']: + start += bold + end = regular + end + if ndef['italic']: + start += italic + end = regular + end + if ndef['bgcolor']: + start += '\\M[%s]' % ndef['bgcolor'] + end = '\\M[]' + end + + self.styles[ttype] = start, end + + + def _define_colors(self, outfile): + colors = set() + for _, ndef in self.style: + if ndef['color'] is not None: + colors.add(ndef['color']) + + for color in sorted(colors): + outfile.write('.defcolor ' + color + ' rgb #' + color + '\n') + + + def _write_lineno(self, outfile): + self._lineno += 1 + outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno)) + + + def _wrap_line(self, line): + length = len(line.rstrip('\n')) + space = ' ' if self.linenos else '' + newline = '' + + if length > self.wrap: + for i in range(0, math.floor(length / self.wrap)): + chunk = line[i*self.wrap:i*self.wrap+self.wrap] + newline += (chunk + '\n' + space) + remainder = length % self.wrap + if remainder > 0: + newline += line[-remainder-1:] + self._linelen = remainder + elif self._linelen + length > self.wrap: + newline = ('\n' + space) + line + self._linelen = length + else: + newline = line + self._linelen += length + + return newline + + + def _escape_chars(self, text): + text = text.replace('\\', '\\[u005C]'). \ + replace('.', '\\[char46]'). \ + replace('\'', '\\[u0027]'). \ + replace('`', '\\[u0060]'). \ + replace('~', '\\[u007E]') + copy = text + + for char in copy: + if len(char) != len(char.encode()): + uni = char.encode('unicode_escape') \ + .decode()[1:] \ + .replace('x', 'u00') \ + .upper() + text = text.replace(char, '\\[u' + uni[1:] + ']') + + return text + + + def format_unencoded(self, tokensource, outfile): + self._define_colors(outfile) + + outfile.write('.nf\n\\f[CR]\n') + + if self.linenos: + self._write_lineno(outfile) + + for ttype, value in tokensource: + while ttype not in self.styles: + ttype = ttype.parent + start, end = self.styles[ttype] + + for line in value.splitlines(True): + if self.wrap > 0: + line = self._wrap_line(line) + + if start and end: + text = self._escape_chars(line.rstrip('\n')) + if text != '': + outfile.write(''.join((start, text, end))) + else: + outfile.write(self._escape_chars(line.rstrip('\n'))) + + if line.endswith('\n'): + if self.linenos: + self._write_lineno(outfile) + self._linelen = 0 + else: + outfile.write('\n') + self._linelen = 0 + + outfile.write('\n.fi') diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/html.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/html.py new file mode 100644 index 0000000..931d7c3 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/html.py @@ -0,0 +1,989 @@ +""" + pygments.formatters.html + ~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for HTML output. 
+ + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import functools +import os +import sys +import os.path +from io import StringIO + +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES +from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt + +try: + import ctags +except ImportError: + ctags = None + +__all__ = ['HtmlFormatter'] + + +_escape_html_table = { + ord('&'): '&amp;', + ord('<'): '&lt;', + ord('>'): '&gt;', + ord('"'): '&quot;', + ord("'"): '&#39;', +} + + +def escape_html(text, table=_escape_html_table): + """Escape &, <, > as well as single and double quotes for HTML.""" + return text.translate(table) + + +def webify(color): + if color.startswith('calc') or color.startswith('var'): + return color + else: + return '#' + color + + +def _get_ttype_class(ttype): + fname = STANDARD_TYPES.get(ttype) + if fname: + return fname + aname = '' + while fname is None: + aname = '-' + ttype[-1] + aname + ttype = ttype.parent + fname = STANDARD_TYPES.get(ttype) + return fname + aname + + +CSSFILE_TEMPLATE = '''\ +/* +generated by Pygments <https://pygments.org/> +Copyright 2006-2023 by the Pygments team. +Licensed under the BSD license, see LICENSE for details. +*/ +%(styledefs)s +''' + +DOC_HEADER = '''\ +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" + "http://www.w3.org/TR/html4/strict.dtd"> +<!-- +generated by Pygments <https://pygments.org/> +Copyright 2006-2023 by the Pygments team. +Licensed under the BSD license, see LICENSE for details. +--> +<html> +<head> + <title>%(title)s</title> + <meta http-equiv="content-type" content="text/html; charset=%(encoding)s"> + <style type="text/css"> +''' + CSSFILE_TEMPLATE + ''' + </style> +</head> +<body> +<h2>%(title)s</h2> + +''' + +DOC_HEADER_EXTERNALCSS = '''\ +<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" + "http://www.w3.org/TR/html4/strict.dtd"> + +<html> +<head> + <title>%(title)s</title> + <meta http-equiv="content-type" content="text/html; charset=%(encoding)s"> + <link rel="stylesheet" href="%(cssfile)s" type="text/css"> +</head> +<body> +<h2>%(title)s</h2> + +''' + +DOC_FOOTER = '''\ +</body> +</html> +''' + + +class HtmlFormatter(Formatter): + r""" + Format tokens as HTML 4 ``<span>`` tags. By default, the content is enclosed + in a ``<pre>`` tag, itself wrapped in a ``<div>`` tag (but see the `nowrap` option). + The ``<div>``'s CSS class can be set by the `cssclass` option. + + If the `linenos` option is set to ``"table"``, the ``<pre>`` is + additionally wrapped inside a ``<table>`` which has one row and two + cells: one containing the line numbers and one containing the code. + Example: + + .. sourcecode:: html + + <div class="highlight" > + <table><tr> + <td class="linenos" title="click to toggle" + onclick="with (this.firstChild.style) + { display = (display == '') ? 'none' : '' }"> + <pre>1 + 2</pre> + </td> + <td class="code"> + <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar): + <span class="Ke">pass</span> + </pre> + </td> + </tr></table></div> + + (whitespace added to improve clarity). + + A list of lines can be specified using the `hl_lines` option to make these + lines highlighted (as of Pygments 0.11). + + With the `full` option, a complete HTML 4 document is output, including + the style definitions inside a ``<style>`` tag, or in a separate file if + the `cssfile` option is given.
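For example, a self-contained page plus an external stylesheet can be
produced like this (a sketch; the file names are illustrative, and the
imports assume the standalone ``pygments`` distribution)::

    from pygments import highlight
    from pygments.lexers import PythonLexer

    formatter = HtmlFormatter(full=True, title='demo', cssfile='demo.css')
    with open('demo.html', 'w') as f:
        # demo.css is written next to demo.html
        highlight('print("hi")', PythonLexer(), formatter, outfile=f)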
+ + When `tagsfile` is set to the path of a ctags index file, it is used to + generate hyperlinks from names to their definition. You must enable + `lineanchors` and run ctags with the `-n` option for this to work. The + `python-ctags` module from PyPI must be installed to use this feature; + otherwise a `RuntimeError` will be raised. + + The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string + containing CSS rules for the CSS classes used by the formatter. The + argument `arg` can be used to specify additional CSS selectors that + are prepended to the classes. A call `fmter.get_style_defs('td .code')` + would result in the following CSS classes: + + .. sourcecode:: css + + td .code .kw { font-weight: bold; color: #00FF00 } + td .code .cm { color: #999999 } + ... + + If you have Pygments 0.6 or higher, you can also pass a list or tuple to the + `get_style_defs()` method to request multiple prefixes for the tokens: + + .. sourcecode:: python + + formatter.get_style_defs(['div.syntax pre', 'pre.syntax']) + + The output would then look like this: + + .. sourcecode:: css + + div.syntax pre .kw, + pre.syntax .kw { font-weight: bold; color: #00FF00 } + div.syntax pre .cm, + pre.syntax .cm { color: #999999 } + ... + + Additional options accepted: + + `nowrap` + If set to ``True``, don't add a ``<pre>`` and a ``<div>`` tag + around the tokens. This disables most other options (default: ``False``). + + `full` + Tells the formatter to output a "full" document, i.e. a complete + self-contained document (default: ``False``). + + `title` + If `full` is true, the title that should be used to caption the + document (default: ``''``). + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). This option has no effect if the `cssfile` + and `noclobber_cssfile` option are given and the file specified in + `cssfile` exists. + + `noclasses` + If set to true, token ``<span>`` tags (as well as line number elements) + will not use CSS classes, but inline styles. This is not recommended + for larger pieces of code since it increases output size by quite a bit + (default: ``False``). + + `classprefix` + Since the token types use relatively short class names, they may clash + with some of your own class names. In this case you can use the + `classprefix` option to give a string to prepend to all Pygments-generated + CSS class names for token types. + Note that this option also affects the output of `get_style_defs()`. + + `cssclass` + CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``). + If you set this option, the default selector for `get_style_defs()` + will be this class. + + .. versionadded:: 0.9 + If you select the ``'table'`` line numbers, the wrapping table will + have a CSS class of this string plus ``'table'``, the default is + accordingly ``'highlighttable'``. + + `cssstyles` + Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``). + + `prestyles` + Inline CSS styles for the ``<pre>`` tag (default: ``''``). + + .. versionadded:: 0.11 + + `cssfile` + If the `full` option is true and this option is given, it must be the + name of an external file. If the filename does not include an absolute + path, the file's path will be assumed to be relative to the main output + file's path, if the latter can be found. The stylesheet is then written + to this file instead of the HTML file. + + .. versionadded:: 0.6 + + `noclobber_cssfile` + If `cssfile` is given and the specified file exists, the css file will + not be overwritten. 
This allows the use of the `full` option in + combination with a user specified css file. Default is ``False``. + + .. versionadded:: 1.1 + + `linenos` + If set to ``'table'``, output line numbers as a table with two cells, + one containing the line numbers, the other the whole code. This is + copy-and-paste-friendly, but may cause alignment problems with some + browsers or fonts. If set to ``'inline'``, the line numbers will be + integrated in the ``<pre>`` tag that contains the code (that setting + is *new in Pygments 0.8*). + + For compatibility with Pygments 0.7 and earlier, every true value + except ``'inline'`` means the same as ``'table'`` (in particular, that + means also ``True``). + + The default value is ``False``, which means no line numbers at all. + + **Note:** with the default ("table") line number mechanism, the line + numbers and code can have different line heights in Internet Explorer + unless you give the enclosing ``<pre>`` tags an explicit ``line-height`` + CSS property (you get the default line spacing with ``line-height: + 125%``). + + `hl_lines` + Specify a list of lines to be highlighted. The line numbers are always + relative to the input (i.e. the first line is line 1) and are + independent of `linenostart`. + + .. versionadded:: 0.11 + + `linenostart` + The line number for the first line (default: ``1``). + + `linenostep` + If set to a number n > 1, only every nth line number is printed. + + `linenospecial` + If set to a number n > 0, every nth line number is given the CSS + class ``"special"`` (default: ``0``). + + `nobackground` + If set to ``True``, the formatter won't output the background color + for the wrapping element (this automatically defaults to ``False`` + when there is no wrapping element [e.g. no argument for the + `get_style_defs` method given]) (default: ``False``). + + .. versionadded:: 0.6 + + `lineseparator` + This string is output between lines of code. It defaults to ``"\n"``, + which is enough to break a line inside ``<pre>`` tags, but you can + e.g. set it to ``"<br>"`` to get HTML line breaks. + + .. versionadded:: 0.7 + + `lineanchors` + If set to a nonempty string, e.g. ``foo``, the formatter will wrap each + output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``. + This allows easy linking to certain lines. + + .. versionadded:: 0.9 + + `linespans` + If set to a nonempty string, e.g. ``foo``, the formatter will wrap each + output line in a span tag with an ``id`` of ``foo-linenumber``. + This allows easy access to lines via javascript. + + .. versionadded:: 1.6 + + `anchorlinenos` + If set to `True`, will wrap line numbers in <a> tags. Used in + combination with `linenos` and `lineanchors`. + + `tagsfile` + If set to the path of a ctags file, wrap names in anchor tags that + link to their definitions. `lineanchors` should be used, and the + tags file should specify line numbers (see the `-n` option to ctags). + + .. versionadded:: 1.6 + + `tagurlformat` + A string formatting pattern used to generate links to ctags definitions. + Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`. + Defaults to an empty string, resulting in just `#prefix-number` links. + + .. versionadded:: 1.6 + + `filename` + A string used to generate a filename when rendering ``<pre>`` blocks, + for example if displaying source code. If `linenos` is set to + ``'table'`` then the filename will be rendered in an initial row + containing a single `<th>` which spans both columns. + + ..
versionadded:: 2.1 + + `wrapcode` + Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended + by the HTML5 specification. + + .. versionadded:: 2.4 + + `debug_token_types` + Add ``title`` attributes to all token ``<span>`` tags that show the + name of the token. + + .. versionadded:: 2.10 + + + **Subclassing the HTML formatter** + + .. versionadded:: 0.7 + + The HTML formatter is now built in a way that allows easy subclassing, thus + customizing the output HTML code. The `format()` method calls + `self._format_lines()` which returns a generator that yields tuples of ``(1, + line)``, where the ``1`` indicates that the ``line`` is a line of the + formatted source code. + + If the `nowrap` option is set, the generator is then iterated over and the + resulting HTML is output. + + Otherwise, `format()` calls `self.wrap()`, which wraps the generator with + other generators. These may add some HTML code to the one generated by + `_format_lines()`, either by modifying the lines generated by the latter, + then yielding them again with ``(1, line)``, and/or by yielding other HTML + code before or after the lines, with ``(0, html)``. The distinction between + source lines and other code makes it possible to wrap the generator multiple + times. + + The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag. + + A custom `HtmlFormatter` subclass could look like this: + + .. sourcecode:: python + + class CodeHtmlFormatter(HtmlFormatter): + + def wrap(self, source): + return self._wrap_code(source) + + def _wrap_code(self, source): + yield 0, '<code>' + for i, t in source: + if i == 1: + # it's a line of formatted code + t += '<br>' + yield i, t + yield 0, '</code>' + + This results in wrapping the formatted lines with a ``<code>`` tag, where the + source lines are broken using ``<br>`` tags. + + After calling `wrap()`, the `format()` method also adds the "line numbers" + and/or "full document" wrappers if the respective options are set. Then, all + HTML yielded by the wrapped generator is output.
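A short end-to-end sketch tying the common options together (the selector
string, source text and highlighted line are illustrative; the imports
assume the standalone ``pygments`` distribution)::

    from pygments import highlight
    from pygments.lexers import PythonLexer

    formatter = HtmlFormatter(linenos='table', hl_lines=[2],
                              cssclass='source')
    html = highlight('a = 1\nb = 2\n', PythonLexer(), formatter)
    css = formatter.get_style_defs('.source')  # rules scoped to .source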
+ """ + + name = 'HTML' + aliases = ['html'] + filenames = ['*.html', '*.htm'] + + def __init__(self, **options): + Formatter.__init__(self, **options) + self.title = self._decodeifneeded(self.title) + self.nowrap = get_bool_opt(options, 'nowrap', False) + self.noclasses = get_bool_opt(options, 'noclasses', False) + self.classprefix = options.get('classprefix', '') + self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight')) + self.cssstyles = self._decodeifneeded(options.get('cssstyles', '')) + self.prestyles = self._decodeifneeded(options.get('prestyles', '')) + self.cssfile = self._decodeifneeded(options.get('cssfile', '')) + self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False) + self.tagsfile = self._decodeifneeded(options.get('tagsfile', '')) + self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', '')) + self.filename = self._decodeifneeded(options.get('filename', '')) + self.wrapcode = get_bool_opt(options, 'wrapcode', False) + self.span_element_openers = {} + self.debug_token_types = get_bool_opt(options, 'debug_token_types', False) + + if self.tagsfile: + if not ctags: + raise RuntimeError('The "ctags" package must to be installed ' + 'to be able to use the "tagsfile" feature.') + self._ctags = ctags.CTags(self.tagsfile) + + linenos = options.get('linenos', False) + if linenos == 'inline': + self.linenos = 2 + elif linenos: + # compatibility with <= 0.7 + self.linenos = 1 + else: + self.linenos = 0 + self.linenostart = abs(get_int_opt(options, 'linenostart', 1)) + self.linenostep = abs(get_int_opt(options, 'linenostep', 1)) + self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0)) + self.nobackground = get_bool_opt(options, 'nobackground', False) + self.lineseparator = options.get('lineseparator', '\n') + self.lineanchors = options.get('lineanchors', '') + self.linespans = options.get('linespans', '') + self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False) + self.hl_lines = set() + for lineno in get_list_opt(options, 'hl_lines', []): + try: + self.hl_lines.add(int(lineno)) + except ValueError: + pass + + self._create_stylesheet() + + def _get_css_class(self, ttype): + """Return the css class of this token type prefixed with + the classprefix option.""" + ttypeclass = _get_ttype_class(ttype) + if ttypeclass: + return self.classprefix + ttypeclass + return '' + + def _get_css_classes(self, ttype): + """Return the CSS classes of this token type prefixed with the classprefix option.""" + cls = self._get_css_class(ttype) + while ttype not in STANDARD_TYPES: + ttype = ttype.parent + cls = self._get_css_class(ttype) + ' ' + cls + return cls or '' + + def _get_css_inline_styles(self, ttype): + """Return the inline CSS styles for this token type.""" + cclass = self.ttype2class.get(ttype) + while cclass is None: + ttype = ttype.parent + cclass = self.ttype2class.get(ttype) + return cclass or '' + + def _create_stylesheet(self): + t2c = self.ttype2class = {Token: ''} + c2s = self.class2style = {} + for ttype, ndef in self.style: + name = self._get_css_class(ttype) + style = '' + if ndef['color']: + style += 'color: %s; ' % webify(ndef['color']) + if ndef['bold']: + style += 'font-weight: bold; ' + if ndef['italic']: + style += 'font-style: italic; ' + if ndef['underline']: + style += 'text-decoration: underline; ' + if ndef['bgcolor']: + style += 'background-color: %s; ' % webify(ndef['bgcolor']) + if ndef['border']: + style += 'border: 1px solid %s; ' % webify(ndef['border']) + if style: + t2c[ttype] = name + # save 
len(ttype) to enable ordering the styles by + # hierarchy (necessary for CSS cascading rules!) + c2s[name] = (style[:-2], ttype, len(ttype)) + + def get_style_defs(self, arg=None): + """ + Return CSS style definitions for the classes produced by the current + highlighting style. ``arg`` can be a string or list of selectors to + insert before the token type classes. + """ + style_lines = [] + + style_lines.extend(self.get_linenos_style_defs()) + style_lines.extend(self.get_background_style_defs(arg)) + style_lines.extend(self.get_token_style_defs(arg)) + + return '\n'.join(style_lines) + + def get_token_style_defs(self, arg=None): + prefix = self.get_css_prefix(arg) + + styles = [ + (level, ttype, cls, style) + for cls, (style, ttype, level) in self.class2style.items() + if cls and style + ] + styles.sort() + + lines = [ + '%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:]) + for (level, ttype, cls, style) in styles + ] + + return lines + + def get_background_style_defs(self, arg=None): + prefix = self.get_css_prefix(arg) + bg_color = self.style.background_color + hl_color = self.style.highlight_color + + lines = [] + + if arg and not self.nobackground and bg_color is not None: + text_style = '' + if Text in self.ttype2class: + text_style = ' ' + self.class2style[self.ttype2class[Text]][0] + lines.insert( + 0, '%s{ background: %s;%s }' % ( + prefix(''), bg_color, text_style + ) + ) + if hl_color is not None: + lines.insert( + 0, '%s { background-color: %s }' % (prefix('hll'), hl_color) + ) + + return lines + + def get_linenos_style_defs(self): + lines = [ + 'pre { %s }' % self._pre_style, + 'td.linenos .normal { %s }' % self._linenos_style, + 'span.linenos { %s }' % self._linenos_style, + 'td.linenos .special { %s }' % self._linenos_special_style, + 'span.linenos.special { %s }' % self._linenos_special_style, + ] + + return lines + + def get_css_prefix(self, arg): + if arg is None: + arg = ('cssclass' in self.options and '.'+self.cssclass or '') + if isinstance(arg, str): + args = [arg] + else: + args = list(arg) + + def prefix(cls): + if cls: + cls = '.' + cls + tmp = [] + for arg in args: + tmp.append((arg and arg + ' ' or '') + cls) + return ', '.join(tmp) + + return prefix + + @property + def _pre_style(self): + return 'line-height: 125%;' + + @property + def _linenos_style(self): + return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % ( + self.style.line_number_color, + self.style.line_number_background_color + ) + + @property + def _linenos_special_style(self): + return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % ( + self.style.line_number_special_color, + self.style.line_number_special_background_color + ) + + def _decodeifneeded(self, value): + if isinstance(value, bytes): + if self.encoding: + return value.decode(self.encoding) + return value.decode() + return value + + def _wrap_full(self, inner, outfile): + if self.cssfile: + if os.path.isabs(self.cssfile): + # it's an absolute filename + cssfilename = self.cssfile + else: + try: + filename = outfile.name + if not filename or filename[0] == '<': + # pseudo files, e.g. name == '<fdopen>' + raise AttributeError + cssfilename = os.path.join(os.path.dirname(filename), + self.cssfile) + except AttributeError: + print('Note: Cannot determine output file name, ' + 'using current directory as base for the CSS file name', + file=sys.stderr) + cssfilename = self.cssfile + # write CSS file only if noclobber_cssfile isn't given as an option. 
+ try: + if not os.path.exists(cssfilename) or not self.noclobber_cssfile: + with open(cssfilename, "w", encoding="utf-8") as cf: + cf.write(CSSFILE_TEMPLATE % + {'styledefs': self.get_style_defs('body')}) + except OSError as err: + err.strerror = 'Error writing CSS file: ' + err.strerror + raise + + yield 0, (DOC_HEADER_EXTERNALCSS % + dict(title=self.title, + cssfile=self.cssfile, + encoding=self.encoding)) + else: + yield 0, (DOC_HEADER % + dict(title=self.title, + styledefs=self.get_style_defs('body'), + encoding=self.encoding)) + + yield from inner + yield 0, DOC_FOOTER + + def _wrap_tablelinenos(self, inner): + dummyoutfile = StringIO() + lncount = 0 + for t, line in inner: + if t: + lncount += 1 + dummyoutfile.write(line) + + fl = self.linenostart + mw = len(str(lncount + fl - 1)) + sp = self.linenospecial + st = self.linenostep + anchor_name = self.lineanchors or self.linespans + aln = self.anchorlinenos + nocls = self.noclasses + + lines = [] + + for i in range(fl, fl+lncount): + print_line = i % st == 0 + special_line = sp and i % sp == 0 + + if print_line: + line = '%*d' % (mw, i) + if aln: + line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line) + else: + line = ' ' * mw + + if nocls: + if special_line: + style = ' style="%s"' % self._linenos_special_style + else: + style = ' style="%s"' % self._linenos_style + else: + if special_line: + style = ' class="special"' + else: + style = ' class="normal"' + + if style: + line = '<span%s>%s</span>' % (style, line) + + lines.append(line) + + ls = '\n'.join(lines) + + # If a filename was specified, we can't put it into the code table as it + # would misalign the line numbers. Hence we emit a separate row for it. + filename_tr = "" + if self.filename: + filename_tr = ( + '<tr><th colspan="2" class="filename">' + '<span class="filename">' + self.filename + '</span>' + '</th></tr>') + + # in case you wonder about the seemingly redundant <div> here: since the + # content in the other cell also is wrapped in a div, some browsers in + # some configurations seem to mess up the formatting... 
+ yield 0, (f'<table class="{self.cssclass}table">' + filename_tr + + '<tr><td class="linenos"><div class="linenodiv"><pre>' + + ls + '</pre></div></td><td class="code">') + yield 0, '<div>' + yield 0, dummyoutfile.getvalue() + yield 0, '</div>' + yield 0, '</td></tr></table>' + + + def _wrap_inlinelinenos(self, inner): + # need a list of lines since we need the width of a single number :( + inner_lines = list(inner) + sp = self.linenospecial + st = self.linenostep + num = self.linenostart + mw = len(str(len(inner_lines) + num - 1)) + anchor_name = self.lineanchors or self.linespans + aln = self.anchorlinenos + nocls = self.noclasses + + for _, inner_line in inner_lines: + print_line = num % st == 0 + special_line = sp and num % sp == 0 + + if print_line: + line = '%*d' % (mw, num) + else: + line = ' ' * mw + + if nocls: + if special_line: + style = ' style="%s"' % self._linenos_special_style + else: + style = ' style="%s"' % self._linenos_style + else: + if special_line: + style = ' class="linenos special"' + else: + style = ' class="linenos"' + + if style: + linenos = '<span%s>%s</span>' % (style, line) + else: + linenos = line + + if aln: + yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) + + inner_line) + else: + yield 1, linenos + inner_line + num += 1 + + def _wrap_lineanchors(self, inner): + s = self.lineanchors + # subtract 1 since we have to increment i *before* yielding + i = self.linenostart - 1 + for t, line in inner: + if t: + i += 1 + href = "" if self.linenos else ' href="#%s-%d"' % (s, i) + yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line + else: + yield 0, line + + def _wrap_linespans(self, inner): + s = self.linespans + i = self.linenostart - 1 + for t, line in inner: + if t: + i += 1 + yield 1, '<span id="%s-%d">%s</span>' % (s, i, line) + else: + yield 0, line + + def _wrap_div(self, inner): + style = [] + if (self.noclasses and not self.nobackground and + self.style.background_color is not None): + style.append('background: %s' % (self.style.background_color,)) + if self.cssstyles: + style.append(self.cssstyles) + style = '; '.join(style) + + yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) + + (style and (' style="%s"' % style)) + '>') + yield from inner + yield 0, '</div>\n' + + def _wrap_pre(self, inner): + style = [] + if self.prestyles: + style.append(self.prestyles) + if self.noclasses: + style.append(self._pre_style) + style = '; '.join(style) + + if self.filename and self.linenos != 1: + yield 0, ('<span class="filename">' + self.filename + '</span>') + + # the empty span here is to keep leading empty lines from being + # ignored by HTML parsers + yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>') + yield from inner + yield 0, '</pre>' + + def _wrap_code(self, inner): + yield 0, '<code>' + yield from inner + yield 0, '</code>' + + @functools.lru_cache(maxsize=100) + def _translate_parts(self, value): + """HTML-escape a value and split it by newlines.""" + return value.translate(_escape_html_table).split('\n') + + def _format_lines(self, tokensource): + """ + Just format the tokens, without any wrapping tags. + Yield individual lines. 
+ """ + nocls = self.noclasses + lsep = self.lineseparator + tagsfile = self.tagsfile + + lspan = '' + line = [] + for ttype, value in tokensource: + try: + cspan = self.span_element_openers[ttype] + except KeyError: + title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else '' + if nocls: + css_style = self._get_css_inline_styles(ttype) + if css_style: + css_style = self.class2style[css_style][0] + cspan = '<span style="%s"%s>' % (css_style, title) + else: + cspan = '' + else: + css_class = self._get_css_classes(ttype) + if css_class: + cspan = '<span class="%s"%s>' % (css_class, title) + else: + cspan = '' + self.span_element_openers[ttype] = cspan + + parts = self._translate_parts(value) + + if tagsfile and ttype in Token.Name: + filename, linenumber = self._lookup_ctag(value) + if linenumber: + base, filename = os.path.split(filename) + if base: + base += '/' + filename, extension = os.path.splitext(filename) + url = self.tagurlformat % {'path': base, 'fname': filename, + 'fext': extension} + parts[0] = "<a href=\"%s#%s-%d\">%s" % \ + (url, self.lineanchors, linenumber, parts[0]) + parts[-1] = parts[-1] + "</a>" + + # for all but the last line + for part in parts[:-1]: + if line: + # Also check for part being non-empty, so we avoid creating + # empty <span> tags + if lspan != cspan and part: + line.extend(((lspan and '</span>'), cspan, part, + (cspan and '</span>'), lsep)) + else: # both are the same, or the current part was empty + line.extend((part, (lspan and '</span>'), lsep)) + yield 1, ''.join(line) + line = [] + elif part: + yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep)) + else: + yield 1, lsep + # for the last line + if line and parts[-1]: + if lspan != cspan: + line.extend(((lspan and '</span>'), cspan, parts[-1])) + lspan = cspan + else: + line.append(parts[-1]) + elif parts[-1]: + line = [cspan, parts[-1]] + lspan = cspan + # else we neither have to open a new span nor set lspan + + if line: + line.extend(((lspan and '</span>'), lsep)) + yield 1, ''.join(line) + + def _lookup_ctag(self, token): + entry = ctags.TagEntry() + if self._ctags.find(entry, token.encode(), 0): + return entry['file'], entry['lineNumber'] + else: + return None, None + + def _highlight_lines(self, tokensource): + """ + Highlighted the lines specified in the `hl_lines` option by + post-processing the token stream coming from `_format_lines`. + """ + hls = self.hl_lines + + for i, (t, value) in enumerate(tokensource): + if t != 1: + yield t, value + if i + 1 in hls: # i + 1 because Python indexes start at 0 + if self.noclasses: + style = '' + if self.style.highlight_color is not None: + style = (' style="background-color: %s"' % + (self.style.highlight_color,)) + yield 1, '<span%s>%s</span>' % (style, value) + else: + yield 1, '<span class="hll">%s</span>' % value + else: + yield 1, value + + def wrap(self, source): + """ + Wrap the ``source``, which is a generator yielding + individual lines, in custom generators. See docstring + for `format`. Can be overridden. + """ + + output = source + if self.wrapcode: + output = self._wrap_code(output) + + output = self._wrap_pre(output) + + return output + + def format_unencoded(self, tokensource, outfile): + """ + The formatting process uses several nested generators; which of + them are used is determined by the user's options. + + Each generator should take at least one argument, ``inner``, + and wrap the pieces of text generated by this. + + Always yield 2-tuples: (code, text). 
If "code" is 1, the text + is part of the original tokensource being highlighted, if it's + 0, the text is some piece of wrapping. This makes it possible to + use several different wrappers that process the original source + linewise, e.g. line number generators. + """ + source = self._format_lines(tokensource) + + # As a special case, we wrap line numbers before line highlighting + # so the line numbers get wrapped in the highlighting tag. + if not self.nowrap and self.linenos == 2: + source = self._wrap_inlinelinenos(source) + + if self.hl_lines: + source = self._highlight_lines(source) + + if not self.nowrap: + if self.lineanchors: + source = self._wrap_lineanchors(source) + if self.linespans: + source = self._wrap_linespans(source) + source = self.wrap(source) + if self.linenos == 1: + source = self._wrap_tablelinenos(source) + source = self._wrap_div(source) + if self.full: + source = self._wrap_full(source, outfile) + + for t, piece in source: + outfile.write(piece) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py new file mode 100644 index 0000000..a338c15 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/img.py @@ -0,0 +1,645 @@ +""" + pygments.formatters.img + ~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for Pixmap output. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import os +import sys + +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ + get_choice_opt + +import subprocess + +# Import this carefully +try: + from PIL import Image, ImageDraw, ImageFont + pil_available = True +except ImportError: + pil_available = False + +try: + import _winreg +except ImportError: + try: + import winreg as _winreg + except ImportError: + _winreg = None + +__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter', + 'BmpImageFormatter'] + + +# For some unknown reason every font calls it something different +STYLES = { + 'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'], + 'ITALIC': ['Oblique', 'Italic'], + 'BOLD': ['Bold'], + 'BOLDITALIC': ['Bold Oblique', 'Bold Italic'], +} + +# A sane default for modern systems +DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono' +DEFAULT_FONT_NAME_WIN = 'Courier New' +DEFAULT_FONT_NAME_MAC = 'Menlo' + + +class PilNotAvailable(ImportError): + """When Python imaging library is not available""" + + +class FontNotFound(Exception): + """When there are no usable fonts specified""" + + +class FontManager: + """ + Manages a set of fonts: normal, italic, bold, etc... 
+ """ + + def __init__(self, font_name, font_size=14): + self.font_name = font_name + self.font_size = font_size + self.fonts = {} + self.encoding = None + if sys.platform.startswith('win'): + if not font_name: + self.font_name = DEFAULT_FONT_NAME_WIN + self._create_win() + elif sys.platform.startswith('darwin'): + if not font_name: + self.font_name = DEFAULT_FONT_NAME_MAC + self._create_mac() + else: + if not font_name: + self.font_name = DEFAULT_FONT_NAME_NIX + self._create_nix() + + def _get_nix_font_path(self, name, style): + proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'], + stdout=subprocess.PIPE, stderr=None) + stdout, _ = proc.communicate() + if proc.returncode == 0: + lines = stdout.splitlines() + for line in lines: + if line.startswith(b'Fontconfig warning:'): + continue + path = line.decode().strip().strip(':') + if path: + return path + return None + + def _create_nix(self): + for name in STYLES['NORMAL']: + path = self._get_nix_font_path(self.font_name, name) + if path is not None: + self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) + break + else: + raise FontNotFound('No usable fonts named: "%s"' % + self.font_name) + for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): + for stylename in STYLES[style]: + path = self._get_nix_font_path(self.font_name, stylename) + if path is not None: + self.fonts[style] = ImageFont.truetype(path, self.font_size) + break + else: + if style == 'BOLDITALIC': + self.fonts[style] = self.fonts['BOLD'] + else: + self.fonts[style] = self.fonts['NORMAL'] + + def _get_mac_font_path(self, font_map, name, style): + return font_map.get((name + ' ' + style).strip().lower()) + + def _create_mac(self): + font_map = {} + for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'), + '/Library/Fonts/', '/System/Library/Fonts/'): + font_map.update( + (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f)) + for f in os.listdir(font_dir) + if f.lower().endswith(('ttf', 'ttc'))) + + for name in STYLES['NORMAL']: + path = self._get_mac_font_path(font_map, self.font_name, name) + if path is not None: + self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) + break + else: + raise FontNotFound('No usable fonts named: "%s"' % + self.font_name) + for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): + for stylename in STYLES[style]: + path = self._get_mac_font_path(font_map, self.font_name, stylename) + if path is not None: + self.fonts[style] = ImageFont.truetype(path, self.font_size) + break + else: + if style == 'BOLDITALIC': + self.fonts[style] = self.fonts['BOLD'] + else: + self.fonts[style] = self.fonts['NORMAL'] + + def _lookup_win(self, key, basename, styles, fail=False): + for suffix in ('', ' (TrueType)'): + for style in styles: + try: + valname = '%s%s%s' % (basename, style and ' '+style, suffix) + val, _ = _winreg.QueryValueEx(key, valname) + return val + except OSError: + continue + else: + if fail: + raise FontNotFound('Font %s (%s) not found in registry' % + (basename, styles[0])) + return None + + def _create_win(self): + lookuperror = None + keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'), + (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'), + (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'), + (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ] + for keyname in keynames: + try: + key = _winreg.OpenKey(*keyname) + try: + path = self._lookup_win(key, 
self.font_name, STYLES['NORMAL'], True) + self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size) + for style in ('ITALIC', 'BOLD', 'BOLDITALIC'): + path = self._lookup_win(key, self.font_name, STYLES[style]) + if path: + self.fonts[style] = ImageFont.truetype(path, self.font_size) + else: + if style == 'BOLDITALIC': + self.fonts[style] = self.fonts['BOLD'] + else: + self.fonts[style] = self.fonts['NORMAL'] + return + except FontNotFound as err: + lookuperror = err + finally: + _winreg.CloseKey(key) + except OSError: + pass + else: + # If we get here, we checked all registry keys and had no luck + # We can be in one of two situations now: + # * All key lookups failed. In this case lookuperror is None and we + # will raise a generic error + # * At least one lookup failed with a FontNotFound error. In this + # case, we will raise that as a more specific error + if lookuperror: + raise lookuperror + raise FontNotFound('Can\'t open Windows font registry key') + + def get_char_size(self): + """ + Get the character size. + """ + return self.get_text_size('M') + + def get_text_size(self, text): + """ + Get the text size (width, height). + """ + font = self.fonts['NORMAL'] + if hasattr(font, 'getbbox'): # Pillow >= 9.2.0 + return font.getbbox(text)[2:4] + else: + return font.getsize(text) + + def get_font(self, bold, oblique): + """ + Get the font based on bold and italic flags. + """ + if bold and oblique: + return self.fonts['BOLDITALIC'] + elif bold: + return self.fonts['BOLD'] + elif oblique: + return self.fonts['ITALIC'] + else: + return self.fonts['NORMAL'] + + +class ImageFormatter(Formatter): + """ + Create a PNG image from source code. This uses the Python Imaging Library to + generate a pixmap from the source code. + + .. versionadded:: 0.10 + + Additional options accepted: + + `image_format` + An image format to output to that is recognised by PIL, these include: + + * "PNG" (default) + * "JPEG" + * "BMP" + * "GIF" + + `line_pad` + The extra spacing (in pixels) between each line of text. + + Default: 2 + + `font_name` + The font name to be used as the base font from which others, such as + bold and italic fonts will be generated. This really should be a + monospace font to look sane. + + Default: "Courier New" on Windows, "Menlo" on Mac OS, and + "DejaVu Sans Mono" on \\*nix + + `font_size` + The font size in points to be used. + + Default: 14 + + `image_pad` + The padding, in pixels to be used at each edge of the resulting image. + + Default: 10 + + `line_numbers` + Whether line numbers should be shown: True/False + + Default: True + + `line_number_start` + The line number of the first line. + + Default: 1 + + `line_number_step` + The step used when printing line numbers. + + Default: 1 + + `line_number_bg` + The background colour (in "#123456" format) of the line number bar, or + None to use the style background color. + + Default: "#eed" + + `line_number_fg` + The text color of the line numbers (in "#123456"-like format). + + Default: "#886" + + `line_number_chars` + The number of columns of line numbers allowable in the line number + margin. 
+ + Default: 2 + + `line_number_bold` + Whether line numbers will be bold: True/False + + Default: False + + `line_number_italic` + Whether line numbers will be italicized: True/False + + Default: False + + `line_number_separator` + Whether a line will be drawn between the line number area and the + source code area: True/False + + Default: True + + `line_number_pad` + The horizontal padding (in pixels) between the line number margin, and + the source code area. + + Default: 6 + + `hl_lines` + Specify a list of lines to be highlighted. + + .. versionadded:: 1.2 + + Default: empty list + + `hl_color` + Specify the color for highlighting lines. + + .. versionadded:: 1.2 + + Default: highlight color of the selected style + """ + + # Required by the pygments mapper + name = 'img' + aliases = ['img', 'IMG', 'png'] + filenames = ['*.png'] + + unicodeoutput = False + + default_image_format = 'png' + + def __init__(self, **options): + """ + See the class docstring for explanation of options. + """ + if not pil_available: + raise PilNotAvailable( + 'Python Imaging Library is required for this formatter') + Formatter.__init__(self, **options) + self.encoding = 'latin1' # let pygments.format() do the right thing + # Read the style + self.styles = dict(self.style) + if self.style.background_color is None: + self.background_color = '#fff' + else: + self.background_color = self.style.background_color + # Image options + self.image_format = get_choice_opt( + options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'], + self.default_image_format, normcase=True) + self.image_pad = get_int_opt(options, 'image_pad', 10) + self.line_pad = get_int_opt(options, 'line_pad', 2) + # The fonts + fontsize = get_int_opt(options, 'font_size', 14) + self.fonts = FontManager(options.get('font_name', ''), fontsize) + self.fontw, self.fonth = self.fonts.get_char_size() + # Line number options + self.line_number_fg = options.get('line_number_fg', '#886') + self.line_number_bg = options.get('line_number_bg', '#eed') + self.line_number_chars = get_int_opt(options, + 'line_number_chars', 2) + self.line_number_bold = get_bool_opt(options, + 'line_number_bold', False) + self.line_number_italic = get_bool_opt(options, + 'line_number_italic', False) + self.line_number_pad = get_int_opt(options, 'line_number_pad', 6) + self.line_numbers = get_bool_opt(options, 'line_numbers', True) + self.line_number_separator = get_bool_opt(options, + 'line_number_separator', True) + self.line_number_step = get_int_opt(options, 'line_number_step', 1) + self.line_number_start = get_int_opt(options, 'line_number_start', 1) + if self.line_numbers: + self.line_number_width = (self.fontw * self.line_number_chars + + self.line_number_pad * 2) + else: + self.line_number_width = 0 + self.hl_lines = [] + hl_lines_str = get_list_opt(options, 'hl_lines', []) + for line in hl_lines_str: + try: + self.hl_lines.append(int(line)) + except ValueError: + pass + self.hl_color = options.get('hl_color', + self.style.highlight_color) or '#f90' + self.drawables = [] + + def get_style_defs(self, arg=''): + raise NotImplementedError('The -S option is meaningless for the image ' + 'formatter. Use -O style=<stylename> instead.') + + def _get_line_height(self): + """ + Get the height of a line. + """ + return self.fonth + self.line_pad + + def _get_line_y(self, lineno): + """ + Get the Y coordinate of a line number. + """ + return lineno * self._get_line_height() + self.image_pad + + def _get_char_width(self): + """ + Get the width of a character. 
+ """ + return self.fontw + + def _get_char_x(self, linelength): + """ + Get the X coordinate of a character position. + """ + return linelength + self.image_pad + self.line_number_width + + def _get_text_pos(self, linelength, lineno): + """ + Get the actual position for a character and line position. + """ + return self._get_char_x(linelength), self._get_line_y(lineno) + + def _get_linenumber_pos(self, lineno): + """ + Get the actual position for the start of a line number. + """ + return (self.image_pad, self._get_line_y(lineno)) + + def _get_text_color(self, style): + """ + Get the correct color for the token from the style. + """ + if style['color'] is not None: + fill = '#' + style['color'] + else: + fill = '#000' + return fill + + def _get_text_bg_color(self, style): + """ + Get the correct background color for the token from the style. + """ + if style['bgcolor'] is not None: + bg_color = '#' + style['bgcolor'] + else: + bg_color = None + return bg_color + + def _get_style_font(self, style): + """ + Get the correct font for the style. + """ + return self.fonts.get_font(style['bold'], style['italic']) + + def _get_image_size(self, maxlinelength, maxlineno): + """ + Get the required image size. + """ + return (self._get_char_x(maxlinelength) + self.image_pad, + self._get_line_y(maxlineno + 0) + self.image_pad) + + def _draw_linenumber(self, posno, lineno): + """ + Remember a line number drawable to paint later. + """ + self._draw_text( + self._get_linenumber_pos(posno), + str(lineno).rjust(self.line_number_chars), + font=self.fonts.get_font(self.line_number_bold, + self.line_number_italic), + text_fg=self.line_number_fg, + text_bg=None, + ) + + def _draw_text(self, pos, text, font, text_fg, text_bg): + """ + Remember a single drawable tuple to paint later. + """ + self.drawables.append((pos, text, font, text_fg, text_bg)) + + def _create_drawables(self, tokensource): + """ + Create drawables for the token content. + """ + lineno = charno = maxcharno = 0 + maxlinelength = linelength = 0 + for ttype, value in tokensource: + while ttype not in self.styles: + ttype = ttype.parent + style = self.styles[ttype] + # TODO: make sure tab expansion happens earlier in the chain. It + # really ought to be done on the input, as to do it right here is + # quite complex. + value = value.expandtabs(4) + lines = value.splitlines(True) + # print lines + for i, line in enumerate(lines): + temp = line.rstrip('\n') + if temp: + self._draw_text( + self._get_text_pos(linelength, lineno), + temp, + font = self._get_style_font(style), + text_fg = self._get_text_color(style), + text_bg = self._get_text_bg_color(style), + ) + temp_width, _ = self.fonts.get_text_size(temp) + linelength += temp_width + maxlinelength = max(maxlinelength, linelength) + charno += len(temp) + maxcharno = max(maxcharno, charno) + if line.endswith('\n'): + # add a line for each extra line in the value + linelength = 0 + charno = 0 + lineno += 1 + self.maxlinelength = maxlinelength + self.maxcharno = maxcharno + self.maxlineno = lineno + + def _draw_line_numbers(self): + """ + Create drawables for the line numbers. + """ + if not self.line_numbers: + return + for p in range(self.maxlineno): + n = p + self.line_number_start + if (n % self.line_number_step) == 0: + self._draw_linenumber(p, n) + + def _paint_line_number_bg(self, im): + """ + Paint the line number background on the image. 
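+
+        The bar is a single filled rectangle spanning the image height,
+        roughly as wide as the line number margin, with an optional
+        separator line drawn at its right edge.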
+ """ + if not self.line_numbers: + return + if self.line_number_fg is None: + return + draw = ImageDraw.Draw(im) + recth = im.size[-1] + rectw = self.image_pad + self.line_number_width - self.line_number_pad + draw.rectangle([(0, 0), (rectw, recth)], + fill=self.line_number_bg) + if self.line_number_separator: + draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg) + del draw + + def format(self, tokensource, outfile): + """ + Format ``tokensource``, an iterable of ``(tokentype, tokenstring)`` + tuples and write it into ``outfile``. + + This implementation calculates where it should draw each token on the + pixmap, then calculates the required pixmap size and draws the items. + """ + self._create_drawables(tokensource) + self._draw_line_numbers() + im = Image.new( + 'RGB', + self._get_image_size(self.maxlinelength, self.maxlineno), + self.background_color + ) + self._paint_line_number_bg(im) + draw = ImageDraw.Draw(im) + # Highlight + if self.hl_lines: + x = self.image_pad + self.line_number_width - self.line_number_pad + 1 + recth = self._get_line_height() + rectw = im.size[0] - x + for linenumber in self.hl_lines: + y = self._get_line_y(linenumber - 1) + draw.rectangle([(x, y), (x + rectw, y + recth)], + fill=self.hl_color) + for pos, value, font, text_fg, text_bg in self.drawables: + if text_bg: + text_size = draw.textsize(text=value, font=font) + draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg) + draw.text(pos, value, font=font, fill=text_fg) + im.save(outfile, self.image_format.upper()) + + +# Add one formatter per format, so that the "-f gif" option gives the correct result +# when used in pygmentize. + +class GifImageFormatter(ImageFormatter): + """ + Create a GIF image from source code. This uses the Python Imaging Library to + generate a pixmap from the source code. + + .. versionadded:: 1.0 + """ + + name = 'img_gif' + aliases = ['gif'] + filenames = ['*.gif'] + default_image_format = 'gif' + + +class JpgImageFormatter(ImageFormatter): + """ + Create a JPEG image from source code. This uses the Python Imaging Library to + generate a pixmap from the source code. + + .. versionadded:: 1.0 + """ + + name = 'img_jpg' + aliases = ['jpg', 'jpeg'] + filenames = ['*.jpg'] + default_image_format = 'jpeg' + + +class BmpImageFormatter(ImageFormatter): + """ + Create a bitmap image from source code. This uses the Python Imaging Library to + generate a pixmap from the source code. + + .. versionadded:: 1.0 + """ + + name = 'img_bmp' + aliases = ['bmp', 'bitmap'] + filenames = ['*.bmp'] + default_image_format = 'bmp' diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/irc.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/irc.py new file mode 100644 index 0000000..2144d43 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/irc.py @@ -0,0 +1,154 @@ +""" + pygments.formatters.irc + ~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for IRC output + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \ + Number, Operator, Generic, Token, Whitespace +from pip._vendor.pygments.util import get_choice_opt + + +__all__ = ['IRCFormatter'] + + +#: Map token types to a tuple of color values for light and dark +#: backgrounds. 
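+#: For example, ``Comment`` maps to ``('gray', 'brightblack')``: gray on a
+#: light background, bright black on a dark one.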
+IRC_COLORS = { + Token: ('', ''), + + Whitespace: ('gray', 'brightblack'), + Comment: ('gray', 'brightblack'), + Comment.Preproc: ('cyan', 'brightcyan'), + Keyword: ('blue', 'brightblue'), + Keyword.Type: ('cyan', 'brightcyan'), + Operator.Word: ('magenta', 'brightcyan'), + Name.Builtin: ('cyan', 'brightcyan'), + Name.Function: ('green', 'brightgreen'), + Name.Namespace: ('_cyan_', '_brightcyan_'), + Name.Class: ('_green_', '_brightgreen_'), + Name.Exception: ('cyan', 'brightcyan'), + Name.Decorator: ('brightblack', 'gray'), + Name.Variable: ('red', 'brightred'), + Name.Constant: ('red', 'brightred'), + Name.Attribute: ('cyan', 'brightcyan'), + Name.Tag: ('brightblue', 'brightblue'), + String: ('yellow', 'yellow'), + Number: ('blue', 'brightblue'), + + Generic.Deleted: ('brightred', 'brightred'), + Generic.Inserted: ('green', 'brightgreen'), + Generic.Heading: ('**', '**'), + Generic.Subheading: ('*magenta*', '*brightmagenta*'), + Generic.Error: ('brightred', 'brightred'), + + Error: ('_brightred_', '_brightred_'), +} + + +IRC_COLOR_MAP = { + 'white': 0, + 'black': 1, + 'blue': 2, + 'brightgreen': 3, + 'brightred': 4, + 'yellow': 5, + 'magenta': 6, + 'orange': 7, + 'green': 7, #compat w/ ansi + 'brightyellow': 8, + 'lightgreen': 9, + 'brightcyan': 9, # compat w/ ansi + 'cyan': 10, + 'lightblue': 11, + 'red': 11, # compat w/ ansi + 'brightblue': 12, + 'brightmagenta': 13, + 'brightblack': 14, + 'gray': 15, +} + +def ircformat(color, text): + if len(color) < 1: + return text + add = sub = '' + if '_' in color: # italic + add += '\x1D' + sub = '\x1D' + sub + color = color.strip('_') + if '*' in color: # bold + add += '\x02' + sub = '\x02' + sub + color = color.strip('*') + # underline (\x1F) not supported + # backgrounds (\x03FF,BB) not supported + if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff + add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2) + sub = '\x03' + sub + return add + text + sub + return '<'+add+'>'+text+'</'+sub+'>' + + +class IRCFormatter(Formatter): + r""" + Format tokens with IRC color sequences + + The `get_style_defs()` method doesn't do anything special since there is + no support for common styles. + + Options accepted: + + `bg` + Set to ``"light"`` or ``"dark"`` depending on the terminal's background + (default: ``"light"``). + + `colorscheme` + A dictionary mapping token types to (lightbg, darkbg) color names or + ``None`` (default: ``None`` = use builtin colorscheme). + + `linenos` + Set to ``True`` to have line numbers in the output as well + (default: ``False`` = no line numbers). 
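+
+    A short usage sketch (the source string is illustrative; any lexer
+    works)::
+
+        from pip._vendor.pygments import highlight
+        from pip._vendor.pygments.lexers import PythonLexer
+
+        message = highlight('x = 1', PythonLexer(), IRCFormatter(bg='dark'))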
+ """ + name = 'IRC' + aliases = ['irc', 'IRC'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + self.darkbg = get_choice_opt(options, 'bg', + ['light', 'dark'], 'light') == 'dark' + self.colorscheme = options.get('colorscheme', None) or IRC_COLORS + self.linenos = options.get('linenos', False) + self._lineno = 0 + + def _write_lineno(self, outfile): + if self.linenos: + self._lineno += 1 + outfile.write("%04d: " % self._lineno) + + def format_unencoded(self, tokensource, outfile): + self._write_lineno(outfile) + + for ttype, value in tokensource: + color = self.colorscheme.get(ttype) + while color is None: + ttype = ttype[:-1] + color = self.colorscheme.get(ttype) + if color: + color = color[self.darkbg] + spl = value.split('\n') + for line in spl[:-1]: + if line: + outfile.write(ircformat(color, line)) + outfile.write('\n') + self._write_lineno(outfile) + if spl[-1]: + outfile.write(ircformat(color, spl[-1])) + else: + outfile.write(value) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/latex.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/latex.py new file mode 100644 index 0000000..ca539b4 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/latex.py @@ -0,0 +1,521 @@ +""" + pygments.formatters.latex + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for LaTeX fancyvrb output. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from io import StringIO + +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.lexer import Lexer, do_insertions +from pip._vendor.pygments.token import Token, STANDARD_TYPES +from pip._vendor.pygments.util import get_bool_opt, get_int_opt + + +__all__ = ['LatexFormatter'] + + +def escape_tex(text, commandprefix): + return text.replace('\\', '\x00'). \ + replace('{', '\x01'). \ + replace('}', '\x02'). \ + replace('\x00', r'\%sZbs{}' % commandprefix). \ + replace('\x01', r'\%sZob{}' % commandprefix). \ + replace('\x02', r'\%sZcb{}' % commandprefix). \ + replace('^', r'\%sZca{}' % commandprefix). \ + replace('_', r'\%sZus{}' % commandprefix). \ + replace('&', r'\%sZam{}' % commandprefix). \ + replace('<', r'\%sZlt{}' % commandprefix). \ + replace('>', r'\%sZgt{}' % commandprefix). \ + replace('#', r'\%sZsh{}' % commandprefix). \ + replace('%', r'\%sZpc{}' % commandprefix). \ + replace('$', r'\%sZdl{}' % commandprefix). \ + replace('-', r'\%sZhy{}' % commandprefix). \ + replace("'", r'\%sZsq{}' % commandprefix). \ + replace('"', r'\%sZdq{}' % commandprefix). \ + replace('~', r'\%sZti{}' % commandprefix) + + +DOC_TEMPLATE = r''' +\documentclass{%(docclass)s} +\usepackage{fancyvrb} +\usepackage{color} +\usepackage[%(encoding)s]{inputenc} +%(preamble)s + +%(styledefs)s + +\begin{document} + +\section*{%(title)s} + +%(code)s +\end{document} +''' + +## Small explanation of the mess below :) +# +# The previous version of the LaTeX formatter just assigned a command to +# each token type defined in the current style. That obviously is +# problematic if the highlighted code is produced for a different style +# than the style commands themselves. +# +# This version works much like the HTML formatter which assigns multiple +# CSS classes to each <span> tag, from the most specific to the least +# specific token type, thus falling back to the parent token type if one +# is not defined. 
Here, the classes are there too and use the same short +# forms given in token.STANDARD_TYPES. +# +# Highlighted code now only uses one custom command, which by default is +# \PY and selectable by the commandprefix option (and in addition the +# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for +# backwards compatibility purposes). +# +# \PY has two arguments: the classes, separated by +, and the text to +# render in that style. The classes are resolved into the respective +# style commands by magic, which serves to ignore unknown classes. +# +# The magic macros are: +# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text +# to render in \PY@do. Their definition determines the style. +# * \PY@reset resets \PY@it etc. to do nothing. +# * \PY@toks parses the list of classes, using magic inspired by the +# keyval package (but modified to use plusses instead of commas +# because fancyvrb redefines commas inside its environments). +# * \PY@tok processes one class, calling the \PY@tok@classname command +# if it exists. +# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style +# for its class. +# * \PY resets the style, parses the classnames and then calls \PY@do. +# +# Tip: to read this code, print it out in substituted form using e.g. +# >>> print STYLE_TEMPLATE % {'cp': 'PY'} + +STYLE_TEMPLATE = r''' +\makeatletter +\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%% + \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%% + \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax} +\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname} +\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%% + \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi} +\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%% + \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}} +\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}} + +%(styles)s + +\def\%(cp)sZbs{\char`\\} +\def\%(cp)sZus{\char`\_} +\def\%(cp)sZob{\char`\{} +\def\%(cp)sZcb{\char`\}} +\def\%(cp)sZca{\char`\^} +\def\%(cp)sZam{\char`\&} +\def\%(cp)sZlt{\char`\<} +\def\%(cp)sZgt{\char`\>} +\def\%(cp)sZsh{\char`\#} +\def\%(cp)sZpc{\char`\%%} +\def\%(cp)sZdl{\char`\$} +\def\%(cp)sZhy{\char`\-} +\def\%(cp)sZsq{\char`\'} +\def\%(cp)sZdq{\char`\"} +\def\%(cp)sZti{\char`\~} +%% for compatibility with earlier versions +\def\%(cp)sZat{@} +\def\%(cp)sZlb{[} +\def\%(cp)sZrb{]} +\makeatother +''' + + +def _get_ttype_name(ttype): + fname = STANDARD_TYPES.get(ttype) + if fname: + return fname + aname = '' + while fname is None: + aname = ttype[-1] + aname + ttype = ttype.parent + fname = STANDARD_TYPES.get(ttype) + return fname + aname + + +class LatexFormatter(Formatter): + r""" + Format tokens as LaTeX code. This needs the `fancyvrb` and `color` + standard packages. + + Without the `full` option, code is formatted as one ``Verbatim`` + environment, like this: + + .. sourcecode:: latex + + \begin{Verbatim}[commandchars=\\\{\}] + \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}): + \PY{k}{pass} + \end{Verbatim} + + Wrapping can be disabled using the `nowrap` option. + + The special command used here (``\PY``) and all the other macros it needs + are output by the `get_style_defs` method. + + With the `full` option, a complete LaTeX document is output, including + the command definitions in the preamble. + + The `get_style_defs()` method of a `LatexFormatter` returns a string + containing ``\def`` commands defining the macros needed inside the + ``Verbatim`` environments. 
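+
+    A minimal end-to-end sketch (the file name is illustrative)::
+
+        from pip._vendor.pygments import highlight
+        from pip._vendor.pygments.lexers import PythonLexer
+
+        formatter = LatexFormatter(full=True, title='Example')
+        with open('example.tex', 'w') as f:
+            highlight('x = 1', PythonLexer(), formatter, outfile=f)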
+ + Additional options accepted: + + `nowrap` + If set to ``True``, don't wrap the tokens at all, not even inside a + ``\begin{Verbatim}`` environment. This disables most other options + (default: ``False``). + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `full` + Tells the formatter to output a "full" document, i.e. a complete + self-contained document (default: ``False``). + + `title` + If `full` is true, the title that should be used to caption the + document (default: ``''``). + + `docclass` + If the `full` option is enabled, this is the document class to use + (default: ``'article'``). + + `preamble` + If the `full` option is enabled, this can be further preamble commands, + e.g. ``\usepackage`` (default: ``''``). + + `linenos` + If set to ``True``, output line numbers (default: ``False``). + + `linenostart` + The line number for the first line (default: ``1``). + + `linenostep` + If set to a number n > 1, only every nth line number is printed. + + `verboptions` + Additional options given to the Verbatim environment (see the *fancyvrb* + docs for possible values) (default: ``''``). + + `commandprefix` + The LaTeX commands used to produce colored output are constructed + using this prefix and some letters (default: ``'PY'``). + + .. versionadded:: 0.7 + .. versionchanged:: 0.10 + The default is now ``'PY'`` instead of ``'C'``. + + `texcomments` + If set to ``True``, enables LaTeX comment lines. That is, LaTex markup + in comment tokens is not escaped so that LaTeX can render it (default: + ``False``). + + .. versionadded:: 1.2 + + `mathescape` + If set to ``True``, enables LaTeX math mode escape in comments. That + is, ``'$...$'`` inside a comment will trigger math mode (default: + ``False``). + + .. versionadded:: 1.2 + + `escapeinside` + If set to a string of length 2, enables escaping to LaTeX. Text + delimited by these 2 characters is read as LaTeX code and + typeset accordingly. It has no effect in string literals. It has + no effect in comments if `texcomments` or `mathescape` is + set. (default: ``''``). + + .. versionadded:: 2.0 + + `envname` + Allows you to pick an alternative environment name replacing Verbatim. + The alternate environment still has to support Verbatim's option syntax. + (default: ``'Verbatim'``). + + .. 
versionadded:: 2.0
+    """
+    name = 'LaTeX'
+    aliases = ['latex', 'tex']
+    filenames = ['*.tex']
+
+    def __init__(self, **options):
+        Formatter.__init__(self, **options)
+        self.nowrap = get_bool_opt(options, 'nowrap', False)
+        self.docclass = options.get('docclass', 'article')
+        self.preamble = options.get('preamble', '')
+        self.linenos = get_bool_opt(options, 'linenos', False)
+        self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
+        self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
+        self.verboptions = options.get('verboptions', '')
+        self.nobackground = get_bool_opt(options, 'nobackground', False)
+        self.commandprefix = options.get('commandprefix', 'PY')
+        self.texcomments = get_bool_opt(options, 'texcomments', False)
+        self.mathescape = get_bool_opt(options, 'mathescape', False)
+        self.escapeinside = options.get('escapeinside', '')
+        if len(self.escapeinside) == 2:
+            self.left = self.escapeinside[0]
+            self.right = self.escapeinside[1]
+        else:
+            self.escapeinside = ''
+        self.envname = options.get('envname', 'Verbatim')
+
+        self._create_stylesheet()
+
+    def _create_stylesheet(self):
+        t2n = self.ttype2name = {Token: ''}
+        c2d = self.cmd2def = {}
+        cp = self.commandprefix
+
+        def rgbcolor(col):
+            if col:
+                return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
+                                 for i in (0, 2, 4)])
+            else:
+                return '1,1,1'
+
+        for ttype, ndef in self.style:
+            name = _get_ttype_name(ttype)
+            cmndef = ''
+            if ndef['bold']:
+                cmndef += r'\let\$$@bf=\textbf'
+            if ndef['italic']:
+                cmndef += r'\let\$$@it=\textit'
+            if ndef['underline']:
+                cmndef += r'\let\$$@ul=\underline'
+            if ndef['roman']:
+                cmndef += r'\let\$$@ff=\textrm'
+            if ndef['sans']:
+                cmndef += r'\let\$$@ff=\textsf'
+            if ndef['mono']:
+                cmndef += r'\let\$$@ff=\texttt'
+            if ndef['color']:
+                cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
+                           rgbcolor(ndef['color']))
+            if ndef['border']:
+                cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
+                           r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
+                           (rgbcolor(ndef['border']),
+                            rgbcolor(ndef['bgcolor'])))
+            elif ndef['bgcolor']:
+                cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
+                           r'\colorbox[rgb]{%s}{\strut ##1}}}' %
+                           rgbcolor(ndef['bgcolor']))
+            if cmndef == '':
+                continue
+            cmndef = cmndef.replace('$$', cp)
+            t2n[ttype] = name
+            c2d[name] = cmndef
+
+    def get_style_defs(self, arg=''):
+        """
+        Return the command sequences needed to define the commands
+        used to format text in the verbatim environment. ``arg`` is ignored.
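+
+        Each stylesheet entry becomes one ``\@namedef`` line, e.g.
+        (abbreviated; exact definitions depend on the chosen style)::
+
+            \@namedef{PY@tok@k}{\let\PY@bf=\textbf\def\PY@tc##1{...}}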
+ """ + cp = self.commandprefix + styles = [] + for name, definition in self.cmd2def.items(): + styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition)) + return STYLE_TEMPLATE % {'cp': self.commandprefix, + 'styles': '\n'.join(styles)} + + def format_unencoded(self, tokensource, outfile): + # TODO: add support for background colors + t2n = self.ttype2name + cp = self.commandprefix + + if self.full: + realoutfile = outfile + outfile = StringIO() + + if not self.nowrap: + outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}') + if self.linenos: + start, step = self.linenostart, self.linenostep + outfile.write(',numbers=left' + + (start and ',firstnumber=%d' % start or '') + + (step and ',stepnumber=%d' % step or '')) + if self.mathescape or self.texcomments or self.escapeinside: + outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7' + '\\catcode`\\_=8\\relax}') + if self.verboptions: + outfile.write(',' + self.verboptions) + outfile.write(']\n') + + for ttype, value in tokensource: + if ttype in Token.Comment: + if self.texcomments: + # Try to guess comment starting lexeme and escape it ... + start = value[0:1] + for i in range(1, len(value)): + if start[0] != value[i]: + break + start += value[i] + + value = value[len(start):] + start = escape_tex(start, cp) + + # ... but do not escape inside comment. + value = start + value + elif self.mathescape: + # Only escape parts not inside a math environment. + parts = value.split('$') + in_math = False + for i, part in enumerate(parts): + if not in_math: + parts[i] = escape_tex(part, cp) + in_math = not in_math + value = '$'.join(parts) + elif self.escapeinside: + text = value + value = '' + while text: + a, sep1, text = text.partition(self.left) + if sep1: + b, sep2, text = text.partition(self.right) + if sep2: + value += escape_tex(a, cp) + b + else: + value += escape_tex(a + sep1 + b, cp) + else: + value += escape_tex(a, cp) + else: + value = escape_tex(value, cp) + elif ttype not in Token.Escape: + value = escape_tex(value, cp) + styles = [] + while ttype is not Token: + try: + styles.append(t2n[ttype]) + except KeyError: + # not in current style + styles.append(_get_ttype_name(ttype)) + ttype = ttype.parent + styleval = '+'.join(reversed(styles)) + if styleval: + spl = value.split('\n') + for line in spl[:-1]: + if line: + outfile.write("\\%s{%s}{%s}" % (cp, styleval, line)) + outfile.write('\n') + if spl[-1]: + outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1])) + else: + outfile.write(value) + + if not self.nowrap: + outfile.write('\\end{' + self.envname + '}\n') + + if self.full: + encoding = self.encoding or 'utf8' + # map known existings encodings from LaTeX distribution + encoding = { + 'utf_8': 'utf8', + 'latin_1': 'latin1', + 'iso_8859_1': 'latin1', + }.get(encoding.replace('-', '_'), encoding) + realoutfile.write(DOC_TEMPLATE % + dict(docclass = self.docclass, + preamble = self.preamble, + title = self.title, + encoding = encoding, + styledefs = self.get_style_defs(), + code = outfile.getvalue())) + + +class LatexEmbeddedLexer(Lexer): + """ + This lexer takes one lexer as argument, the lexer for the language + being formatted, and the left and right delimiters for escaped text. + + First everything is scanned using the language lexer to obtain + strings and comments. All other consecutive tokens are merged and + the resulting text is scanned for escaped segments, which are given + the Token.Escape type. Finally text that is not escaped is scanned + again with the language lexer. 
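+
+    A construction sketch (the ``|`` delimiters are illustrative and must
+    match the formatter's ``escapeinside`` option)::
+
+        from pip._vendor.pygments.lexers import PythonLexer
+
+        lexer = LatexEmbeddedLexer('|', '|', PythonLexer())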
+ """ + def __init__(self, left, right, lang, **options): + self.left = left + self.right = right + self.lang = lang + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + # find and remove all the escape tokens (replace with an empty string) + # this is very similar to DelegatingLexer.get_tokens_unprocessed. + buffered = '' + insertions = [] + insertion_buf = [] + for i, t, v in self._find_safe_escape_tokens(text): + if t is None: + if insertion_buf: + insertions.append((len(buffered), insertion_buf)) + insertion_buf = [] + buffered += v + else: + insertion_buf.append((i, t, v)) + if insertion_buf: + insertions.append((len(buffered), insertion_buf)) + return do_insertions(insertions, + self.lang.get_tokens_unprocessed(buffered)) + + def _find_safe_escape_tokens(self, text): + """ find escape tokens that are not in strings or comments """ + for i, t, v in self._filter_to( + self.lang.get_tokens_unprocessed(text), + lambda t: t in Token.Comment or t in Token.String + ): + if t is None: + for i2, t2, v2 in self._find_escape_tokens(v): + yield i + i2, t2, v2 + else: + yield i, None, v + + def _filter_to(self, it, pred): + """ Keep only the tokens that match `pred`, merge the others together """ + buf = '' + idx = 0 + for i, t, v in it: + if pred(t): + if buf: + yield idx, None, buf + buf = '' + yield i, t, v + else: + if not buf: + idx = i + buf += v + if buf: + yield idx, None, buf + + def _find_escape_tokens(self, text): + """ Find escape tokens within text, give token=None otherwise """ + index = 0 + while text: + a, sep1, text = text.partition(self.left) + if a: + yield index, None, a + index += len(a) + if sep1: + b, sep2, text = text.partition(self.right) + if sep2: + yield index + len(sep1), Token.Escape, b + index += len(sep1) + len(b) + len(sep2) + else: + yield index, Token.Error, sep1 + index += len(sep1) + text = b diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/other.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/other.py new file mode 100644 index 0000000..990ead4 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/other.py @@ -0,0 +1,161 @@ +""" + pygments.formatters.other + ~~~~~~~~~~~~~~~~~~~~~~~~~ + + Other formatters: NullFormatter, RawTokenFormatter. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.util import get_choice_opt +from pip._vendor.pygments.token import Token +from pip._vendor.pygments.console import colorize + +__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter'] + + +class NullFormatter(Formatter): + """ + Output the text unchanged without any formatting. + """ + name = 'Text only' + aliases = ['text', 'null'] + filenames = ['*.txt'] + + def format(self, tokensource, outfile): + enc = self.encoding + for ttype, value in tokensource: + if enc: + outfile.write(value.encode(enc)) + else: + outfile.write(value) + + +class RawTokenFormatter(Formatter): + r""" + Format tokens as a raw representation for storing token streams. + + The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later + be converted to a token stream with the `RawTokenLexer`, described in the + :doc:`lexer list <lexers>`. + + Only two options are accepted: + + `compress` + If set to ``'gz'`` or ``'bz2'``, compress the output with the given + compression algorithm after encoding (default: ``''``). 
+ `error_color` + If set to a color name, highlight error tokens using that color. If + set but with no value, defaults to ``'red'``. + + .. versionadded:: 0.11 + + """ + name = 'Raw tokens' + aliases = ['raw', 'tokens'] + filenames = ['*.raw'] + + unicodeoutput = False + + def __init__(self, **options): + Formatter.__init__(self, **options) + # We ignore self.encoding if it is set, since it gets set for lexer + # and formatter if given with -Oencoding on the command line. + # The RawTokenFormatter outputs only ASCII. Override here. + self.encoding = 'ascii' # let pygments.format() do the right thing + self.compress = get_choice_opt(options, 'compress', + ['', 'none', 'gz', 'bz2'], '') + self.error_color = options.get('error_color', None) + if self.error_color is True: + self.error_color = 'red' + if self.error_color is not None: + try: + colorize(self.error_color, '') + except KeyError: + raise ValueError("Invalid color %r specified" % + self.error_color) + + def format(self, tokensource, outfile): + try: + outfile.write(b'') + except TypeError: + raise TypeError('The raw tokens formatter needs a binary ' + 'output file') + if self.compress == 'gz': + import gzip + outfile = gzip.GzipFile('', 'wb', 9, outfile) + + write = outfile.write + flush = outfile.close + elif self.compress == 'bz2': + import bz2 + compressor = bz2.BZ2Compressor(9) + + def write(text): + outfile.write(compressor.compress(text)) + + def flush(): + outfile.write(compressor.flush()) + outfile.flush() + else: + write = outfile.write + flush = outfile.flush + + if self.error_color: + for ttype, value in tokensource: + line = b"%r\t%r\n" % (ttype, value) + if ttype is Token.Error: + write(colorize(self.error_color, line)) + else: + write(line) + else: + for ttype, value in tokensource: + write(b"%r\t%r\n" % (ttype, value)) + flush() + + +TESTCASE_BEFORE = '''\ + def testNeedsName(lexer): + fragment = %r + tokens = [ +''' +TESTCASE_AFTER = '''\ + ] + assert list(lexer.get_tokens(fragment)) == tokens +''' + + +class TestcaseFormatter(Formatter): + """ + Format tokens as appropriate for a new testcase. + + .. versionadded:: 2.0 + """ + name = 'Testcase' + aliases = ['testcase'] + + def __init__(self, **options): + Formatter.__init__(self, **options) + if self.encoding is not None and self.encoding != 'utf-8': + raise ValueError("Only None and utf-8 are allowed encodings.") + + def format(self, tokensource, outfile): + indentation = ' ' * 12 + rawbuf = [] + outbuf = [] + for ttype, value in tokensource: + rawbuf.append(value) + outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value)) + + before = TESTCASE_BEFORE % (''.join(rawbuf),) + during = ''.join(outbuf) + after = TESTCASE_AFTER + if self.encoding is None: + outfile.write(before + during + after) + else: + outfile.write(before.encode('utf-8')) + outfile.write(during.encode('utf-8')) + outfile.write(after.encode('utf-8')) + outfile.flush() diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py new file mode 100644 index 0000000..6bb325d --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/pangomarkup.py @@ -0,0 +1,83 @@ +""" + pygments.formatters.pangomarkup + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + Formatter for Pango markup output. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. 
+""" + +from pip._vendor.pygments.formatter import Formatter + + +__all__ = ['PangoMarkupFormatter'] + + +_escape_table = { + ord('&'): '&', + ord('<'): '<', +} + + +def escape_special_chars(text, table=_escape_table): + """Escape & and < for Pango Markup.""" + return text.translate(table) + + +class PangoMarkupFormatter(Formatter): + """ + Format tokens as Pango Markup code. It can then be rendered to an SVG. + + .. versionadded:: 2.9 + """ + + name = 'Pango Markup' + aliases = ['pango', 'pangomarkup'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + + self.styles = {} + + for token, style in self.style: + start = '' + end = '' + if style['color']: + start += '<span fgcolor="#%s">' % style['color'] + end = '</span>' + end + if style['bold']: + start += '<b>' + end = '</b>' + end + if style['italic']: + start += '<i>' + end = '</i>' + end + if style['underline']: + start += '<u>' + end = '</u>' + end + self.styles[token] = (start, end) + + def format_unencoded(self, tokensource, outfile): + lastval = '' + lasttype = None + + outfile.write('<tt>') + + for ttype, value in tokensource: + while ttype not in self.styles: + ttype = ttype.parent + if ttype == lasttype: + lastval += escape_special_chars(value) + else: + if lastval: + stylebegin, styleend = self.styles[lasttype] + outfile.write(stylebegin + lastval + styleend) + lastval = escape_special_chars(value) + lasttype = ttype + + if lastval: + stylebegin, styleend = self.styles[lasttype] + outfile.write(stylebegin + lastval + styleend) + + outfile.write('</tt>') diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py new file mode 100644 index 0000000..125189c --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/rtf.py @@ -0,0 +1,146 @@ +""" + pygments.formatters.rtf + ~~~~~~~~~~~~~~~~~~~~~~~ + + A formatter that generates RTF files. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pip._vendor.pygments.formatter import Formatter +from pip._vendor.pygments.util import get_int_opt, surrogatepair + + +__all__ = ['RtfFormatter'] + + +class RtfFormatter(Formatter): + """ + Format tokens as RTF markup. This formatter automatically outputs full RTF + documents with color information and other useful stuff. Perfect for Copy and + Paste into Microsoft(R) Word(R) documents. + + Please note that ``encoding`` and ``outencoding`` options are ignored. + The RTF format is ASCII natively, but handles unicode characters correctly + thanks to escape sequences. + + .. versionadded:: 0.6 + + Additional options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `fontface` + The used font family, for example ``Bitstream Vera Sans``. Defaults to + some generic font which is supposed to have fixed width. + + `fontsize` + Size of the font used. Size is specified in half points. The + default is 24 half-points, giving a size 12 font. + + .. versionadded:: 2.0 + """ + name = 'RTF' + aliases = ['rtf'] + filenames = ['*.rtf'] + + def __init__(self, **options): + r""" + Additional options accepted: + + ``fontface`` + Name of the font used. Could for example be ``'Courier New'`` + to further specify the default which is ``'\fmodern'``. The RTF + specification claims that ``\fmodern`` are "Fixed-pitch serif + and sans serif fonts". 
+            the same about modern...
+
+        """
+        Formatter.__init__(self, **options)
+        self.fontface = options.get('fontface') or ''
+        self.fontsize = get_int_opt(options, 'fontsize', 0)
+
+    def _escape(self, text):
+        return text.replace('\\', '\\\\') \
+                   .replace('{', '\\{') \
+                   .replace('}', '\\}')
+
+    def _escape_text(self, text):
+        # empty strings, should give a small performance improvement
+        if not text:
+            return ''
+
+        # escape text
+        text = self._escape(text)
+
+        buf = []
+        for c in text:
+            cn = ord(c)
+            if cn < (2**7):
+                # ASCII character
+                buf.append(str(c))
+            elif (2**7) <= cn < (2**16):
+                # single unicode escape sequence
+                buf.append('{\\u%d}' % cn)
+            elif (2**16) <= cn:
+                # RTF limits unicode to 16 bits.
+                # Force surrogate pairs
+                buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
+
+        return ''.join(buf).replace('\n', '\\par\n')
+
+    def format_unencoded(self, tokensource, outfile):
+        # rtf 1.8 header
+        outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
+                      '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
+                      '{\\colortbl;' % (self.fontface and
+                                        ' ' + self._escape(self.fontface) or
+                                        ''))
+
+        # convert colors and save them in a mapping to access them later.
+        color_mapping = {}
+        offset = 1
+        for _, style in self.style:
+            for color in style['color'], style['bgcolor'], style['border']:
+                if color and color not in color_mapping:
+                    color_mapping[color] = offset
+                    outfile.write('\\red%d\\green%d\\blue%d;' % (
+                        int(color[0:2], 16),
+                        int(color[2:4], 16),
+                        int(color[4:6], 16)
+                    ))
+                    offset += 1
+        outfile.write('}\\f0 ')
+        if self.fontsize:
+            outfile.write('\\fs%d' % self.fontsize)
+
+        # highlight stream
+        for ttype, value in tokensource:
+            while not self.style.styles_token(ttype) and ttype.parent:
+                ttype = ttype.parent
+            style = self.style.style_for_token(ttype)
+            buf = []
+            if style['bgcolor']:
+                buf.append('\\cb%d' % color_mapping[style['bgcolor']])
+            if style['color']:
+                buf.append('\\cf%d' % color_mapping[style['color']])
+            if style['bold']:
+                buf.append('\\b')
+            if style['italic']:
+                buf.append('\\i')
+            if style['underline']:
+                buf.append('\\ul')
+            if style['border']:
+                buf.append('\\chbrdr\\chcfpat%d' %
+                           color_mapping[style['border']])
+            start = ''.join(buf)
+            if start:
+                outfile.write('{%s ' % start)
+            outfile.write(self._escape_text(value))
+            if start:
+                outfile.write('}')
+
+        outfile.write('}')
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/svg.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/svg.py
new file mode 100644
index 0000000..a8727ed
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/svg.py
@@ -0,0 +1,188 @@
+"""
+    pygments.formatters.svg
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    Formatter for SVG output.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.token import Comment
+from pip._vendor.pygments.util import get_bool_opt, get_int_opt
+
+__all__ = ['SvgFormatter']
+
+
+def escape_html(text):
+    """Escape &, <, > as well as single and double quotes for HTML."""
+    return text.replace('&', '&amp;'). \
+                replace('<', '&lt;'). \
+                replace('>', '&gt;'). \
+                replace('"', '&quot;'). \
+                replace("'", '&#39;')
+
+
+class2style = {}
+
+class SvgFormatter(Formatter):
+    """
+    Format tokens as an SVG graphics file. This formatter is still experimental.
+    Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
+    coordinates containing ``<tspan>`` elements with the individual token styles.
+
+    By default, this formatter outputs a full SVG document including doctype
+    declaration and the ``<svg>`` root element.
+
+    .. versionadded:: 0.9
+
+    Additional options accepted:
+
+    `nowrap`
+        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
+        don't add a XML declaration and a doctype. If true, the `fontfamily`
+        and `fontsize` options are ignored. Defaults to ``False``.
+
+    `fontfamily`
+        The value to give the wrapping ``<g>`` element's ``font-family``
+        attribute, defaults to ``"monospace"``.
+
+    `fontsize`
+        The value to give the wrapping ``<g>`` element's ``font-size``
+        attribute, defaults to ``"14px"``.
+
+    `linenos`
+        If ``True``, add line numbers (default: ``False``).
+
+    `linenostart`
+        The line number for the first line (default: ``1``).
+
+    `linenostep`
+        If set to a number n > 1, only every nth line number is printed.
+
+    `linenowidth`
+        Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
+        for up to 4-digit line numbers. Increase width for longer code blocks).
+
+    `xoffset`
+        Starting offset in X direction, defaults to ``0``.
+
+    `yoffset`
+        Starting offset in Y direction, defaults to the font size if it is given
+        in pixels, or ``20`` else. (This is necessary since text coordinates
+        refer to the text baseline, not the top edge.)
+
+    `ystep`
+        Offset to add to the Y coordinate for each subsequent line. This should
+        roughly be the text size plus 5. It defaults to that value if the text
+        size is given in pixels, or ``25`` else.
+
+    `spacehack`
+        Convert spaces in the source to ``&#160;``, which are non-breaking
+        spaces. SVG provides the ``xml:space`` attribute to control how
+        whitespace inside tags is handled; in theory, the ``preserve`` value
+        could be used to keep all whitespace as-is. However, many current SVG
+        viewers don't obey that rule, so this option is provided as a workaround
+        and defaults to ``True``.
+    """
+    name = 'SVG'
+    aliases = ['svg']
+    filenames = ['*.svg']
+
+    def __init__(self, **options):
+        Formatter.__init__(self, **options)
+        self.nowrap = get_bool_opt(options, 'nowrap', False)
+        self.fontfamily = options.get('fontfamily', 'monospace')
+        self.fontsize = options.get('fontsize', '14px')
+        self.xoffset = get_int_opt(options, 'xoffset', 0)
+        fs = self.fontsize.strip()
+        if fs.endswith('px'):
+            fs = fs[:-2].strip()
+        try:
+            int_fs = int(fs)
+        except ValueError:
+            int_fs = 20
+        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
+        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
+        self.spacehack = get_bool_opt(options, 'spacehack', True)
+        self.linenos = get_bool_opt(options, 'linenos', False)
+        self.linenostart = get_int_opt(options, 'linenostart', 1)
+        self.linenostep = get_int_opt(options, 'linenostep', 1)
+        self.linenowidth = get_int_opt(options, 'linenowidth', 3 * self.ystep)
+        self._stylecache = {}
+
+    def format_unencoded(self, tokensource, outfile):
+        """
+        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
+        tuples and write it into ``outfile``.
+
+        For our implementation we put all lines in their own 'line group'.
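+
+        Each output line therefore looks roughly like this (a sketch;
+        coordinates depend on the offset and step options)::
+
+            <text x="0" y="14" xml:space="preserve"><tspan fill="#008000"
+            font-weight="bold">def</tspan>&#160;...</text>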
+        """
+        x = self.xoffset
+        y = self.yoffset
+        if not self.nowrap:
+            if self.encoding:
+                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
+                              self.encoding)
+            else:
+                outfile.write('<?xml version="1.0"?>\n')
+            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
+                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
+                          'svg10.dtd">\n')
+            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
+            outfile.write('<g font-family="%s" font-size="%s">\n' %
+                          (self.fontfamily, self.fontsize))
+
+        counter = self.linenostart
+        counter_step = self.linenostep
+        counter_style = self._get_style(Comment)
+        line_x = x
+
+        if self.linenos:
+            if counter % counter_step == 0:
+                outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' %
+                              (x + self.linenowidth, y, counter_style, counter))
+            line_x += self.linenowidth + self.ystep
+            counter += 1
+
+        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
+        for ttype, value in tokensource:
+            style = self._get_style(ttype)
+            tspan = style and '<tspan' + style + '>' or ''
+            tspanend = tspan and '</tspan>' or ''
+            value = escape_html(value)
+            if self.spacehack:
+                value = value.expandtabs().replace(' ', '&#160;')
+            parts = value.split('\n')
+            for part in parts[:-1]:
+                outfile.write(tspan + part + tspanend)
+                y += self.ystep
+                outfile.write('</text>\n')
+                if self.linenos and counter % counter_step == 0:
+                    outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' %
+                                  (x + self.linenowidth, y, counter_style, counter))
+                counter += 1
+                outfile.write('<text x="%s" y="%s" xml:space="preserve">' %
+                              (line_x, y))
+            outfile.write(tspan + parts[-1] + tspanend)
+        outfile.write('</text>')
+
+        if not self.nowrap:
+            outfile.write('</g></svg>\n')
+
+    def _get_style(self, tokentype):
+        if tokentype in self._stylecache:
+            return self._stylecache[tokentype]
+        otokentype = tokentype
+        while not self.style.styles_token(tokentype):
+            tokentype = tokentype.parent
+        value = self.style.style_for_token(tokentype)
+        result = ''
+        if value['color']:
+            result = ' fill="#' + value['color'] + '"'
+        if value['bold']:
+            result += ' font-weight="bold"'
+        if value['italic']:
+            result += ' font-style="italic"'
+        self._stylecache[otokentype] = result
+        return result
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py
new file mode 100644
index 0000000..abb8770
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal.py
@@ -0,0 +1,127 @@
+"""
+    pygments.formatters.terminal
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Formatter for terminal output with ANSI sequences.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
+    Number, Operator, Generic, Token, Whitespace
+from pip._vendor.pygments.console import ansiformat
+from pip._vendor.pygments.util import get_choice_opt
+
+
+__all__ = ['TerminalFormatter']
+
+
+#: Map token types to a tuple of color values for light and dark
+#: backgrounds.
+TERMINAL_COLORS = { + Token: ('', ''), + + Whitespace: ('gray', 'brightblack'), + Comment: ('gray', 'brightblack'), + Comment.Preproc: ('cyan', 'brightcyan'), + Keyword: ('blue', 'brightblue'), + Keyword.Type: ('cyan', 'brightcyan'), + Operator.Word: ('magenta', 'brightmagenta'), + Name.Builtin: ('cyan', 'brightcyan'), + Name.Function: ('green', 'brightgreen'), + Name.Namespace: ('_cyan_', '_brightcyan_'), + Name.Class: ('_green_', '_brightgreen_'), + Name.Exception: ('cyan', 'brightcyan'), + Name.Decorator: ('brightblack', 'gray'), + Name.Variable: ('red', 'brightred'), + Name.Constant: ('red', 'brightred'), + Name.Attribute: ('cyan', 'brightcyan'), + Name.Tag: ('brightblue', 'brightblue'), + String: ('yellow', 'yellow'), + Number: ('blue', 'brightblue'), + + Generic.Deleted: ('brightred', 'brightred'), + Generic.Inserted: ('green', 'brightgreen'), + Generic.Heading: ('**', '**'), + Generic.Subheading: ('*magenta*', '*brightmagenta*'), + Generic.Prompt: ('**', '**'), + Generic.Error: ('brightred', 'brightred'), + + Error: ('_brightred_', '_brightred_'), +} + + +class TerminalFormatter(Formatter): + r""" + Format tokens with ANSI color sequences, for output in a text console. + Color sequences are terminated at newlines, so that paging the output + works correctly. + + The `get_style_defs()` method doesn't do anything special since there is + no support for common styles. + + Options accepted: + + `bg` + Set to ``"light"`` or ``"dark"`` depending on the terminal's background + (default: ``"light"``). + + `colorscheme` + A dictionary mapping token types to (lightbg, darkbg) color names or + ``None`` (default: ``None`` = use builtin colorscheme). + + `linenos` + Set to ``True`` to have line numbers on the terminal output as well + (default: ``False`` = no line numbers). + """ + name = 'Terminal' + aliases = ['terminal', 'console'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + self.darkbg = get_choice_opt(options, 'bg', + ['light', 'dark'], 'light') == 'dark' + self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS + self.linenos = options.get('linenos', False) + self._lineno = 0 + + def format(self, tokensource, outfile): + return Formatter.format(self, tokensource, outfile) + + def _write_lineno(self, outfile): + self._lineno += 1 + outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno)) + + def _get_color(self, ttype): + # self.colorscheme is a dict containing usually generic types, so we + # have to walk the tree of dots. The base Token type must be a key, + # even if it's empty string, as in the default above. 
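+        # e.g. Comment.Single is not a key in the default scheme above, so
+        # the walk below resolves it to its parent Comment
+        # ('gray'/'brightblack').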
+        colors = self.colorscheme.get(ttype)
+        while colors is None:
+            ttype = ttype.parent
+            colors = self.colorscheme.get(ttype)
+        return colors[self.darkbg]
+
+    def format_unencoded(self, tokensource, outfile):
+        if self.linenos:
+            self._write_lineno(outfile)
+
+        for ttype, value in tokensource:
+            color = self._get_color(ttype)
+
+            for line in value.splitlines(True):
+                if color:
+                    outfile.write(ansiformat(color, line.rstrip('\n')))
+                else:
+                    outfile.write(line.rstrip('\n'))
+                if line.endswith('\n'):
+                    if self.linenos:
+                        self._write_lineno(outfile)
+                    else:
+                        outfile.write('\n')
+
+        if self.linenos:
+            outfile.write("\n")
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal256.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal256.py
new file mode 100644
index 0000000..0cfe5d1
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/formatters/terminal256.py
@@ -0,0 +1,338 @@
+"""
+    pygments.formatters.terminal256
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Formatter for 256-color terminal output with ANSI sequences.
+
+    RGB-to-XTERM color conversion routines adapted from the xterm256-conv
+    tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
+    by Wolfgang Frisch.
+
+    Formatter version 1.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+# TODO:
+#  - Options to map style's bold/underline/italic/border attributes
+#    to some ANSI attributes (something like 'italic=underline')
+#  - An option to output "style RGB to xterm RGB/index" conversion table
+#  - An option to indicate that we are running in "reverse background"
+#    xterm. This means that default colors are white-on-black, not
+#    black-on-white, so colors like "white background" need to be converted
+#    to "white background, black foreground", etc...
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.console import codes
+from pip._vendor.pygments.style import ansicolors
+
+
+__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
+
+
+class EscapeSequence:
+    def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
+        self.fg = fg
+        self.bg = bg
+        self.bold = bold
+        self.underline = underline
+        self.italic = italic
+
+    def escape(self, attrs):
+        if len(attrs):
+            return "\x1b[" + ";".join(attrs) + "m"
+        return ""
+
+    def color_string(self):
+        attrs = []
+        if self.fg is not None:
+            if self.fg in ansicolors:
+                esc = codes[self.fg.replace('ansi', '')]
+                if ';01m' in esc:
+                    self.bold = True
+                # extract fg color code.
+                attrs.append(esc[2:4])
+            else:
+                attrs.extend(("38", "5", "%i" % self.fg))
+        if self.bg is not None:
+            if self.bg in ansicolors:
+                esc = codes[self.bg.replace('ansi', '')]
+                # extract fg color code, add 10 for bg.
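+                # Worked example: ANSI foreground codes 30-37 map to
+                # background codes 40-47, so red (31) becomes 41.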
+ attrs.append(str(int(esc[2:4])+10)) + else: + attrs.extend(("48", "5", "%i" % self.bg)) + if self.bold: + attrs.append("01") + if self.underline: + attrs.append("04") + if self.italic: + attrs.append("03") + return self.escape(attrs) + + def true_color_string(self): + attrs = [] + if self.fg: + attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2]))) + if self.bg: + attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2]))) + if self.bold: + attrs.append("01") + if self.underline: + attrs.append("04") + if self.italic: + attrs.append("03") + return self.escape(attrs) + + def reset_string(self): + attrs = [] + if self.fg is not None: + attrs.append("39") + if self.bg is not None: + attrs.append("49") + if self.bold or self.underline or self.italic: + attrs.append("00") + return self.escape(attrs) + + +class Terminal256Formatter(Formatter): + """ + Format tokens with ANSI color sequences, for output in a 256-color + terminal or console. Like in `TerminalFormatter` color sequences + are terminated at newlines, so that paging the output works correctly. + + The formatter takes colors from a style defined by the `style` option + and converts them to nearest ANSI 256-color escape sequences. Bold and + underline attributes from the style are preserved (and displayed). + + .. versionadded:: 0.9 + + .. versionchanged:: 2.2 + If the used style defines foreground colors in the form ``#ansi*``, then + `Terminal256Formatter` will map these to non extended foreground color. + See :ref:`AnsiTerminalStyle` for more information. + + .. versionchanged:: 2.4 + The ANSI color names have been updated with names that are easier to + understand and align with colornames of other projects and terminals. + See :ref:`this table <new-ansi-color-names>` for more information. + + + Options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + + `linenos` + Set to ``True`` to have line numbers on the terminal output as well + (default: ``False`` = no line numbers). + """ + name = 'Terminal256' + aliases = ['terminal256', 'console256', '256'] + filenames = [] + + def __init__(self, **options): + Formatter.__init__(self, **options) + + self.xterm_colors = [] + self.best_match = {} + self.style_string = {} + + self.usebold = 'nobold' not in options + self.useunderline = 'nounderline' not in options + self.useitalic = 'noitalic' not in options + + self._build_color_table() # build an RGB-to-256 color conversion table + self._setup_styles() # convert selected style's colors to term. 
colors + + self.linenos = options.get('linenos', False) + self._lineno = 0 + + def _build_color_table(self): + # colors 0..15: 16 basic colors + + self.xterm_colors.append((0x00, 0x00, 0x00)) # 0 + self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1 + self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2 + self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3 + self.xterm_colors.append((0x00, 0x00, 0xee)) # 4 + self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5 + self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6 + self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7 + self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8 + self.xterm_colors.append((0xff, 0x00, 0x00)) # 9 + self.xterm_colors.append((0x00, 0xff, 0x00)) # 10 + self.xterm_colors.append((0xff, 0xff, 0x00)) # 11 + self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12 + self.xterm_colors.append((0xff, 0x00, 0xff)) # 13 + self.xterm_colors.append((0x00, 0xff, 0xff)) # 14 + self.xterm_colors.append((0xff, 0xff, 0xff)) # 15 + + # colors 16..232: the 6x6x6 color cube + + valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff) + + for i in range(217): + r = valuerange[(i // 36) % 6] + g = valuerange[(i // 6) % 6] + b = valuerange[i % 6] + self.xterm_colors.append((r, g, b)) + + # colors 233..253: grayscale + + for i in range(1, 22): + v = 8 + i * 10 + self.xterm_colors.append((v, v, v)) + + def _closest_color(self, r, g, b): + distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff) + match = 0 + + for i in range(0, 254): + values = self.xterm_colors[i] + + rd = r - values[0] + gd = g - values[1] + bd = b - values[2] + d = rd*rd + gd*gd + bd*bd + + if d < distance: + match = i + distance = d + return match + + def _color_index(self, color): + index = self.best_match.get(color, None) + if color in ansicolors: + # strip the `ansi/#ansi` part and look up code + index = color + self.best_match[color] = index + if index is None: + try: + rgb = int(str(color), 16) + except ValueError: + rgb = 0 + + r = (rgb >> 16) & 0xff + g = (rgb >> 8) & 0xff + b = rgb & 0xff + index = self._closest_color(r, g, b) + self.best_match[color] = index + return index + + def _setup_styles(self): + for ttype, ndef in self.style: + escape = EscapeSequence() + # get foreground from ansicolor if set + if ndef['ansicolor']: + escape.fg = self._color_index(ndef['ansicolor']) + elif ndef['color']: + escape.fg = self._color_index(ndef['color']) + if ndef['bgansicolor']: + escape.bg = self._color_index(ndef['bgansicolor']) + elif ndef['bgcolor']: + escape.bg = self._color_index(ndef['bgcolor']) + if self.usebold and ndef['bold']: + escape.bold = True + if self.useunderline and ndef['underline']: + escape.underline = True + if self.useitalic and ndef['italic']: + escape.italic = True + self.style_string[str(ttype)] = (escape.color_string(), + escape.reset_string()) + + def _write_lineno(self, outfile): + self._lineno += 1 + outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno)) + + def format(self, tokensource, outfile): + return Formatter.format(self, tokensource, outfile) + + def format_unencoded(self, tokensource, outfile): + if self.linenos: + self._write_lineno(outfile) + + for ttype, value in tokensource: + not_found = True + while ttype and not_found: + try: + # outfile.write( "<" + str(ttype) + ">" ) + on, off = self.style_string[str(ttype)] + + # Like TerminalFormatter, add "reset colors" escape sequence + # on newline. 
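+                    # e.g. a token spanning two lines is written as
+                    #   <on>line1<off> \n <on>line2<off>
+                    # so no attribute bleeds across the newline.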
+ spl = value.split('\n') + for line in spl[:-1]: + if line: + outfile.write(on + line + off) + if self.linenos: + self._write_lineno(outfile) + else: + outfile.write('\n') + + if spl[-1]: + outfile.write(on + spl[-1] + off) + + not_found = False + # outfile.write( '#' + str(ttype) + '#' ) + + except KeyError: + # ottype = ttype + ttype = ttype.parent + # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' ) + + if not_found: + outfile.write(value) + + if self.linenos: + outfile.write("\n") + + + +class TerminalTrueColorFormatter(Terminal256Formatter): + r""" + Format tokens with ANSI color sequences, for output in a true-color + terminal or console. Like in `TerminalFormatter` color sequences + are terminated at newlines, so that paging the output works correctly. + + .. versionadded:: 2.1 + + Options accepted: + + `style` + The style to use, can be a string or a Style subclass (default: + ``'default'``). + """ + name = 'TerminalTrueColor' + aliases = ['terminal16m', 'console16m', '16m'] + filenames = [] + + def _build_color_table(self): + pass + + def _color_tuple(self, color): + try: + rgb = int(str(color), 16) + except ValueError: + return None + r = (rgb >> 16) & 0xff + g = (rgb >> 8) & 0xff + b = rgb & 0xff + return (r, g, b) + + def _setup_styles(self): + for ttype, ndef in self.style: + escape = EscapeSequence() + if ndef['color']: + escape.fg = self._color_tuple(ndef['color']) + if ndef['bgcolor']: + escape.bg = self._color_tuple(ndef['bgcolor']) + if self.usebold and ndef['bold']: + escape.bold = True + if self.useunderline and ndef['underline']: + escape.underline = True + if self.useitalic and ndef['italic']: + escape.italic = True + self.style_string[str(ttype)] = (escape.true_color_string(), + escape.reset_string()) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py new file mode 100644 index 0000000..eb2c1b4 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexer.py @@ -0,0 +1,943 @@ +""" + pygments.lexer + ~~~~~~~~~~~~~~ + + Base lexer classes. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +import sys +import time + +from pip._vendor.pygments.filter import apply_filters, Filter +from pip._vendor.pygments.filters import get_filter_by_name +from pip._vendor.pygments.token import Error, Text, Other, Whitespace, _TokenType +from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ + make_analysator, Future, guess_decode +from pip._vendor.pygments.regexopt import regex_opt + +__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', + 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', + 'default', 'words', 'line_re'] + +line_re = re.compile('.*?\n') + +_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), + (b'\xff\xfe\0\0', 'utf-32'), + (b'\0\0\xfe\xff', 'utf-32be'), + (b'\xff\xfe', 'utf-16'), + (b'\xfe\xff', 'utf-16be')] + +_default_analyse = staticmethod(lambda x: 0.0) + + +class LexerMeta(type): + """ + This metaclass automagically converts ``analyse_text`` methods into + static methods which always return float values. + """ + + def __new__(mcs, name, bases, d): + if 'analyse_text' in d: + d['analyse_text'] = make_analysator(d['analyse_text']) + return type.__new__(mcs, name, bases, d) + + +class Lexer(metaclass=LexerMeta): + """ + Lexer for a specific language. 
+
+    See also :doc:`lexerdevelopment`, a high-level guide to writing
+    lexers.
+
+    Lexer classes have attributes used for choosing the most appropriate
+    lexer based on various criteria.
+
+    .. autoattribute:: name
+       :no-value:
+    .. autoattribute:: aliases
+       :no-value:
+    .. autoattribute:: filenames
+       :no-value:
+    .. autoattribute:: alias_filenames
+    .. autoattribute:: mimetypes
+       :no-value:
+    .. autoattribute:: priority
+
+    Lexers included in Pygments should have an additional attribute:
+
+    .. autoattribute:: url
+       :no-value:
+
+    You can pass options to the constructor. The basic options recognized
+    by all lexers and processed by the base `Lexer` class are:
+
+    ``stripnl``
+        Strip leading and trailing newlines from the input (default: True).
+    ``stripall``
+        Strip all leading and trailing whitespace from the input
+        (default: False).
+    ``ensurenl``
+        Make sure that the input ends with a newline (default: True). This
+        is required for some lexers that consume input linewise.
+
+        .. versionadded:: 1.3
+
+    ``tabsize``
+        If given and greater than 0, expand tabs in the input (default: 0).
+    ``encoding``
+        If given, must be an encoding name. This encoding will be used to
+        convert the input string to Unicode, if it is not already a Unicode
+        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
+        Latin1 detection). Can also be ``'chardet'`` to use the chardet
+        library, if it is installed.
+    ``inencoding``
+        Overrides the ``encoding`` if given.
+    """
+
+    #: Full name of the lexer, in human-readable form
+    name = None
+
+    #: A list of short, unique identifiers that can be used to look
+    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
+    aliases = []
+
+    #: A list of `fnmatch` patterns that match filenames which contain
+    #: content for this lexer. The patterns in this list should be unique among
+    #: all lexers.
+    filenames = []
+
+    #: A list of `fnmatch` patterns that match filenames which may or may not
+    #: contain content for this lexer. This list is used by the
+    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
+    #: are then included in guessing the correct one. That means that
+    #: e.g. every lexer for HTML and a template language should include
+    #: ``\*.html`` in this list.
+    alias_filenames = []
+
+    #: A list of MIME types for content that can be lexed with this lexer.
+    mimetypes = []
+
+    #: Priority, should multiple lexers match and no content is provided
+    priority = 0
+
+    #: URL of the language specification/definition. Used in the Pygments
+    #: documentation.
+    url = None
+
+    def __init__(self, **options):
+        """
+        This constructor takes arbitrary options as keyword arguments.
+        Every subclass must first process its own options and then call
+        the `Lexer` constructor, since it processes the basic
+        options like `stripnl`.
+
+        An example looks like this:
+
+        .. sourcecode:: python
+
+           def __init__(self, **options):
+               self.compress = options.get('compress', '')
+               Lexer.__init__(self, **options)
+
+        As these options must all be specifiable as strings (due to the
+        command line usage), there are various utility functions
+        available to help with that, see `Utilities`_.
+ """ + self.options = options + self.stripnl = get_bool_opt(options, 'stripnl', True) + self.stripall = get_bool_opt(options, 'stripall', False) + self.ensurenl = get_bool_opt(options, 'ensurenl', True) + self.tabsize = get_int_opt(options, 'tabsize', 0) + self.encoding = options.get('encoding', 'guess') + self.encoding = options.get('inencoding') or self.encoding + self.filters = [] + for filter_ in get_list_opt(options, 'filters', ()): + self.add_filter(filter_) + + def __repr__(self): + if self.options: + return '<pygments.lexers.%s with %r>' % (self.__class__.__name__, + self.options) + else: + return '<pygments.lexers.%s>' % self.__class__.__name__ + + def add_filter(self, filter_, **options): + """ + Add a new stream filter to this lexer. + """ + if not isinstance(filter_, Filter): + filter_ = get_filter_by_name(filter_, **options) + self.filters.append(filter_) + + def analyse_text(text): + """ + A static method which is called for lexer guessing. + + It should analyse the text and return a float in the range + from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer + will not be selected as the most probable one, if it returns + ``1.0``, it will be selected immediately. This is used by + `guess_lexer`. + + The `LexerMeta` metaclass automatically wraps this function so + that it works like a static method (no ``self`` or ``cls`` + parameter) and the return value is automatically converted to + `float`. If the return value is an object that is boolean `False` + it's the same as if the return values was ``0.0``. + """ + + def get_tokens(self, text, unfiltered=False): + """ + This method is the basic interface of a lexer. It is called by + the `highlight()` function. It must process the text and return an + iterable of ``(tokentype, value)`` pairs from `text`. + + Normally, you don't need to override this method. The default + implementation processes the options recognized by all lexers + (`stripnl`, `stripall` and so on), and then yields all tokens + from `get_tokens_unprocessed()`, with the ``index`` dropped. + + If `unfiltered` is set to `True`, the filtering mechanism is + bypassed even if filters are defined. 
+ """ + if not isinstance(text, str): + if self.encoding == 'guess': + text, _ = guess_decode(text) + elif self.encoding == 'chardet': + try: + from pip._vendor import chardet + except ImportError as e: + raise ImportError('To enable chardet encoding guessing, ' + 'please install the chardet library ' + 'from http://chardet.feedparser.org/') from e + # check for BOM first + decoded = None + for bom, encoding in _encoding_map: + if text.startswith(bom): + decoded = text[len(bom):].decode(encoding, 'replace') + break + # no BOM found, so use chardet + if decoded is None: + enc = chardet.detect(text[:1024]) # Guess using first 1KB + decoded = text.decode(enc.get('encoding') or 'utf-8', + 'replace') + text = decoded + else: + text = text.decode(self.encoding) + if text.startswith('\ufeff'): + text = text[len('\ufeff'):] + else: + if text.startswith('\ufeff'): + text = text[len('\ufeff'):] + + # text now *is* a unicode string + text = text.replace('\r\n', '\n') + text = text.replace('\r', '\n') + if self.stripall: + text = text.strip() + elif self.stripnl: + text = text.strip('\n') + if self.tabsize > 0: + text = text.expandtabs(self.tabsize) + if self.ensurenl and not text.endswith('\n'): + text += '\n' + + def streamer(): + for _, t, v in self.get_tokens_unprocessed(text): + yield t, v + stream = streamer() + if not unfiltered: + stream = apply_filters(stream, self.filters, self) + return stream + + def get_tokens_unprocessed(self, text): + """ + This method should process the text and return an iterable of + ``(index, tokentype, value)`` tuples where ``index`` is the starting + position of the token within the input text. + + It must be overridden by subclasses. It is recommended to + implement it as a generator to maximize effectiveness. + """ + raise NotImplementedError + + +class DelegatingLexer(Lexer): + """ + This lexer takes two lexer as arguments. A root lexer and + a language lexer. First everything is scanned using the language + lexer, afterwards all ``Other`` tokens are lexed using the root + lexer. + + The lexers from the ``template`` lexer package use this base lexer. + """ + + def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): + self.root_lexer = _root_lexer(**options) + self.language_lexer = _language_lexer(**options) + self.needle = _needle + Lexer.__init__(self, **options) + + def get_tokens_unprocessed(self, text): + buffered = '' + insertions = [] + lng_buffer = [] + for i, t, v in self.language_lexer.get_tokens_unprocessed(text): + if t is self.needle: + if lng_buffer: + insertions.append((len(buffered), lng_buffer)) + lng_buffer = [] + buffered += v + else: + lng_buffer.append((i, t, v)) + if lng_buffer: + insertions.append((len(buffered), lng_buffer)) + return do_insertions(insertions, + self.root_lexer.get_tokens_unprocessed(buffered)) + + +# ------------------------------------------------------------------------------ +# RegexLexer and ExtendedRegexLexer +# + + +class include(str): # pylint: disable=invalid-name + """ + Indicates that a state should include rules from another state. + """ + pass + + +class _inherit: + """ + Indicates the a state should inherit from its superclass. + """ + def __repr__(self): + return 'inherit' + +inherit = _inherit() # pylint: disable=invalid-name + + +class combined(tuple): # pylint: disable=invalid-name + """ + Indicates a state combined from multiple states. 
+ """ + + def __new__(cls, *args): + return tuple.__new__(cls, args) + + def __init__(self, *args): + # tuple.__init__ doesn't do anything + pass + + +class _PseudoMatch: + """ + A pseudo match object constructed from a string. + """ + + def __init__(self, start, text): + self._text = text + self._start = start + + def start(self, arg=None): + return self._start + + def end(self, arg=None): + return self._start + len(self._text) + + def group(self, arg=None): + if arg: + raise IndexError('No such group') + return self._text + + def groups(self): + return (self._text,) + + def groupdict(self): + return {} + + +def bygroups(*args): + """ + Callback that yields multiple actions for each group in the match. + """ + def callback(lexer, match, ctx=None): + for i, action in enumerate(args): + if action is None: + continue + elif type(action) is _TokenType: + data = match.group(i + 1) + if data: + yield match.start(i + 1), action, data + else: + data = match.group(i + 1) + if data is not None: + if ctx: + ctx.pos = match.start(i + 1) + for item in action(lexer, + _PseudoMatch(match.start(i + 1), data), ctx): + if item: + yield item + if ctx: + ctx.pos = match.end() + return callback + + +class _This: + """ + Special singleton used for indicating the caller class. + Used by ``using``. + """ + +this = _This() + + +def using(_other, **kwargs): + """ + Callback that processes the match with a different lexer. + + The keyword arguments are forwarded to the lexer, except `state` which + is handled separately. + + `state` specifies the state that the new lexer will start in, and can + be an enumerable such as ('root', 'inline', 'string') or a simple + string which is assumed to be on top of the root state. + + Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. + """ + gt_kwargs = {} + if 'state' in kwargs: + s = kwargs.pop('state') + if isinstance(s, (list, tuple)): + gt_kwargs['stack'] = s + else: + gt_kwargs['stack'] = ('root', s) + + if _other is this: + def callback(lexer, match, ctx=None): + # if keyword arguments are given the callback + # function has to create a new lexer instance + if kwargs: + # XXX: cache that somehow + kwargs.update(lexer.options) + lx = lexer.__class__(**kwargs) + else: + lx = lexer + s = match.start() + for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): + yield i + s, t, v + if ctx: + ctx.pos = match.end() + else: + def callback(lexer, match, ctx=None): + # XXX: cache that somehow + kwargs.update(lexer.options) + lx = _other(**kwargs) + + s = match.start() + for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): + yield i + s, t, v + if ctx: + ctx.pos = match.end() + return callback + + +class default: + """ + Indicates a state or state action (e.g. #pop) to apply. + For example default('#pop') is equivalent to ('', Token, '#pop') + Note that state tuples may be used as well. + + .. versionadded:: 2.0 + """ + def __init__(self, state): + self.state = state + + +class words(Future): + """ + Indicates a list of literal words that is transformed into an optimized + regex that matches any of the words. + + .. versionadded:: 2.0 + """ + def __init__(self, words, prefix='', suffix=''): + self.words = words + self.prefix = prefix + self.suffix = suffix + + def get(self): + return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) + + +class RegexLexerMeta(LexerMeta): + """ + Metaclass for RegexLexer, creates the self._tokens attribute from + self.tokens on the first instantiation. 
+ """ + + def _process_regex(cls, regex, rflags, state): + """Preprocess the regular expression component of a token definition.""" + if isinstance(regex, Future): + regex = regex.get() + return re.compile(regex, rflags).match + + def _process_token(cls, token): + """Preprocess the token component of a token definition.""" + assert type(token) is _TokenType or callable(token), \ + 'token type must be simple type or callable, not %r' % (token,) + return token + + def _process_new_state(cls, new_state, unprocessed, processed): + """Preprocess the state transition action of a token definition.""" + if isinstance(new_state, str): + # an existing state + if new_state == '#pop': + return -1 + elif new_state in unprocessed: + return (new_state,) + elif new_state == '#push': + return new_state + elif new_state[:5] == '#pop:': + return -int(new_state[5:]) + else: + assert False, 'unknown new state %r' % new_state + elif isinstance(new_state, combined): + # combine a new state from existing ones + tmp_state = '_tmp_%d' % cls._tmpname + cls._tmpname += 1 + itokens = [] + for istate in new_state: + assert istate != new_state, 'circular state ref %r' % istate + itokens.extend(cls._process_state(unprocessed, + processed, istate)) + processed[tmp_state] = itokens + return (tmp_state,) + elif isinstance(new_state, tuple): + # push more than one state + for istate in new_state: + assert (istate in unprocessed or + istate in ('#pop', '#push')), \ + 'unknown new state ' + istate + return new_state + else: + assert False, 'unknown new state def %r' % new_state + + def _process_state(cls, unprocessed, processed, state): + """Preprocess a single state definition.""" + assert type(state) is str, "wrong state name %r" % state + assert state[0] != '#', "invalid state name %r" % state + if state in processed: + return processed[state] + tokens = processed[state] = [] + rflags = cls.flags + for tdef in unprocessed[state]: + if isinstance(tdef, include): + # it's a state reference + assert tdef != state, "circular state reference %r" % state + tokens.extend(cls._process_state(unprocessed, processed, + str(tdef))) + continue + if isinstance(tdef, _inherit): + # should be processed already, but may not in the case of: + # 1. the state has no counterpart in any parent + # 2. the state includes more than one 'inherit' + continue + if isinstance(tdef, default): + new_state = cls._process_new_state(tdef.state, unprocessed, processed) + tokens.append((re.compile('').match, None, new_state)) + continue + + assert type(tdef) is tuple, "wrong rule def %r" % tdef + + try: + rex = cls._process_regex(tdef[0], rflags, state) + except Exception as err: + raise ValueError("uncompilable regex %r in state %r of %r: %s" % + (tdef[0], state, cls, err)) from err + + token = cls._process_token(tdef[1]) + + if len(tdef) == 2: + new_state = None + else: + new_state = cls._process_new_state(tdef[2], + unprocessed, processed) + + tokens.append((rex, token, new_state)) + return tokens + + def process_tokendef(cls, name, tokendefs=None): + """Preprocess a dictionary of token definitions.""" + processed = cls._all_tokens[name] = {} + tokendefs = tokendefs or cls.tokens[name] + for state in list(tokendefs): + cls._process_state(tokendefs, processed, state) + return processed + + def get_tokendefs(cls): + """ + Merge tokens from superclasses in MRO order, returning a single tokendef + dictionary. + + Any state that is not defined by a subclass will be inherited + automatically. 
States that *are* defined by subclasses will, by
+        default, override that state in the superclass. If a subclass wishes to
+        inherit definitions from a superclass, it can use the special value
+        "inherit", which will cause the superclass' state definition to be
+        included at that point in the state.
+        """
+        tokens = {}
+        inheritable = {}
+        for c in cls.__mro__:
+            toks = c.__dict__.get('tokens', {})
+
+            for state, items in toks.items():
+                curitems = tokens.get(state)
+                if curitems is None:
+                    # N.b. because this is assigned by reference, sufficiently
+                    # deep hierarchies are processed incrementally (e.g. for
+                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
+                    # will not see any inherits in B).
+                    tokens[state] = items
+                    try:
+                        inherit_ndx = items.index(inherit)
+                    except ValueError:
+                        continue
+                    inheritable[state] = inherit_ndx
+                    continue
+
+                inherit_ndx = inheritable.pop(state, None)
+                if inherit_ndx is None:
+                    continue
+
+                # Replace the "inherit" value with the items
+                curitems[inherit_ndx:inherit_ndx+1] = items
+                try:
+                    # N.b. this is the index in items (that is, the superclass
+                    # copy), so offset required when storing below.
+                    new_inh_ndx = items.index(inherit)
+                except ValueError:
+                    pass
+                else:
+                    inheritable[state] = inherit_ndx + new_inh_ndx
+
+        return tokens
+
+    def __call__(cls, *args, **kwds):
+        """Instantiate cls after preprocessing its token definitions."""
+        if '_tokens' not in cls.__dict__:
+            cls._all_tokens = {}
+            cls._tmpname = 0
+            if hasattr(cls, 'token_variants') and cls.token_variants:
+                # don't process yet
+                pass
+            else:
+                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
+
+        return type.__call__(cls, *args, **kwds)
+
+
+class RegexLexer(Lexer, metaclass=RegexLexerMeta):
+    """
+    Base for simple stateful regular expression-based lexers.
+    Simplifies the lexing process so that you need only
+    provide a list of states and regular expressions.
+    """
+
+    #: Flags for compiling the regular expressions.
+    #: Defaults to MULTILINE.
+    flags = re.MULTILINE
+
+    #: At all times there is a stack of states. Initially, the stack contains
+    #: a single state 'root'. The top of the stack is called "the current state".
+    #:
+    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
+    #:
+    #: ``new_state`` can be omitted to signify no state transition.
+    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
+    #: the new current state is ``new_state``.
+    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
+    #: on the stack and the current state will be the last element of the list.
+    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
+    #: to signify a new, anonymous state combined from the rules of two
+    #: or more existing ones.
+    #: Furthermore, it can be '#pop' to signify going back one step in
+    #: the state stack, or '#push' to push the current state on the stack
+    #: again. Note that if you push while in a combined state, the combined
+    #: state itself is pushed, and not only the state in which the rule is
+    #: defined.
+    #:
+    #: The tuple can also be replaced with ``include('state')``, in which
+    #: case the rules from the state named by the string are included in the
+    #: current one.
+    tokens = {}
+
+    def get_tokens_unprocessed(self, text, stack=('root',)):
+        """
+        Split ``text`` into (tokentype, text) pairs.
+ + ``stack`` is the initial stack (default: ``['root']``) + """ + pos = 0 + tokendefs = self._tokens + statestack = list(stack) + statetokens = tokendefs[statestack[-1]] + while 1: + for rexmatch, action, new_state in statetokens: + m = rexmatch(text, pos) + if m: + if action is not None: + if type(action) is _TokenType: + yield pos, action, m.group() + else: + yield from action(self, m) + pos = m.end() + if new_state is not None: + # state transition + if isinstance(new_state, tuple): + for state in new_state: + if state == '#pop': + if len(statestack) > 1: + statestack.pop() + elif state == '#push': + statestack.append(statestack[-1]) + else: + statestack.append(state) + elif isinstance(new_state, int): + # pop, but keep at least one state on the stack + # (random code leading to unexpected pops should + # not allow exceptions) + if abs(new_state) >= len(statestack): + del statestack[1:] + else: + del statestack[new_state:] + elif new_state == '#push': + statestack.append(statestack[-1]) + else: + assert False, "wrong state def: %r" % new_state + statetokens = tokendefs[statestack[-1]] + break + else: + # We are here only if all state tokens have been considered + # and there was not a match on any of them. + try: + if text[pos] == '\n': + # at EOL, reset state to "root" + statestack = ['root'] + statetokens = tokendefs['root'] + yield pos, Whitespace, '\n' + pos += 1 + continue + yield pos, Error, text[pos] + pos += 1 + except IndexError: + break + + +class LexerContext: + """ + A helper object that holds lexer position data. + """ + + def __init__(self, text, pos, stack=None, end=None): + self.text = text + self.pos = pos + self.end = end or len(text) # end=0 not supported ;-) + self.stack = stack or ['root'] + + def __repr__(self): + return 'LexerContext(%r, %r, %r)' % ( + self.text, self.pos, self.stack) + + +class ExtendedRegexLexer(RegexLexer): + """ + A RegexLexer that uses a context object to store its state. + """ + + def get_tokens_unprocessed(self, text=None, context=None): + """ + Split ``text`` into (tokentype, text) pairs. + If ``context`` is given, use this lexer context instead. + """ + tokendefs = self._tokens + if not context: + ctx = LexerContext(text, 0) + statetokens = tokendefs['root'] + else: + ctx = context + statetokens = tokendefs[ctx.stack[-1]] + text = ctx.text + while 1: + for rexmatch, action, new_state in statetokens: + m = rexmatch(text, ctx.pos, ctx.end) + if m: + if action is not None: + if type(action) is _TokenType: + yield ctx.pos, action, m.group() + ctx.pos = m.end() + else: + yield from action(self, m, ctx) + if not new_state: + # altered the state stack? + statetokens = tokendefs[ctx.stack[-1]] + # CAUTION: callback must set ctx.pos! 
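+                            # (A callback that does not advance ctx.pos would
+                            # make the same input position match again and
+                            # again.)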
+ if new_state is not None: + # state transition + if isinstance(new_state, tuple): + for state in new_state: + if state == '#pop': + if len(ctx.stack) > 1: + ctx.stack.pop() + elif state == '#push': + ctx.stack.append(ctx.stack[-1]) + else: + ctx.stack.append(state) + elif isinstance(new_state, int): + # see RegexLexer for why this check is made + if abs(new_state) >= len(ctx.stack): + del ctx.stack[1:] + else: + del ctx.stack[new_state:] + elif new_state == '#push': + ctx.stack.append(ctx.stack[-1]) + else: + assert False, "wrong state def: %r" % new_state + statetokens = tokendefs[ctx.stack[-1]] + break + else: + try: + if ctx.pos >= ctx.end: + break + if text[ctx.pos] == '\n': + # at EOL, reset state to "root" + ctx.stack = ['root'] + statetokens = tokendefs['root'] + yield ctx.pos, Text, '\n' + ctx.pos += 1 + continue + yield ctx.pos, Error, text[ctx.pos] + ctx.pos += 1 + except IndexError: + break + + +def do_insertions(insertions, tokens): + """ + Helper for lexers which must combine the results of several + sublexers. + + ``insertions`` is a list of ``(index, itokens)`` pairs. + Each ``itokens`` iterable should be inserted at position + ``index`` into the token stream given by the ``tokens`` + argument. + + The result is a combined token stream. + + TODO: clean up the code here. + """ + insertions = iter(insertions) + try: + index, itokens = next(insertions) + except StopIteration: + # no insertions + yield from tokens + return + + realpos = None + insleft = True + + # iterate over the token stream where we want to insert + # the tokens from the insertion list. + for i, t, v in tokens: + # first iteration. store the position of first item + if realpos is None: + realpos = i + oldi = 0 + while insleft and i + len(v) >= index: + tmpval = v[oldi:index - i] + if tmpval: + yield realpos, t, tmpval + realpos += len(tmpval) + for it_index, it_token, it_value in itokens: + yield realpos, it_token, it_value + realpos += len(it_value) + oldi = index - i + try: + index, itokens = next(insertions) + except StopIteration: + insleft = False + break # not strictly necessary + if oldi < len(v): + yield realpos, t, v[oldi:] + realpos += len(v) - oldi + + # leftover tokens + while insleft: + # no normal tokens, set realpos to zero + realpos = realpos or 0 + for p, t, v in itokens: + yield realpos, t, v + realpos += len(v) + try: + index, itokens = next(insertions) + except StopIteration: + insleft = False + break # not strictly necessary + + +class ProfilingRegexLexerMeta(RegexLexerMeta): + """Metaclass for ProfilingRegexLexer, collects regex timing info.""" + + def _process_regex(cls, regex, rflags, state): + if isinstance(regex, words): + rex = regex_opt(regex.words, prefix=regex.prefix, + suffix=regex.suffix) + else: + rex = regex + compiled = re.compile(rex, rflags) + + def match_func(text, pos, endpos=sys.maxsize): + info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0]) + t0 = time.time() + res = compiled.match(text, pos, endpos) + t1 = time.time() + info[0] += 1 + info[1] += t1 - t0 + return res + return match_func + + +class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta): + """Drop-in replacement for RegexLexer that does profiling of its regexes.""" + + _prof_data = [] + _prof_sort_index = 4 # defaults to time per call + + def get_tokens_unprocessed(self, text, stack=('root',)): + # this needs to be a stack, since using(this) will produce nested calls + self.__class__._prof_data.append({}) + yield from RegexLexer.get_tokens_unprocessed(self, text, stack) + rawdata = 
self.__class__._prof_data.pop()
+        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
+                        n, 1000 * t, 1000 * t / n)
+                       for ((s, r), (n, t)) in rawdata.items()),
+                      key=lambda x: x[self._prof_sort_index],
+                      reverse=True)
+        sum_total = sum(x[3] for x in data)
+
+        print()
+        print('Profiling result for %s lexing %d chars in %.3f ms' %
+              (self.__class__.__name__, len(text), sum_total))
+        print('=' * 110)
+        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
+        print('-' * 110)
+        for d in data:
+            print('%-20s %-65s %5d %8.4f %8.4f' % d)
+        print('=' * 110)
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py
new file mode 100644
index 0000000..d97c3e3
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__init__.py
@@ -0,0 +1,362 @@
+"""
+    pygments.lexers
+    ~~~~~~~~~~~~~~~
+
+    Pygments lexers.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+import sys
+import types
+import fnmatch
+from os.path import basename
+
+from pip._vendor.pygments.lexers._mapping import LEXERS
+from pip._vendor.pygments.modeline import get_filetype_from_buffer
+from pip._vendor.pygments.plugin import find_plugin_lexers
+from pip._vendor.pygments.util import ClassNotFound, guess_decode
+
+COMPAT = {
+    'Python3Lexer': 'PythonLexer',
+    'Python3TracebackLexer': 'PythonTracebackLexer',
+}
+
+__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
+           'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
+
+_lexer_cache = {}
+_pattern_cache = {}
+
+
+def _fn_matches(fn, glob):
+    """Return whether the supplied file name fn matches the glob pattern."""
+    if glob not in _pattern_cache:
+        pattern = _pattern_cache[glob] = re.compile(fnmatch.translate(glob))
+        return pattern.match(fn)
+    return _pattern_cache[glob].match(fn)
+
+
+def _load_lexers(module_name):
+    """Load a lexer (and all others in the module too)."""
+    mod = __import__(module_name, None, None, ['__all__'])
+    for lexer_name in mod.__all__:
+        cls = getattr(mod, lexer_name)
+        _lexer_cache[cls.name] = cls
+
+
+def get_all_lexers(plugins=True):
+    """Return a generator of tuples in the form ``(name, aliases,
+    filenames, mimetypes)`` of all known lexers.
+
+    If *plugins* is true (the default), plugin lexers supplied by entrypoints
+    are also returned. Otherwise, only builtin ones are considered.
+    """
+    for item in LEXERS.values():
+        yield item[1:]
+    if plugins:
+        for lexer in find_plugin_lexers():
+            yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
+
+
+def find_lexer_class(name):
+    """
+    Return the `Lexer` subclass whose *name* attribute equals the *name*
+    argument.
+    """
+    if name in _lexer_cache:
+        return _lexer_cache[name]
+    # lookup builtin lexers
+    for module_name, lname, aliases, _, _ in LEXERS.values():
+        if name == lname:
+            _load_lexers(module_name)
+            return _lexer_cache[name]
+    # continue with lexers from setuptools entrypoints
+    for cls in find_plugin_lexers():
+        if cls.name == name:
+            return cls
+
+
+def find_lexer_class_by_name(_alias):
+    """
+    Return the `Lexer` subclass that has `alias` in its aliases list, without
+    instantiating it.
+
+    Like `get_lexer_by_name`, but does not instantiate the class.
+
+    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+    found.
+
+    .. versionadded:: 2.2
+    """
+    if not _alias:
+        raise ClassNotFound('no lexer for alias %r found' % _alias)
+    # lookup builtin lexers
+    for module_name, name, aliases, _, _ in LEXERS.values():
+        if _alias.lower() in aliases:
+            if name not in _lexer_cache:
+                _load_lexers(module_name)
+            return _lexer_cache[name]
+    # continue with lexers from setuptools entrypoints
+    for cls in find_plugin_lexers():
+        if _alias.lower() in cls.aliases:
+            return cls
+    raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+
+def get_lexer_by_name(_alias, **options):
+    """
+    Return an instance of a `Lexer` subclass that has `alias` in its
+    aliases list. The lexer is given the `options` at its
+    instantiation.
+
+    Will raise :exc:`pygments.util.ClassNotFound` if no lexer with that alias is
+    found.
+    """
+    if not _alias:
+        raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+    # lookup builtin lexers
+    for module_name, name, aliases, _, _ in LEXERS.values():
+        if _alias.lower() in aliases:
+            if name not in _lexer_cache:
+                _load_lexers(module_name)
+            return _lexer_cache[name](**options)
+    # continue with lexers from setuptools entrypoints
+    for cls in find_plugin_lexers():
+        if _alias.lower() in cls.aliases:
+            return cls(**options)
+    raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+
+def load_lexer_from_file(filename, lexername="CustomLexer", **options):
+    """Load a lexer from a file.
+
+    This method expects a file located relative to the current working
+    directory, which contains a Lexer class. By default, it expects the
+    Lexer to be named CustomLexer; you can specify your own class name
+    as the second argument to this function.
+
+    Users should be very careful with the input, because this method
+    is equivalent to running ``exec`` on the input file.
+
+    Raises ClassNotFound if there are any problems importing the Lexer.
+
+    .. versionadded:: 2.2
+    """
+    try:
+        # This empty dict will contain the namespace for the exec'd file
+        custom_namespace = {}
+        with open(filename, 'rb') as f:
+            exec(f.read(), custom_namespace)
+        # Retrieve the class `lexername` from that namespace
+        if lexername not in custom_namespace:
+            raise ClassNotFound('no valid %s class found in %s' %
+                                (lexername, filename))
+        lexer_class = custom_namespace[lexername]
+        # And finally instantiate it with the options
+        return lexer_class(**options)
+    except OSError as err:
+        raise ClassNotFound('cannot read %s: %s' % (filename, err))
+    except ClassNotFound:
+        raise
+    except Exception as err:
+        raise ClassNotFound('error when loading custom lexer: %s' % err)
+
+
+def find_lexer_class_for_filename(_fn, code=None):
+    """Get a lexer for a filename.
+
+    If multiple lexers match the filename pattern, use ``analyse_text()`` to
+    figure out which one is more appropriate.
+
+    Returns None if not found.
+    """
+    matches = []
+    fn = basename(_fn)
+    for modname, name, _, filenames, _ in LEXERS.values():
+        for filename in filenames:
+            if _fn_matches(fn, filename):
+                if name not in _lexer_cache:
+                    _load_lexers(modname)
+                matches.append((_lexer_cache[name], filename))
+    for cls in find_plugin_lexers():
+        for filename in cls.filenames:
+            if _fn_matches(fn, filename):
+                matches.append((cls, filename))
+
+    if isinstance(code, bytes):
+        # decode it, since all analyse_text functions expect unicode
+        code = guess_decode(code)
+
+    def get_rating(info):
+        cls, filename = info
+        # explicit patterns get a bonus
+        bonus = '*' not in filename and 0.5 or 0
+        # The class _always_ defines analyse_text because it's included in
+        # the Lexer class. The default implementation returns None which
+        # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
+        # to find lexers which need it overridden.
+        if code:
+            return cls.analyse_text(code) + bonus, cls.__name__
+        return cls.priority + bonus, cls.__name__
+
+    if matches:
+        matches.sort(key=get_rating)
+        # print "Possible lexers, after sort:", matches
+        return matches[-1][0]
+
+
+def get_lexer_for_filename(_fn, code=None, **options):
+    """Get a lexer for a filename.
+
+    Return a `Lexer` subclass instance that has a filename pattern
+    matching `fn`. The lexer is given the `options` at its
+    instantiation.
+
+    Raise :exc:`pygments.util.ClassNotFound` if no lexer for that filename
+    is found.
+
+    If multiple lexers match the filename pattern, use their ``analyse_text()``
+    methods to figure out which one is more appropriate.
+    """
+    res = find_lexer_class_for_filename(_fn, code)
+    if not res:
+        raise ClassNotFound('no lexer for filename %r found' % _fn)
+    return res(**options)
+
+
+def get_lexer_for_mimetype(_mime, **options):
+    """
+    Return a `Lexer` subclass instance that has `mime` in its mimetype
+    list. The lexer is given the `options` at its instantiation.
+
+    Will raise :exc:`pygments.util.ClassNotFound` if no lexer for that mimetype
+    is found.
+    """
+    for modname, name, _, _, mimetypes in LEXERS.values():
+        if _mime in mimetypes:
+            if name not in _lexer_cache:
+                _load_lexers(modname)
+            return _lexer_cache[name](**options)
+    for cls in find_plugin_lexers():
+        if _mime in cls.mimetypes:
+            return cls(**options)
+    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
+
+
+def _iter_lexerclasses(plugins=True):
+    """Return an iterator over all lexer classes."""
+    for key in sorted(LEXERS):
+        module_name, name = LEXERS[key][:2]
+        if name not in _lexer_cache:
+            _load_lexers(module_name)
+        yield _lexer_cache[name]
+    if plugins:
+        yield from find_plugin_lexers()
+
+
+def guess_lexer_for_filename(_fn, _text, **options):
+    """
+    As :func:`guess_lexer()`, but only lexers which have a pattern in `filenames`
+    or `alias_filenames` that matches `filename` are taken into consideration.
+
+    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+    handle the content.
+    """
+    fn = basename(_fn)
+    primary = {}
+    matching_lexers = set()
+    for lexer in _iter_lexerclasses():
+        for filename in lexer.filenames:
+            if _fn_matches(fn, filename):
+                matching_lexers.add(lexer)
+                primary[lexer] = True
+        for filename in lexer.alias_filenames:
+            if _fn_matches(fn, filename):
+                matching_lexers.add(lexer)
+                primary[lexer] = False
+    if not matching_lexers:
+        raise ClassNotFound('no lexer for filename %r found' % fn)
+    if len(matching_lexers) == 1:
+        return matching_lexers.pop()(**options)
+    result = []
+    for lexer in matching_lexers:
+        rv = lexer.analyse_text(_text)
+        if rv == 1.0:
+            return lexer(**options)
+        result.append((rv, lexer))
+
+    def type_sort(t):
+        # sort by:
+        # - analyse score
+        # - is primary filename pattern?
+        # - priority
+        # - last resort: class name
+        return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
+    result.sort(key=type_sort)
+
+    return result[-1][1](**options)
+
+
+def guess_lexer(_text, **options):
+    """
+    Return a `Lexer` subclass instance that's guessed from the text in
+    `text`. For that, the :meth:`.analyse_text()` method of every known lexer
+    class is called with the text as argument, and the lexer which returned the
+    highest value will be instantiated and returned.
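+
+    A minimal usage sketch::
+
+        from pip._vendor.pygments.lexers import guess_lexer
+        lexer = guess_lexer('#!/usr/bin/env python\nprint(42)\n')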
+
+    :exc:`pygments.util.ClassNotFound` is raised if no lexer thinks it can
+    handle the content.
+    """
+
+    if not isinstance(_text, str):
+        inencoding = options.get('inencoding', options.get('encoding'))
+        if inencoding:
+            _text = _text.decode(inencoding or 'utf8')
+        else:
+            _text, _ = guess_decode(_text)
+
+    # try to get a vim modeline first
+    ft = get_filetype_from_buffer(_text)
+
+    if ft is not None:
+        try:
+            return get_lexer_by_name(ft, **options)
+        except ClassNotFound:
+            pass
+
+    best_lexer = [0.0, None]
+    for lexer in _iter_lexerclasses():
+        rv = lexer.analyse_text(_text)
+        if rv == 1.0:
+            return lexer(**options)
+        if rv > best_lexer[0]:
+            best_lexer[:] = (rv, lexer)
+    if not best_lexer[0] or best_lexer[1] is None:
+        raise ClassNotFound('no lexer matching the text found')
+    return best_lexer[1](**options)
+
+
+class _automodule(types.ModuleType):
+    """Automatically import lexers."""
+
+    def __getattr__(self, name):
+        info = LEXERS.get(name)
+        if info:
+            _load_lexers(info[0])
+            cls = _lexer_cache[info[1]]
+            setattr(self, name, cls)
+            return cls
+        if name in COMPAT:
+            return getattr(self, COMPAT[name])
+        raise AttributeError(name)
+
+
+oldmod = sys.modules[__name__]
+newmod = _automodule(__name__)
+newmod.__dict__.update(oldmod.__dict__)
+sys.modules[__name__] = newmod
+del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-311.pyc
Binary files differ
new file mode 100644
index 0000000..fa3d629
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/__init__.cpython-311.pyc
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc
Binary files differ
new file mode 100644
index 0000000..407de6e
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/_mapping.cpython-311.pyc
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-311.pyc
Binary files differ
new file mode 100644
index 0000000..1d8dcd5
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/__pycache__/python.cpython-311.pyc
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/_mapping.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/_mapping.py
new file mode 100644
index 0000000..de6a015
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/_mapping.py
@@ -0,0 +1,559 @@
+# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `tox -e mapfiles` instead.
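+
+# Each value is a tuple of (module name, lexer name, aliases, filename
+# patterns, mimetypes); the lookup helpers in pygments.lexers unpack the
+# entries in exactly this order.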
+ +LEXERS = { + 'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)), + 'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()), + 'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()), + 'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)), + 'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')), + 'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')), + 'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)), + 'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()), + 'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)), + 'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()), + 'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)), + 'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)), + 'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()), + 'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()), + 'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()), + 'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()), + 'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()), + 'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()), + 'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()), + 'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()), + 'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()), + 'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()), + 'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()), + 'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()), + 'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)), + 'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()), + 'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)), + 'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), 
()), + 'ArturoLexer': ('pip._vendor.pygments.lexers.arturo', 'Arturo', ('arturo', 'art'), ('*.art',), ()), + 'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')), + 'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)), + 'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)), + 'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()), + 'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)), + 'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)), + 'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)), + 'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()), + 'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)), + 'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()), + 'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()), + 'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()), + 'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()), + 'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')), + 'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')), + 'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)), + 'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)), + 'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)), + 'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')), + 'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)), + 'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)), + 'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)), + 'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)), + 'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()), + 'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)), + 'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()), + 
'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)), + 'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()), + 'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()), + 'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')), + 'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)), + 'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)), + 'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()), + 'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()), + 'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), + 'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)), + 'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()), + 'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()), + 'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()), + 'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()), + 'CarbonLexer': ('pip._vendor.pygments.lexers.carbon', 'Carbon', ('carbon',), ('*.carbon',), ('text/x-carbon',)), + 'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()), + 'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)), + 'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)), + 'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()), + 'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')), + 'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()), + 'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()), + 'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')), + 'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')), + 'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')), + 'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')), + 'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)), + 
'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)), + 'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()), + 'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')), + 'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')), + 'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()), + 'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)), + 'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)), + 'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()), + 'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)), + 'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()), + 'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()), + 'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)), + 'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)), + 'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)), + 'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)), + 'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')), + 'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)), + 'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()), + 'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)), + 'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)), + 'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)), + 'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()), + 'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()), + 'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()), + 'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')), + 'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 
'css+erb'), (), ('text/css+ruby',)), + 'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)), + 'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)), + 'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)), + 'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)), + 'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)), + 'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()), + 'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')), + 'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)), + 'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)), + 'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()), + 'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)), + 'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)), + 'DaxLexer': ('pip._vendor.pygments.lexers.dax', 'Dax', ('dax',), ('*.dax',), ()), + 'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()), + 'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)), + 'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)), + 'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)), + 'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')), + 'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')), + 'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)), + 'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)), + 'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')), + 'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)), + 'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)), + 'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)), + 'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)), + 'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')), + 'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), 
('text/x-earl-grey',)), + 'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)), + 'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)), + 'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)), + 'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)), + 'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)), + 'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)), + 'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)), + 'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')), + 'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)), + 'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)), + 'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)), + 'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)), + 'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)), + 'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)), + 'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)), + 'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()), + 'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)), + 'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi', '*.fsx'), ('text/x-fsharp',)), + 'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)), + 'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)), + 'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)), + 'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)), + 'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)), + 'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()), + 'FiftLexer': ('pip._vendor.pygments.lexers.fift', 'Fift', ('fift', 'fif'), ('*.fif',), ()), + 'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)), + 'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)), + 'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()), + 'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)), + 'FortranFixedLexer': 
('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()), + 'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)), + 'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()), + 'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)), + 'FuncLexer': ('pip._vendor.pygments.lexers.func', 'FunC', ('func', 'fc'), ('*.fc', '*.func'), ()), + 'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)), + 'GAPConsoleLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP session', ('gap-console', 'gap-repl'), ('*.tst',), ()), + 'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()), + 'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')), + 'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)), + 'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()), + 'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)), + 'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()), + 'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')), + 'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')), + 'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')), + 'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)), + 'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)), + 'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)), + 'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()), + 'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)), + 'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)), + 'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)), + 'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')), + 'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')), + 'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)), + 'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)), + 'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()), + 'HamlLexer': 
('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)), + 'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')), + 'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()), + 'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)), + 'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')), + 'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()), + 'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)), + 'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), ('*Spec.hs',), ()), + 'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')), + 'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)), + 'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')), + 'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')), + 'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)), + 'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()), + 'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()), + 'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')), + 'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')), + 'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)), + 'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()), + 'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)), + 'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)), + 'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()), + 'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()), + 'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()), + 'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig', '*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ('text/x-ini', 'text/inf')), + 'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)), + 'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), 
('text/x-iokesrc',)), + 'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)), + 'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)), + 'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)), + 'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()), + 'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)), + 'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()), + 'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()), + 'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)), + 'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')), + 'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')), + 'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')), + 'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')), + 'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')), + 'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')), + 'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()), + 'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)), + 'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')), + 'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()), + 'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)), + 'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')), + 'JsonnetLexer': ('pip._vendor.pygments.lexers.jsonnet', 'Jsonnet', ('jsonnet',), ('*.jsonnet', '*.libsonnet'), ()), + 'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)), + 'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()), + 'JuliaLexer': 
('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')), + 'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')), + 'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()), + 'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')), + 'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)), + 'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()), + 'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)), + 'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)), + 'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()), + 'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)), + 'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)), + 'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')), + 'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')), + 'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)), + 'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)), + 'LeanLexer': ('pip._vendor.pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)), + 'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)), + 'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)), + 'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()), + 'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)), + 'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()), + 'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)), + 'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)), + 'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)), + 'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)), + 'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)), + 'LlvmLexer': 
('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)), + 'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()), + 'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()), + 'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)), + 'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)), + 'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')), + 'MCFunctionLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)), + 'MCSchemaLexer': ('pip._vendor.pygments.lexers.minecraft', 'MCSchema', ('mcschema',), ('*.mcschema',), ('text/mcschema',)), + 'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')), + 'MIPSLexer': ('pip._vendor.pygments.lexers.mips', 'MIPS', ('mips',), ('*.mips', '*.MIPS'), ()), + 'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)), + 'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()), + 'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()), + 'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)), + 'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)), + 'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)), + 'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')), + 'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)), + 'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)), + 'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')), + 'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)), + 'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)), + 'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)), + 'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')), + 'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)), + 'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()), + 'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), 
('*.mac', '*.max'), ()), + 'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)), + 'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)), + 'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')), + 'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)), + 'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)), + 'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)), + 'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)), + 'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()), + 'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')), + 'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()), + 'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()), + 'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()), + 'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()), + 'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()), + 'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()), + 'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)), + 'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()), + 'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()), + 'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()), + 'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)), + 'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)), + 'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)), + 'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')), + 'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)), + 'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)), + 'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)), + 'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)), + 'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM', '*.nasm'), ('text/x-nasm',)), + 'NasmObjdumpLexer': 
('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)), + 'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)), + 'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)), + 'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()), + 'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')), + 'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)), + 'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)), + 'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)), + 'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()), + 'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)), + 'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)), + 'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()), + 'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()), + 'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()), + 'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)), + 'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)), + 'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)), + 'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)), + 'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)), + 'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)), + 'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)), + 'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()), + 'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)), + 'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)), + 'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')), + 'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()), + 'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()), + 'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()), + 'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)), + 'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 
'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)), + 'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)), + 'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')), + 'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')), + 'PhixLexer': ('pip._vendor.pygments.lexers.phix', 'Phix', ('phix',), ('*.exw',), ('text/x-phix',)), + 'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)), + 'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)), + 'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)), + 'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()), + 'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)), + 'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()), + 'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()), + 'PortugolLexer': ('pip._vendor.pygments.lexers.pascal', 'Portugol', ('portugol',), ('*.alg', '*.portugol'), ()), + 'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)), + 'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)), + 'PostgresExplainLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL EXPLAIN dialect', ('postgres-explain',), ('*.explain',), ('text/x-postgresql-explain',)), + 'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)), + 'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)), + 'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)), + 'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()), + 'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()), + 'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()), + 'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)), + 'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()), + 'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)), + 'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()), + 'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()), + 'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', 
'*.jade'), ('text/x-pug', 'text/x-jade')), + 'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()), + 'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)), + 'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')), + 'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)), + 'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)), + 'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.pyi', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')), + 'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')), + 'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()), + 'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)), + 'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()), + 'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()), + 'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()), + 'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')), + 'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()), + 'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()), + 'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)), + 'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')), + 'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()), + 'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()), + 'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()), + 'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()), + 'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()), + 'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()), + 'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()), + 'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()), + 'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)), + 'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)), + 'ReasonLexer': 
('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)), + 'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)), + 'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')), + 'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()), + 'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)), + 'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()), + 'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)), + 'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)), + 'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)), + 'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)), + 'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()), + 'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()), + 'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)), + 'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)), + 'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)), + 'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')), + 'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()), + 'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)), + 'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')), + 'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')), + 'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')), + 'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')), + 'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')), + 'SNBTLexer': ('pip._vendor.pygments.lexers.minecraft', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)), + 'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)), + 'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)), + 'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()), + 'ScalaLexer': 
('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)), + 'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)), + 'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()), + 'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')), + 'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)), + 'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)), + 'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)), + 'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)), + 'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')), + 'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()), + 'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()), + 'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()), + 'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()), + 'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)), + 'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()), + 'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)), + 'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)), + 'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()), + 'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)), + 'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()), + 'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)), + 'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()), + 'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()), + 'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()), + 'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)), + 'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()), + 'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)), + 'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)), + 'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()), + 'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)), + 'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), 
('text/x-sqlite3-console',)), + 'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)), + 'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()), + 'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)), + 'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()), + 'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')), + 'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')), + 'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)), + 'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)), + 'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)), + 'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()), + 'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()), + 'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()), + 'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()), + 'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)), + 'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)), + 'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')), + 'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)), + 'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()), + 'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)), + 'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()), + 'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)), + 'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()), + 'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()), + 'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf', 'hcl'), ('*.tf', '*.hcl'), ('application/x-tf', 'application/x-terraform')), + 'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')), + 'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)), + 'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()), + 'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)), + 'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), 
('text/vnd.tiddlywiki',)), + 'TlbLexer': ('pip._vendor.pygments.lexers.tlb', 'Tl-b', ('tlb',), ('*.tlb',), ()), + 'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)), + 'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)), + 'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()), + 'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')), + 'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)), + 'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)), + 'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')), + 'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()), + 'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()), + 'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)), + 'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()), + 'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()), + 'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)), + 'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()), + 'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)), + 'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()), + 'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()), + 'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)), + 'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)), + 'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()), + 'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()), + 'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)), + 'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()), + 'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')), + 'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)), + 'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()), + 'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)), + 'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)), + 'VhdlLexer': 
('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)), + 'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)), + 'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()), + 'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()), + 'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()), + 'WgslLexer': ('pip._vendor.pygments.lexers.wgsl', 'WebGPU Shading Language', ('wgsl',), ('*.wgsl',), ('text/wgsl',)), + 'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)), + 'WikitextLexer': ('pip._vendor.pygments.lexers.markup', 'Wikitext', ('wikitext', 'mediawiki'), (), ('text/x-wiki',)), + 'WoWTocLexer': ('pip._vendor.pygments.lexers.wowtoc', 'World of Warcraft TOC', ('wowtoc',), ('*.toc',), ()), + 'WrenLexer': ('pip._vendor.pygments.lexers.wren', 'Wren', ('wren',), ('*.wren',), ()), + 'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)), + 'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()), + 'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')), + 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')), + 'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)), + 'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')), + 'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)), + 'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)), + 'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()), + 'XppLexer': ('pip._vendor.pygments.lexers.dotnet', 'X++', ('xpp', 'x++'), ('*.xpp',), ()), + 'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')), + 'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)), + 'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()), + 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')), + 'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)), + 'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)), + 'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()), + 'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()), + 'ZigLexer': 
('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)), + 'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()), +} diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py new file mode 100644 index 0000000..e9bf2d3 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/lexers/python.py @@ -0,0 +1,1198 @@ +""" + pygments.lexers.python + ~~~~~~~~~~~~~~~~~~~~~~ + + Lexers for Python and related languages. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +import keyword + +from pip._vendor.pygments.lexer import DelegatingLexer, Lexer, RegexLexer, include, \ + bygroups, using, default, words, combined, do_insertions, this, line_re +from pip._vendor.pygments.util import get_bool_opt, shebang_matches +from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \ + Number, Punctuation, Generic, Other, Error, Whitespace +from pip._vendor.pygments import unistring as uni + +__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer', + 'Python2Lexer', 'Python2TracebackLexer', + 'CythonLexer', 'DgLexer', 'NumPyLexer'] + + +class PythonLexer(RegexLexer): + """ + For Python source code (version 3.x). + + .. versionadded:: 0.10 + + .. versionchanged:: 2.5 + This is now the default ``PythonLexer``. It is still available as the + alias ``Python3Lexer``. + """ + + name = 'Python' + url = 'http://www.python.org' + aliases = ['python', 'py', 'sage', 'python3', 'py3'] + filenames = [ + '*.py', + '*.pyw', + # Type stubs + '*.pyi', + # Jython + '*.jy', + # Sage + '*.sage', + # SCons + '*.sc', + 'SConstruct', + 'SConscript', + # Skylark/Starlark (used by Bazel, Buck, and Pants) + '*.bzl', + 'BUCK', + 'BUILD', + 'BUILD.bazel', + 'WORKSPACE', + # Twisted Application infrastructure + '*.tac', + ] + mimetypes = ['text/x-python', 'application/x-python', + 'text/x-python3', 'application/x-python3'] + + uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue) + + def innerstring_rules(ttype): + return [ + # the old style '%s' % (...) string formatting (still valid in Py3) + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[E-GXc-giorsaux%]', String.Interpol), + # the new style '{}'.format(...) string formatting + (r'\{' + r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name + r'(\![sra])?' # conversion + r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?' + r'\}', String.Interpol), + + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"%{\n]+', ttype), + (r'[\'"\\]', ttype), + # unhandled string formatting sign + (r'%|(\{{1,2})', ttype) + # newlines are an error (use "nl" state) + ] + + def fstring_rules(ttype): + return [ + # Assuming that a '}' is the closing brace after format specifier. + # Sadly, this means that we won't detect syntax error. But it's + # more important to parse correct syntax correctly, than to + # highlight invalid syntax. 
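+            # Editorial example: in f"{x:>{width}}" the braces and the ":"
+            # introducing the format spec are emitted as String.Interpol,
+            # while the literal text around them keeps `ttype`.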
+ (r'\}', String.Interpol), + (r'\{', String.Interpol, 'expr-inside-fstring'), + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"{}\n]+', ttype), + (r'[\'"\\]', ttype), + # newlines are an error (use "nl" state) + ] + + tokens = { + 'root': [ + (r'\n', Whitespace), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Whitespace, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Whitespace, String.Affix, String.Doc)), + (r'\A#!.+$', Comment.Hashbang), + (r'#.*$', Comment.Single), + (r'\\\n', Text), + (r'\\', Text), + include('keywords'), + include('soft-keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('expr'), + ], + 'expr': [ + # raw f-strings + ('(?i)(rf|fr)(""")', + bygroups(String.Affix, String.Double), + combined('rfstringescape', 'tdqf')), + ("(?i)(rf|fr)(''')", + bygroups(String.Affix, String.Single), + combined('rfstringescape', 'tsqf')), + ('(?i)(rf|fr)(")', + bygroups(String.Affix, String.Double), + combined('rfstringescape', 'dqf')), + ("(?i)(rf|fr)(')", + bygroups(String.Affix, String.Single), + combined('rfstringescape', 'sqf')), + # non-raw f-strings + ('([fF])(""")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'tdqf')), + ("([fF])(''')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'tsqf')), + ('([fF])(")', bygroups(String.Affix, String.Double), + combined('fstringescape', 'dqf')), + ("([fF])(')", bygroups(String.Affix, String.Single), + combined('fstringescape', 'sqf')), + # raw bytes and strings + ('(?i)(rb|br|r)(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("(?i)(rb|br|r)(''')", + bygroups(String.Affix, String.Single), 'tsqs'), + ('(?i)(rb|br|r)(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("(?i)(rb|br|r)(')", + bygroups(String.Affix, String.Single), 'sqs'), + # non-raw strings + ('([uU]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uU]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uU]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uU]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + # non-raw bytes + ('([bB])(""")', bygroups(String.Affix, String.Double), + combined('bytesescape', 'tdqs')), + ("([bB])(''')", bygroups(String.Affix, String.Single), + combined('bytesescape', 'tsqs')), + ('([bB])(")', bygroups(String.Affix, String.Double), + combined('bytesescape', 'dqs')), + ("([bB])(')", bygroups(String.Affix, String.Single), + combined('bytesescape', 'sqs')), + + (r'[^\S\n]+', Text), + include('numbers'), + (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator), + (r'[]{}:(),;[]', Punctuation), + (r'(in|is|and|or|not)\b', Operator.Word), + include('expr-keywords'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + include('name'), + ], + 'expr-inside-fstring': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + # without format specifier + (r'(=\s*)?' # debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' # conversion + r'\}', String.Interpol, '#pop'), + # with format specifier + # we'll catch the remaining '}' in the outer scope + (r'(=\s*)?' 
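+             # Editorial note: the optional "=" covers self-documenting
+             # f-string expressions such as f"{value=}" (Python 3.8+).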
# debug (https://bugs.python.org/issue36817) + r'(\![sraf])?' # conversion + r':', String.Interpol, '#pop'), + (r'\s+', Whitespace), # allow new lines + include('expr'), + ], + 'expr-inside-fstring-inner': [ + (r'[{([]', Punctuation, 'expr-inside-fstring-inner'), + (r'[])}]', Punctuation, '#pop'), + (r'\s+', Whitespace), # allow new lines + include('expr'), + ], + 'expr-keywords': [ + # Based on https://docs.python.org/3/reference/expressions.html + (words(( + 'async for', 'await', 'else', 'for', 'if', 'lambda', + 'yield', 'yield from'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif', + 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda', + 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant), + ], + 'soft-keywords': [ + # `match`, `case` and `_` soft keywords + (r'(^[ \t]*)' # at beginning of line + possible indentation + r'(match|case)\b' # a possible keyword + r'(?![ \t]*(?:' # not followed by... + r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't + r'|'.join(keyword.kwlist) + r')\b))', # pattern matching + bygroups(Text, Keyword), 'soft-keywords-inner'), + ], + 'soft-keywords-inner': [ + # optional `_` keyword + (r'(\s+)([^\n_]*)(_\b)', bygroups(Whitespace, using(this), Keyword)), + default('#pop') + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'aiter', 'all', 'any', 'bin', 'bool', 'bytearray', + 'breakpoint', 'bytes', 'callable', 'chr', 'classmethod', 'compile', + 'complex', 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', + 'filter', 'float', 'format', 'frozenset', 'getattr', 'globals', + 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'isinstance', + 'issubclass', 'iter', 'len', 'list', 'locals', 'map', 'max', + 'memoryview', 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', + 'print', 'property', 'range', 'repr', 'reversed', 'round', 'set', + 'setattr', 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', + 'tuple', 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), + Name.Builtin), + (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo), + (words(( + 'ArithmeticError', 'AssertionError', 'AttributeError', + 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning', + 'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError', + 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError', + 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError', + 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError', + 'NotImplementedError', 'OSError', 'OverflowError', + 'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning', + 'RuntimeError', 'RuntimeWarning', 'StopIteration', + 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', + 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', + 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', + 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', + 'Warning', 'WindowsError', 'ZeroDivisionError', + # new builtin exceptions from PEP 3151 + 'BlockingIOError', 'ChildProcessError', 'ConnectionError', + 'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError', + 'ConnectionResetError', 'FileExistsError', 'FileNotFoundError', + 'InterruptedError', 'IsADirectoryError', 'NotADirectoryError', + 
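+             # Editorial note: IOError and EnvironmentError (and, on Windows,
+             # WindowsError) survive in Python 3 only as aliases of OSError.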
'PermissionError', 'ProcessLookupError', 'TimeoutError', + # others new in Python 3 + 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError', + 'EncodingWarning'), + prefix=r'(?<!\.)', suffix=r'\b'), + Name.Exception), + ], + 'magicfuncs': [ + (words(( + '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__', + '__and__', '__anext__', '__await__', '__bool__', '__bytes__', + '__call__', '__complex__', '__contains__', '__del__', '__delattr__', + '__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__', + '__eq__', '__exit__', '__float__', '__floordiv__', '__format__', + '__ge__', '__get__', '__getattr__', '__getattribute__', + '__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__', + '__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__', + '__imul__', '__index__', '__init__', '__instancecheck__', + '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', + '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', + '__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__', + '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', + '__new__', '__next__', '__or__', '__pos__', '__pow__', + '__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__', + '__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__', + '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__', + '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', + '__rxor__', '__set__', '__setattr__', '__setitem__', '__str__', + '__sub__', '__subclasscheck__', '__truediv__', + '__xor__'), suffix=r'\b'), + Name.Function.Magic), + ], + 'magicvars': [ + (words(( + '__annotations__', '__bases__', '__class__', '__closure__', + '__code__', '__defaults__', '__dict__', '__doc__', '__file__', + '__func__', '__globals__', '__kwdefaults__', '__module__', + '__mro__', '__name__', '__objclass__', '__qualname__', + '__self__', '__slots__', '__weakref__'), suffix=r'\b'), + Name.Variable.Magic), + ], + 'numbers': [ + (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)' + r'([eE][+-]?\d(?:_?\d)*)?', Number.Float), + (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float), + (r'0[oO](?:_?[0-7])+', Number.Oct), + (r'0[bB](?:_?[01])+', Number.Bin), + (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex), + (r'\d(?:_?\d)*', Number.Integer), + ], + 'name': [ + (r'@' + uni_name, Name.Decorator), + (r'@', Operator), # new matrix multiplication operator + (uni_name, Name), + ], + 'funcname': [ + include('magicfuncs'), + (uni_name, Name.Function, '#pop'), + default('#pop'), + ], + 'classname': [ + (uni_name, Name.Class, '#pop'), + ], + 'import': [ + (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)), + (r'\.', Name.Namespace), + (uni_name, Name.Namespace), + (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)), + default('#pop') # all else: go back + ], + 'fromimport': [ + (r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'), + (r'\.', Name.Namespace), + # if None occurs here, it's "raise x from None", since None can + # never be a module name + (r'None\b', Keyword.Constant, '#pop'), + (uni_name, Name.Namespace), + default('#pop'), + ], + 'rfstringescape': [ + (r'\{\{', String.Escape), + (r'\}\}', String.Escape), + ], + 'fstringescape': [ + include('rfstringescape'), + include('stringescape'), + ], + 'bytesescape': [ + (r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) + ], + 'stringescape': [ + (r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape), + include('bytesescape') + ], + 'fstrings-single': fstring_rules(String.Single), + 'fstrings-double': 
fstring_rules(String.Double), + 'strings-single': innerstring_rules(String.Single), + 'strings-double': innerstring_rules(String.Double), + 'dqf': [ + (r'"', String.Double, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings + include('fstrings-double') + ], + 'sqf': [ + (r"'", String.Single, '#pop'), + (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings + include('fstrings-single') + ], + 'dqs': [ + (r'"', String.Double, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings + include('strings-double') + ], + 'sqs': [ + (r"'", String.Single, '#pop'), + (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings + include('strings-single') + ], + 'tdqf': [ + (r'"""', String.Double, '#pop'), + include('fstrings-double'), + (r'\n', String.Double) + ], + 'tsqf': [ + (r"'''", String.Single, '#pop'), + include('fstrings-single'), + (r'\n', String.Single) + ], + 'tdqs': [ + (r'"""', String.Double, '#pop'), + include('strings-double'), + (r'\n', String.Double) + ], + 'tsqs': [ + (r"'''", String.Single, '#pop'), + include('strings-single'), + (r'\n', String.Single) + ], + } + + def analyse_text(text): + return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \ + 'import ' in text[:1000] + + +Python3Lexer = PythonLexer + + +class Python2Lexer(RegexLexer): + """ + For Python 2.x source code. + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonLexer``. ``PythonLexer`` now + refers to the Python 3 variant. File name patterns like ``*.py`` have + been moved to Python 3 as well. + """ + + name = 'Python 2.x' + url = 'http://www.python.org' + aliases = ['python2', 'py2'] + filenames = [] # now taken over by PythonLexer (3.x) + mimetypes = ['text/x-python2', 'application/x-python2'] + + def innerstring_rules(ttype): + return [ + # the old style '%s' % (...) string formatting + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' 
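+             # Editorial example: placeholders such as "%(name)-8.3f" or "%s"
+             # are matched as a single String.Interpol token.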
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol), + # backslashes, quotes and formatting signs must be parsed one at a time + (r'[^\\\'"%\n]+', ttype), + (r'[\'"\\]', ttype), + # unhandled string formatting sign + (r'%', ttype), + # newlines are an error (use "nl" state) + ] + + tokens = { + 'root': [ + (r'\n', Whitespace), + (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")', + bygroups(Whitespace, String.Affix, String.Doc)), + (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')", + bygroups(Whitespace, String.Affix, String.Doc)), + (r'[^\S\n]+', Text), + (r'\A#!.+$', Comment.Hashbang), + (r'#.*$', Comment.Single), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Text), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator), + include('keywords'), + (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'), + (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'), + (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'fromimport'), + (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), + 'import'), + include('builtins'), + include('magicfuncs'), + include('magicvars'), + include('backtick'), + ('([rR]|[uUbB][rR]|[rR][uUbB])(""")', + bygroups(String.Affix, String.Double), 'tdqs'), + ("([rR]|[uUbB][rR]|[rR][uUbB])(''')", + bygroups(String.Affix, String.Single), 'tsqs'), + ('([rR]|[uUbB][rR]|[rR][uUbB])(")', + bygroups(String.Affix, String.Double), 'dqs'), + ("([rR]|[uUbB][rR]|[rR][uUbB])(')", + bygroups(String.Affix, String.Single), 'sqs'), + ('([uUbB]?)(""")', bygroups(String.Affix, String.Double), + combined('stringescape', 'tdqs')), + ("([uUbB]?)(''')", bygroups(String.Affix, String.Single), + combined('stringescape', 'tsqs')), + ('([uUbB]?)(")', bygroups(String.Affix, String.Double), + combined('stringescape', 'dqs')), + ("([uUbB]?)(')", bygroups(String.Affix, String.Single), + combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except', + 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass', + 'print', 'raise', 'return', 'try', 'while', 'yield', + 'yield from', 'as', 'with'), suffix=r'\b'), + Keyword), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod', + 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod', + 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float', + 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id', + 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len', + 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object', + 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce', + 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice', + 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type', + 'unichr', 'unicode', 'vars', 'xrange', 'zip'), + prefix=r'(?<!\.)', suffix=r'\b'), + Name.Builtin), + (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls' + r')\b', Name.Builtin.Pseudo), + (words(( + 'ArithmeticError', 'AssertionError', 'AttributeError', + 'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError', + 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', + 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', + 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', + 'MemoryError', 'NameError', + 'NotImplementedError', 
'OSError', 'OverflowError', 'OverflowWarning', + 'PendingDeprecationWarning', 'ReferenceError', + 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration', + 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit', + 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', + 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', + 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning', + 'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'), + Name.Exception), + ], + 'magicfuncs': [ + (words(( + '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__', + '__complex__', '__contains__', '__del__', '__delattr__', '__delete__', + '__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__', + '__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__', + '__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__', + '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__', + '__ilshift__', '__imod__', '__imul__', '__index__', '__init__', + '__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__', + '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__', + '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__', + '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__', + '__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__', + '__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__', + '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', + '__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', + '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__', + '__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__', + '__unicode__', '__xor__'), suffix=r'\b'), + Name.Function.Magic), + ], + 'magicvars': [ + (words(( + '__bases__', '__class__', '__closure__', '__code__', '__defaults__', + '__dict__', '__doc__', '__file__', '__func__', '__globals__', + '__metaclass__', '__module__', '__mro__', '__name__', '__self__', + '__slots__', '__weakref__'), + suffix=r'\b'), + Name.Variable.Magic), + ], + 'numbers': [ + (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float), + (r'\d+[eE][+-]?[0-9]+j?', Number.Float), + (r'0[0-7]+j?', Number.Oct), + (r'0[bB][01]+', Number.Bin), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+j?', Number.Integer) + ], + 'backtick': [ + ('`.*?`', String.Backtick), + ], + 'name': [ + (r'@[\w.]+', Name.Decorator), + (r'[a-zA-Z_]\w*', Name), + ], + 'funcname': [ + include('magicfuncs'), + (r'[a-zA-Z_]\w*', Name.Function, '#pop'), + default('#pop'), + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop') + ], + 'import': [ + (r'(?:[ \t]|\\\n)+', Text), + (r'as\b', Keyword.Namespace), + (r',', Operator), + (r'[a-zA-Z_][\w.]*', Name.Namespace), + default('#pop') # all else: go back + ], + 'fromimport': [ + (r'(?:[ \t]|\\\n)+', Text), + (r'import\b', Keyword.Namespace, '#pop'), + # if None occurs here, it's "raise x from None", since None can + # never be a module name + (r'None\b', Name.Builtin.Pseudo, '#pop'), + # sadly, in "raise x from y" y will be highlighted as namespace too + (r'[a-zA-Z_.][\w.]*', Name.Namespace), + # anything else here also means "raise x from y" and is therefore + # not an error + default('#pop'), + ], + 'stringescape': [ + (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' + r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) + ], + 
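+        # Editorial note: the innerstring_rules() bodies below are shared by
+        # the sqs/dqs/tsqs/tdqs states via include(), so all four quoting
+        # styles get the same interpolation and escape handling.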
'strings-single': innerstring_rules(String.Single), + 'strings-double': innerstring_rules(String.Double), + 'dqs': [ + (r'"', String.Double, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings + include('strings-double') + ], + 'sqs': [ + (r"'", String.Single, '#pop'), + (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings + include('strings-single') + ], + 'tdqs': [ + (r'"""', String.Double, '#pop'), + include('strings-double'), + (r'\n', String.Double) + ], + 'tsqs': [ + (r"'''", String.Single, '#pop'), + include('strings-single'), + (r'\n', String.Single) + ], + } + + def analyse_text(text): + return shebang_matches(text, r'pythonw?2(\.\d)?') + +class _PythonConsoleLexerBase(RegexLexer): + """Auxiliary lexer for `PythonConsoleLexer`. + + Code tokens are output as ``Token.Other.Code``, traceback tokens as + ``Token.Other.Traceback``. + """ + + name = 'Python console session' + aliases = ['pycon'] + mimetypes = ['text/x-python-doctest'] + + tokens = { + 'root': [ + (r'(>>> )(.*\n)', bygroups(Generic.Prompt, Other.Code), 'continuations'), + # This happens, e.g., when tracebacks are embedded in documentation; + # trailing whitespaces are often stripped in such contexts. + (r'(>>>)(\n)', bygroups(Generic.Prompt, Whitespace)), + (r'(\^C)?Traceback \(most recent call last\):\n', Other.Traceback, 'traceback'), + # SyntaxError starts with this + (r' File "[^"]+", line \d+', Other.Traceback, 'traceback'), + (r'.*\n', Generic.Output), + ], + 'continuations': [ + (r'(\.\.\. )(.*\n)', bygroups(Generic.Prompt, Other.Code)), + # See above. + (r'(\.\.\.)(\n)', bygroups(Generic.Prompt, Whitespace)), + default('#pop'), + ], + 'traceback': [ + # As soon as we see a traceback, consume everything until the next + # >>> prompt. + (r'(?=>>>( |$))', Text, '#pop'), + (r'(KeyboardInterrupt)(\n)', bygroups(Name.Class, Whitespace)), + (r'.*\n', Other.Traceback), + ], + } + +class PythonConsoleLexer(DelegatingLexer): + """ + For Python console output or doctests, such as: + + .. sourcecode:: pycon + + >>> a = 'foo' + >>> print(a) + foo + >>> 1 / 0 + Traceback (most recent call last): + File "<stdin>", line 1, in <module> + ZeroDivisionError: division by zero + + Additional options: + + `python3` + Use Python 3 lexer for code. Default is ``True``. + + .. versionadded:: 1.0 + .. versionchanged:: 2.5 + Now defaults to ``True``. + """ + + name = 'Python console session' + aliases = ['pycon'] + mimetypes = ['text/x-python-doctest'] + + def __init__(self, **options): + python3 = get_bool_opt(options, 'python3', True) + if python3: + pylexer = PythonLexer + tblexer = PythonTracebackLexer + else: + pylexer = Python2Lexer + tblexer = Python2TracebackLexer + # We have two auxiliary lexers. Use DelegatingLexer twice with + # different tokens. TODO: DelegatingLexer should support this + # directly, by accepting a tuple of auxiliary lexers and a tuple of + # distinguishing tokens. Then we wouldn't need this intermediary + # class. + class _ReplaceInnerCode(DelegatingLexer): + def __init__(self, **options): + super().__init__(pylexer, _PythonConsoleLexerBase, Other.Code, **options) + super().__init__(tblexer, _ReplaceInnerCode, Other.Traceback, **options) + +class PythonTracebackLexer(RegexLexer): + """ + For Python 3.x tracebacks, with support for chained exceptions. + + .. versionadded:: 1.0 + + .. versionchanged:: 2.5 + This is now the default ``PythonTracebackLexer``. It is still available + as the alias ``Python3TracebackLexer``.
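+
+    A minimal usage sketch (editorial example; ``tb_text`` stands for a
+    captured traceback string and the vendored import paths are assumed)::
+
+        from pip._vendor.pygments import highlight
+        from pip._vendor.pygments.formatters import TerminalFormatter
+
+        print(highlight(tb_text, PythonTracebackLexer(), TerminalFormatter()))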
+ """ + + name = 'Python Traceback' + aliases = ['pytb', 'py3tb'] + filenames = ['*.pytb', '*.py3tb'] + mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback'] + + tokens = { + 'root': [ + (r'\n', Whitespace), + (r'^(\^C)?Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'), + (r'^During handling of the above exception, another ' + r'exception occurred:\n\n', Generic.Traceback), + (r'^The above exception was the direct cause of the ' + r'following exception:\n\n', Generic.Traceback), + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + (r'^.*\n', Other), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Whitespace)), + (r'^( )(.+)(\n)', + bygroups(Whitespace, using(PythonLexer), Whitespace), 'markers'), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Whitespace, Comment, Whitespace)), # for doctests... + (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Whitespace), '#pop'), + (r'^([a-zA-Z_][\w.]*)(:?\n)', + bygroups(Generic.Error, Whitespace), '#pop'), + default('#pop'), + ], + 'markers': [ + # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>` + # error locations in Python 3.11+, or single-caret markers + # for syntax errors before that. + (r'^( {4,})([~^]+)(\n)', + bygroups(Whitespace, Punctuation.Marker, Whitespace), + '#pop'), + default('#pop'), + ], + } + + +Python3TracebackLexer = PythonTracebackLexer + + +class Python2TracebackLexer(RegexLexer): + """ + For Python tracebacks. + + .. versionadded:: 0.7 + + .. versionchanged:: 2.5 + This class has been renamed from ``PythonTracebackLexer``. + ``PythonTracebackLexer`` now refers to the Python 3 variant. + """ + + name = 'Python 2.x Traceback' + aliases = ['py2tb'] + filenames = ['*.py2tb'] + mimetypes = ['text/x-python2-traceback'] + + tokens = { + 'root': [ + # Cover both (most recent call last) and (innermost last) + # The optional ^C allows us to catch keyboard interrupt signals. + (r'^(\^C)?(Traceback.*\n)', + bygroups(Text, Generic.Traceback), 'intb'), + # SyntaxError starts with this. + (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'), + (r'^.*\n', Other), + ], + 'intb': [ + (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Text, Name, Whitespace)), + (r'^( File )("[^"]+")(, line )(\d+)(\n)', + bygroups(Text, Name.Builtin, Text, Number, Whitespace)), + (r'^( )(.+)(\n)', + bygroups(Text, using(Python2Lexer), Whitespace), 'marker'), + (r'^([ \t]*)(\.\.\.)(\n)', + bygroups(Text, Comment, Whitespace)), # for doctests... + (r'^([^:]+)(: )(.+)(\n)', + bygroups(Generic.Error, Text, Name, Whitespace), '#pop'), + (r'^([a-zA-Z_]\w*)(:?\n)', + bygroups(Generic.Error, Whitespace), '#pop') + ], + 'marker': [ + # For syntax errors. + (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'), + default('#pop'), + ], + } + + +class CythonLexer(RegexLexer): + """ + For Pyrex and Cython source code. + + .. 
versionadded:: 1.1 + """ + + name = 'Cython' + url = 'http://cython.org' + aliases = ['cython', 'pyx', 'pyrex'] + filenames = ['*.pyx', '*.pxd', '*.pxi'] + mimetypes = ['text/x-cython', 'application/x-cython'] + + tokens = { + 'root': [ + (r'\n', Whitespace), + (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Whitespace, String.Doc)), + (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Whitespace, String.Doc)), + (r'[^\S\n]+', Text), + (r'#.*$', Comment), + (r'[]{}:(),;[]', Punctuation), + (r'\\\n', Whitespace), + (r'\\', Text), + (r'(in|is|and|or|not)\b', Operator.Word), + (r'(<)([a-zA-Z0-9.?]+)(>)', + bygroups(Punctuation, Keyword.Type, Punctuation)), + (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator), + (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)', + bygroups(Keyword, Number.Integer, Operator, Name, Operator, + Name, Punctuation)), + include('keywords'), + (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'), + (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'), + # (should actually start a block with only cdefs) + (r'(cdef)(:)', bygroups(Keyword, Punctuation)), + (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'), + (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'), + (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'), + include('builtins'), + include('backtick'), + ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'), + ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'), + ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'), + ('[uU]?"""', String, combined('stringescape', 'tdqs')), + ("[uU]?'''", String, combined('stringescape', 'tsqs')), + ('[uU]?"', String, combined('stringescape', 'dqs')), + ("[uU]?'", String, combined('stringescape', 'sqs')), + include('name'), + include('numbers'), + ], + 'keywords': [ + (words(( + 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif', + 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil', + 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'), + Keyword), + (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc), + ], + 'builtins': [ + (words(( + '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint', + 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', + 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr', + 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit', + 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals', + 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance', + 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max', + 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t', + 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed', + 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod', + 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned', + 'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'), + Name.Builtin), + (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL' + r')\b', Name.Builtin.Pseudo), + (words(( + 'ArithmeticError', 'AssertionError', 'AttributeError', + 'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError', + 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit', + 'IOError', 'ImportError', 'ImportWarning', 'IndentationError', + 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError', + 'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError', + 'OSError', 
'OverflowError', 'OverflowWarning', + 'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError', + 'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError', + 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError', + 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError', + 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError', + 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning', + 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'), + Name.Exception), + ], + 'numbers': [ + (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float), + (r'0\d+', Number.Oct), + (r'0[xX][a-fA-F0-9]+', Number.Hex), + (r'\d+L', Number.Integer.Long), + (r'\d+', Number.Integer) + ], + 'backtick': [ + ('`.*?`', String.Backtick), + ], + 'name': [ + (r'@\w+', Name.Decorator), + (r'[a-zA-Z_]\w*', Name), + ], + 'funcname': [ + (r'[a-zA-Z_]\w*', Name.Function, '#pop') + ], + 'cdef': [ + (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved), + (r'(struct|enum|union|class)\b', Keyword), + (r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)', + bygroups(Name.Function, Text), '#pop'), + (r'([a-zA-Z_]\w*)(\s*)(,)', + bygroups(Name.Function, Text, Punctuation)), + (r'from\b', Keyword, '#pop'), + (r'as\b', Keyword), + (r':', Punctuation, '#pop'), + (r'(?=["\'])', Text, '#pop'), + (r'[a-zA-Z_]\w*', Keyword.Type), + (r'.', Text), + ], + 'classname': [ + (r'[a-zA-Z_]\w*', Name.Class, '#pop') + ], + 'import': [ + (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)), + (r'[a-zA-Z_][\w.]*', Name.Namespace), + (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)), + default('#pop') # all else: go back + ], + 'fromimport': [ + (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'), + (r'[a-zA-Z_.][\w.]*', Name.Namespace), + # ``cdef foo from "header"``, or ``for foo from 0 < i < 10`` + default('#pop'), + ], + 'stringescape': [ + (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' + r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) + ], + 'strings': [ + (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[E-GXc-giorsux%]', String.Interpol), + (r'[^\\\'"%\n]+', String), + # quotes, percents and backslashes must be parsed one at a time + (r'[\'"\\]', String), + # unhandled string formatting sign + (r'%', String) + # newlines are an error (use "nl" state) + ], + 'nl': [ + (r'\n', String) + ], + 'dqs': [ + (r'"', String, '#pop'), + (r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings + include('strings') + ], + 'sqs': [ + (r"'", String, '#pop'), + (r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings + include('strings') + ], + 'tdqs': [ + (r'"""', String, '#pop'), + include('strings'), + include('nl') + ], + 'tsqs': [ + (r"'''", String, '#pop'), + include('strings'), + include('nl') + ], + } + + +class DgLexer(RegexLexer): + """ + Lexer for dg, + a functional and object-oriented programming language + running on the CPython 3 VM. + + .. 
versionadded:: 1.6 + """ + name = 'dg' + aliases = ['dg'] + filenames = ['*.dg'] + mimetypes = ['text/x-dg'] + + tokens = { + 'root': [ + (r'\s+', Text), + (r'#.*?$', Comment.Single), + + (r'(?i)0b[01]+', Number.Bin), + (r'(?i)0o[0-7]+', Number.Oct), + (r'(?i)0x[0-9a-f]+', Number.Hex), + (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float), + (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float), + (r'(?i)[+-]?[0-9]+j?', Number.Integer), + + (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')), + (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')), + (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')), + (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')), + + (r"`\w+'*`", Operator), + (r'\b(and|in|is|or|where)\b', Operator.Word), + (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator), + + (words(( + 'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'', + 'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object', + 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str', + 'super', 'tuple', 'tuple\'', 'type'), + prefix=r'(?<!\.)', suffix=r'(?![\'\w])'), + Name.Builtin), + (words(( + '__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile', + 'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate', + 'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst', + 'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init', + 'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len', + 'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow', + 'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd', + 'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'), + prefix=r'(?<!\.)', suffix=r'(?![\'\w])'), + Name.Builtin), + (r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])", + Name.Builtin.Pseudo), + + (r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])", + Name.Exception), + (r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|" + r"SystemExit)(?!['\w])", Name.Exception), + + (r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|" + r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved), + + (r"[A-Z_]+'*(?!['\w])", Name), + (r"[A-Z]\w+'*(?!['\w])", Keyword.Type), + (r"\w+'*", Name), + + (r'[()]', Punctuation), + (r'.', Error), + ], + 'stringescape': [ + (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|' + r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) + ], + 'string': [ + (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?' + '[hlL]?[E-GXc-giorsux%]', String.Interpol), + (r'[^\\\'"%\n]+', String), + # quotes, percents and backslashes must be parsed one at a time + (r'[\'"\\]', String), + # unhandled string formatting sign + (r'%', String), + (r'\n', String) + ], + 'dqs': [ + (r'"', String, '#pop') + ], + 'sqs': [ + (r"'", String, '#pop') + ], + 'tdqs': [ + (r'"""', String, '#pop') + ], + 'tsqs': [ + (r"'''", String, '#pop') + ], + } + + +class NumPyLexer(PythonLexer): + """ + A Python lexer recognizing Numerical Python builtins. + + .. 
versionadded:: 0.10 + """ + + name = 'NumPy' + url = 'https://numpy.org/' + aliases = ['numpy'] + + # override the mimetypes to not inherit them from python + mimetypes = [] + filenames = [] + + EXTRA_KEYWORDS = { + 'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose', + 'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append', + 'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh', + 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', + 'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal', + 'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange', + 'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray', + 'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype', + 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett', + 'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial', + 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman', + 'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_', + 'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type', + 'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate', + 'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov', + 'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate', + 'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide', + 'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty', + 'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye', + 'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill', + 'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud', + 'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer', + 'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring', + 'generic', 'get_array_wrap', 'get_include', 'get_numarray_include', + 'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize', + 'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater', + 'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram', + 'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0', + 'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info', + 'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d', + 'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj', + 'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf', + 'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_', + 'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_', + 'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort', + 'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2', + 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace', + 'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype', + 'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min', + 'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan', + 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum', + 'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer', + 'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones', + 'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload', + 'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', + 'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod', + 'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers', + 'random_sample', 'ranf', 'rank', 
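+        # Editorial note: this set is a historical mix of numpy module
+        # functions, ndarray methods and numpy.random names; several entries
+        # (e.g. 'alterdot', 'arrayrange', 'rank') no longer exist in modern
+        # NumPy releases.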
'ravel', 'real', 'real_if_close', + 'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require', + 'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll', + 'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_', + 'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select', + 'set_numeric_ops', 'set_printoptions', 'set_string_function', + 'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj', + 'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape', + 'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh', + 'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source', + 'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std', + 'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot', + 'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose', + 'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict', + 'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index', + 'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises', + 'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like' + } + + def get_tokens_unprocessed(self, text): + for index, token, value in \ + PythonLexer.get_tokens_unprocessed(self, text): + if token is Name and value in self.EXTRA_KEYWORDS: + yield index, Keyword.Pseudo, value + else: + yield index, token, value + + def analyse_text(text): + ltext = text[:1000] + return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or + 'import ' in ltext) \ + and ('import numpy' in ltext or 'from numpy import' in ltext) diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/modeline.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/modeline.py new file mode 100644 index 0000000..7b6f6a3 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/modeline.py @@ -0,0 +1,43 @@ +""" + pygments.modeline + ~~~~~~~~~~~~~~~~~ + + A simple modeline parser (based on pymodeline). + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re + +__all__ = ['get_filetype_from_buffer'] + + +modeline_re = re.compile(r''' + (?: vi | vim | ex ) (?: [<=>]? \d* )? : + .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ ) +''', re.VERBOSE) + + +def get_filetype_from_line(l): + m = modeline_re.search(l) + if m: + return m.group(1) + + +def get_filetype_from_buffer(buf, max_lines=5): + """ + Scan the buffer for modelines and return filetype if one is found. + """ + lines = buf.splitlines() + for l in lines[-1:-max_lines-1:-1]: + ret = get_filetype_from_line(l) + if ret: + return ret + for i in range(max_lines, -1, -1): + if i < len(lines): + ret = get_filetype_from_line(lines[i]) + if ret: + return ret + + return None diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/plugin.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/plugin.py new file mode 100644 index 0000000..7b722d5 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/plugin.py @@ -0,0 +1,88 @@ +""" + pygments.plugin + ~~~~~~~~~~~~~~~ + + Pygments plugin interface. By default, this tries to use + ``importlib.metadata``, which is in the Python standard + library since Python 3.8, or its ``importlib_metadata`` + backport for earlier versions of Python. It falls back on + ``pkg_resources`` if not found. Finally, if ``pkg_resources`` + is not found either, no plugins are loaded at all. 
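+
+    With modern packaging metadata, a distribution would register such an
+    entry point in its ``pyproject.toml``, e.g. (editorial sketch)::
+
+        [project.entry-points."pygments.lexers"]
+        yourlexer = "yourmodule:YourLexer"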
+ + lexer plugins:: + + [pygments.lexers] + yourlexer = yourmodule:YourLexer + + formatter plugins:: + + [pygments.formatters] + yourformatter = yourformatter:YourFormatter + /.ext = yourformatter:YourFormatter + + As you can see, you can define extensions for the formatter + with a leading slash. + + style plugins:: + + [pygments.styles] + yourstyle = yourstyle:YourStyle + + filter plugins:: + + [pygments.filters] + yourfilter = yourfilter:YourFilter + + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +LEXER_ENTRY_POINT = 'pygments.lexers' +FORMATTER_ENTRY_POINT = 'pygments.formatters' +STYLE_ENTRY_POINT = 'pygments.styles' +FILTER_ENTRY_POINT = 'pygments.filters' + + +def iter_entry_points(group_name): + try: + from importlib.metadata import entry_points + except ImportError: + try: + from importlib_metadata import entry_points + except ImportError: + try: + from pip._vendor.pkg_resources import iter_entry_points + except (ImportError, OSError): + return [] + else: + return iter_entry_points(group_name) + groups = entry_points() + if hasattr(groups, 'select'): + # New interface in Python 3.10 and newer versions of the + # importlib_metadata backport. + return groups.select(group=group_name) + else: + # Older interface, deprecated in Python 3.10 and recent + # importlib_metadata, but we need it in Python 3.8 and 3.9. + return groups.get(group_name, []) + + +def find_plugin_lexers(): + for entrypoint in iter_entry_points(LEXER_ENTRY_POINT): + yield entrypoint.load() + + +def find_plugin_formatters(): + for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT): + yield entrypoint.name, entrypoint.load() + + +def find_plugin_styles(): + for entrypoint in iter_entry_points(STYLE_ENTRY_POINT): + yield entrypoint.name, entrypoint.load() + + +def find_plugin_filters(): + for entrypoint in iter_entry_points(FILTER_ENTRY_POINT): + yield entrypoint.name, entrypoint.load() diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/regexopt.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/regexopt.py new file mode 100644 index 0000000..45223ec --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/regexopt.py @@ -0,0 +1,91 @@ +""" + pygments.regexopt + ~~~~~~~~~~~~~~~~~ + + An algorithm that generates optimized regexes for matching long lists of + literal strings. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +import re +from re import escape +from os.path import commonprefix +from itertools import groupby +from operator import itemgetter + +CS_ESCAPE = re.compile(r'[\[\^\\\-\]]') +FIRST_ELEMENT = itemgetter(0) + + +def make_charset(letters): + return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']' + + +def regex_opt_inner(strings, open_paren): + """Return a regex that matches any string in the sorted list of strings.""" + close_paren = open_paren and ')' or '' + # print strings, repr(open_paren) + if not strings: + # print '-> nothing left' + return '' + first = strings[0] + if len(strings) == 1: + # print '-> only 1 string' + return open_paren + escape(first) + close_paren + if not first: + # print '-> first string empty' + return open_paren + regex_opt_inner(strings[1:], '(?:') \ + + '?' + close_paren + if len(first) == 1: + # multiple one-char strings?
make a charset + oneletter = [] + rest = [] + for s in strings: + if len(s) == 1: + oneletter.append(s) + else: + rest.append(s) + if len(oneletter) > 1: # do we have more than one oneletter string? + if rest: + # print '-> 1-character + rest' + return open_paren + regex_opt_inner(rest, '') + '|' \ + + make_charset(oneletter) + close_paren + # print '-> only 1-character' + return open_paren + make_charset(oneletter) + close_paren + prefix = commonprefix(strings) + if prefix: + plen = len(prefix) + # we have a prefix for all strings + # print '-> prefix:', prefix + return open_paren + escape(prefix) \ + + regex_opt_inner([s[plen:] for s in strings], '(?:') \ + + close_paren + # is there a suffix? + strings_rev = [s[::-1] for s in strings] + suffix = commonprefix(strings_rev) + if suffix: + slen = len(suffix) + # print '-> suffix:', suffix[::-1] + return open_paren \ + + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \ + + escape(suffix[::-1]) + close_paren + # recurse on common 1-string prefixes + # print '-> last resort' + return open_paren + \ + '|'.join(regex_opt_inner(list(group[1]), '') + for group in groupby(strings, lambda s: s[0] == first[0])) \ + + close_paren + + +def regex_opt(strings, prefix='', suffix=''): + """Return a compiled regex that matches any string in the given list. + + The strings to match must be literal strings, not regexes. They will be + regex-escaped. + + *prefix* and *suffix* are pre- and appended to the final regex. + """ + strings = sorted(strings) + return prefix + regex_opt_inner(strings, '(') + suffix diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py new file mode 100644 index 0000000..32a2f30 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py @@ -0,0 +1,104 @@ +""" + pygments.scanner + ~~~~~~~~~~~~~~~~ + + This library implements a regex based scanner. Some languages + like Pascal are easy to parse but have some keywords that + depend on the context. Because of this it's impossible to lex + that just by using a regular expression lexer like the + `RegexLexer`. + + Have a look at the `DelphiLexer` to get an idea of how to use + this scanner. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" +import re + + +class EndOfText(RuntimeError): + """ + Raise if end of text is reached and the user + tried to call a match function. + """ + + +class Scanner: + """ + Simple scanner + + All method patterns are regular expression strings (not + compiled expressions!) + """ + + def __init__(self, text, flags=0): + """ + :param text: The text which should be scanned + :param flags: default regular expression flags + """ + self.data = text + self.data_length = len(text) + self.start_pos = 0 + self.pos = 0 + self.flags = flags + self.last = None + self.match = None + self._re_cache = {} + + def eos(self): + """`True` if the scanner reached the end of text.""" + return self.pos >= self.data_length + eos = property(eos, eos.__doc__) + + def check(self, pattern): + """ + Apply `pattern` on the current position and return + the match object. (Doesn't touch pos). Use this for + lookahead. 
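+
+        Example (editorial sketch, not part of upstream Pygments)::
+
+            s = Scanner('program Test;')
+            if s.check(r'program\b') is not None:  # peek without advancing
+                s.scan(r'program\b')               # consume; s.match == 'program'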
+    """
+    strings = sorted(strings)
+    return prefix + regex_opt_inner(strings, '(') + suffix
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py
new file mode 100644
index 0000000..32a2f30
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/scanner.py
@@ -0,0 +1,104 @@
+"""
+    pygments.scanner
+    ~~~~~~~~~~~~~~~~
+
+    This library implements a regex-based scanner. Some languages
+    like Pascal are easy to parse but have some keywords that
+    depend on the context. Because of this it's impossible to lex
+    them just by using a regular expression lexer like the
+    `RegexLexer`.
+
+    Have a look at the `DelphiLexer` to get an idea of how to use
+    this scanner.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+import re
+
+
+class EndOfText(RuntimeError):
+    """
+    Raised if the end of the text is reached and the user
+    tried to call a match function.
+    """
+
+
+class Scanner:
+    """
+    Simple scanner.
+
+    All method patterns are regular expression strings (not
+    compiled expressions!)
+    """
+
+    def __init__(self, text, flags=0):
+        """
+        :param text: The text which should be scanned
+        :param flags: default regular expression flags
+        """
+        self.data = text
+        self.data_length = len(text)
+        self.start_pos = 0
+        self.pos = 0
+        self.flags = flags
+        self.last = None
+        self.match = None
+        self._re_cache = {}
+
+    def eos(self):
+        """`True` if the scanner reached the end of text."""
+        return self.pos >= self.data_length
+    eos = property(eos, doc=eos.__doc__)
+
+    def check(self, pattern):
+        """
+        Apply `pattern` on the current position and return
+        the match object. (Doesn't touch pos). Use this for
+        lookahead.
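+
+        A minimal usage sketch (an editor's illustration, not part of
+        the original docstring)::
+
+            s = Scanner('program Test;')
+            m = s.check(r'\w+')    # match object covering 'program'
+            assert s.pos == 0      # the position was not advanced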
+        """
+        if self.eos:
+            raise EndOfText()
+        if pattern not in self._re_cache:
+            self._re_cache[pattern] = re.compile(pattern, self.flags)
+        return self._re_cache[pattern].match(self.data, self.pos)
+
+    def test(self, pattern):
+        """Apply a pattern on the current position and check
+        if it matches. Doesn't touch pos.
+        """
+        return self.check(pattern) is not None
+
+    def scan(self, pattern):
+        """
+        Scan the text for the given pattern and update pos/match
+        and related fields. The return value is a boolean that
+        indicates if the pattern matched. The matched value is
+        stored on the instance as ``match``, the last value is
+        stored as ``last``. ``start_pos`` is the position of the
+        pointer before the pattern was matched, ``pos`` is the
+        end position.
+        """
+        if self.eos:
+            raise EndOfText()
+        if pattern not in self._re_cache:
+            self._re_cache[pattern] = re.compile(pattern, self.flags)
+        self.last = self.match
+        m = self._re_cache[pattern].match(self.data, self.pos)
+        if m is None:
+            return False
+        self.start_pos = m.start()
+        self.pos = m.end()
+        self.match = m.group()
+        return True
+
+    def get_char(self):
+        """Scan exactly one char."""
+        self.scan('.')
+
+    def __repr__(self):
+        return '<%s %d/%d>' % (
+            self.__class__.__name__,
+            self.pos,
+            self.data_length
+        )
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/sphinxext.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/sphinxext.py
new file mode 100644
index 0000000..2c7facd
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/sphinxext.py
@@ -0,0 +1,217 @@
+"""
+    pygments.sphinxext
+    ~~~~~~~~~~~~~~~~~~
+
+    Sphinx extension to generate automatic documentation of lexers,
+    formatters and filters.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import sys
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import Directive
+from sphinx.util.nodes import nested_parse_with_titles
+
+
+MODULEDOC = '''
+.. module:: %s
+
+%s
+%s
+'''
+
+LEXERDOC = '''
+.. class:: %s
+
+    :Short names: %s
+    :Filenames: %s
+    :MIME types: %s
+
+    %s
+
+'''
+
+FMTERDOC = '''
+.. class:: %s
+
+    :Short names: %s
+    :Filenames: %s
+
+    %s
+
+'''
+
+FILTERDOC = '''
+.. class:: %s
+
+    :Name: %s
+
+    %s
+
+'''
+
+
+class PygmentsDoc(Directive):
+    """
+    A directive to collect all lexers/formatters/filters and generate
+    autoclass directives for them.
+    """
+    has_content = False
+    required_arguments = 1
+    optional_arguments = 0
+    final_argument_whitespace = False
+    option_spec = {}
+
+    def run(self):
+        self.filenames = set()
+        if self.arguments[0] == 'lexers':
+            out = self.document_lexers()
+        elif self.arguments[0] == 'formatters':
+            out = self.document_formatters()
+        elif self.arguments[0] == 'filters':
+            out = self.document_filters()
+        elif self.arguments[0] == 'lexers_overview':
+            out = self.document_lexers_overview()
+        else:
+            raise Exception('invalid argument for "pygmentsdoc" directive')
+        node = nodes.compound()
+        vl = ViewList(out.split('\n'), source='')
+        nested_parse_with_titles(self.state, vl, node)
+        for fn in self.filenames:
+            self.state.document.settings.record_dependencies.add(fn)
+        return node.children
+
+    def document_lexers_overview(self):
+        """Generate a tabular overview of all lexers.
+
+        The columns are the lexer name, the extensions handled by this lexer
+        (or "None"), the aliases and a link to the lexer class."""
+        from pip._vendor.pygments.lexers._mapping import LEXERS
+        from pip._vendor.pygments.lexers import find_lexer_class
+        out = []
+
+        table = []
+
+        def format_link(name, url):
+            if url:
+                return f'`{name} <{url}>`_'
+            return name
+
+        for classname, data in sorted(LEXERS.items(), key=lambda x: x[1][1].lower()):
+            lexer_cls = find_lexer_class(data[1])
+            extensions = lexer_cls.filenames + lexer_cls.alias_filenames
+
+            table.append({
+                'name': format_link(data[1], lexer_cls.url),
+                'extensions': ', '.join(extensions).replace('*', '\\*').replace('_', '\\_') or 'None',
+                'aliases': ', '.join(data[2]),
+                'class': f'{data[0]}.{classname}'
+            })
+
+        column_names = ['name', 'extensions', 'aliases', 'class']
+        column_lengths = [max([len(row[column]) for row in table if row[column]])
+                          for column in column_names]
+
+        def write_row(*columns):
+            """Format a table row"""
+            out = []
+            for l, c in zip(column_lengths, columns):
+                if c:
+                    out.append(c.ljust(l))
+                else:
+                    out.append(' '*l)
+
+            return ' '.join(out)
+
+        def write_separator():
+            """Write a table separator row"""
+            sep = ['='*c for c in column_lengths]
+            return write_row(*sep)
+
+        out.append(write_separator())
+        out.append(write_row('Name', 'Extension(s)', 'Short name(s)', 'Lexer class'))
+        out.append(write_separator())
+        for row in table:
+            out.append(write_row(
+                row['name'],
+                row['extensions'],
+                row['aliases'],
+                f':class:`~{row["class"]}`'))
+        out.append(write_separator())
+
+        return '\n'.join(out)
+
+    def document_lexers(self):
+        from pip._vendor.pygments.lexers._mapping import LEXERS
+        out = []
+        modules = {}
+        moduledocstrings = {}
+        for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
+            module = data[0]
+            mod = __import__(module, None, None, [classname])
+            self.filenames.add(mod.__file__)
+            cls = getattr(mod, classname)
+            if not cls.__doc__:
+                print("Warning: %s does not have a docstring."
+                      % classname)
+            docstring = cls.__doc__
+            if isinstance(docstring, bytes):
+                docstring = docstring.decode('utf8')
+            modules.setdefault(module, []).append((
+                classname,
+                ', '.join(data[2]) or 'None',
+                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
+                ', '.join(data[4]) or 'None',
+                docstring))
+            if module not in moduledocstrings:
+                moddoc = mod.__doc__
+                if isinstance(moddoc, bytes):
+                    moddoc = moddoc.decode('utf8')
+                moduledocstrings[module] = moddoc
+
+        for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
+            if moduledocstrings[module] is None:
+                raise Exception("Missing docstring for %s" % (module,))
+            heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
+            out.append(MODULEDOC % (module, heading, '-'*len(heading)))
+            for data in lexers:
+                out.append(LEXERDOC % data)
+
+        return ''.join(out)
+
+    def document_formatters(self):
+        from pip._vendor.pygments.formatters import FORMATTERS
+
+        out = []
+        for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
+            module = data[0]
+            mod = __import__(module, None, None, [classname])
+            self.filenames.add(mod.__file__)
+            cls = getattr(mod, classname)
+            docstring = cls.__doc__
+            if isinstance(docstring, bytes):
+                docstring = docstring.decode('utf8')
+            heading = cls.__name__
+            out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
+                                   ', '.join(data[3]).replace('*', '\\*') or 'None',
+                                   docstring))
+        return ''.join(out)
+
+    def document_filters(self):
+        from pip._vendor.pygments.filters import FILTERS
+
+        out = []
+        for name, cls in FILTERS.items():
+            self.filenames.add(sys.modules[cls.__module__].__file__)
+            docstring = cls.__doc__
+            if isinstance(docstring, bytes):
+                docstring = docstring.decode('utf8')
+            out.append(FILTERDOC % (cls.__name__, name, docstring))
+        return ''.join(out)
+
+
+def setup(app):
+    app.add_directive('pygmentsdoc', PygmentsDoc)
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/style.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/style.py
new file mode 100644
index 0000000..edc1962
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/style.py
@@ -0,0 +1,197 @@
+"""
+    pygments.style
+    ~~~~~~~~~~~~~~
+
+    Basic style object.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.token import Token, STANDARD_TYPES
+
+# Default mapping of ansixxx color names to RGB values.
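+# (Editor's note, illustrative only: ``StyleMeta.colorformat()`` below passes
+# these names through unchanged, so a style string such as ``'bold ansired'``
+# keeps the symbolic name; ``style_for_token()`` then resolves it to the RGB
+# value ``'7f0000'`` while also reporting it under ``'ansicolor'`` so that
+# terminal formatters can emit the native ANSI color instead.)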
+_ansimap = { + # dark + 'ansiblack': '000000', + 'ansired': '7f0000', + 'ansigreen': '007f00', + 'ansiyellow': '7f7fe0', + 'ansiblue': '00007f', + 'ansimagenta': '7f007f', + 'ansicyan': '007f7f', + 'ansigray': 'e5e5e5', + # normal + 'ansibrightblack': '555555', + 'ansibrightred': 'ff0000', + 'ansibrightgreen': '00ff00', + 'ansibrightyellow': 'ffff00', + 'ansibrightblue': '0000ff', + 'ansibrightmagenta': 'ff00ff', + 'ansibrightcyan': '00ffff', + 'ansiwhite': 'ffffff', +} +# mapping of deprecated #ansixxx colors to new color names +_deprecated_ansicolors = { + # dark + '#ansiblack': 'ansiblack', + '#ansidarkred': 'ansired', + '#ansidarkgreen': 'ansigreen', + '#ansibrown': 'ansiyellow', + '#ansidarkblue': 'ansiblue', + '#ansipurple': 'ansimagenta', + '#ansiteal': 'ansicyan', + '#ansilightgray': 'ansigray', + # normal + '#ansidarkgray': 'ansibrightblack', + '#ansired': 'ansibrightred', + '#ansigreen': 'ansibrightgreen', + '#ansiyellow': 'ansibrightyellow', + '#ansiblue': 'ansibrightblue', + '#ansifuchsia': 'ansibrightmagenta', + '#ansiturquoise': 'ansibrightcyan', + '#ansiwhite': 'ansiwhite', +} +ansicolors = set(_ansimap) + + +class StyleMeta(type): + + def __new__(mcs, name, bases, dct): + obj = type.__new__(mcs, name, bases, dct) + for token in STANDARD_TYPES: + if token not in obj.styles: + obj.styles[token] = '' + + def colorformat(text): + if text in ansicolors: + return text + if text[0:1] == '#': + col = text[1:] + if len(col) == 6: + return col + elif len(col) == 3: + return col[0] * 2 + col[1] * 2 + col[2] * 2 + elif text == '': + return '' + elif text.startswith('var') or text.startswith('calc'): + return text + assert False, "wrong color format %r" % text + + _styles = obj._styles = {} + + for ttype in obj.styles: + for token in ttype.split(): + if token in _styles: + continue + ndef = _styles.get(token.parent, None) + styledefs = obj.styles.get(token, '').split() + if not ndef or token is None: + ndef = ['', 0, 0, 0, '', '', 0, 0, 0] + elif 'noinherit' in styledefs and token is not Token: + ndef = _styles[Token][:] + else: + ndef = ndef[:] + _styles[token] = ndef + for styledef in obj.styles.get(token, '').split(): + if styledef == 'noinherit': + pass + elif styledef == 'bold': + ndef[1] = 1 + elif styledef == 'nobold': + ndef[1] = 0 + elif styledef == 'italic': + ndef[2] = 1 + elif styledef == 'noitalic': + ndef[2] = 0 + elif styledef == 'underline': + ndef[3] = 1 + elif styledef == 'nounderline': + ndef[3] = 0 + elif styledef[:3] == 'bg:': + ndef[4] = colorformat(styledef[3:]) + elif styledef[:7] == 'border:': + ndef[5] = colorformat(styledef[7:]) + elif styledef == 'roman': + ndef[6] = 1 + elif styledef == 'sans': + ndef[7] = 1 + elif styledef == 'mono': + ndef[8] = 1 + else: + ndef[0] = colorformat(styledef) + + return obj + + def style_for_token(cls, token): + t = cls._styles[token] + ansicolor = bgansicolor = None + color = t[0] + if color in _deprecated_ansicolors: + color = _deprecated_ansicolors[color] + if color in ansicolors: + ansicolor = color + color = _ansimap[color] + bgcolor = t[4] + if bgcolor in _deprecated_ansicolors: + bgcolor = _deprecated_ansicolors[bgcolor] + if bgcolor in ansicolors: + bgansicolor = bgcolor + bgcolor = _ansimap[bgcolor] + + return { + 'color': color or None, + 'bold': bool(t[1]), + 'italic': bool(t[2]), + 'underline': bool(t[3]), + 'bgcolor': bgcolor or None, + 'border': t[5] or None, + 'roman': bool(t[6]) or None, + 'sans': bool(t[7]) or None, + 'mono': bool(t[8]) or None, + 'ansicolor': ansicolor, + 'bgansicolor': bgansicolor, + } + + 
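+
+    # Editor's illustrative note (not part of the original source): for a
+    # style that defines ``styles = {Comment: 'italic #888'}``, the metaclass
+    # logic above normalizes the entry so that ``style_for_token(Comment)``
+    # returns ``{'color': '888888', 'italic': True, 'bold': False, ...}``:
+    # 3-digit colors expand to 6 hex digits and flags become real booleans.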
def list_styles(cls): + return list(cls) + + def styles_token(cls, ttype): + return ttype in cls._styles + + def __iter__(cls): + for token in cls._styles: + yield token, cls.style_for_token(token) + + def __len__(cls): + return len(cls._styles) + + +class Style(metaclass=StyleMeta): + + #: overall background color (``None`` means transparent) + background_color = '#ffffff' + + #: highlight background color + highlight_color = '#ffffcc' + + #: line number font color + line_number_color = 'inherit' + + #: line number background color + line_number_background_color = 'transparent' + + #: special line number font color + line_number_special_color = '#000000' + + #: special line number background color + line_number_special_background_color = '#ffffc0' + + #: Style definitions for individual token types. + styles = {} + + # Attribute for lexers defined within Pygments. If set + # to True, the style is not shown in the style gallery + # on the website. This is intended for language-specific + # styles. + web_style_gallery_exclude = False diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__init__.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__init__.py new file mode 100644 index 0000000..7401cf5 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__init__.py @@ -0,0 +1,103 @@ +""" + pygments.styles + ~~~~~~~~~~~~~~~ + + Contains built-in styles. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +from pip._vendor.pygments.plugin import find_plugin_styles +from pip._vendor.pygments.util import ClassNotFound + +#: A dictionary of built-in styles, mapping style names to +#: ``'submodule::classname'`` strings. +STYLE_MAP = { + 'default': 'default::DefaultStyle', + 'emacs': 'emacs::EmacsStyle', + 'friendly': 'friendly::FriendlyStyle', + 'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle', + 'colorful': 'colorful::ColorfulStyle', + 'autumn': 'autumn::AutumnStyle', + 'murphy': 'murphy::MurphyStyle', + 'manni': 'manni::ManniStyle', + 'material': 'material::MaterialStyle', + 'monokai': 'monokai::MonokaiStyle', + 'perldoc': 'perldoc::PerldocStyle', + 'pastie': 'pastie::PastieStyle', + 'borland': 'borland::BorlandStyle', + 'trac': 'trac::TracStyle', + 'native': 'native::NativeStyle', + 'fruity': 'fruity::FruityStyle', + 'bw': 'bw::BlackWhiteStyle', + 'vim': 'vim::VimStyle', + 'vs': 'vs::VisualStudioStyle', + 'tango': 'tango::TangoStyle', + 'rrt': 'rrt::RrtStyle', + 'xcode': 'xcode::XcodeStyle', + 'igor': 'igor::IgorStyle', + 'paraiso-light': 'paraiso_light::ParaisoLightStyle', + 'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle', + 'lovelace': 'lovelace::LovelaceStyle', + 'algol': 'algol::AlgolStyle', + 'algol_nu': 'algol_nu::Algol_NuStyle', + 'arduino': 'arduino::ArduinoStyle', + 'rainbow_dash': 'rainbow_dash::RainbowDashStyle', + 'abap': 'abap::AbapStyle', + 'solarized-dark': 'solarized::SolarizedDarkStyle', + 'solarized-light': 'solarized::SolarizedLightStyle', + 'sas': 'sas::SasStyle', + 'staroffice' : 'staroffice::StarofficeStyle', + 'stata': 'stata_light::StataLightStyle', + 'stata-light': 'stata_light::StataLightStyle', + 'stata-dark': 'stata_dark::StataDarkStyle', + 'inkpot': 'inkpot::InkPotStyle', + 'zenburn': 'zenburn::ZenburnStyle', + 'gruvbox-dark': 'gruvbox::GruvboxDarkStyle', + 'gruvbox-light': 'gruvbox::GruvboxLightStyle', + 'dracula': 'dracula::DraculaStyle', + 'one-dark': 'onedark::OneDarkStyle', + 'lilypond' : 
'lilypond::LilyPondStyle', + 'nord': 'nord::NordStyle', + 'nord-darker': 'nord::NordDarkerStyle', + 'github-dark': 'gh_dark::GhDarkStyle' +} + + +def get_style_by_name(name): + """ + Return a style class by its short name. The names of the builtin styles + are listed in :data:`pygments.styles.STYLE_MAP`. + + Will raise :exc:`pygments.util.ClassNotFound` if no style of that name is + found. + """ + if name in STYLE_MAP: + mod, cls = STYLE_MAP[name].split('::') + builtin = "yes" + else: + for found_name, style in find_plugin_styles(): + if name == found_name: + return style + # perhaps it got dropped into our styles package + builtin = "" + mod = name + cls = name.title() + "Style" + + try: + mod = __import__('pygments.styles.' + mod, None, None, [cls]) + except ImportError: + raise ClassNotFound("Could not find style module %r" % mod + + (builtin and ", though it should be builtin") + ".") + try: + return getattr(mod, cls) + except AttributeError: + raise ClassNotFound("Could not find style class %r in style module." % cls) + + +def get_all_styles(): + """Return a generator for all styles by name, both builtin and plugin.""" + yield from STYLE_MAP + for name, _ in find_plugin_styles(): + yield name diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-311.pyc b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-311.pyc Binary files differnew file mode 100644 index 0000000..f81c5cd --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/styles/__pycache__/__init__.cpython-311.pyc diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/token.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/token.py new file mode 100644 index 0000000..7395cb6 --- /dev/null +++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/token.py @@ -0,0 +1,213 @@ +""" + pygments.token + ~~~~~~~~~~~~~~ + + Basic token types and the standard tokens. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + + +class _TokenType(tuple): + parent = None + + def split(self): + buf = [] + node = self + while node is not None: + buf.append(node) + node = node.parent + buf.reverse() + return buf + + def __init__(self, *args): + # no need to call super.__init__ + self.subtypes = set() + + def __contains__(self, val): + return self is val or ( + type(val) is self.__class__ and + val[:len(self)] == self + ) + + def __getattr__(self, val): + if not val or not val[0].isupper(): + return tuple.__getattribute__(self, val) + new = _TokenType(self + (val,)) + setattr(self, val, new) + self.subtypes.add(new) + new.parent = self + return new + + def __repr__(self): + return 'Token' + (self and '.' or '') + '.'.join(self) + + def __copy__(self): + # These instances are supposed to be singletons + return self + + def __deepcopy__(self, memo): + # These instances are supposed to be singletons + return self + + +Token = _TokenType() + +# Special token types +Text = Token.Text +Whitespace = Text.Whitespace +Escape = Token.Escape +Error = Token.Error +# Text that doesn't belong to this lexer (e.g. 
HTML in PHP)
+Other = Token.Other
+
+# Common token types for source code
+Keyword = Token.Keyword
+Name = Token.Name
+Literal = Token.Literal
+String = Literal.String
+Number = Literal.Number
+Punctuation = Token.Punctuation
+Operator = Token.Operator
+Comment = Token.Comment
+
+# Generic types for non-source code
+Generic = Token.Generic
+
+# String and some others are not direct children of Token.
+# Alias them:
+Token.Token = Token
+Token.String = String
+Token.Number = Number
+
+
+def is_token_subtype(ttype, other):
+    """
+    Return True if ``ttype`` is a subtype of ``other``.
+
+    Exists for backwards compatibility. Use ``ttype in other`` now.
+    """
+    return ttype in other
+
+
+def string_to_tokentype(s):
+    """
+    Convert a string into a token type::
+
+        >>> string_to_tokentype('String.Double')
+        Token.Literal.String.Double
+        >>> string_to_tokentype('Token.Literal.Number')
+        Token.Literal.Number
+        >>> string_to_tokentype('')
+        Token
+
+    Tokens that are already tokens are returned unchanged:
+
+        >>> string_to_tokentype(String)
+        Token.Literal.String
+    """
+    if isinstance(s, _TokenType):
+        return s
+    if not s:
+        return Token
+    node = Token
+    for item in s.split('.'):
+        node = getattr(node, item)
+    return node
+
+
+# Map standard token types to short names, used in CSS class naming.
+# If you add a new item, please be sure to run this file to perform
+# a consistency check for duplicate values.
+STANDARD_TYPES = {
+    Token: '',
+
+    Text: '',
+    Whitespace: 'w',
+    Escape: 'esc',
+    Error: 'err',
+    Other: 'x',
+
+    Keyword: 'k',
+    Keyword.Constant: 'kc',
+    Keyword.Declaration: 'kd',
+    Keyword.Namespace: 'kn',
+    Keyword.Pseudo: 'kp',
+    Keyword.Reserved: 'kr',
+    Keyword.Type: 'kt',
+
+    Name: 'n',
+    Name.Attribute: 'na',
+    Name.Builtin: 'nb',
+    Name.Builtin.Pseudo: 'bp',
+    Name.Class: 'nc',
+    Name.Constant: 'no',
+    Name.Decorator: 'nd',
+    Name.Entity: 'ni',
+    Name.Exception: 'ne',
+    Name.Function: 'nf',
+    Name.Function.Magic: 'fm',
+    Name.Property: 'py',
+    Name.Label: 'nl',
+    Name.Namespace: 'nn',
+    Name.Other: 'nx',
+    Name.Tag: 'nt',
+    Name.Variable: 'nv',
+    Name.Variable.Class: 'vc',
+    Name.Variable.Global: 'vg',
+    Name.Variable.Instance: 'vi',
+    Name.Variable.Magic: 'vm',
+
+    Literal: 'l',
+    Literal.Date: 'ld',
+
+    String: 's',
+    String.Affix: 'sa',
+    String.Backtick: 'sb',
+    String.Char: 'sc',
+    String.Delimiter: 'dl',
+    String.Doc: 'sd',
+    String.Double: 's2',
+    String.Escape: 'se',
+    String.Heredoc: 'sh',
+    String.Interpol: 'si',
+    String.Other: 'sx',
+    String.Regex: 'sr',
+    String.Single: 's1',
+    String.Symbol: 'ss',
+
+    Number: 'm',
+    Number.Bin: 'mb',
+    Number.Float: 'mf',
+    Number.Hex: 'mh',
+    Number.Integer: 'mi',
+    Number.Integer.Long: 'il',
+    Number.Oct: 'mo',
+
+    Operator: 'o',
+    Operator.Word: 'ow',
+
+    Punctuation: 'p',
+    Punctuation.Marker: 'pm',
+
+    Comment: 'c',
+    Comment.Hashbang: 'ch',
+    Comment.Multiline: 'cm',
+    Comment.Preproc: 'cp',
+    Comment.PreprocFile: 'cpf',
+    Comment.Single: 'c1',
+    Comment.Special: 'cs',
+
+    Generic: 'g',
+    Generic.Deleted: 'gd',
+    Generic.Emph: 'ge',
+    Generic.Error: 'gr',
+    Generic.Heading: 'gh',
+    Generic.Inserted: 'gi',
+    Generic.Output: 'go',
+    Generic.Prompt: 'gp',
+    Generic.Strong: 'gs',
+    Generic.Subheading: 'gu',
+    Generic.Traceback: 'gt',
+}
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py
new file mode 100644
index 0000000..39f6bae
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/unistring.py
@@ -0,0 +1,153 @@
+"""
+
pygments.unistring + ~~~~~~~~~~~~~~~~~~ + + Strings of all Unicode characters of a certain category. + Used for matching in Unicode-aware languages. Run to regenerate. + + Inspired by chartypes_create.py from the MoinMoin project. + + :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. + :license: BSD, see LICENSE for details. +""" + +Cc = '\x00-\x1f\x7f-\x9f' + +Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f' + +Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32f
f\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143f
f\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U0001d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff' + +Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd' + +Cs = '\ud800-\udbff\\\udc00\udc01-\udfff' + +Ll = 
'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\
ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943' + +Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1' + +Lo = 
'\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080
-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d' + +Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc' + +Lu = 
'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\u
a726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921' + +Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172' + +Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672' + +Mn = 
'\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U000
11d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef' + +Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959' + +Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e' + +No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c' + +Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f' + +Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d' + +Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63' + +Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21' + +Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20' + +Po = 
"!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f" + +Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62' + +Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0' + +Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff' + +Sm = 
'+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1' + +So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d' + +Zl = '\u2028' + +Zp = '\u2029' + +Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000' + +xid_continue = 
'0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0
-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U00011344\U00011347-\U0
0011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef' + +xid_start = 
'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\ua803-\ua805
\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b1
1e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d' + +cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs'] + +# Generated from unidata 11.0.0 + +def combine(*args): + return ''.join(globals()[cat] for cat in args) + + +def allexcept(*args): + newcats = cats[:] + for arg in args: + newcats.remove(arg) + return ''.join(globals()[cat] for cat in newcats) + + +def _handle_runs(char_list): # pragma: no cover + buf = [] + for c in char_list: + if len(c) == 1: + if buf and buf[-1][1] == chr(ord(c)-1): + buf[-1] = (buf[-1][0], c) + else: + buf.append((c, c)) + else: + buf.append((c, c)) + for a, b in buf: + if a == b: + yield a + else: + yield '%s-%s' % (a, b) + + +if __name__ == '__main__': # pragma: no cover + import unicodedata + + categories = {'xid_start': [], 'xid_continue': []} + + with open(__file__, encoding='utf-8') as fp: + content = fp.read() + + header = content[:content.find('Cc =')] + footer = content[content.find("def combine("):] + + for code in range(0x110000): + c = chr(code) + cat = unicodedata.category(c) + if ord(c) == 0xdc00: + # Hack to avoid combining this combining with the preceding high + # surrogate, 0xdbff, when doing a repr. + c = '\\' + c + elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e): + # Escape regex metachars. + c = '\\' + c + categories.setdefault(cat, []).append(c) + # XID_START and XID_CONTINUE are special categories used for matching + # identifiers in Python 3. 
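+        # ``c.isidentifier()`` is true exactly when ``c`` may *start* an
+        # identifier; prefixing it with ``'a'`` instead tests whether it
+        # may *continue* one.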
+        if c.isidentifier():
+            categories['xid_start'].append(c)
+        if ('a' + c).isidentifier():
+            categories['xid_continue'].append(c)
+
+    with open(__file__, 'w', encoding='utf-8') as fp:
+        fp.write(header)
+
+        for cat in sorted(categories):
+            val = ''.join(_handle_runs(categories[cat]))
+            fp.write('%s = %a\n\n' % (cat, val))
+
+        cats = sorted(categories)
+        cats.remove('xid_start')
+        cats.remove('xid_continue')
+        fp.write('cats = %r\n\n' % cats)
+
+        fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
+
+        fp.write(footer)
diff --git a/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py
new file mode 100644
index 0000000..941fdb9
--- /dev/null
+++ b/venv/lib/python3.11/site-packages/pip/_vendor/pygments/util.py
@@ -0,0 +1,330 @@
+"""
+    pygments.util
+    ~~~~~~~~~~~~~
+
+    Utility functions.
+
+    :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
+    :license: BSD, see LICENSE for details.
+"""
+
+import re
+from io import TextIOWrapper
+
+
+split_path_re = re.compile(r'[/\\ ]')
+doctype_lookup_re = re.compile(r'''
+    <!DOCTYPE\s+(
+     [a-zA-Z_][a-zA-Z0-9]*
+     (?: \s+      # optional in HTML5
+     [a-zA-Z_][a-zA-Z0-9]*\s+
+     "[^"]*")?
+     )
+    [^>]*>
+''', re.DOTALL | re.MULTILINE | re.VERBOSE)
+tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
+                    re.IGNORECASE | re.DOTALL | re.MULTILINE)
+xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
+
+
+class ClassNotFound(ValueError):
+    """Raised if one of the lookup functions didn't find a matching class."""
+
+
+class OptionError(Exception):
+    """
+    This exception will be raised by all option processing functions if
+    the type or value of the argument is not correct.
+    """
+
+
+def get_choice_opt(options, optname, allowed, default=None, normcase=False):
+    """
+    If the value of the key `optname` in the `options` dictionary is not in
+    the sequence `allowed`, raise an error, otherwise return it.
+    """
+    string = options.get(optname, default)
+    if normcase:
+        string = string.lower()
+    if string not in allowed:
+        raise OptionError('Value for option %s must be one of %s' %
+                          (optname, ', '.join(map(str, allowed))))
+    return string
+
+
+def get_bool_opt(options, optname, default=None):
+    """
+    Intuitively, this is `options.get(optname, default)`, but restricted to
+    Boolean values. Booleans may also be given as strings, so that Boolean
+    values can be accepted from command-line arguments. If the key `optname`
+    is present in the dictionary `options` but not associated with a Boolean,
+    raise an `OptionError`. If it is absent, `default` is returned instead.
+
+    The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
+    ``on``, the ones for ``False`` are ``0``, ``no``, ``false`` and ``off``
+    (matched case-insensitively).
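+
+    For example (doctest added for illustration; not part of the upstream
+    docstring)::
+
+        >>> from pygments.util import get_bool_opt
+        >>> get_bool_opt({'linenos': 'on'}, 'linenos', False)
+        True
+        >>> get_bool_opt({}, 'linenos', False)
+        False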
+ """ + string = options.get(optname, default) + if isinstance(string, bool): + return string + elif isinstance(string, int): + return bool(string) + elif not isinstance(string, str): + raise OptionError('Invalid type %r for option %s; use ' + '1/0, yes/no, true/false, on/off' % ( + string, optname)) + elif string.lower() in ('1', 'yes', 'true', 'on'): + return True + elif string.lower() in ('0', 'no', 'false', 'off'): + return False + else: + raise OptionError('Invalid value %r for option %s; use ' + '1/0, yes/no, true/false, on/off' % ( + string, optname)) + + +def get_int_opt(options, optname, default=None): + """As :func:`get_bool_opt`, but interpret the value as an integer.""" + string = options.get(optname, default) + try: + return int(string) + except TypeError: + raise OptionError('Invalid type %r for option %s; you ' + 'must give an integer value' % ( + string, optname)) + except ValueError: + raise OptionError('Invalid value %r for option %s; you ' + 'must give an integer value' % ( + string, optname)) + +def get_list_opt(options, optname, default=None): + """ + If the key `optname` from the dictionary `options` is a string, + split it at whitespace and return it. If it is already a list + or a tuple, it is returned as a list. + """ + val = options.get(optname, default) + if isinstance(val, str): + return val.split() + elif isinstance(val, (list, tuple)): + return list(val) + else: + raise OptionError('Invalid type %r for option %s; you ' + 'must give a list value' % ( + val, optname)) + + +def docstring_headline(obj): + if not obj.__doc__: + return '' + res = [] + for line in obj.__doc__.strip().splitlines(): + if line.strip(): + res.append(" " + line.strip()) + else: + break + return ''.join(res).lstrip() + + +def make_analysator(f): + """Return a static text analyser function that returns float values.""" + def text_analyse(text): + try: + rv = f(text) + except Exception: + return 0.0 + if not rv: + return 0.0 + try: + return min(1.0, max(0.0, float(rv))) + except (ValueError, TypeError): + return 0.0 + text_analyse.__doc__ = f.__doc__ + return staticmethod(text_analyse) + + +def shebang_matches(text, regex): + r"""Check if the given regular expression matches the last part of the + shebang if one exists. + + >>> from pygments.util import shebang_matches + >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?') + True + >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?') + True + >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?') + False + >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?') + False + >>> shebang_matches('#!/usr/bin/startsomethingwith python', + ... 
+        True
+
+    It also checks for common Windows executable file extensions::
+
+        >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
+        True
+
+    Option arguments (``'-f'`` or ``'--foo'``) are ignored, so ``'perl'``
+    matches the same shebangs as ``'perl -e'``.
+
+    Note that this method automatically searches the whole string (i.e.
+    the regular expression is anchored with ``'^'`` and ``'$'``).
+    """
+    index = text.find('\n')
+    if index >= 0:
+        first_line = text[:index].lower()
+    else:
+        first_line = text.lower()
+    if first_line.startswith('#!'):
+        try:
+            found = [x for x in split_path_re.split(first_line[2:].strip())
+                     if x and not x.startswith('-')][-1]
+        except IndexError:
+            return False
+        regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
+        if regex.search(found) is not None:
+            return True
+    return False
+
+
+def doctype_matches(text, regex):
+    """Check if the doctype matches a regular expression (if present).
+
+    Note that this method only checks the first part of a DOCTYPE,
+    e.g. 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'.
+    """
+    m = doctype_lookup_re.search(text)
+    if m is None:
+        return False
+    doctype = m.group(1)
+    return re.compile(regex, re.I).match(doctype.strip()) is not None
+
+
+def html_doctype_matches(text):
+    """Check if the file looks like it has an HTML doctype."""
+    return doctype_matches(text, r'html')
+
+
+_looks_like_xml_cache = {}
+
+
+def looks_like_xml(text):
+    """Check if a doctype exists or if we have some tags."""
+    if xml_decl_re.match(text):
+        return True
+    key = hash(text)
+    try:
+        return _looks_like_xml_cache[key]
+    except KeyError:
+        m = doctype_lookup_re.search(text)
+        if m is not None:
+            return True
+        rv = tag_re.search(text[:1000]) is not None
+        _looks_like_xml_cache[key] = rv
+        return rv
+
+
+def surrogatepair(c):
+    """Given a Unicode code point that needs more than 16 bits, return the
+    two 16-bit code units of its surrogate pair.
+    """
+    # From example D28 of:
+    # http://www.unicode.org/book/ch03.pdf
+    return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
+
+
+def format_lines(var_name, seq, raw=False, indent_level=0):
+    """Format a sequence of strings for output."""
+    lines = []
+    base_indent = ' ' * indent_level * 4
+    inner_indent = ' ' * (indent_level + 1) * 4
+    lines.append(base_indent + var_name + ' = (')
+    if raw:
+        # These should be preformatted reprs of, say, tuples.
+        for i in seq:
+            lines.append(inner_indent + i + ',')
+    else:
+        for i in seq:
+            # Force use of single quotes
+            r = repr(i + '"')
+            lines.append(inner_indent + r[:-2] + r[-1] + ',')
+    lines.append(base_indent + ')')
+    return '\n'.join(lines)
+
+
+def duplicates_removed(it, already_seen=()):
+    """
+    Return a list with duplicates removed from the iterable `it`.
+
+    Order is preserved.
+    """
+    lst = []
+    seen = set()
+    for i in it:
+        if i in seen or i in already_seen:
+            continue
+        lst.append(i)
+        seen.add(i)
+    return lst
+
+
+class Future:
+    """Generic class to defer some work.
+
+    Handled specially in RegexLexerMeta, to support regex string
+    construction at first use.
+    """
+    def get(self):
+        raise NotImplementedError
+
+
+def guess_decode(text):
+    """Decode *text* with a guessed encoding.
+
+    First try UTF-8; this should fail for non-UTF-8 encodings.
+    Then try the preferred locale encoding.
+    Fall back to latin-1, which always works.
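+
+    For example (doctest added for illustration; not part of the upstream
+    docstring)::
+
+        >>> from pygments.util import guess_decode
+        >>> guess_decode('café'.encode('utf-8'))
+        ('café', 'utf-8')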
+ """ + try: + text = text.decode('utf-8') + return text, 'utf-8' + except UnicodeDecodeError: + try: + import locale + prefencoding = locale.getpreferredencoding() + text = text.decode() + return text, prefencoding + except (UnicodeDecodeError, LookupError): + text = text.decode('latin1') + return text, 'latin1' + + +def guess_decode_from_terminal(text, term): + """Decode *text* coming from terminal *term*. + + First try the terminal encoding, if given. + Then try UTF-8. Then try the preferred locale encoding. + Fall back to latin-1, which always works. + """ + if getattr(term, 'encoding', None): + try: + text = text.decode(term.encoding) + except UnicodeDecodeError: + pass + else: + return text, term.encoding + return guess_decode(text) + + +def terminal_encoding(term): + """Return our best guess of encoding for the given *term*.""" + if getattr(term, 'encoding', None): + return term.encoding + import locale + return locale.getpreferredencoding() + + +class UnclosingTextIOWrapper(TextIOWrapper): + # Don't close underlying buffer on destruction. + def close(self): + self.flush() |