diff --git a/.gitignore b/.gitignore
index bdaab25..866ad94 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,8 @@
 env/
+env/*
+venv/
+venv/*
+.idea/
+*.pyc
+__pycache__/
+__pycache__/*
\ No newline at end of file
diff --git a/README.md b/README.md
index e69de29..ddc9b30 100644
--- a/README.md
+++ b/README.md
@@ -0,0 +1,87 @@
+# Run
+
+## in Development
+`export FLASK_APP=run.py`
+`export FLASK_ENV=development`
+`flask run`
+
+**Using gunicorn:**
+
+`gunicorn -w 4 -b 127.0.0.1:5000 app:app`
+* `-w` number of workers
+* `-b` bind to an address / unix socket
+
+**And gunicorn using a unix socket** (this is how it should run in production):
+`gunicorn --workers 4 --bind unix:app.sock -m 007 app:app`
+* `-m` umask applied to the socket file (007 keeps it group-accessible)
+
+## in Production with gunicorn and unix sockets
+Based on https://medium.com/faun/deploy-flask-app-with-nginx-using-gunicorn-7fda4f50066a
+
+### systemd service file
+
+Add to /etc/systemd/system/watermarks.service:
+
+```
+[Unit]
+After=network.target
+
+[Service]
+User=psc
+Group=www-data
+WorkingDirectory=/var/www/TacticalApp
+Environment="PATH=/var/www/TacticalApp/venv/bin"
+ExecStart=/var/www/TacticalApp/venv/bin/gunicorn --workers 4 --bind unix:app.sock -m 007 app:app
+
+[Install]
+WantedBy=multi-user.target
+```
+
+## Enable & start the service
+`systemctl enable watermarks.service`
+
+`systemctl start watermarks.service`
+
+It is also a good idea to check the status of the service:
+`systemctl status watermarks.service`
+
+## Nginx config
+
+```
+location / {
+    include proxy_params;
+    # it will pass the requests to the socket
+    proxy_pass http://unix:/var/www/TacticalApp/app.sock;
+    # @andre: unsure whether we need a proxy_redirect as well
+    # if so, it might be something like
+    # http://unix:/var/www/TacticalApp/app.sock $scheme://$host:80/;
+ }
+```
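+
+To check that gunicorn is actually serving on the socket before (or after) pointing nginx at it, curl can talk to the socket directly. A minimal sketch, assuming curl >= 7.40 (the version that added `--unix-socket`):
+
+```
+# hit the app through the unix socket instead of TCP
+curl --unix-socket /var/www/TacticalApp/app.sock http://localhost/
+
+# once nginx is in front, compare with the proxied response
+curl -i http://localhost/
+```
+
+If the socket responds but nginx returns 502, the usual suspect is socket permissions: the `-m 007` umask and the www-data group above are what let nginx reach app.sock.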
+
+## Debugging
+
+While in development, it might be helpful to enable gunicorn logging to check what is going on, by replacing the `ExecStart=` value in watermarks.service with:
+
+`ExecStart=/var/www/TacticalApp/venv/bin/gunicorn --log-level debug --error-logfile /var/www/TacticalApp/app.log --workers 4 --bind unix:app.sock -m 007 app:app`
+
+Then reload systemd and restart the service:
+`systemctl daemon-reload`
+
+`systemctl restart watermarks.service`
+
+and then follow app.log:
+`tail -f /var/www/TacticalApp/app.log`
+
+But in the long run, logging should be disabled.
\ No newline at end of file diff --git a/__pycache__/config.cpython-37.pyc b/__pycache__/config.cpython-37.pyc deleted file mode 100644 index 392a627..0000000 Binary files a/__pycache__/config.cpython-37.pyc and /dev/null differ diff --git a/__pycache__/wsgi.cpython-37.pyc b/__pycache__/wsgi.cpython-37.pyc deleted file mode 100644 index 9b46ecc..0000000 Binary files a/__pycache__/wsgi.cpython-37.pyc and /dev/null differ diff --git a/app/__pycache__/__init__.cpython-37.pyc b/app/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 8b981d2..0000000 Binary files a/app/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/app/__pycache__/views.cpython-37.pyc b/app/__pycache__/views.cpython-37.pyc deleted file mode 100644 index fc740f1..0000000 Binary files a/app/__pycache__/views.cpython-37.pyc and /dev/null differ diff --git a/requirements.txt b/requirements.txt index 02e4857..f923120 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,21 +1,4 @@ -alembic -click -console-log -dataset -Flask -Flask-SQLAlchemy -gevent -gevent-websocket -greenlet -gunicorn -itsdangerous -Jinja2 -Mako -MarkupSafe -python-dateutil -python-editor -setuptools -six -SQLAlchemy -Werkzeug -wsgigzip +Flask==1.0.2 +gunicorn==20.0.4 +Jinja2==2.11.1 +dataset==1.3.1 diff --git a/venv/bin/activate b/venv/bin/activate deleted file mode 100644 index 3971a44..0000000 --- a/venv/bin/activate +++ /dev/null @@ -1,76 +0,0 @@ -# This file must be used with "source bin/activate" *from bash* -# you cannot run it directly - -deactivate () { - # reset old environment variables - if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then - PATH="${_OLD_VIRTUAL_PATH:-}" - export PATH - unset _OLD_VIRTUAL_PATH - fi - if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then - PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" - export PYTHONHOME - unset _OLD_VIRTUAL_PYTHONHOME - fi - - # This should detect bash and zsh, which have a hash command that must - # be called to get it to forget past commands. Without forgetting - # past commands the $PATH changes we made may not be respected - if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r - fi - - if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then - PS1="${_OLD_VIRTUAL_PS1:-}" - export PS1 - unset _OLD_VIRTUAL_PS1 - fi - - unset VIRTUAL_ENV - if [ ! "$1" = "nondestructive" ] ; then - # Self destruct!
- unset -f deactivate - fi -} - -# unset irrelevant variables -deactivate nondestructive - -VIRTUAL_ENV="/var/www/TacticalApp/venv" -export VIRTUAL_ENV - -_OLD_VIRTUAL_PATH="$PATH" -PATH="$VIRTUAL_ENV/bin:$PATH" -export PATH - -# unset PYTHONHOME if set -# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) -# could use `if (set -u; : $PYTHONHOME) ;` in bash -if [ -n "${PYTHONHOME:-}" ] ; then - _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" - unset PYTHONHOME -fi - -if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then - _OLD_VIRTUAL_PS1="${PS1:-}" - if [ "x(venv) " != x ] ; then - PS1="(venv) ${PS1:-}" - else - if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1" - else - PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1" - fi - fi - export PS1 -fi - -# This should detect bash and zsh, which have a hash command that must -# be called to get it to forget past commands. Without forgetting -# past commands the $PATH changes we made may not be respected -if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r -fi diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh deleted file mode 100644 index d45085f..0000000 --- a/venv/bin/activate.csh +++ /dev/null @@ -1,37 +0,0 @@ -# This file must be used with "source bin/activate.csh" *from csh*. -# You cannot run it directly. -# Created by Davide Di Blasi . -# Ported to Python 3.3 venv by Andrew Svetlov - -alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' - -# Unset irrelevant variables. -deactivate nondestructive - -setenv VIRTUAL_ENV "/var/www/TacticalApp/venv" - -set _OLD_VIRTUAL_PATH="$PATH" -setenv PATH "$VIRTUAL_ENV/bin:$PATH" - - -set _OLD_VIRTUAL_PROMPT="$prompt" - -if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then - if ("venv" != "") then - set env_name = "venv" - else - if (`basename "VIRTUAL_ENV"` == "__") then - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` - else - set env_name = `basename "$VIRTUAL_ENV"` - endif - endif - set prompt = "[$env_name] $prompt" - unset env_name -endif - -alias pydoc python -m pydoc - -rehash diff --git a/venv/bin/activate.fish b/venv/bin/activate.fish deleted file mode 100644 index c91a377..0000000 --- a/venv/bin/activate.fish +++ /dev/null @@ -1,75 +0,0 @@ -# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org) -# you cannot run it directly - -function deactivate -d "Exit virtualenv and return to normal shell environment" - # reset old environment variables - if test -n "$_OLD_VIRTUAL_PATH" - set -gx PATH $_OLD_VIRTUAL_PATH - set -e _OLD_VIRTUAL_PATH - end - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME - set -e _OLD_VIRTUAL_PYTHONHOME - end - - if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - functions -e fish_prompt - set -e _OLD_FISH_PROMPT_OVERRIDE - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt - end - - set -e VIRTUAL_ENV - if test "$argv[1]" != "nondestructive" - # Self destruct! 
- functions -e deactivate - end -end - -# unset irrelevant variables -deactivate nondestructive - -set -gx VIRTUAL_ENV "/var/www/TacticalApp/venv" - -set -gx _OLD_VIRTUAL_PATH $PATH -set -gx PATH "$VIRTUAL_ENV/bin" $PATH - -# unset PYTHONHOME if set -if set -q PYTHONHOME - set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME - set -e PYTHONHOME -end - -if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # fish uses a function instead of an env var to generate the prompt. - - # save the current fish_prompt function as the function _old_fish_prompt - functions -c fish_prompt _old_fish_prompt - - # with the original prompt function renamed, we can override with our own. - function fish_prompt - # Save the return status of the last command - set -l old_status $status - - # Prompt override? - if test -n "(venv) " - printf "%s%s" "(venv) " (set_color normal) - else - # ...Otherwise, prepend env - set -l _checkbase (basename "$VIRTUAL_ENV") - if test $_checkbase = "__" - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) - else - printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) - end - end - - # Restore the return status of the previous command. - echo "exit $old_status" | . - _old_fish_prompt - end - - set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" -end diff --git a/venv/bin/alembic b/venv/bin/alembic deleted file mode 100755 index 67387f4..0000000 --- a/venv/bin/alembic +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from alembic.config import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/easy_install b/venv/bin/easy_install deleted file mode 100755 index 7671bcb..0000000 --- a/venv/bin/easy_install +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from setuptools.command.easy_install import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/easy_install-3.7 b/venv/bin/easy_install-3.7 deleted file mode 100755 index 7671bcb..0000000 --- a/venv/bin/easy_install-3.7 +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from setuptools.command.easy_install import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/flask b/venv/bin/flask deleted file mode 100755 index 0c6b1fb..0000000 --- a/venv/bin/flask +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from flask.cli import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/gunicorn b/venv/bin/gunicorn deleted file mode 100755 index 39136ce..0000000 --- a/venv/bin/gunicorn +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from gunicorn.app.wsgiapp import run - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run()) diff --git a/venv/bin/mako-render b/venv/bin/mako-render deleted file mode 100755 index 
16006e8..0000000 --- a/venv/bin/mako-render +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from mako.cmd import cmdline - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(cmdline()) diff --git a/venv/bin/pip b/venv/bin/pip deleted file mode 100755 index 56a52d0..0000000 --- a/venv/bin/pip +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pip._internal import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip3 b/venv/bin/pip3 deleted file mode 100755 index 56a52d0..0000000 --- a/venv/bin/pip3 +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pip._internal import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip3.7 b/venv/bin/pip3.7 deleted file mode 100755 index 56a52d0..0000000 --- a/venv/bin/pip3.7 +++ /dev/null @@ -1,10 +0,0 @@ -#!/var/www/TacticalApp/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pip._internal import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/python b/venv/bin/python deleted file mode 120000 index b8a0adb..0000000 --- a/venv/bin/python +++ /dev/null @@ -1 +0,0 @@ -python3 \ No newline at end of file diff --git a/venv/bin/python3 b/venv/bin/python3 deleted file mode 120000 index ae65fda..0000000 --- a/venv/bin/python3 +++ /dev/null @@ -1 +0,0 @@ -/usr/bin/python3 \ No newline at end of file diff --git a/venv/include/site/python3.7/greenlet/greenlet.h b/venv/include/site/python3.7/greenlet/greenlet.h deleted file mode 100644 index 8fff3f5..0000000 --- a/venv/include/site/python3.7/greenlet/greenlet.h +++ /dev/null @@ -1,157 +0,0 @@ -/* vim:set noet ts=8 sw=8 : */ - -/* Greenlet object interface */ - -#ifndef Py_GREENLETOBJECT_H -#define Py_GREENLETOBJECT_H - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#define GREENLET_VERSION "0.4.15" - -#if PY_VERSION_HEX >= 0x030700A3 -# define GREENLET_USE_EXC_INFO -#endif - -typedef struct _greenlet { - PyObject_HEAD - char* stack_start; - char* stack_stop; - char* stack_copy; - intptr_t stack_saved; - struct _greenlet* stack_prev; - struct _greenlet* parent; - PyObject* run_info; - struct _frame* top_frame; - int recursion_depth; - PyObject* weakreflist; -#ifdef GREENLET_USE_EXC_INFO - _PyErr_StackItem* exc_info; - _PyErr_StackItem exc_state; -#else - PyObject* exc_type; - PyObject* exc_value; - PyObject* exc_traceback; -#endif - PyObject* dict; -} PyGreenlet; - -#define PyGreenlet_Check(op) PyObject_TypeCheck(op, &PyGreenlet_Type) -#define PyGreenlet_MAIN(op) (((PyGreenlet*)(op))->stack_stop == (char*) -1) -#define PyGreenlet_STARTED(op) (((PyGreenlet*)(op))->stack_stop != NULL) -#define PyGreenlet_ACTIVE(op) (((PyGreenlet*)(op))->stack_start != NULL) -#define PyGreenlet_GET_PARENT(op) (((PyGreenlet*)(op))->parent) - -#if (PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION >= 7) || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 1) || PY_MAJOR_VERSION > 3 -#define GREENLET_USE_PYCAPSULE -#endif - -/* C API functions */ - -/* Total number of symbols that are exported */ -#define PyGreenlet_API_pointers 8 - -#define 
PyGreenlet_Type_NUM 0 -#define PyExc_GreenletError_NUM 1 -#define PyExc_GreenletExit_NUM 2 - -#define PyGreenlet_New_NUM 3 -#define PyGreenlet_GetCurrent_NUM 4 -#define PyGreenlet_Throw_NUM 5 -#define PyGreenlet_Switch_NUM 6 -#define PyGreenlet_SetParent_NUM 7 - -#ifndef GREENLET_MODULE -/* This section is used by modules that uses the greenlet C API */ -static void **_PyGreenlet_API = NULL; - -#define PyGreenlet_Type (*(PyTypeObject *) _PyGreenlet_API[PyGreenlet_Type_NUM]) - -#define PyExc_GreenletError \ - ((PyObject *) _PyGreenlet_API[PyExc_GreenletError_NUM]) - -#define PyExc_GreenletExit \ - ((PyObject *) _PyGreenlet_API[PyExc_GreenletExit_NUM]) - -/* - * PyGreenlet_New(PyObject *args) - * - * greenlet.greenlet(run, parent=None) - */ -#define PyGreenlet_New \ - (* (PyGreenlet * (*)(PyObject *run, PyGreenlet *parent)) \ - _PyGreenlet_API[PyGreenlet_New_NUM]) - -/* - * PyGreenlet_GetCurrent(void) - * - * greenlet.getcurrent() - */ -#define PyGreenlet_GetCurrent \ - (* (PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM]) - -/* - * PyGreenlet_Throw( - * PyGreenlet *greenlet, - * PyObject *typ, - * PyObject *val, - * PyObject *tb) - * - * g.throw(...) - */ -#define PyGreenlet_Throw \ - (* (PyObject * (*) \ - (PyGreenlet *self, PyObject *typ, PyObject *val, PyObject *tb)) \ - _PyGreenlet_API[PyGreenlet_Throw_NUM]) - -/* - * PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args) - * - * g.switch(*args, **kwargs) - */ -#define PyGreenlet_Switch \ - (* (PyObject * (*)(PyGreenlet *greenlet, PyObject *args, PyObject *kwargs)) \ - _PyGreenlet_API[PyGreenlet_Switch_NUM]) - -/* - * PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent) - * - * g.parent = new_parent - */ -#define PyGreenlet_SetParent \ - (* (int (*)(PyGreenlet *greenlet, PyGreenlet *nparent)) \ - _PyGreenlet_API[PyGreenlet_SetParent_NUM]) - -/* Macro that imports greenlet and initializes C API */ -#ifdef GREENLET_USE_PYCAPSULE -#define PyGreenlet_Import() \ -{ \ - _PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \ -} -#else -#define PyGreenlet_Import() \ -{ \ - PyObject *module = PyImport_ImportModule("greenlet"); \ - if (module != NULL) { \ - PyObject *c_api_object = PyObject_GetAttrString( \ - module, "_C_API"); \ - if (c_api_object != NULL && PyCObject_Check(c_api_object)) { \ - _PyGreenlet_API = \ - (void **) PyCObject_AsVoidPtr(c_api_object); \ - Py_DECREF(c_api_object); \ - } \ - Py_DECREF(module); \ - } \ -} -#endif - -#endif /* GREENLET_MODULE */ - -#ifdef __cplusplus -} -#endif -#endif /* !Py_GREENLETOBJECT_H */ diff --git a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/LICENSE.rst b/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/LICENSE.rst deleted file mode 100644 index 9d227a0..0000000 --- a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/METADATA b/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/METADATA deleted file mode 100644 index 08fcc91..0000000 --- a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/METADATA +++ /dev/null @@ -1,134 +0,0 @@ -Metadata-Version: 2.1 -Name: Flask -Version: 1.1.1 -Summary: A simple framework for building complex web applications. -Home-page: https://palletsprojects.com/p/flask/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: Pallets -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://flask.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/flask -Project-URL: Issue tracker, https://github.com/pallets/flask/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Framework :: Flask -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application -Classifier: Topic :: Software Development :: Libraries :: Application Frameworks -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* -Requires-Dist: Werkzeug (>=0.15) -Requires-Dist: Jinja2 (>=2.10.1) -Requires-Dist: itsdangerous (>=0.24) -Requires-Dist: click (>=5.1) -Provides-Extra: dev -Requires-Dist: pytest ; extra == 'dev' -Requires-Dist: coverage ; extra == 'dev' -Requires-Dist: tox ; extra == 'dev' -Requires-Dist: sphinx ; extra == 'dev' -Requires-Dist: pallets-sphinx-themes ; extra == 'dev' -Requires-Dist: sphinxcontrib-log-cabinet ; extra == 'dev' -Requires-Dist: sphinx-issues ; extra == 'dev' -Provides-Extra: docs -Requires-Dist: sphinx ; extra == 'docs' -Requires-Dist: pallets-sphinx-themes 
; extra == 'docs' -Requires-Dist: sphinxcontrib-log-cabinet ; extra == 'docs' -Requires-Dist: sphinx-issues ; extra == 'docs' -Provides-Extra: dotenv -Requires-Dist: python-dotenv ; extra == 'dotenv' - -Flask -===== - -Flask is a lightweight `WSGI`_ web application framework. It is designed -to make getting started quick and easy, with the ability to scale up to -complex applications. It began as a simple wrapper around `Werkzeug`_ -and `Jinja`_ and has become one of the most popular Python web -application frameworks. - -Flask offers suggestions, but doesn't enforce any dependencies or -project layout. It is up to the developer to choose the tools and -libraries they want to use. There are many extensions provided by the -community that make adding new functionality easy. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - pip install -U Flask - - -A Simple Example ----------------- - -.. code-block:: python - - from flask import Flask - - app = Flask(__name__) - - @app.route("/") - def hello(): - return "Hello, World!" - -.. code-block:: text - - $ env FLASK_APP=hello.py flask run - * Serving Flask app "hello" - * Running on http://127.0.0.1:5000/ (Press CTRL+C to quit) - - -Contributing ------------- - -For guidance on setting up a development environment and how to make a -contribution to Flask, see the `contributing guidelines`_. - -.. _contributing guidelines: https://github.com/pallets/flask/blob/master/CONTRIBUTING.rst - - -Donate ------- - -The Pallets organization develops and supports Flask and the libraries -it uses. In order to grow the community of contributors and users, and -allow the maintainers to devote more time to the projects, `please -donate today`_. - -.. _please donate today: https://psfmember.org/civicrm/contribute/transact?reset=1&id=20 - - -Links ------ - -* Website: https://palletsprojects.com/p/flask/ -* Documentation: https://flask.palletsprojects.com/ -* Releases: https://pypi.org/project/Flask/ -* Code: https://github.com/pallets/flask -* Issue tracker: https://github.com/pallets/flask/issues -* Test status: https://dev.azure.com/pallets/flask/_build -* Official chat: https://discord.gg/t6rrQZH - -.. _WSGI: https://wsgi.readthedocs.io -.. _Werkzeug: https://www.palletsprojects.com/p/werkzeug/ -.. _Jinja: https://www.palletsprojects.com/p/jinja/ -.. 
_pip: https://pip.pypa.io/en/stable/quickstart/ - - diff --git a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/RECORD b/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/RECORD deleted file mode 100644 index bc0c526..0000000 --- a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/RECORD +++ /dev/null @@ -1,48 +0,0 @@ -../../../bin/flask,sha256=s-6d4F3VC8_p4RMKgO4CDN_Wbz-3ik6F2Qv69FO-mCU,232 -Flask-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -Flask-1.1.1.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475 -Flask-1.1.1.dist-info/METADATA,sha256=Ht4R6TpTKOaXOmmQHhEF3A0Obpzde2Ai0kzNdu6-VWQ,4400 -Flask-1.1.1.dist-info/RECORD,, -Flask-1.1.1.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110 -Flask-1.1.1.dist-info/entry_points.txt,sha256=gBLA1aKg0OYR8AhbAfg8lnburHtKcgJLDU52BBctN0k,42 -Flask-1.1.1.dist-info/top_level.txt,sha256=dvi65F6AeGWVU0TBpYiC04yM60-FX1gJFkK31IKQr5c,6 -flask/__init__.py,sha256=qaBW4gy9Xxmdc3ygYO0_H214H1VpF7fq8xRR4XbqRjE,1894 -flask/__main__.py,sha256=fjVtt3QTANXlpJCOv3Ha7d5H-76MwzSIOab7SFD9TEk,254 -flask/__pycache__/__init__.cpython-37.pyc,, -flask/__pycache__/__main__.cpython-37.pyc,, -flask/__pycache__/_compat.cpython-37.pyc,, -flask/__pycache__/app.cpython-37.pyc,, -flask/__pycache__/blueprints.cpython-37.pyc,, -flask/__pycache__/cli.cpython-37.pyc,, -flask/__pycache__/config.cpython-37.pyc,, -flask/__pycache__/ctx.cpython-37.pyc,, -flask/__pycache__/debughelpers.cpython-37.pyc,, -flask/__pycache__/globals.cpython-37.pyc,, -flask/__pycache__/helpers.cpython-37.pyc,, -flask/__pycache__/logging.cpython-37.pyc,, -flask/__pycache__/sessions.cpython-37.pyc,, -flask/__pycache__/signals.cpython-37.pyc,, -flask/__pycache__/templating.cpython-37.pyc,, -flask/__pycache__/testing.cpython-37.pyc,, -flask/__pycache__/views.cpython-37.pyc,, -flask/__pycache__/wrappers.cpython-37.pyc,, -flask/_compat.py,sha256=8KPT54Iig96TuLipdogLRHNYToIcg-xPhnSV5VRERnw,4099 -flask/app.py,sha256=gLZInxueeQ9dkBo1wrntZ-bZqiDT4rYxy_AQ1xraFDc,98066 -flask/blueprints.py,sha256=vkdm8NusGsfZUeIfPdCluj733QFmiQcT4Sk1tuZLUjw,21400 -flask/cli.py,sha256=_WhPG1bggNdrP0QO95Vex6VJpDqTsVK0z54Y5poljKU,30933 -flask/config.py,sha256=3dejvQRYfNHw_V7dCLMxU8UNFpL34xIKemN7gHZIZ8Y,10052 -flask/ctx.py,sha256=cks-omGedkxawHFo6bKIrdOHsJCAgg1i_NWw_htxb5U,16724 -flask/debughelpers.py,sha256=-whvPKuAoU8AZ9c1z_INuOeBgfYDqE1J2xNBsoriugU,6475 -flask/globals.py,sha256=OgcHb6_NCyX6-TldciOdKcyj4PNfyQwClxdMhvov6aA,1637 -flask/helpers.py,sha256=x2Pa85R5dV6uA5f5423JTb6x4u6ZaMGf8sfosUZ76dQ,43004 -flask/json/__init__.py,sha256=6nITbZYiYOPB8Qfi1-dvsblwn01KRz8VOsMBIZyaYek,11988 -flask/json/__pycache__/__init__.cpython-37.pyc,, -flask/json/__pycache__/tag.cpython-37.pyc,, -flask/json/tag.py,sha256=vq9GOllg_0kTWKuVFrwmkeOQzR-jdBD23x-89JyCCQI,8306 -flask/logging.py,sha256=WcY5UkqTysGfmosyygSlXyZYGwOp3y-VsE6ehoJ48dk,3250 -flask/sessions.py,sha256=G0KsEkr_i1LG_wOINwFSOW3ts7Xbv4bNgEZKc7TRloc,14360 -flask/signals.py,sha256=yYLOed2x8WnQ7pirGalQYfpYpCILJ0LJhmNSrnWvjqw,2212 -flask/templating.py,sha256=F8E_IZXn9BGsjMzUJ5N_ACMyZdiFBp_SSEaUunvfZ7g,4939 -flask/testing.py,sha256=b0QaEejx0UcXqfSFP43k5W57bTVeDyrNK3uPD8JUpCk,10146 -flask/views.py,sha256=eeWnadLAj0QdQPLtjKipDetRZyG62CT2y7fNOFDJz0g,5802 -flask/wrappers.py,sha256=kgsvtZuMM6RQaDqhRbc5Pcj9vqTnaERl2pmXcdGL7LU,4736 diff --git a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/WHEEL b/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/WHEEL deleted file mode 100644 index 78e6f69..0000000 --- 
a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.4) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/entry_points.txt b/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/entry_points.txt deleted file mode 100644 index 1eb0252..0000000 --- a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[console_scripts] -flask = flask.cli:main - diff --git a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/top_level.txt deleted file mode 100644 index 7e10602..0000000 --- a/venv/lib/python3.7/site-packages/Flask-1.1.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -flask diff --git a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/LICENSE.rst b/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/LICENSE.rst deleted file mode 100644 index 9d227a0..0000000 --- a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/METADATA b/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/METADATA deleted file mode 100644 index 4686cd3..0000000 --- a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/METADATA +++ /dev/null @@ -1,94 +0,0 @@ -Metadata-Version: 2.1 -Name: Flask-SQLAlchemy -Version: 2.4.1 -Summary: Adds SQLAlchemy support to your Flask application. 
-Home-page: https://github.com/pallets/flask-sqlalchemy -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: Pallets -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://flask-sqlalchemy.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/flask-sqlalchemy -Project-URL: Issue tracker, https://github.com/pallets/flask-sqlalchemy/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Python: >= 2.7, != 3.0.*, != 3.1.*, != 3.2.*, != 3.3.* -Requires-Dist: Flask (>=0.10) -Requires-Dist: SQLAlchemy (>=0.8.0) - -Flask-SQLAlchemy -================ - -Flask-SQLAlchemy is an extension for `Flask`_ that adds support for -`SQLAlchemy`_ to your application. It aims to simplify using SQLAlchemy -with Flask by providing useful defaults and extra helpers that make it -easier to accomplish common tasks. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - $ pip install -U Flask-SQLAlchemy - - -A Simple Example ----------------- - -.. code-block:: python - - from flask import Flask - from flask_sqlalchemy import SQLAlchemy - - app = Flask(__name__) - app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///example.sqlite" - db = SQLAlchemy(app) - - - class User(db.Model): - id = db.Column(db.Integer, primary_key=True) - username = db.Column(db.String, unique=True, nullable=False) - email = db.Column(db.String, unique=True, nullable=False) - - - db.session.add(User(name="Flask", email="example@example.com")) - db.session.commit() - - users = User.query.all() - - -Links ------ - -- Documentation: https://flask-sqlalchemy.palletsprojects.com/ -- Releases: https://pypi.org/project/Flask-SQLAlchemy/ -- Code: https://github.com/pallets/flask-sqlalchemy -- Issue tracker: https://github.com/pallets/flask-sqlalchemy/issues -- Test status: https://travis-ci.org/pallets/flask-sqlalchemy -- Test coverage: https://codecov.io/gh/pallets/flask-sqlalchemy - -.. _Flask: https://palletsprojects.com/p/flask/ -.. _SQLAlchemy: https://www.sqlalchemy.org -.. 
_pip: https://pip.pypa.io/en/stable/quickstart/ - - diff --git a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/RECORD b/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/RECORD deleted file mode 100644 index 9065e34..0000000 --- a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/RECORD +++ /dev/null @@ -1,14 +0,0 @@ -Flask_SQLAlchemy-2.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -Flask_SQLAlchemy-2.4.1.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475 -Flask_SQLAlchemy-2.4.1.dist-info/METADATA,sha256=SO2Yy86hBglL9QIQxNdZqKPPBaS-3LrvuYbMG6wHuKI,3128 -Flask_SQLAlchemy-2.4.1.dist-info/RECORD,, -Flask_SQLAlchemy-2.4.1.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110 -Flask_SQLAlchemy-2.4.1.dist-info/top_level.txt,sha256=w2K4fNNoTh4HItoFfz2FRQShSeLcvHYrzU_sZov21QU,17 -flask_sqlalchemy/__init__.py,sha256=qaMQKMcAVz3et6XhUqOyjzpn8V7NUghH5hHSZvyEJUw,39027 -flask_sqlalchemy/__pycache__/__init__.cpython-37.pyc,, -flask_sqlalchemy/__pycache__/_compat.cpython-37.pyc,, -flask_sqlalchemy/__pycache__/model.cpython-37.pyc,, -flask_sqlalchemy/__pycache__/utils.cpython-37.pyc,, -flask_sqlalchemy/_compat.py,sha256=yua0ZSgVWwi56QpEgwaPInzkNQ9PFb7YQdvEk3dImXo,821 -flask_sqlalchemy/model.py,sha256=9jBoPU1k0c4nqz2-KyYnfoE55n-1G8Zxfo2Z-ZHV0v4,4992 -flask_sqlalchemy/utils.py,sha256=4eHqAbYElnJ3NbSAHhuINckoAHDABoxjleMJD0iKgyg,1390 diff --git a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/WHEEL b/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/WHEEL deleted file mode 100644 index 8b701e9..0000000 --- a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.6) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/top_level.txt deleted file mode 100644 index 8a5538e..0000000 --- a/venv/lib/python3.7/site-packages/Flask_SQLAlchemy-2.4.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -flask_sqlalchemy diff --git a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/LICENSE.rst b/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/LICENSE.rst deleted file mode 100644 index c37cae4..0000000 --- a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2007 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. 
Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/METADATA b/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/METADATA deleted file mode 100644 index 3d5bbb4..0000000 --- a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/METADATA +++ /dev/null @@ -1,106 +0,0 @@ -Metadata-Version: 2.1 -Name: Jinja2 -Version: 2.11.1 -Summary: A very fast and expressive template engine. -Home-page: https://palletsprojects.com/p/jinja/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: Pallets -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://jinja.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/jinja -Project-URL: Issue tracker, https://github.com/pallets/jinja/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* -Description-Content-Type: text/x-rst -Requires-Dist: MarkupSafe (>=0.23) -Provides-Extra: i18n -Requires-Dist: Babel (>=0.8) ; extra == 'i18n' - -Jinja -===== - -Jinja is a fast, expressive, extensible templating engine. Special -placeholders in the template allow writing code similar to Python -syntax. Then the template is passed data to render the final document. - -It includes: - -- Template inheritance and inclusion. -- Define and import macros within templates. -- HTML templates can use autoescaping to prevent XSS from untrusted - user input. -- A sandboxed environment can safely render untrusted templates. -- AsyncIO support for generating templates and calling async - functions. -- I18N support with Babel. 
-- Templates are compiled to optimized Python code just-in-time and - cached, or can be compiled ahead-of-time. -- Exceptions point to the correct line in templates to make debugging - easier. -- Extensible filters, tests, functions, and even syntax. - -Jinja's philosophy is that while application logic belongs in Python if -possible, it shouldn't make the template designer's job difficult by -restricting functionality too much. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - $ pip install -U Jinja2 - -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - -In A Nutshell -------------- - -.. code-block:: jinja - - {% extends "base.html" %} - {% block title %}Members{% endblock %} - {% block content %} - - {% endblock %} - - -Links ------ - -- Website: https://palletsprojects.com/p/jinja/ -- Documentation: https://jinja.palletsprojects.com/ -- Releases: https://pypi.org/project/Jinja2/ -- Code: https://github.com/pallets/jinja -- Issue tracker: https://github.com/pallets/jinja/issues -- Test status: https://dev.azure.com/pallets/jinja/_build -- Official chat: https://discord.gg/t6rrQZH - - diff --git a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/RECORD b/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/RECORD deleted file mode 100644 index 5a1dddb..0000000 --- a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/RECORD +++ /dev/null @@ -1,61 +0,0 @@ -Jinja2-2.11.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -Jinja2-2.11.1.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 -Jinja2-2.11.1.dist-info/METADATA,sha256=7e9_tz7RirTbxIeiHTSq3e5g6ddCjoym3o5vdlRLuxU,3535 -Jinja2-2.11.1.dist-info/RECORD,, -Jinja2-2.11.1.dist-info/WHEEL,sha256=hq9T7ntHzQqUTLUmJ2UVhPL-W4tJi3Yb2Lh5lMfs2mk,110 -Jinja2-2.11.1.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61 -Jinja2-2.11.1.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 -jinja2/__init__.py,sha256=Nq1rzGErXYjIQnqc1pDCJht5LmInBRIZkeL2qkrYEyI,1549 -jinja2/__pycache__/__init__.cpython-37.pyc,, -jinja2/__pycache__/_compat.cpython-37.pyc,, -jinja2/__pycache__/_identifier.cpython-37.pyc,, -jinja2/__pycache__/asyncfilters.cpython-37.pyc,, -jinja2/__pycache__/asyncsupport.cpython-37.pyc,, -jinja2/__pycache__/bccache.cpython-37.pyc,, -jinja2/__pycache__/compiler.cpython-37.pyc,, -jinja2/__pycache__/constants.cpython-37.pyc,, -jinja2/__pycache__/debug.cpython-37.pyc,, -jinja2/__pycache__/defaults.cpython-37.pyc,, -jinja2/__pycache__/environment.cpython-37.pyc,, -jinja2/__pycache__/exceptions.cpython-37.pyc,, -jinja2/__pycache__/ext.cpython-37.pyc,, -jinja2/__pycache__/filters.cpython-37.pyc,, -jinja2/__pycache__/idtracking.cpython-37.pyc,, -jinja2/__pycache__/lexer.cpython-37.pyc,, -jinja2/__pycache__/loaders.cpython-37.pyc,, -jinja2/__pycache__/meta.cpython-37.pyc,, -jinja2/__pycache__/nativetypes.cpython-37.pyc,, -jinja2/__pycache__/nodes.cpython-37.pyc,, -jinja2/__pycache__/optimizer.cpython-37.pyc,, -jinja2/__pycache__/parser.cpython-37.pyc,, -jinja2/__pycache__/runtime.cpython-37.pyc,, -jinja2/__pycache__/sandbox.cpython-37.pyc,, -jinja2/__pycache__/tests.cpython-37.pyc,, -jinja2/__pycache__/utils.cpython-37.pyc,, -jinja2/__pycache__/visitor.cpython-37.pyc,, -jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191 -jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775 
-jinja2/asyncfilters.py,sha256=8uwjG1zgHTv3K4nEvsj4HXWkK4NsOlfx7-CcnCULDWw,4185 -jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209 -jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139 -jinja2/compiler.py,sha256=xCNpF7-xAduODbGKSVEyzU7XZGeLWHZr1cwcZTQob30,66236 -jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458 -jinja2/debug.py,sha256=UmsW6OxNmbIGvIkwytOyM1NsZB6xJvl_nSz3VgNETUk,8597 -jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126 -jinja2/environment.py,sha256=XqCM_GmncAXPm--CxpRPVF6uV_sPKb0Q0jVa7Znry04,50605 -jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425 -jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441 -jinja2/filters.py,sha256=4xEq1qfJ7burpHW5GyL6bkGomp0W47jOXg-HG5aLP-Y,41401 -jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211 -jinja2/lexer.py,sha256=VeGdW_t82Le4H-jLy-hX6UeosLf7ApUq2kuUos8YF4Y,29942 -jinja2/loaders.py,sha256=UUy5ud3lNtGtnn8iorlF9o1FJ6UqZZKMxd0VGnnqMHI,20350 -jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131 -jinja2/nativetypes.py,sha256=Arb2_3IuM386vWZbGPY7DmxryrXg3WzXAEnaHJNdWa0,3576 -jinja2/nodes.py,sha256=YwErhE9plVWeoxTQPtMwl10wovsyBRY4x9eAVgtP6zg,31071 -jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457 -jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660 -jinja2/runtime.py,sha256=94chnK20a1m1t5AaLWeuiTq6L3g3GLs6AxVPfbNXIHE,30582 -jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127 -jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799 -jinja2/utils.py,sha256=26B9HI2lVWaHY8iOnQTJzAcCL4PYOLiA3V79dm3oOSE,22456 -jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240 diff --git a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/WHEEL b/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/WHEEL deleted file mode 100644 index 03bcde7..0000000 --- a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.34.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/entry_points.txt b/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/entry_points.txt deleted file mode 100644 index 3619483..0000000 --- a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[babel.extractors] -jinja2 = jinja2.ext:babel_extract [i18n] - diff --git a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/top_level.txt deleted file mode 100644 index 7f7afbf..0000000 --- a/venv/lib/python3.7/site-packages/Jinja2-2.11.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jinja2 diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/AUTHORS b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/AUTHORS deleted file mode 100644 index 81d16dc..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/AUTHORS +++ /dev/null @@ -1,13 +0,0 @@ -Mako was created by Michael Bayer. - -Major contributing authors include: - -- Michael Bayer -- Geoffrey T. 
Dairiki -- Philip Jenvey -- David Peckam -- Armin Ronacher -- Ben Bangert -- Ben Trofatter - - diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/LICENSE b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/LICENSE deleted file mode 100644 index 1f835e9..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2006-2020 the Mako authors and contributors . - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/METADATA b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/METADATA deleted file mode 100644 index fb64a7a..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/METADATA +++ /dev/null @@ -1,82 +0,0 @@ -Metadata-Version: 2.1 -Name: Mako -Version: 1.1.2 -Summary: A super-fast templating language that borrows the best ideas from the existing templating languages. -Home-page: https://www.makotemplates.org/ -Author: Mike Bayer -Author-email: mike@zzzcomputing.com -License: MIT -Project-URL: Documentation, https://docs.makotemplates.org -Project-URL: Issue Tracker, https://github.com/sqlalchemy/mako -Keywords: templates -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: License :: OSI Approved :: MIT License -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Requires-Dist: MarkupSafe (>=0.9.2) -Provides-Extra: babel -Requires-Dist: Babel ; extra == 'babel' -Provides-Extra: lingua -Requires-Dist: lingua ; extra == 'lingua' - -========================= -Mako Templates for Python -========================= - -Mako is a template library written in Python. It provides a familiar, non-XML -syntax which compiles into Python modules for maximum performance. 
Mako's -syntax and API borrows from the best ideas of many others, including Django -templates, Cheetah, Myghty, and Genshi. Conceptually, Mako is an embedded -Python (i.e. Python Server Page) language, which refines the familiar ideas -of componentized layout and inheritance to produce one of the most -straightforward and flexible models available, while also maintaining close -ties to Python calling and scoping semantics. - -Nutshell -======== - -:: - - <%inherit file="base.html"/> - <% - rows = [[v for v in range(0,10)] for row in range(0,10)] - %> - - % for row in rows: - ${makerow(row)} - % endfor -
- - <%def name="makerow(row)"> - - % for name in row: - ${name}\ - % endfor - - - -Philosophy -=========== - -Python is a great scripting language. Don't reinvent the wheel...your templates can handle it ! - -Documentation -============== - -See documentation for Mako at https://docs.makotemplates.org/en/latest/ - -License -======== - -Mako is licensed under an MIT-style license (see LICENSE). -Other incorporated projects may be licensed under different licenses. -All licenses allow for non-commercial and commercial use. - - diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/RECORD b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/RECORD deleted file mode 100644 index 36cc2c4..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/RECORD +++ /dev/null @@ -1,61 +0,0 @@ -../../../bin/mako-render,sha256=hcT69q_NmFY5G8c2FRh1t7TfhjyYqH2B1-m-aMxY5Y4,237 -Mako-1.1.2.dist-info/AUTHORS,sha256=Io2Vw70mjYS7yFcUuJxhIGiMUQt8FWJuxiiwyUW1WRg,282 -Mako-1.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -Mako-1.1.2.dist-info/LICENSE,sha256=R80NQbEJL5Fhz7Yp7RXlzqGFFEcQ_0YzpCge8Ij_Xec,1097 -Mako-1.1.2.dist-info/METADATA,sha256=fxw2oNdTkNQnafc1Enid-QapQv1OaYnqwtNDJoeihoo,2600 -Mako-1.1.2.dist-info/RECORD,, -Mako-1.1.2.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110 -Mako-1.1.2.dist-info/entry_points.txt,sha256=GSuruj6eMrGwr7dHBGOdDkmgTTUQXr5ZrQjMmkPclKA,603 -Mako-1.1.2.dist-info/top_level.txt,sha256=LItdH8cDPetpUu8rUyBG3DObS6h9Gcpr9j_WLj2S-R0,5 -mako/__init__.py,sha256=kukH4UZzZx0XHsTBrfQMJ2fxcncjreyZv2m6PzvPWAM,242 -mako/__pycache__/__init__.cpython-37.pyc,, -mako/__pycache__/_ast_util.cpython-37.pyc,, -mako/__pycache__/ast.cpython-37.pyc,, -mako/__pycache__/cache.cpython-37.pyc,, -mako/__pycache__/cmd.cpython-37.pyc,, -mako/__pycache__/codegen.cpython-37.pyc,, -mako/__pycache__/compat.cpython-37.pyc,, -mako/__pycache__/exceptions.cpython-37.pyc,, -mako/__pycache__/filters.cpython-37.pyc,, -mako/__pycache__/lexer.cpython-37.pyc,, -mako/__pycache__/lookup.cpython-37.pyc,, -mako/__pycache__/parsetree.cpython-37.pyc,, -mako/__pycache__/pygen.cpython-37.pyc,, -mako/__pycache__/pyparser.cpython-37.pyc,, -mako/__pycache__/runtime.cpython-37.pyc,, -mako/__pycache__/template.cpython-37.pyc,, -mako/__pycache__/util.cpython-37.pyc,, -mako/_ast_util.py,sha256=QKXZC0DbpYefKhTrQZjLgjcNXlTgY38sbB-vmBR2HpU,20414 -mako/ast.py,sha256=T5KnOwZewqAfULULLLWp6joGD-j14SiCtrH1-KGJCpQ,6789 -mako/cache.py,sha256=N1VoKHul8K7RUwsGwoUL-HMtylDvrL6iGWNh7_AI1dc,7736 -mako/cmd.py,sha256=HZxSUsAFVHVrcWvb43Nh_vdbrGeJLFNTR6ejyhdZ0dc,2859 -mako/codegen.py,sha256=DoxSM34-305v0E4Ox7Y31nsVtKAmCEbRVC3BmNFy_54,47892 -mako/compat.py,sha256=08w8lB0Z3QKQi9vd4n4xUtjG_A3wOrk3QdvxkHlribY,3848 -mako/exceptions.py,sha256=ogXjpZO1beh37cWWa0pm4IHVNKsuNIUnqOjWznEKMLQ,13110 -mako/ext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -mako/ext/__pycache__/__init__.cpython-37.pyc,, -mako/ext/__pycache__/autohandler.cpython-37.pyc,, -mako/ext/__pycache__/babelplugin.cpython-37.pyc,, -mako/ext/__pycache__/beaker_cache.cpython-37.pyc,, -mako/ext/__pycache__/extract.cpython-37.pyc,, -mako/ext/__pycache__/linguaplugin.cpython-37.pyc,, -mako/ext/__pycache__/preprocessors.cpython-37.pyc,, -mako/ext/__pycache__/pygmentplugin.cpython-37.pyc,, -mako/ext/__pycache__/turbogears.cpython-37.pyc,, -mako/ext/autohandler.py,sha256=FJs1cY6Vz_NePboCUr-3STZY38btxFRZsPhMNe6NSms,1885 -mako/ext/babelplugin.py,sha256=EquybfGr6ffla72QapzkwTNpEwi_P87f1s9C7xNFuJw,2138 
-mako/ext/beaker_cache.py,sha256=oDN-vSLeKfnAJKlPgrKKuHI-g7zszwd2y1uApBoOkeM,2599 -mako/ext/extract.py,sha256=oBx6lQqLOtDMu8YpBYK_klCZvMuVvbAAA3I-WUyTPXo,4616 -mako/ext/linguaplugin.py,sha256=Z8bV4RHjDJhqMApINSadycM1Xj-B2vB1_i3YN3l2KSc,1954 -mako/ext/preprocessors.py,sha256=TfHmG6EgzYumbCiFU06IHXG_n5y2sA6RFtDBNJ613M8,576 -mako/ext/pygmentplugin.py,sha256=wYJixnCqHJ7zHPT6gB3tGUg-R6yctFNpEhNIKbHHl-E,4951 -mako/ext/turbogears.py,sha256=BcKxkPpkeawkFqj6zS5sUQYt4I6LafRDYMLIDOg0ZPY,2165 -mako/filters.py,sha256=vzpdxOOXWco5_evH_6_9a8b92lHuDC7Sl3XZhFyIVV8,6063 -mako/lexer.py,sha256=pNKb5MVSzOdW0L2S97TYPFBATmHD_mo8Br9-5RSfIUM,16926 -mako/lookup.py,sha256=TQ-wx1DR8rj2HqsNJBsrS4ZqROwAeTRkw-LrTbSQxFc,12718 -mako/parsetree.py,sha256=epGi5wKtZA8LcpzdrEXl_jjPGPvuO-IjuDSAYoLAp4Y,19411 -mako/pygen.py,sha256=dKxVMCSPMaXbMTgQyd5_J7WvdzPpuUprufR4PS3cyqY,10073 -mako/pyparser.py,sha256=eU3-mgdrmj1cL9SgFxh1rvIFcio_6oJxoNJnyMuGiCI,7789 -mako/runtime.py,sha256=2fhZBgmnP3wrWlZAVd6PZCSeuuGVXVA8BmRdXs6VEDo,28040 -mako/template.py,sha256=hKYaXvRzqU7Map8wXaGTGXc8gPl8EDF4WqoNpIF-EqQ,26558 -mako/util.py,sha256=5DoK9dvPpzFK6ZnL3hhzMHQ0meanhXrH8aHoO8fbkCs,11038 diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/WHEEL b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/WHEEL deleted file mode 100644 index 8b701e9..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.6) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/entry_points.txt b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/entry_points.txt deleted file mode 100644 index 8e033c0..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/entry_points.txt +++ /dev/null @@ -1,20 +0,0 @@ - - [python.templating.engines] - mako = mako.ext.turbogears:TGPlugin - - [pygments.lexers] - mako = mako.ext.pygmentplugin:MakoLexer - html+mako = mako.ext.pygmentplugin:MakoHtmlLexer - xml+mako = mako.ext.pygmentplugin:MakoXmlLexer - js+mako = mako.ext.pygmentplugin:MakoJavascriptLexer - css+mako = mako.ext.pygmentplugin:MakoCssLexer - - [babel.extractors] - mako = mako.ext.babelplugin:extract [babel] - - [lingua.extractors] - mako = mako.ext.linguaplugin:LinguaMakoExtractor [lingua] - - [console_scripts] - mako-render = mako.cmd:cmdline - \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/top_level.txt deleted file mode 100644 index 2951cdd..0000000 --- a/venv/lib/python3.7/site-packages/Mako-1.1.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -mako diff --git a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.rst b/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.rst deleted file mode 100644 index 9d227a0..0000000 --- a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. 
Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/METADATA b/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/METADATA deleted file mode 100644 index b208d93..0000000 --- a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/METADATA +++ /dev/null @@ -1,103 +0,0 @@ -Metadata-Version: 2.1 -Name: MarkupSafe -Version: 1.1.1 -Summary: Safely add untrusted strings to HTML/XML markup. -Home-page: https://palletsprojects.com/p/markupsafe/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: The Pallets Team -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://markupsafe.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/markupsafe -Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML -Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* - -MarkupSafe -========== - -MarkupSafe implements a text object that escapes characters so it is -safe to use in HTML and XML. Characters that have special meanings are -replaced so that they display as the actual characters. This mitigates -injection attacks, meaning untrusted user input can safely be displayed -on a page. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - pip install -U MarkupSafe - -.. 
_pip: https://pip.pypa.io/en/stable/quickstart/ - - Examples -------- - - .. code-block:: pycon - - >>> from markupsafe import Markup, escape - >>> # escape replaces special characters and wraps in Markup - >>> escape('<script>alert(document.cookie);</script>') - Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;') - >>> # wrap in Markup to mark text "safe" and prevent escaping - >>> Markup('<strong>Hello</strong>') - Markup('<strong>hello</strong>') - >>> escape(Markup('<strong>Hello</strong>')) - Markup('<strong>hello</strong>') - >>> # Markup is a text subclass (str on Python 3, unicode on Python 2) - >>> # methods and operators escape their arguments - >>> template = Markup("Hello <em>%s</em>") - >>> template % '"World"' - Markup('Hello <em>&#34;World&#34;</em>') - - -Donate ------ - -The Pallets organization develops and supports MarkupSafe and other -libraries that use it. In order to grow the community of contributors -and users, and allow the maintainers to devote more time to the -projects, `please donate today`_. - -.. _please donate today: https://palletsprojects.com/donate - - -Links ----- - -* Website: https://palletsprojects.com/p/markupsafe/ -* Documentation: https://markupsafe.palletsprojects.com/ -* License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_ -* Releases: https://pypi.org/project/MarkupSafe/ -* Code: https://github.com/pallets/markupsafe -* Issue tracker: https://github.com/pallets/markupsafe/issues -* Test status: - - * Linux, Mac: https://travis-ci.org/pallets/markupsafe - * Windows: https://ci.appveyor.com/project/pallets/markupsafe - -* Test coverage: https://codecov.io/gh/pallets/markupsafe - - diff --git a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/RECORD b/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/RECORD deleted file mode 100644 index 0e7516b..0000000 --- a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/RECORD +++ /dev/null @@ -1,16 +0,0 @@ -MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -MarkupSafe-1.1.1.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475 -MarkupSafe-1.1.1.dist-info/METADATA,sha256=nJHwJ4_4ka-V39QH883jPrslj6inNdyyNASBXbYgHXQ,3570 -MarkupSafe-1.1.1.dist-info/RECORD,, -MarkupSafe-1.1.1.dist-info/WHEEL,sha256=GMu0CcHnECe7JSPnzBUPyOsrcZoHb7dOBGXgpe8vHSQ,104 -MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11 -markupsafe/__init__.py,sha256=oTblO5f9KFM-pvnq9bB0HgElnqkJyqHnFN1Nx2NIvnY,10126 -markupsafe/__pycache__/__init__.cpython-37.pyc,, -markupsafe/__pycache__/_compat.cpython-37.pyc,, -markupsafe/__pycache__/_constants.cpython-37.pyc,, -markupsafe/__pycache__/_native.cpython-37.pyc,, -markupsafe/_compat.py,sha256=uEW1ybxEjfxIiuTbRRaJpHsPFf4yQUMMKaPgYEC5XbU,558 -markupsafe/_constants.py,sha256=zo2ajfScG-l1Sb_52EP3MlDCqO7Y1BVHUXXKRsVDRNk,4690 -markupsafe/_native.py,sha256=d-8S_zzYt2y512xYcuSxq0NeG2DUUvG80wVdTn-4KI8,1873 -markupsafe/_speedups.c,sha256=k0fzEIK3CP6MmMqeY0ob43TP90mVN0DTyn7BAl3RqSg,9884 -markupsafe/_speedups.cpython-37m-arm-linux-gnueabihf.so,sha256=UdHbkGis1TxK619S8APiKVHwcmcMKsR0rEA9WFeXTUs,46612 diff --git a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL b/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL deleted file mode 100644 index a194ea7..0000000 --- a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.32.3) -Root-Is-Purelib: false -Tag: cp37-cp37m-linux_armv7l - diff --git a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt
b/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt deleted file mode 100644 index 75bf729..0000000 --- a/venv/lib/python3.7/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -markupsafe diff --git a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/LICENSE b/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/LICENSE deleted file mode 100644 index d3be6f0..0000000 --- a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2005-2020 SQLAlchemy authors and contributors <see AUTHORS file>. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE.
diff --git a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/METADATA b/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/METADATA deleted file mode 100644 index cef62de..0000000 --- a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/METADATA +++ /dev/null @@ -1,195 +0,0 @@ -Metadata-Version: 2.1 -Name: SQLAlchemy -Version: 1.3.17 -Summary: Database Abstraction Library -Home-page: http://www.sqlalchemy.org -Author: Mike Bayer -Author-email: mike_mp@zzzcomputing.com -License: MIT -Project-URL: Documentation, https://docs.sqlalchemy.org -Project-URL: Issue Tracker, https://github.com/sqlalchemy/sqlalchemy/ -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3.9 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Database :: Front-Ends -Classifier: Operating System :: OS Independent -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Provides-Extra: mssql -Requires-Dist: pyodbc ; extra == 'mssql' -Provides-Extra: mssql_pymssql -Requires-Dist: pymssql ; extra == 'mssql_pymssql' -Provides-Extra: mssql_pyodbc -Requires-Dist: pyodbc ; extra == 'mssql_pyodbc' -Provides-Extra: mysql -Requires-Dist: mysqlclient ; extra == 'mysql' -Provides-Extra: oracle -Requires-Dist: cx-oracle ; extra == 'oracle' -Provides-Extra: postgresql -Requires-Dist: psycopg2 ; extra == 'postgresql' -Provides-Extra: postgresql_pg8000 -Requires-Dist: pg8000 ; extra == 'postgresql_pg8000' -Provides-Extra: postgresql_psycopg2binary -Requires-Dist: psycopg2-binary ; extra == 'postgresql_psycopg2binary' -Provides-Extra: postgresql_psycopg2cffi -Requires-Dist: psycopg2cffi ; extra == 'postgresql_psycopg2cffi' -Provides-Extra: pymysql -Requires-Dist: pymysql ; extra == 'pymysql' - -SQLAlchemy -========== - -The Python SQL Toolkit and Object Relational Mapper - -Introduction -------------- - -SQLAlchemy is the Python SQL toolkit and Object Relational Mapper -that gives application developers the full power and -flexibility of SQL. SQLAlchemy provides a full suite -of well known enterprise-level persistence patterns, -designed for efficient and high-performing database -access, adapted into a simple and Pythonic domain -language. - -Major SQLAlchemy features include: - -* An industrial strength ORM, built - from the core on the identity map, unit of work, - and data mapper patterns. These patterns - allow transparent persistence of objects - using a declarative configuration system. - Domain models - can be constructed and manipulated naturally, - and changes are synchronized with the - current transaction automatically. -* A relationally-oriented query system, exposing - the full range of SQL's capabilities - explicitly, including joins, subqueries, - correlation, and most everything else, - in terms of the object model. 
- Writing queries with the ORM uses the same - techniques of relational composition you use - when writing SQL. While you can drop into - literal SQL at any time, it's virtually never - needed. -* A comprehensive and flexible system - of eager loading for related collections and objects. - Collections are cached within a session, - and can be loaded on individual access, all - at once using joins, or by query per collection - across the full result set. -* A Core SQL construction system and DBAPI - interaction layer. The SQLAlchemy Core is - separate from the ORM and is a full database - abstraction layer in its own right, and includes - an extensible Python-based SQL expression - language, schema metadata, connection pooling, - type coercion, and custom types. -* All primary and foreign key constraints are - assumed to be composite and natural. Surrogate - integer primary keys are of course still the - norm, but SQLAlchemy never assumes or hardcodes - to this model. -* Database introspection and generation. Database - schemas can be "reflected" in one step into - Python structures representing database metadata; - those same structures can then generate - CREATE statements right back out - all within - the Core, independent of the ORM. - -SQLAlchemy's philosophy: - -* SQL databases behave less and less like object - collections the more size and performance start to - matter; object collections behave less and less like - tables and rows the more abstraction starts to matter. - SQLAlchemy aims to accommodate both of these - principles. -* An ORM doesn't need to hide the "R". A relational - database provides rich, set-based functionality - that should be fully exposed. SQLAlchemy's - ORM provides an open-ended set of patterns - that allow a developer to construct a custom - mediation layer between a domain model and - a relational schema, turning the so-called - "object relational impedance" issue into - a distant memory. -* The developer, in all cases, makes all decisions - regarding the design, structure, and naming conventions - of both the object model as well as the relational - schema. SQLAlchemy only provides the means - to automate the execution of these decisions. -* With SQLAlchemy, there's no such thing as - "the ORM generated a bad query" - you - retain full control over the structure of - queries, including how joins are organized, - how subqueries and correlation is used, what - columns are requested. Everything SQLAlchemy - does is ultimately the result of a developer- - initiated decision. -* Don't use an ORM if the problem doesn't need one. - SQLAlchemy consists of a Core and separate ORM - component. The Core offers a full SQL expression - language that allows Pythonic construction - of SQL constructs that render directly to SQL - strings for a target database, returning - result sets that are essentially enhanced DBAPI - cursors. -* Transactions should be the norm. With SQLAlchemy's - ORM, nothing goes to permanent storage until - commit() is called. SQLAlchemy encourages applications - to create a consistent means of delineating - the start and end of a series of operations. -* Never render a literal value in a SQL statement. - Bound parameters are used to the greatest degree - possible, allowing query optimizers to cache - query plans effectively and making SQL injection - attacks a non-issue. 
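The Core behavior described in the bullets above is easy to see in miniature. The sketch below is an editor's illustration against the SQLAlchemy 1.3 API being removed in this diff, not part of the original METADATA file; the ``users`` table and the in-memory SQLite database are invented for the example. It builds a table from schema metadata, then issues a Core ``select()`` whose comparison value travels as a bound parameter rather than being rendered into the SQL string:

.. code-block:: python

    from sqlalchemy import (
        Column, Integer, MetaData, String, Table, create_engine, select,
    )

    engine = create_engine("sqlite://")  # throwaway in-memory database
    metadata = MetaData()
    users = Table(  # hypothetical table, for illustration only
        "users", metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )
    metadata.create_all(engine)  # emits CREATE TABLE from the metadata

    with engine.connect() as conn:
        conn.execute(users.insert(), [{"name": "alice"}, {"name": "bob"}])
        # The 1 below is sent as a bound parameter ("WHERE users.id = ?");
        # it is never interpolated into the statement text.
        stmt = select([users.c.name]).where(users.c.id == 1)
        print(conn.execute(stmt).fetchall())  # [('alice',)]

The rendered statement is ``SELECT users.name FROM users WHERE users.id = ?`` with the value supplied separately, which is the bound-parameter point made in the last bullet.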
- -Documentation ------------- - -Latest documentation is at: - -http://www.sqlalchemy.org/docs/ - -Installation / Requirements ---------------------------- - -Full documentation for installation is at -`Installation <http://www.sqlalchemy.org/docs/intro.html#installation>`_. - -Getting Help / Development / Bug reporting ------------------------------------------- - -Please refer to the `SQLAlchemy Community Guide <http://www.sqlalchemy.org/support.html>`_. - -Code of Conduct --------------- - -Above all, SQLAlchemy places great emphasis on polite, thoughtful, and -constructive communication between users and developers. -Please see our current Code of Conduct at -`Code of Conduct <http://www.sqlalchemy.org/codeofconduct.html>`_. - -License ------- - -SQLAlchemy is distributed under the `MIT license -<http://www.opensource.org/licenses/mit-license.php>`_. - - - diff --git a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/RECORD b/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/RECORD deleted file mode 100644 index 18869f4..0000000 --- a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/RECORD +++ /dev/null @@ -1,405 +0,0 @@ -SQLAlchemy-1.3.17.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -SQLAlchemy-1.3.17.dist-info/LICENSE,sha256=WG4lA_RQxK6BaWDKMtQTuCDHQWJpdWvZThIkXGD4WXI,1100 -SQLAlchemy-1.3.17.dist-info/METADATA,sha256=xcBflt2jZsh5GZQV77WLVJDaHeBAF3PCbg0kvAGFxKI,7341 -SQLAlchemy-1.3.17.dist-info/RECORD,, -SQLAlchemy-1.3.17.dist-info/WHEEL,sha256=GMu0CcHnECe7JSPnzBUPyOsrcZoHb7dOBGXgpe8vHSQ,104 -SQLAlchemy-1.3.17.dist-info/top_level.txt,sha256=rp-ZgB7D8G11ivXON5VGPjupT1voYmWqkciDt5Uaw_Q,11 -sqlalchemy/__init__.py,sha256=QoFu5r-vTXw_u7p7onkV160CcUz2_3tmxTeuzYvsJ2k,4659 -sqlalchemy/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/__pycache__/events.cpython-37.pyc,, -sqlalchemy/__pycache__/exc.cpython-37.pyc,, -sqlalchemy/__pycache__/inspection.cpython-37.pyc,, -sqlalchemy/__pycache__/interfaces.cpython-37.pyc,, -sqlalchemy/__pycache__/log.cpython-37.pyc,, -sqlalchemy/__pycache__/processors.cpython-37.pyc,, -sqlalchemy/__pycache__/schema.cpython-37.pyc,, -sqlalchemy/__pycache__/types.cpython-37.pyc,, -sqlalchemy/connectors/__init__.py,sha256=78tsbgw8Kh1tPCC2te-KV7DqeE7gRzUE2Qq64XIoUkU,278 -sqlalchemy/connectors/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/connectors/__pycache__/mxodbc.cpython-37.pyc,, -sqlalchemy/connectors/__pycache__/pyodbc.cpython-37.pyc,, -sqlalchemy/connectors/__pycache__/zxJDBC.cpython-37.pyc,, -sqlalchemy/connectors/mxodbc.py,sha256=h_fyp3zVvJ22bBhHl0296MtieO977XHVhbugT9sYrFo,5352 -sqlalchemy/connectors/pyodbc.py,sha256=xR_EfAvOqA_qXLgTlWQ7m8dUtHKErZY3ZI0N5t7wj2g,5586 -sqlalchemy/connectors/zxJDBC.py,sha256=wJlwdsmRov3naSY71JvifytEC4lPqO5_eiF_K9UDb3E,1878 -sqlalchemy/cprocessors.cpython-37m-arm-linux-gnueabihf.so,sha256=b3bL8vUTD9dD_0J-eDS8GK1ZuLN6QvoCPvg_oto_C78,49424 -sqlalchemy/cresultproxy.cpython-37m-arm-linux-gnueabihf.so,sha256=KErxAJkXTEXyiuWtD8qpfUgyHyW9nZ3cKULg373VZcg,57836 -sqlalchemy/cutils.cpython-37m-arm-linux-gnueabihf.so,sha256=eyykgO7az8k5BbMu0HNSAzEQfaKxRcUNeIhDDuCFsYI,31952 -sqlalchemy/databases/__init__.py,sha256=IywgDc8lo4rrxtHR7u1S1ttUMTKrKpYS3wwnd8bISQA,819 -sqlalchemy/databases/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/__init__.py,sha256=2Cra66jK1L9BzcAJUhVaJXR0vPG06L3MI6GEh-b69KI,1909 -sqlalchemy/dialects/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/firebird/__init__.py,sha256=xiywqBgVlgc1yaV-IlwJdgLXhAb9ise-B-vfcoFzQLw,1152 -sqlalchemy/dialects/firebird/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/firebird/__pycache__/base.cpython-37.pyc,, -sqlalchemy/dialects/firebird/__pycache__/fdb.cpython-37.pyc,,
-sqlalchemy/dialects/firebird/__pycache__/kinterbasdb.cpython-37.pyc,, -sqlalchemy/dialects/firebird/base.py,sha256=wml5psIvyvrLsBGFne--M2sHjEYbPRamcr5L3QuS-0w,30221 -sqlalchemy/dialects/firebird/fdb.py,sha256=kQQ3atPmEdsgxqtYBRZwg0HGitQiS5-VtL2f13WDWpI,4079 -sqlalchemy/dialects/firebird/kinterbasdb.py,sha256=X7UfqENJaVFJaQj_7dK7NyxmGU8bvAfKkv5m2lQl4rs,6437 -sqlalchemy/dialects/mssql/__init__.py,sha256=OMkEnG7PSHt4ee_P0DKfhXG1jLh5nzRuvxqKLfYLPO8,1812 -sqlalchemy/dialects/mssql/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/adodbapi.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/base.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/information_schema.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/mxodbc.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/provision.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/pymssql.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/pyodbc.cpython-37.pyc,, -sqlalchemy/dialects/mssql/__pycache__/zxjdbc.cpython-37.pyc,, -sqlalchemy/dialects/mssql/adodbapi.py,sha256=u8QnHUI6C5nImuzyuk25D6oiBAQRjspM5SGjLySse1s,2719 -sqlalchemy/dialects/mssql/base.py,sha256=L7oD-TAPER0rKsdWJUr4fCbHzQ1IsX9JRJYtDlyQCpw,89821 -sqlalchemy/dialects/mssql/information_schema.py,sha256=plSgus0f7PsR28oTB8y4-UkXU43rTxh0ltaFRvua9-A,5639 -sqlalchemy/dialects/mssql/mxodbc.py,sha256=geB0M9CAYo0AbPgiGLhWEGmx76B5SNGcQJ_PDl5yJMw,4616 -sqlalchemy/dialects/mssql/provision.py,sha256=0WXdFXwePVPYVrV3mmbbiAkuj-Tv5Rr-i3aizbD6Z7w,2786 -sqlalchemy/dialects/mssql/pymssql.py,sha256=RmV43rpu7EqMcNCsA31L9uRT272e9p0juEO6crBoaS0,4677 -sqlalchemy/dialects/mssql/pyodbc.py,sha256=xNeG4Cf489s1NPApg53ckpOSI1d3Plg6Ql0lZv4sF2A,14823 -sqlalchemy/dialects/mssql/zxjdbc.py,sha256=zxUozig0SWOlC6JuqzuyG6ojI55oI44P1RkaToWYul4,2311 -sqlalchemy/dialects/mysql/__init__.py,sha256=9mlm4mqt_CxcbFh9KC38Srw0_OnvrSluBmOcaWoOlV8,2056 -sqlalchemy/dialects/mysql/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/base.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/cymysql.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/dml.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/enumerated.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/gaerdbms.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/json.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/mysqlconnector.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/mysqldb.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/oursql.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/provision.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/pymysql.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/pyodbc.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/reflection.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/types.cpython-37.pyc,, -sqlalchemy/dialects/mysql/__pycache__/zxjdbc.cpython-37.pyc,, -sqlalchemy/dialects/mysql/base.py,sha256=hMSuQKFrZZnpOQ1eFLOHCmB5Nl3OrhllNE13k3kxDls,101213 -sqlalchemy/dialects/mysql/cymysql.py,sha256=iZIQWKba3Ap6N6Af5MEtCgHN4SoHSUKFJwD_3gktsME,2245 -sqlalchemy/dialects/mysql/dml.py,sha256=yq9Kgm2IvLkt0_Uw1RGsuyjNQVgpWIWMFd2Ntm_R65Y,4764 -sqlalchemy/dialects/mysql/enumerated.py,sha256=9hhAfnVzvFjXKEShwqp52XT6Kaq8aw4uD7CD6doPd6k,11307 -sqlalchemy/dialects/mysql/gaerdbms.py,sha256=khU9l7SolzCyhF7d7q1iBn_OhPj3gw3IqGGtzomsugo,3375 -sqlalchemy/dialects/mysql/json.py,sha256=AJ_-DkzgByPy-3XQhXms2JiTM7MxGr2Le6E4k2-eT50,2050 
-sqlalchemy/dialects/mysql/mysqlconnector.py,sha256=2piMPvg48daNXr2XzpLwmMm1VYVs9mw0AJ43OLXQgvI,7889 -sqlalchemy/dialects/mysql/mysqldb.py,sha256=aSkj-xdGYIhPlH2sOvGOo0qrbX3lkR1r8RX-84Rxe4Q,8383 -sqlalchemy/dialects/mysql/oursql.py,sha256=GzFBpEZGwWnLPVmC4IlU5_Y9gZvc-8-44-s6Xo2BXpg,8086 -sqlalchemy/dialects/mysql/provision.py,sha256=sJ7IpiIB1Eb9AdRK3loRlsCHKzzsFGJzMgICyRrkFpY,1269 -sqlalchemy/dialects/mysql/pymysql.py,sha256=8-V0VEWKOv1ooPQgRnm2Piqkk1lAqVDq16wZQB-xq58,2440 -sqlalchemy/dialects/mysql/pyodbc.py,sha256=0UdZSRnCF-0EWsX36Azrvc15xE87YEzyJSSGQJqVx0U,3470 -sqlalchemy/dialects/mysql/reflection.py,sha256=FgrerZD3x-KkXh5XbfYrPfNf_zNKFUVSRqW9Lv2iKjI,18267 -sqlalchemy/dialects/mysql/types.py,sha256=sOIRc3hnpuYVOQZyDadJjigJvA3rjaz66yD4_dAibS4,24601 -sqlalchemy/dialects/mysql/zxjdbc.py,sha256=u-jcqk3ZnpuPv89Hv3iEiCEhN0Ck4J48OLSYChZ3H-s,3970 -sqlalchemy/dialects/oracle/__init__.py,sha256=0800ZWZlvJfZ8qd_K4OfDEUq0TnAL7WkvaJPkMLiRDU,1257 -sqlalchemy/dialects/oracle/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/oracle/__pycache__/base.cpython-37.pyc,, -sqlalchemy/dialects/oracle/__pycache__/cx_oracle.cpython-37.pyc,, -sqlalchemy/dialects/oracle/__pycache__/provision.cpython-37.pyc,, -sqlalchemy/dialects/oracle/__pycache__/zxjdbc.cpython-37.pyc,, -sqlalchemy/dialects/oracle/base.py,sha256=x6GAt5W6R6MXOx5JMn5mALccY387sBMAgzPY_ev-kB8,76224 -sqlalchemy/dialects/oracle/cx_oracle.py,sha256=dZT9x1zkopNAk0SYbn8X06mDXZo2Z34qXmR2fwOYVFU,44915 -sqlalchemy/dialects/oracle/provision.py,sha256=zpAooUqZY741vrjplNkxV2v3uf4kbZb3r_LfH9dKkJw,3862 -sqlalchemy/dialects/oracle/zxjdbc.py,sha256=eBqslfHL08YaEUzZ32-faQo9nxLCR3s8IadVAI200FY,8207 -sqlalchemy/dialects/postgresql/__init__.py,sha256=G0qS6NaThvmq48RH378d90vBbfglrgKt_LxuGyq8UAM,2461 -sqlalchemy/dialects/postgresql/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/array.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/base.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/dml.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/ext.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/hstore.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/json.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/pg8000.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/provision.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/psycopg2.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/psycopg2cffi.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/pygresql.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/pypostgresql.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/ranges.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/__pycache__/zxjdbc.cpython-37.pyc,, -sqlalchemy/dialects/postgresql/array.py,sha256=XrpqVaX2xvF8poI5CyD6gIq-3XgoSqIEeBwU_BSA41I,12043 -sqlalchemy/dialects/postgresql/base.py,sha256=gTzhOkwkXn7pmSWfvxqDorNq1A4r_h994eNdyBYTIs0,124341 -sqlalchemy/dialects/postgresql/dml.py,sha256=JWLLh1Z9Kuu6TgReIFMKqOHoQBwc0xDqzIwRLik7Yoc,7790 -sqlalchemy/dialects/postgresql/ext.py,sha256=-lBaLXWbzapvozj1I8yP47GGtTmjXYPjnInJOTuHHTU,6905 -sqlalchemy/dialects/postgresql/hstore.py,sha256=p62Vdg4REY-p4B3Y6dsIekj85vqMqEVj2DWUn8Z6Iig,12443 -sqlalchemy/dialects/postgresql/json.py,sha256=MihPxRbtlVN54ya9EaDGHiB3IS2icpXmQVb7Vwol-e0,10132 -sqlalchemy/dialects/postgresql/pg8000.py,sha256=bQI9hm4lr4JshUlFWSDpAYBWXsMCsgWyETkSZPDexJg,9722 
-sqlalchemy/dialects/postgresql/provision.py,sha256=E8LOoSNSWUlx77ihNLQfW9csAxsmL-qleiLPbVlSlVw,2008 -sqlalchemy/dialects/postgresql/psycopg2.py,sha256=2H7YaVaNkitKwYlHztS5iAX5kjAvu2tHHevpbfAUgJ8,35979 -sqlalchemy/dialects/postgresql/psycopg2cffi.py,sha256=8X5uLoW2am61rqwYs0agfngZ8awYia7AAnbgWwDF27w,1657 -sqlalchemy/dialects/postgresql/pygresql.py,sha256=eyRnGRlqeEbiwYbhcazQUkyHnB4yCeh9c7nhnQyPJ8E,8129 -sqlalchemy/dialects/postgresql/pypostgresql.py,sha256=9giPOxOzVlJHc7r4g-YsVsFWONZQQjIi_mVWzKopI2c,2915 -sqlalchemy/dialects/postgresql/ranges.py,sha256=vvc2NmQsprM81VZeO1zukk4D_T3GTWJ_MoFa7E2chHE,4478 -sqlalchemy/dialects/postgresql/zxjdbc.py,sha256=YMtRIy1IbMVMQKMxa_gORzXHbkEVxiJVBpeg4IFuYvM,1415 -sqlalchemy/dialects/sqlite/__init__.py,sha256=EvPKdQyHTkXlziZo6wYrJ2D1V8Pa52zK5kb2I59uX4s,1042 -sqlalchemy/dialects/sqlite/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/base.cpython-37.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/json.cpython-37.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/provision.cpython-37.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/pysqlcipher.cpython-37.pyc,, -sqlalchemy/dialects/sqlite/__pycache__/pysqlite.cpython-37.pyc,, -sqlalchemy/dialects/sqlite/base.py,sha256=_cBzmfDarBg5tO13VoH0dZgSrN58Rn4UfrM1oH4fuXs,72609 -sqlalchemy/dialects/sqlite/json.py,sha256=IwhCnGL_BKtASpm0-OzeNByp3qtkcLZgAeBGY_EPUvM,2292 -sqlalchemy/dialects/sqlite/provision.py,sha256=PEmgxbdz67KsONOEhFVQFLX3s4BXWKLb2gvNgj0gn9M,2591 -sqlalchemy/dialects/sqlite/pysqlcipher.py,sha256=RwePrdk3xrEg2c0vpFQSos6823EKEHJKrF-jgo7mYrE,4692 -sqlalchemy/dialects/sqlite/pysqlite.py,sha256=wDRcXcw8JFnGjbeu4qAeEp1vK8M_z86vY8Ls4AOSzEI,20983 -sqlalchemy/dialects/sybase/__init__.py,sha256=W9wFI2eRTBJVLaoplH5esmiITkz9LiNa-nDhiG8DwpM,1363 -sqlalchemy/dialects/sybase/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/dialects/sybase/__pycache__/base.cpython-37.pyc,, -sqlalchemy/dialects/sybase/__pycache__/mxodbc.cpython-37.pyc,, -sqlalchemy/dialects/sybase/__pycache__/pyodbc.cpython-37.pyc,, -sqlalchemy/dialects/sybase/__pycache__/pysybase.cpython-37.pyc,, -sqlalchemy/dialects/sybase/base.py,sha256=N1G5tUKOtBjeOXeRciZP6iui1M8CYWFW9KchA-qDi_s,31953 -sqlalchemy/dialects/sybase/mxodbc.py,sha256=LqEEcLRrXuLICWLpg2ePgczZhuGzU20JsO2j_W3M2Ho,902 -sqlalchemy/dialects/sybase/pyodbc.py,sha256=013aGwoG1C19S_v4lPSZj4ComdPrdlwSMH8F7pjyMS8,2120 -sqlalchemy/dialects/sybase/pysybase.py,sha256=7iF-t7qfR5VGNeYzHkDoPNthUAJGSAIjeLwXrC5BJQE,3313 -sqlalchemy/engine/__init__.py,sha256=YTdumyq5iYjIsQ1P1FDgTDnEPqZgGasCao6k15prmUw,24256 -sqlalchemy/engine/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/base.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/default.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/interfaces.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/reflection.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/result.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/strategies.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/threadlocal.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/url.cpython-37.pyc,, -sqlalchemy/engine/__pycache__/util.cpython-37.pyc,, -sqlalchemy/engine/base.py,sha256=k5WVUNgasJE56I3nMvW8sAfap3UTuGxzXz9L4lwrSXM,87719 -sqlalchemy/engine/default.py,sha256=r4NyJf2d6qVcBgQrh1c5PWxdcg1FI0KhCms_1nQXcw0,54994 -sqlalchemy/engine/interfaces.py,sha256=AzkS3nGoNk3VI85GPiDFb7ugSmPdWTYpMHI9MmC0AdQ,46993 -sqlalchemy/engine/reflection.py,sha256=Ccbz18UE3XJrGShy5ZSa6S-PZTkIfn-WTVsOsuwTktg,34561 -sqlalchemy/engine/result.py,sha256=1-nFJlpdlJFODmmGaQrRF1R3UDS3X8EUEhGHtPHLs4A,54716 
-sqlalchemy/engine/strategies.py,sha256=BuDOCGp6TMAej65TOresW9VHW2VfS60hx-rH4WfDPis,9847 -sqlalchemy/engine/threadlocal.py,sha256=EsPbaSO4S8WU4beLAyZ22iHV5YfxXt4ZejxuSMTu6pI,4764 -sqlalchemy/engine/url.py,sha256=PjgwmYtCr7CHP2TCiZBoOgctTjS7zK7GMMShPoQLBmU,9411 -sqlalchemy/engine/util.py,sha256=wDnfhJyQyxToapCC1jw6FRyW006dnChVIXYBIavzGF4,2421 -sqlalchemy/event/__init__.py,sha256=BuOhHzdZQnfPwXK3cvVUbsGJ7vhwwRzN673a8ImEPlA,596 -sqlalchemy/event/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/event/__pycache__/api.cpython-37.pyc,, -sqlalchemy/event/__pycache__/attr.cpython-37.pyc,, -sqlalchemy/event/__pycache__/base.cpython-37.pyc,, -sqlalchemy/event/__pycache__/legacy.cpython-37.pyc,, -sqlalchemy/event/__pycache__/registry.cpython-37.pyc,, -sqlalchemy/event/api.py,sha256=FeyGUN3Gtwh3g7JQTlDnn1zuILSWd-lEwb4Rvk7Eez0,7085 -sqlalchemy/event/attr.py,sha256=k4tXlwyPpXaAFyHp11nk7I9ZV4yEFgNpESGMt9Er1BQ,13853 -sqlalchemy/event/base.py,sha256=stD9Z3dWlFTDSFqcViREtgf4c8omBs2rvyeFEOXwi1g,9753 -sqlalchemy/event/legacy.py,sha256=T9ZOjmibgXCOqW2XADc1nsXxbS159ZYl2ueRUEfsZrU,5904 -sqlalchemy/event/registry.py,sha256=xA0dKdRoyE7mz3m4uhQDZ7nxC0LVWfPDOxjzxpPX0m8,8243 -sqlalchemy/events.py,sha256=7pAnJ3mPX7O-C7yPU7gQmBW5Zv-S8dcvY01NrUK3AHU,53052 -sqlalchemy/exc.py,sha256=lfTTGCfcvq8ibrgA7c0rbwQQTgO1gzm8EhMB_eY8fW4,17361 -sqlalchemy/ext/__init__.py,sha256=A8EjxtBgEW-qCJX0qqqo3OUJQe9sjjBYPYw5dN866bE,322 -sqlalchemy/ext/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/associationproxy.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/automap.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/baked.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/compiler.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/horizontal_shard.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/hybrid.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/indexable.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/instrumentation.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/mutable.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/orderinglist.cpython-37.pyc,, -sqlalchemy/ext/__pycache__/serializer.cpython-37.pyc,, -sqlalchemy/ext/associationproxy.py,sha256=8TpfZ8gA3X0lI7eDqVex7wySKxjcqcaIJJlM0qkRpTU,49768 -sqlalchemy/ext/automap.py,sha256=4g3HrD-jyJ1LctT3O4iTNLAYVPrP4OwdMCruQrXbLiI,42157 -sqlalchemy/ext/baked.py,sha256=7fjqFZhTGXe5C1cDw4Y-ldO8hWLzCxYacUDsx5TzXSo,21989 -sqlalchemy/ext/compiler.py,sha256=q5kP9F7PaReG7HEx1H5P0EuwN0mJ25Uk-KutMfuj-JY,17147 -sqlalchemy/ext/declarative/__init__.py,sha256=7c0OfggXwiLSVvehkwJJDDunv3AuY6Kq-4eMuNT-VNA,902 -sqlalchemy/ext/declarative/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/ext/declarative/__pycache__/api.cpython-37.pyc,, -sqlalchemy/ext/declarative/__pycache__/base.cpython-37.pyc,, -sqlalchemy/ext/declarative/__pycache__/clsregistry.cpython-37.pyc,, -sqlalchemy/ext/declarative/api.py,sha256=9clomjjILIdqQGE8x2kgZdRxjfRfM27WbdjMoSs7mNE,27636 -sqlalchemy/ext/declarative/base.py,sha256=vAVhpcwXCXFhQ_Yl-yL6D1ypnmogue4AW4CHgCXgQQI,32093 -sqlalchemy/ext/declarative/clsregistry.py,sha256=Nvxdt0KmYDIscE7V9tUEpDauf4EEc6dFZLiS2Cx0Cdo,12049 -sqlalchemy/ext/horizontal_shard.py,sha256=fBwLdomhyoa5iBVRaealC5TUzcm64Qx8QXTVGIf3SaA,9138 -sqlalchemy/ext/hybrid.py,sha256=JCyVxtk3ZSTGsIXFCjiNY4OxXGA_K4r9FRBhTGF6FJE,40325 -sqlalchemy/ext/indexable.py,sha256=pj8WNfIe_YsOIqFsel_93sMzTNsA9Rg03mGs11UNA3E,11254 -sqlalchemy/ext/instrumentation.py,sha256=7pxb7SDAnFvtHUxXnx8rqg4XKCadnWDGOBzFQHqeBS8,14351 -sqlalchemy/ext/mutable.py,sha256=qPhBEnRCj3GuRUF1a44J9jEdcvfsDCcgeGzZN_eSOHM,31820 
-sqlalchemy/ext/orderinglist.py,sha256=sOdhObx8L8E7-LV2nSwRFOYFJK5hcz-g7K8gNKKewms,13900 -sqlalchemy/ext/serializer.py,sha256=ece4dfFCNbKgg-PbzDG_vZXM3C04w-C1REpF9h6INgQ,5784 -sqlalchemy/inspection.py,sha256=d4_LstciZQ766aPV5E8DlVI922f7eZ5L4rSMOCECyB4,3031 -sqlalchemy/interfaces.py,sha256=F8zw_dufYkf7zfc2bxb00AZUmOuSr5zoiWqx3GCNWNk,12740 -sqlalchemy/log.py,sha256=9K0QmnWqg4YTKdHkcJpudvdNsY2m9LmH5MKxj9INqlU,6705 -sqlalchemy/orm/__init__.py,sha256=PxbRLhF3nBDgTh198el-akJ29gsu0X6ooF1pZKk11zs,9558 -sqlalchemy/orm/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/attributes.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/base.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/collections.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/dependency.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/deprecated_interfaces.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/descriptor_props.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/dynamic.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/evaluator.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/events.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/exc.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/identity.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/instrumentation.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/interfaces.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/loading.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/mapper.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/path_registry.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/persistence.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/properties.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/query.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/relationships.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/scoping.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/session.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/state.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/strategies.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/strategy_options.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/sync.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/unitofwork.cpython-37.pyc,, -sqlalchemy/orm/__pycache__/util.cpython-37.pyc,, -sqlalchemy/orm/attributes.py,sha256=F7bOBInY4KWB4erAeH3ImmUMJlyuDm4uNin1Qa6bD4Q,67867 -sqlalchemy/orm/base.py,sha256=8X7-3ZM5MYSAz1n_CYlIM2spYEELf_u9ZRdDqtuNYAE,15317 -sqlalchemy/orm/collections.py,sha256=CLeCvvSuoMpn7ms-2l0PUNADiUTjHvNAcuZz4-XJg_c,52683 -sqlalchemy/orm/dependency.py,sha256=02Jyovdu-dyQ6umen79zeggcYnocRk4QW4zT2StA4vs,46556 -sqlalchemy/orm/deprecated_interfaces.py,sha256=NoUlf7eCwxS2QB-IC_Gr1d6IplYpvXWSHuftu0gzvV0,20763 -sqlalchemy/orm/descriptor_props.py,sha256=S0EJbqAQFsMxGF7gMdZwjsZn4NACa-sinwWeV3XGqOs,28377 -sqlalchemy/orm/dynamic.py,sha256=GcrMj7Uwvmh2CD-OatPExTvY0H6DtysBcQfX89dwgs8,14666 -sqlalchemy/orm/evaluator.py,sha256=clC6Tf6Ppv-xwmDTAcNh5axOqrvrnzC5Jq7rowPo3kA,5441 -sqlalchemy/orm/events.py,sha256=dV_-UqtxyN-1mJvRhZOAoBvjl7SM_5Aq0kXI3Pdm5fw,104408 -sqlalchemy/orm/exc.py,sha256=G_szgGYDr2ojIaYk9-qAjQA6B3d5Kbi0tVTIgq5HHbs,6616 -sqlalchemy/orm/identity.py,sha256=7Jhn-byH1e-K7xihNaYEpQLEkUaTuvykR3seXY7ahgU,10436 -sqlalchemy/orm/instrumentation.py,sha256=b1CcLwYzBa_ppCimxruTDEdoUNuCp8M5lP3WxEdSOf4,18131 -sqlalchemy/orm/interfaces.py,sha256=7hJtC3irDY9htapLJdjzxKCCja8MBrJobOYn5UTG-NA,25851 -sqlalchemy/orm/loading.py,sha256=oITlYdKbQoqgfa8gZSxUukS4tdbYwffOW0NBaedXsdw,33847 -sqlalchemy/orm/mapper.py,sha256=Na8nmNbm110CzQ4-jleDDEe18BATT_rup63QJCIoGcY,129890 -sqlalchemy/orm/path_registry.py,sha256=AT4cnr3fvxo44FthmyKjzX7LTHrfdgOotr8jStycWXY,13764 
-sqlalchemy/orm/persistence.py,sha256=nHp_TCPOfBQiNYj5s8Q3L0zesHQrcJ2WbUK87JPhPXI,65927 -sqlalchemy/orm/properties.py,sha256=xIKHjXzA42KL-FtoFWuUe_9xsos3zSbZ8vHEuAPzV5k,12695 -sqlalchemy/orm/query.py,sha256=wMWrOosgc8OHkvPQW-7t5PpEbwPaN_LVSqVQlr86Rjo,178949 -sqlalchemy/orm/relationships.py,sha256=fdvc1ds4mbUN50gP7jyStuM-KMNX5bd1clzj6PsQ8Io,136637 -sqlalchemy/orm/scoping.py,sha256=F-RHAC3Ynw3kl_kwfalr8dGoiN50oyunXUGL0Tyz3U4,6414 -sqlalchemy/orm/session.py,sha256=CM-KKA5iWl6ozqsV0-iEoFQqBe4XP4OIdIVbiEvuV_4,131284 -sqlalchemy/orm/state.py,sha256=ZCEcx38aRsD3aCWF1XUiaGajFcYUjlDWK4sXfzsDe58,30794 -sqlalchemy/orm/strategies.py,sha256=M1ECN149rJRIWyTZopF3eeuBewEh207JKOkIYlOO4Dg,87001 -sqlalchemy/orm/strategy_options.py,sha256=1ggl_tsAjg37ors2cfoNN3mFxwepoSEe43gI3bPB3Oc,57376 -sqlalchemy/orm/sync.py,sha256=564q5ie_-aOiswgHZtaK-pnB8sCfzakOXte_V9V27Dk,5823 -sqlalchemy/orm/unitofwork.py,sha256=OAYzkT_p5Yul4krfnZTuN84zuuYxGe1EzZCOkdpeocI,24735 -sqlalchemy/orm/util.py,sha256=FLVsN4QcZP5pIptjJ4CcdMyd0xTbA7NnIftu89KkV0g,45432 -sqlalchemy/pool/__init__.py,sha256=-Vflck_t3sr66dgA4mehtNPZdvOsK7CgkjvY7UC7CbY,1483 -sqlalchemy/pool/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/pool/__pycache__/base.cpython-37.pyc,, -sqlalchemy/pool/__pycache__/dbapi_proxy.cpython-37.pyc,, -sqlalchemy/pool/__pycache__/impl.cpython-37.pyc,, -sqlalchemy/pool/base.py,sha256=-RL2p09Jx_X-kxmDltHdyG2cCSmYCrSDAELLNnFjd4g,36523 -sqlalchemy/pool/dbapi_proxy.py,sha256=zKCnvTcKfKuf04zqPMDjuuLtmdpIbkIKnk4dICLg_WA,4320 -sqlalchemy/pool/impl.py,sha256=oJYs7lUgfU7HbWp5VvTuHUXqBB1DSNTWuskevw6vNBA,14645 -sqlalchemy/processors.py,sha256=i_DiEYBHp5JJvvp7omB_TXSXf5efWTILvjVYvu8LHmw,5744 -sqlalchemy/schema.py,sha256=mkZmV4FQ4XWqRMsRoIf0lmB13bftAFPZjcszEKaE5gE,2418 -sqlalchemy/sql/__init__.py,sha256=12olVEiRBSf27BGAt_aWOUsLBF3sptfDvt-o-4K_7ts,3789 -sqlalchemy/sql/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/annotation.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/base.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/compiler.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/crud.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/ddl.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/default_comparator.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/dml.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/elements.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/expression.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/functions.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/naming.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/operators.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/schema.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/selectable.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/sqltypes.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/type_api.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/util.cpython-37.pyc,, -sqlalchemy/sql/__pycache__/visitors.cpython-37.pyc,, -sqlalchemy/sql/annotation.py,sha256=8XH8I_XmZI5iF2QwqX9N0auQCnh0EHZkOeoPiFQoUZM,6725 -sqlalchemy/sql/base.py,sha256=vDkZqV6Q-IigRLdEzf-IuQqHW6DeckQ5N85vbgzXpVA,21740 -sqlalchemy/sql/compiler.py,sha256=OkqHP1UOVR0fysEWdUFl44w9X582YDlOgKLTrkin2uc,126523 -sqlalchemy/sql/crud.py,sha256=ca_9Rjlxw_OnWZxBas_ElvMGtClvIUhLjxWOHhHJ6BE,25889 -sqlalchemy/sql/ddl.py,sha256=Mwfkqrc551QMnDeozZLRDu62je2SlP6ABXs6L9PGrtE,41416 -sqlalchemy/sql/default_comparator.py,sha256=tRDxI3AGdgrhG-hWymXgH2952Fqfw5f5-zB8Rbfq2qQ,12234 -sqlalchemy/sql/dml.py,sha256=YF3tCrAUdXskjLyUe5KPKlDp_zbCnujVlyGxa4MdQlk,35335 -sqlalchemy/sql/elements.py,sha256=dFZ8KSm_mFkYMJjEAEP3ipl3Uf0p0U3dpUQCFqE6GYs,159917 
-sqlalchemy/sql/expression.py,sha256=BVhn2KBPg3CQBmBfKPQqvYMgTHqHkt_-bqej3RLSD-g,9209 -sqlalchemy/sql/functions.py,sha256=gxWSE5KoTnh7ylVLwPnmGwVKwvJY38VjMjdOOtsvXEI,35833 -sqlalchemy/sql/naming.py,sha256=zVMDaA0Npbe8NzBSVBykw69cobe-dfQDC4r451cpvUk,5889 -sqlalchemy/sql/operators.py,sha256=J7ghLip6g-S-xGjNTx5RBo-hjM48RZ9pqVI8dIUY77A,42548 -sqlalchemy/sql/schema.py,sha256=28UI94JEdxrJq8G3RnQj6x-f4pNIOwckhQaPfq_kKmg,172921 -sqlalchemy/sql/selectable.py,sha256=enoXrsZIMlmFFo3fTK5w3wCxEttTSIvXFjKG8jmPlRw,137999 -sqlalchemy/sql/sqltypes.py,sha256=edW3yywfapznqhcBfbcirbg_8Ow44HLKzar80eyl23M,100787 -sqlalchemy/sql/type_api.py,sha256=BhCUsW--YVXV2iCoA3eWao608YmNkFw8plEwR1dhb9A,52229 -sqlalchemy/sql/util.py,sha256=gnU_xzrrmhD4vJ2RG5qAmWCe-jqG7dkmGRr-t2t3ZTg,29192 -sqlalchemy/sql/visitors.py,sha256=frayBlJ-hGW5wWJJC_EwA0WXs-RJuKSVgJr-bJ-VAf4,15953 -sqlalchemy/testing/__init__.py,sha256=dvAdeVcRveMsKSxAV71HG_wyxNgUjvIg03zyasFaVCI,2789 -sqlalchemy/testing/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/assertions.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/assertsql.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/config.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/engines.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/entities.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/exclusions.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/fixtures.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/mock.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/pickleable.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/profiling.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/provision.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/replay_fixture.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/requirements.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/schema.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/util.cpython-37.pyc,, -sqlalchemy/testing/__pycache__/warnings.cpython-37.pyc,, -sqlalchemy/testing/assertions.py,sha256=bqhUma94aCjrAVS6_w2OOECkCClHkvV0YlOpo_mMZOU,19779 -sqlalchemy/testing/assertsql.py,sha256=Qss3YS3O0ac68rBh2FPGskgDd6yJzG_5KUn0UhhtQS0,13573 -sqlalchemy/testing/config.py,sha256=OQC0OW8r_rlj3Bljv03Vb1pVABJoLwM0MWUll-daaGk,5521 -sqlalchemy/testing/engines.py,sha256=vL6jvWWIXe5XJKCzx2NRkgeT6aNatiNdes_ZHbdH5pw,10437 -sqlalchemy/testing/entities.py,sha256=c40-zDP6Y6vx18Pmj9Lx4JZ26lr4dQ_42JoffyEP9cA,3203 -sqlalchemy/testing/exclusions.py,sha256=0hOS3GnCs9T149eB4fZfxtGSWsPU_bi1VAlLhldr16w,13037 -sqlalchemy/testing/fixtures.py,sha256=eMARL5rpAaypiTpiK8YCAUFOZztrTTWCoLyElMTUSNA,15034 -sqlalchemy/testing/mock.py,sha256=TMVhpHQtM3v-2CFNYsP6np2vsF7aNDMWXCnjfI9rNvI,893 -sqlalchemy/testing/pickleable.py,sha256=6JlSnkXCrbbjeHkbFBwdA8BBrYfLz-PVA7slVhGNYYE,2693 -sqlalchemy/testing/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -sqlalchemy/testing/plugin/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/testing/plugin/__pycache__/bootstrap.cpython-37.pyc,, -sqlalchemy/testing/plugin/__pycache__/plugin_base.cpython-37.pyc,, -sqlalchemy/testing/plugin/__pycache__/pytestplugin.cpython-37.pyc,, -sqlalchemy/testing/plugin/bootstrap.py,sha256=0rkror_9S175GPGNnbtbDmfdLEhu9v-AAv715lR8KyU,1468 -sqlalchemy/testing/plugin/plugin_base.py,sha256=3VM5tJHm74gCTawivXHCQeLhx9-aiPZ71UPQ8v8aD9Y,20361 -sqlalchemy/testing/plugin/pytestplugin.py,sha256=fvT5LeUX6kyIxN3y5FZSaaaTBkilGNPi54qGz51fsMM,16189 -sqlalchemy/testing/profiling.py,sha256=Sm1jEw_H0IOFl_fL8xSvYH5oSOYnRi-jYN1krlR7rJI,8855 
-sqlalchemy/testing/provision.py,sha256=qWmrATrDsGAJUgQgg5gyS7iRxAYvsHY2T_psTvV4GTg,5509 -sqlalchemy/testing/replay_fixture.py,sha256=W_QZD96t7ichRNvILOjhuoQXTCYnd2usiHBQhPkzUYI,5875 -sqlalchemy/testing/requirements.py,sha256=YMD5ILDKaYFY2g9pTY_7UwkTneuBpPgiigDMunGt_BQ,32131 -sqlalchemy/testing/schema.py,sha256=V5Kggty3LB9YDzbLqmSHsc60J7fUXLDnBJnt_ZmXkas,3712 -sqlalchemy/testing/suite/__init__.py,sha256=SUWU-LR3asH2hN2YsIhlpqxeuo8fpvej3o6nct-L4xU,358 -sqlalchemy/testing/suite/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_cte.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_ddl.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_dialect.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_insert.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_reflection.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_results.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_select.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_sequence.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_types.cpython-37.pyc,, -sqlalchemy/testing/suite/__pycache__/test_update_delete.cpython-37.pyc,, -sqlalchemy/testing/suite/test_cte.py,sha256=VbBzRrNWXk2EkuFAz5f5vFXaR9tU240c63dSY88qpf0,6801 -sqlalchemy/testing/suite/test_ddl.py,sha256=TGFhoJEy-_NqPb5ypR8x8eYW7Xus323kt4p9DXRsjtk,2896 -sqlalchemy/testing/suite/test_dialect.py,sha256=LQmHD11UKObwB5bwh4QCJChk501AExe7FHvScOB5QAE,6197 -sqlalchemy/testing/suite/test_insert.py,sha256=Hiwa4iE-oG4csNG57CieNgN2-hGfHX8xgbMQT3rM0TE,9672 -sqlalchemy/testing/suite/test_reflection.py,sha256=YhJ7O4MN9R__VOjr0yrNnkudiaSezj493gwQi25d84s,41641 -sqlalchemy/testing/suite/test_results.py,sha256=wemYY7ZTVisKLbks8p1Maf1HEOLZ9vigIsGoHHc4Cbw,10971 -sqlalchemy/testing/suite/test_select.py,sha256=r_Whu2VLr6l_9BNp8vYikzOYzbh-V73l_yn-OX98nVc,23049 -sqlalchemy/testing/suite/test_sequence.py,sha256=oacBvtAqW3Ua3gcqyqnT1U_hpJutEW_EmEbwvf7Xq7E,4661 -sqlalchemy/testing/suite/test_types.py,sha256=ttLLKd5faoa0Uvr-HKRTvieNhlASaSnwj7c9HuB50jk,37159 -sqlalchemy/testing/suite/test_update_delete.py,sha256=I2NOhWu7iwKLJzcqM_6sLh3WCE1jmcN8tLwBzvNYLPg,1491 -sqlalchemy/testing/util.py,sha256=Pn41mDkl_Bb7mCbzGmqA4m1d5LCrpvh8v4C7lUCKvwY,10149 -sqlalchemy/testing/warnings.py,sha256=m0M3oN0gR7VH7d_VbFZaQu2igcsJDKTJvKRwNdfEwsY,1671 -sqlalchemy/types.py,sha256=LRIjlg-DVeBMAhVI7iXXY8NhQQDDr2UPKp3ONwyMZhI,3377 -sqlalchemy/util/__init__.py,sha256=ohFYPWxLu_BxGvDgCLvl7r2CrkB0y3vcN3DnePBsauA,6648 -sqlalchemy/util/__pycache__/__init__.cpython-37.pyc,, -sqlalchemy/util/__pycache__/_collections.cpython-37.pyc,, -sqlalchemy/util/__pycache__/compat.cpython-37.pyc,, -sqlalchemy/util/__pycache__/deprecations.cpython-37.pyc,, -sqlalchemy/util/__pycache__/langhelpers.cpython-37.pyc,, -sqlalchemy/util/__pycache__/queue.cpython-37.pyc,, -sqlalchemy/util/__pycache__/topological.cpython-37.pyc,, -sqlalchemy/util/_collections.py,sha256=MfX2a2MJ95_cYqGQFDWTuf_y7frdtY-z7LBI261HJWE,29219 -sqlalchemy/util/compat.py,sha256=LqV8UIGP7-WEmBI7H-sIsxGi1B9XRppxuxJyGoYE4_c,16828 -sqlalchemy/util/deprecations.py,sha256=odcWi5Ciq7T-kpYbavOjMaK89fuX6BvN4j-zB_Pr8BA,7474 -sqlalchemy/util/langhelpers.py,sha256=aaV0kbtmiQfRXCYaHgYGITYE01apkoFuJWlwYTO0p5U,50512 -sqlalchemy/util/queue.py,sha256=QHh_QckIfyisS9q_blxbwamt92JPXKZgt-pf971dsEs,6827 -sqlalchemy/util/topological.py,sha256=lbXO1ZDDTtYHps_rE7NlEZ3AG773IDLP_7L941DTt6U,2767 diff --git a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/WHEEL 
b/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/WHEEL deleted file mode 100644 index a194ea7..0000000 --- a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.32.3) -Root-Is-Purelib: false -Tag: cp37-cp37m-linux_armv7l - diff --git a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/top_level.txt deleted file mode 100644 index 39fb2be..0000000 --- a/venv/lib/python3.7/site-packages/SQLAlchemy-1.3.17.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -sqlalchemy diff --git a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/LICENSE.rst b/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/LICENSE.rst deleted file mode 100644 index c37cae4..0000000 --- a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2007 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/METADATA b/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/METADATA deleted file mode 100644 index 25977a3..0000000 --- a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/METADATA +++ /dev/null @@ -1,127 +0,0 @@ -Metadata-Version: 2.1 -Name: Werkzeug -Version: 1.0.0 -Summary: The comprehensive WSGI web application library. 
-Home-page: https://palletsprojects.com/p/werkzeug/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: Pallets -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://werkzeug.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/werkzeug -Project-URL: Issue tracker, https://github.com/pallets/werkzeug/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Application -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware -Classifier: Topic :: Software Development :: Libraries :: Application Frameworks -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* -Description-Content-Type: text/x-rst -Provides-Extra: dev -Requires-Dist: pytest ; extra == 'dev' -Requires-Dist: coverage ; extra == 'dev' -Requires-Dist: tox ; extra == 'dev' -Requires-Dist: sphinx ; extra == 'dev' -Requires-Dist: pallets-sphinx-themes ; extra == 'dev' -Requires-Dist: sphinx-issues ; extra == 'dev' -Provides-Extra: watchdog -Requires-Dist: watchdog ; extra == 'watchdog' - -Werkzeug -======== - -*werkzeug* German noun: "tool". Etymology: *werk* ("work"), *zeug* ("stuff") - -Werkzeug is a comprehensive `WSGI`_ web application library. It began as -a simple collection of various utilities for WSGI applications and has -become one of the most advanced WSGI utility libraries. - -It includes: - -- An interactive debugger that allows inspecting stack traces and - source code in the browser with an interactive interpreter for any - frame in the stack. -- A full-featured request object with objects to interact with - headers, query args, form data, files, and cookies. -- A response object that can wrap other WSGI applications and handle - streaming data. -- A routing system for matching URLs to endpoints and generating URLs - for endpoints, with an extensible system for capturing variables - from URLs. -- HTTP utilities to handle entity tags, cache control, dates, user - agents, cookies, files, and more. -- A threaded WSGI server for use while developing applications - locally. -- A test client for simulating HTTP requests during testing without - requiring running a server. - -Werkzeug is Unicode aware and doesn't enforce any dependencies. It is up -to the developer to choose a template engine, database adapter, and even -how to handle requests. It can be used to build all sorts of end user -applications such as blogs, wikis, or bulletin boards. 
- -`Flask`_ wraps Werkzeug, using it to handle the details of WSGI while -providing more structure and patterns for defining powerful -applications. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - pip install -U Werkzeug - - -A Simple Example ----------------- - -.. code-block:: python - - from werkzeug.wrappers import Request, Response - - @Request.application - def application(request): - return Response('Hello, World!') - - if __name__ == '__main__': - from werkzeug.serving import run_simple - run_simple('localhost', 4000, application) - - -Links ------ - -- Website: https://palletsprojects.com/p/werkzeug/ -- Documentation: https://werkzeug.palletsprojects.com/ -- Releases: https://pypi.org/project/Werkzeug/ -- Code: https://github.com/pallets/werkzeug -- Issue tracker: https://github.com/pallets/werkzeug/issues -- Test status: https://dev.azure.com/pallets/werkzeug/_build -- Official chat: https://discord.gg/t6rrQZH - -.. _WSGI: https://wsgi.readthedocs.io/en/latest/ -.. _Flask: https://www.palletsprojects.com/p/flask/ -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - diff --git a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/RECORD b/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/RECORD deleted file mode 100644 index 7ffb320..0000000 --- a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/RECORD +++ /dev/null @@ -1,101 +0,0 @@ -Werkzeug-1.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -Werkzeug-1.0.0.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 -Werkzeug-1.0.0.dist-info/METADATA,sha256=PYrtVKBaudcg8XAd-XR_FP0XKeHwh2svMnYDkF3NMEM,4683 -Werkzeug-1.0.0.dist-info/RECORD,, -Werkzeug-1.0.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110 -Werkzeug-1.0.0.dist-info/top_level.txt,sha256=QRyj2VjwJoQkrwjwFIOlB8Xg3r9un0NtqVHQF-15xaw,9 -werkzeug/__init__.py,sha256=tuMKcU_g68IyAqFr7KP27w82oHdbHCJo5jsELm56krM,502 -werkzeug/__pycache__/__init__.cpython-37.pyc,, -werkzeug/__pycache__/_compat.cpython-37.pyc,, -werkzeug/__pycache__/_internal.cpython-37.pyc,, -werkzeug/__pycache__/_reloader.cpython-37.pyc,, -werkzeug/__pycache__/datastructures.cpython-37.pyc,, -werkzeug/__pycache__/exceptions.cpython-37.pyc,, -werkzeug/__pycache__/filesystem.cpython-37.pyc,, -werkzeug/__pycache__/formparser.cpython-37.pyc,, -werkzeug/__pycache__/http.cpython-37.pyc,, -werkzeug/__pycache__/local.cpython-37.pyc,, -werkzeug/__pycache__/posixemulation.cpython-37.pyc,, -werkzeug/__pycache__/routing.cpython-37.pyc,, -werkzeug/__pycache__/security.cpython-37.pyc,, -werkzeug/__pycache__/serving.cpython-37.pyc,, -werkzeug/__pycache__/test.cpython-37.pyc,, -werkzeug/__pycache__/testapp.cpython-37.pyc,, -werkzeug/__pycache__/urls.cpython-37.pyc,, -werkzeug/__pycache__/useragents.cpython-37.pyc,, -werkzeug/__pycache__/utils.cpython-37.pyc,, -werkzeug/__pycache__/wsgi.cpython-37.pyc,, -werkzeug/_compat.py,sha256=zjufTNrhQ8BgYSGSh-sVu6iW3r3O9WzjE9j-qJobx-g,6671 -werkzeug/_internal.py,sha256=d_4AqheyS6dHMViwdc0drFrjs67ZzT6Ej2gWf-Z-Iys,14351 -werkzeug/_reloader.py,sha256=I3mg3oRQ0lLzl06oEoVopN3bN7CtINuuUQdqDcmTnEs,11531 -werkzeug/datastructures.py,sha256=lgSnQ5E_hMlbKD6_Iq0eT1CBW_CPobVUqhunHqUWrPs,100436 -werkzeug/debug/__init__.py,sha256=3RtUMc5Y9hYyK11ugHltgkQ9Dt-ViR945Vy_X5NV7zU,17289 -werkzeug/debug/__pycache__/__init__.cpython-37.pyc,, -werkzeug/debug/__pycache__/console.cpython-37.pyc,, -werkzeug/debug/__pycache__/repr.cpython-37.pyc,, 
-werkzeug/debug/__pycache__/tbtools.cpython-37.pyc,, -werkzeug/debug/console.py,sha256=YHfFF7b4gRfG9aM7_KxwnUBN5nX8mr0OBTPOIMCpyiQ,5461 -werkzeug/debug/repr.py,sha256=lIwuhbyrMwVe3P_cFqNyqzHL7P93TLKod7lw9clydEw,9621 -werkzeug/debug/shared/FONT_LICENSE,sha256=LwAVEI1oYnvXiNMT9SnCH_TaLCxCpeHziDrMg0gPkAI,4673 -werkzeug/debug/shared/console.png,sha256=bxax6RXXlvOij_KeqvSNX0ojJf83YbnZ7my-3Gx9w2A,507 -werkzeug/debug/shared/debugger.js,sha256=rOhqZMRfpZnnu6_XCGn6wMWPhtfwRAcyZKksdIxPJas,6400 -werkzeug/debug/shared/jquery.js,sha256=CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo,88145 -werkzeug/debug/shared/less.png,sha256=-4-kNRaXJSONVLahrQKUxMwXGm9R4OnZ9SxDGpHlIR4,191 -werkzeug/debug/shared/more.png,sha256=GngN7CioHQoV58rH6ojnkYi8c_qED2Aka5FO5UXrReY,200 -werkzeug/debug/shared/source.png,sha256=RoGcBTE4CyCB85GBuDGTFlAnUqxwTBiIfDqW15EpnUQ,818 -werkzeug/debug/shared/style.css,sha256=gZ9uhmb5zj3XLuT9RvnMp6jMINgQ-VVBCp-2AZbG3YQ,6604 -werkzeug/debug/shared/ubuntu.ttf,sha256=1eaHFyepmy4FyDvjLVzpITrGEBu_CZYY94jE0nED1c0,70220 -werkzeug/debug/tbtools.py,sha256=2iJ8RURUZUSbopOIehy53LnVJWx47lsHN2V2l6hc7Wc,20363 -werkzeug/exceptions.py,sha256=UTYSDkmAsH-vt8VSidlEffwqBVNXuT7bRg-_NqgUe8A,25188 -werkzeug/filesystem.py,sha256=HzKl-j0Hd8Jl66j778UbPTAYNnY6vUZgYLlBZ0e7uw0,2101 -werkzeug/formparser.py,sha256=Sto0jZid9im9ZVIf56vilCdyX-arK33wSftkYsLCnzo,21788 -werkzeug/http.py,sha256=YLEcmPFGAwltCLUl-D97l1EEWwLOl_UshuMOXssGm0M,43052 -werkzeug/local.py,sha256=_Tk7gB238pPWUU7habxFkZF02fiCMRVW6d62YWL1Rh0,14371 -werkzeug/middleware/__init__.py,sha256=f1SFZo67IlW4k1uqKzNHxYQlsakUS-D6KK_j0e3jjwQ,549 -werkzeug/middleware/__pycache__/__init__.cpython-37.pyc,, -werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc,, -werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc,, -werkzeug/middleware/__pycache__/lint.cpython-37.pyc,, -werkzeug/middleware/__pycache__/profiler.cpython-37.pyc,, -werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc,, -werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc,, -werkzeug/middleware/dispatcher.py,sha256=_-KoMzHtcISHS7ouWKAOraqlCLprdh83YOAn_8DjLp8,2240 -werkzeug/middleware/http_proxy.py,sha256=lRjTdMmghHiZuZrS7_UJ3gZc-vlFizhBbFZ-XZPLwIA,7117 -werkzeug/middleware/lint.py,sha256=ItTwuWJnflF8xMT1uqU_Ty1ryhux-CjeUfskqaUpxsw,12967 -werkzeug/middleware/profiler.py,sha256=8B_s23d6BGrU_q54gJsm6kcCbOJbTSqrXCsioHON0Xs,4471 -werkzeug/middleware/proxy_fix.py,sha256=K5oZ3DPXOzdZi0Xba5zW7ClPOxgUuqXHQHvY2-AWCGw,6431 -werkzeug/middleware/shared_data.py,sha256=2V4lLqK9CZLFcxtOAjZE7ZVY4SIU_cKLdKKaMmJPO3o,9581 -werkzeug/posixemulation.py,sha256=gSSiv1SCmOyzOM_nq1ZaZCtxP__C5MeDJl_4yXJmi4Q,3541 -werkzeug/routing.py,sha256=zF5Px2KLYThv634WBZRK3jiOPUQIcRe1iGz0R0eaSzM,79014 -werkzeug/security.py,sha256=81149MplFq7-hD4RK4sKp9kzXXejjV9D4lWBzaRyeQ8,8106 -werkzeug/serving.py,sha256=YvTqvurA-Mnj8mkqRe2kBdVr2ap4ibCq1ByQjOA6g1w,38694 -werkzeug/test.py,sha256=GJ9kxTMSJ-nB7kfGtxuROr9JGmXxDRev-2U1SkeUJGE,39564 -werkzeug/testapp.py,sha256=bHekqMsqRfVxwgFbvOMem-DYa_sdB7R47yUXpt1RUTo,9329 -werkzeug/urls.py,sha256=T8-hV_1vwhu6xhX93FwsHteK-W-kIE2orj5WoMf-WFw,39322 -werkzeug/useragents.py,sha256=UOckz1Ts8buNA_KJvVOT6iqNP5EFl3P5JqlpwiQUI-w,5451 -werkzeug/utils.py,sha256=hrVK4u_wi8z9viBO9bgOLlm1aaIvCpn-p2d1FeZQDEo,25251 -werkzeug/wrappers/__init__.py,sha256=S4VioKAmF_av9Ec9zQvG71X1EOkYfPx1TYck9jyDiyY,1384 -werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/accept.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/auth.cpython-37.pyc,, 
-werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/cors.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/etag.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/json.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/request.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/response.cpython-37.pyc,, -werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc,, -werkzeug/wrappers/accept.py,sha256=TIvjUc0g73fhTWX54wg_D9NNzKvpnG1X8u1w26tK1o8,1760 -werkzeug/wrappers/auth.py,sha256=Pmn6iaGHBrUyHbJpW0lZhO_q9RVoAa5QalaTqcavdAI,1158 -werkzeug/wrappers/base_request.py,sha256=4TuGlKWeKQdlq4eU94hJYcXSfWo8Rk7CS1Ef5lJ3ZM0,26012 -werkzeug/wrappers/base_response.py,sha256=JTxJZ8o-IBetpoWJqt2HFwPaNWNDAlM3_GXJe1Whw80,27784 -werkzeug/wrappers/common_descriptors.py,sha256=X2Ktd5zUWsmcd4ciaF62Dd8Lru9pLGP_XDUNukc8cXs,12829 -werkzeug/wrappers/cors.py,sha256=hwbXEVjiqDT4MybRgstMYQw4NqgiXEiQ9wmlC3sqNA8,3512 -werkzeug/wrappers/etag.py,sha256=XMXtyfByBsOjxwaX8U7ZtUY7JXkbQLP45oXZ0qkyTNs,12217 -werkzeug/wrappers/json.py,sha256=HvK_A4NpO0sLqgb10sTJcoZydYOwyNiPCJPV7SVgcgE,4343 -werkzeug/wrappers/request.py,sha256=QbHGqDpGPN684pnOPEokwkPESfm-NnfYM7ydOMxW_NI,1514 -werkzeug/wrappers/response.py,sha256=Oqv8TMG_dnOKTq_V30ddgkO5B7IJhkVPODvm7cbhZ3c,2524 -werkzeug/wrappers/user_agent.py,sha256=YJb-vr12cujG7sQMG9V89VsJa-03SWSenhg1W4cT0EY,435 -werkzeug/wsgi.py,sha256=ZGk85NzRyQTzkYis-xl8V9ydJgfClBdStvhzDzER2mw,34367 diff --git a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/WHEEL b/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/WHEEL deleted file mode 100644 index 8b701e9..0000000 --- a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.6) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/top_level.txt deleted file mode 100644 index 6fe8da8..0000000 --- a/venv/lib/python3.7/site-packages/Werkzeug-1.0.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -werkzeug diff --git a/venv/lib/python3.7/site-packages/__pycache__/console_log.cpython-37.pyc b/venv/lib/python3.7/site-packages/__pycache__/console_log.cpython-37.pyc deleted file mode 100644 index c38aa21..0000000 Binary files a/venv/lib/python3.7/site-packages/__pycache__/console_log.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc b/venv/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc deleted file mode 100644 index 55391d1..0000000 Binary files a/venv/lib/python3.7/site-packages/__pycache__/easy_install.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/__pycache__/editor.cpython-37.pyc b/venv/lib/python3.7/site-packages/__pycache__/editor.cpython-37.pyc deleted file mode 100644 index c481453..0000000 Binary files a/venv/lib/python3.7/site-packages/__pycache__/editor.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/__pycache__/six.cpython-37.pyc b/venv/lib/python3.7/site-packages/__pycache__/six.cpython-37.pyc deleted file mode 100644 index 9d354e8..0000000 Binary files a/venv/lib/python3.7/site-packages/__pycache__/six.cpython-37.pyc and /dev/null differ diff --git 
a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/LICENSE b/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/LICENSE deleted file mode 100644 index c286303..0000000 --- a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright 2009-2020 Michael Bayer. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/METADATA b/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/METADATA deleted file mode 100644 index 552808f..0000000 --- a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/METADATA +++ /dev/null @@ -1,138 +0,0 @@ -Metadata-Version: 2.1 -Name: alembic -Version: 1.4.2 -Summary: A database migration tool for SQLAlchemy. -Home-page: https://alembic.sqlalchemy.org -Author: Mike Bayer -Author-email: mike@zzzcomputing.com -License: MIT -Project-URL: Issue Tracker, https://github.com/sqlalchemy/alembic/ -Keywords: SQLAlchemy migrations -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Console -Classifier: License :: OSI Approved :: MIT License -Classifier: Intended Audience :: Developers -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Database :: Front-Ends -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Requires-Dist: SQLAlchemy (>=1.1.0) -Requires-Dist: Mako -Requires-Dist: python-editor (>=0.3) -Requires-Dist: python-dateutil - -Alembic is a database migrations tool written by the author -of `SQLAlchemy <https://www.sqlalchemy.org>`_.
A migrations tool -offers the following functionality: - -* Can emit ALTER statements to a database in order to change - the structure of tables and other constructs -* Provides a system whereby "migration scripts" may be constructed; - each script indicates a particular series of steps that can "upgrade" a - target database to a new version, and optionally a series of steps that can - "downgrade" similarly, doing the same steps in reverse. -* Allows the scripts to execute in some sequential manner. - -The goals of Alembic are: - -* Very open ended and transparent configuration and operation. A new - Alembic environment is generated from a set of templates which is selected - among a set of options when setup first occurs. The templates then deposit a - series of scripts that define fully how database connectivity is established - and how migration scripts are invoked; the migration scripts themselves are - generated from a template within that series of scripts. The scripts can - then be further customized to define exactly how databases will be - interacted with and what structure new migration files should take. -* Full support for transactional DDL. The default scripts ensure that all - migrations occur within a transaction - for those databases which support - this (Postgresql, Microsoft SQL Server), migrations can be tested with no - need to manually undo changes upon failure. -* Minimalist script construction. Basic operations like renaming - tables/columns, adding/removing columns, changing column attributes can be - performed through one line commands like alter_column(), rename_table(), - add_constraint(). There is no need to recreate full SQLAlchemy Table - structures for simple operations like these - the functions themselves - generate minimalist schema structures behind the scenes to achieve the given - DDL sequence. -* "auto generation" of migrations. While real world migrations are far more - complex than what can be automatically determined, Alembic can still - eliminate the initial grunt work in generating new migration directives - from an altered schema. The ``--autogenerate`` feature will inspect the - current status of a database using SQLAlchemy's schema inspection - capabilities, compare it to the current state of the database model as - specified in Python, and generate a series of "candidate" migrations, - rendering them into a new migration script as Python directives. The - developer then edits the new file, adding additional directives and data - migrations as needed, to produce a finished migration. Table and column - level changes can be detected, with constraints and indexes to follow as - well. -* Full support for migrations generated as SQL scripts. Those of us who - work in corporate environments know that direct access to DDL commands on a - production database is a rare privilege, and DBAs want textual SQL scripts. - Alembic's usage model and commands are oriented towards being able to run a - series of migrations into a textual output file as easily as it runs them - directly to a database. Care must be taken in this mode to not invoke other - operations that rely upon in-memory SELECTs of rows - Alembic tries to - provide helper constructs like bulk_insert() to help with data-oriented - operations that are compatible with script-based DDL. -* Non-linear, dependency-graph versioning. Scripts are given UUID - identifiers similarly to a DVCS, and the linkage of one script to the next - is achieved via human-editable markers within the scripts themselves. 
- The structure of a set of migration files is considered as a - directed-acyclic graph, meaning any migration file can be dependent - on any other arbitrary set of migration files, or none at - all. Through this open-ended system, migration files can be organized - into branches, multiple roots, and mergepoints, without restriction. - Commands are provided to produce new branches, roots, and merges of - branches automatically. -* Provide a library of ALTER constructs that can be used by any SQLAlchemy - application. The DDL constructs build upon SQLAlchemy's own DDLElement base - and can be used standalone by any application or script. -* At long last, bring SQLite and its inability to ALTER things into the fold, - but in such a way that SQLite's very special workflow needs are accommodated - in an explicit way that makes the most of a bad situation, through the - concept of a "batch" migration, where multiple changes to a table can - be batched together to form a series of instructions for a single, subsequent - "move-and-copy" workflow (a minimal sketch follows below). You can even use "move-and-copy" workflow for - other databases, if you want to recreate a table in the background - on a busy system. - -Documentation and status of Alembic is at https://alembic.sqlalchemy.org/ - -The SQLAlchemy Project -====================== - -Alembic is part of the `SQLAlchemy Project <https://www.sqlalchemy.org>`_ and -adheres to the same standards and conventions as the core project. - -Development / Bug reporting / Pull requests -___________________________________________ - -Please refer to the -`SQLAlchemy Community Guide <https://www.sqlalchemy.org/develop.html>`_ for -guidelines on coding and participating in this project. - -Code of Conduct -_______________ - -Above all, SQLAlchemy places great emphasis on polite, thoughtful, and -constructive communication between users and developers. -Please see our current Code of Conduct at -`Code of Conduct <https://www.sqlalchemy.org/codeofconduct.html>`_. - -License -======= - -Alembic is distributed under the `MIT license -<https://opensource.org/licenses/MIT>`_.
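The "move-and-copy" batch workflow mentioned in the feature list above is driven by ``op.batch_alter_table()`` inside a migration script. A minimal sketch, assuming a hypothetical ``account`` table with ``last_seen`` and ``legacy_flag`` columns (the table and column names are illustrative only):

.. code-block:: python

    import sqlalchemy as sa
    from alembic import op

    def upgrade():
        # On backends such as SQLite, every change collected in this block
        # is replayed as a single create-new-table / copy-data / rename step,
        # rather than as individual ALTER statements.
        with op.batch_alter_table("account") as batch_op:  # "account" is hypothetical
            batch_op.add_column(sa.Column("last_seen", sa.DateTime()))
            batch_op.drop_column("legacy_flag")

On databases with full ALTER support the same script emits ordinary ALTER statements, so the batch form stays portable.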
- - diff --git a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/RECORD b/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/RECORD deleted file mode 100644 index 1400cbf..0000000 --- a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/RECORD +++ /dev/null @@ -1,123 +0,0 @@ -../../../bin/alembic,sha256=VhYgSnMwRngw1J3SuVvszUPA9Z7S5ggue9MFG8vbWrY,237 -alembic-1.4.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -alembic-1.4.2.dist-info/LICENSE,sha256=vEAGUIuUWeKAHsWgwcUTyJRHJWjUmGO9MZ84k2KBhFE,1058 -alembic-1.4.2.dist-info/METADATA,sha256=LaCIAVi-eI4fLOHSaX9v0PJpFyBLdDX2DXPcLS0v9iM,7011 -alembic-1.4.2.dist-info/RECORD,, -alembic-1.4.2.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110 -alembic-1.4.2.dist-info/entry_points.txt,sha256=jOSnN_2fhU8xzDQ50rdNr425J8kf_exuY8GrAo1daz8,49 -alembic-1.4.2.dist-info/top_level.txt,sha256=FwKWd5VsPFC8iQjpu1u9Cn-JnK3-V1RhUCmWqz1cl-s,8 -alembic/__init__.py,sha256=DzXW72ndu6EzMX0RlBznxpIQI1TfpxLm7fo6ah6yiKM,249 -alembic/__pycache__/__init__.cpython-37.pyc,, -alembic/__pycache__/command.cpython-37.pyc,, -alembic/__pycache__/config.cpython-37.pyc,, -alembic/__pycache__/context.cpython-37.pyc,, -alembic/__pycache__/op.cpython-37.pyc,, -alembic/autogenerate/__init__.py,sha256=98WZvBQ3k-cfpa1GsVFEq0Kqmzl1-UPYCtau9urpsIQ,431 -alembic/autogenerate/__pycache__/__init__.cpython-37.pyc,, -alembic/autogenerate/__pycache__/api.cpython-37.pyc,, -alembic/autogenerate/__pycache__/compare.cpython-37.pyc,, -alembic/autogenerate/__pycache__/render.cpython-37.pyc,, -alembic/autogenerate/__pycache__/rewriter.cpython-37.pyc,, -alembic/autogenerate/api.py,sha256=Bg1pnUM9qpSGk6sxGMaYCSME8RaRHz3K4mb0Qar4o6A,17027 -alembic/autogenerate/compare.py,sha256=cRBt7ZBFRiRjhvo0pA3XvuUw0YOdmZwy_2k_pJoZaL0,39009 -alembic/autogenerate/render.py,sha256=m8vE8w1XKI9aoaq6cmbzO0pWh2wgnKGY5E6M0Y8YYq4,29117 -alembic/autogenerate/rewriter.py,sha256=rERJzPZtzzs9rHA6XLz8M5M9PVEHFBrMZ2YZBO09f6E,5721 -alembic/command.py,sha256=C_nGc-Ay0mkKQA3Po4BgPygUS8Po87kap6-YRLgsdLM,18129 -alembic/config.py,sha256=cTJTxr1buPkVLap32k-mnu8ccd4s5so0dL4YKaZVi5w,19865 -alembic/context.py,sha256=hK1AJOQXJ29Bhn276GYcosxeG7pC5aZRT5E8c4bMJ4Q,195 -alembic/ddl/__init__.py,sha256=7cwkSz69tWKVbUxbHpE0SDOYUgbxSBkVIHHTyJ1O7V8,185 -alembic/ddl/__pycache__/__init__.cpython-37.pyc,, -alembic/ddl/__pycache__/base.cpython-37.pyc,, -alembic/ddl/__pycache__/impl.cpython-37.pyc,, -alembic/ddl/__pycache__/mssql.cpython-37.pyc,, -alembic/ddl/__pycache__/mysql.cpython-37.pyc,, -alembic/ddl/__pycache__/oracle.cpython-37.pyc,, -alembic/ddl/__pycache__/postgresql.cpython-37.pyc,, -alembic/ddl/__pycache__/sqlite.cpython-37.pyc,, -alembic/ddl/base.py,sha256=tjjyvaxbgY2q5POm8MEEqGMIdQHaUjBS3dAkN5qVFrQ,6812 -alembic/ddl/impl.py,sha256=44AQywPLQl3cKS7bUQLw1PpDX05sIKizQHNpk-FpFpg,16557 -alembic/ddl/mssql.py,sha256=7iSSBE2izonS7GWKbB2qd0OprD35oBRSIEJIxY-iRyc,9361 -alembic/ddl/mysql.py,sha256=VPU7BoxUFh-kZcxFkHsPO17On-ijiQsvsiEQg3Ckj34,13911 -alembic/ddl/oracle.py,sha256=RIaGxJV0Gp58thdrXW9UqH-4jYke-RKmHj7nGc9396A,3810 -alembic/ddl/postgresql.py,sha256=bKzEZOGYJ8Lcd5bob_HMNlo6xnkZCosQglC2iGIZFwM,17602 -alembic/ddl/sqlite.py,sha256=w1pcH1CZgDfDIu9TJAnZezF8TPpuY1lDEttJ4_dyJmU,4700 -alembic/op.py,sha256=flHtcsVqOD-ZgZKK2pv-CJ5Cwh-KJ7puMUNXzishxLw,167 -alembic/operations/__init__.py,sha256=nJbmMAwapU2py4fJ4GUBanBp-EMXhDyMngb717NIHM8,192 -alembic/operations/__pycache__/__init__.cpython-37.pyc,, -alembic/operations/__pycache__/base.cpython-37.pyc,, 
-alembic/operations/__pycache__/batch.cpython-37.pyc,, -alembic/operations/__pycache__/ops.cpython-37.pyc,, -alembic/operations/__pycache__/schemaobj.cpython-37.pyc,, -alembic/operations/__pycache__/toimpl.cpython-37.pyc,, -alembic/operations/base.py,sha256=6C9hWmpdYfVMnCzV_1AqUvtZ-Ixfx_5u8mBw2gxjNdM,17999 -alembic/operations/batch.py,sha256=hbbMe5bYdM6G2f9b_8AUlJkuKccFUbGZcLrVDp8n8LM,19357 -alembic/operations/ops.py,sha256=LYLcaz_dVGmhYhUl7wJ3FLU8SD_Af-rzkJbiM-fS0JY,83044 -alembic/operations/schemaobj.py,sha256=hFUtUzV-kSEq9TnXqH6IygM-aCcB5VjIpUm0kAKYGE4,5736 -alembic/operations/toimpl.py,sha256=p4CuVYHKeJiUMboY_uxG1_A8lXCTiEjjRCKXoCuNr1M,5716 -alembic/runtime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -alembic/runtime/__pycache__/__init__.cpython-37.pyc,, -alembic/runtime/__pycache__/environment.cpython-37.pyc,, -alembic/runtime/__pycache__/migration.cpython-37.pyc,, -alembic/runtime/environment.py,sha256=l1EgAiKX9kO7mam9wrgpQ90X1qGWCuC4t3xOCfYN7bU,37862 -alembic/runtime/migration.py,sha256=wHtXMMrHV76agAVWVip3g2wlUfo5bBbgDMN3ak7NfqE,42389 -alembic/script/__init__.py,sha256=SxmoPlnSDLFW_9p-OTG-yaev76Ok7TiyN6u2TAKczkU,116 -alembic/script/__pycache__/__init__.cpython-37.pyc,, -alembic/script/__pycache__/base.cpython-37.pyc,, -alembic/script/__pycache__/revision.cpython-37.pyc,, -alembic/script/__pycache__/write_hooks.cpython-37.pyc,, -alembic/script/base.py,sha256=WdY4-7RottWuwv07AcOB0xZMpcMWfy7isq6kqmRlpcI,31774 -alembic/script/revision.py,sha256=v_eTBexwhZJYUrlSh_MH97fXBWh1yZBcO25ganPQLLY,34453 -alembic/script/write_hooks.py,sha256=0heAelcqI2n4WW440Mz1cqPkHUNvfuS-meMzpiMBDd4,2768 -alembic/templates/generic/README,sha256=MVlc9TYmr57RbhXET6QxgyCcwWP7w-vLkEsirENqiIQ,38 -alembic/templates/generic/__pycache__/env.cpython-37.pyc,, -alembic/templates/generic/alembic.ini.mako,sha256=wexLbI6y199q77PCKJADZAmUIXUhGBhlYUMt-hzzwQ0,2044 -alembic/templates/generic/env.py,sha256=LN06WkE0nj1RGL--SbzYfx4DRopSl5g2Ihu6knUJMZQ,2039 -alembic/templates/generic/script.py.mako,sha256=8_xgA-gm_OhehnO7CiIijWgnm00ZlszEHtIHrAYFJl0,494 -alembic/templates/multidb/README,sha256=c7CNHkVvVSJsGZ75Qlcuo1nXKQITDu0W3hSULyz1pWg,41 -alembic/templates/multidb/__pycache__/env.cpython-37.pyc,, -alembic/templates/multidb/alembic.ini.mako,sha256=1ci-FAmvH99PCOcWwq-D8fWs4Hx2XxMQPr-2VWe2BQI,2138 -alembic/templates/multidb/env.py,sha256=kDOOlbzDLc1nhFbsKJ-bclZ0mpEjLMeJdS9Gu_H5zWo,4162 -alembic/templates/multidb/script.py.mako,sha256=k09J7yYXfXFyedV6D5VgJzuPQxPnYKxID0huIabH46w,923 -alembic/templates/pylons/README,sha256=gr4MQnn_ScvV_kasPpXgo6ntAtcIWmOlga9vURbgUwI,59 -alembic/templates/pylons/__pycache__/env.cpython-37.pyc,, -alembic/templates/pylons/alembic.ini.mako,sha256=i-NMyRa8jZoX89ra-pDHdXQzsB3q7YvQRDFTaz6z2Jw,1523 -alembic/templates/pylons/env.py,sha256=VZyuB0IllLLu1i8jec_-Wj5Ry7D0KgOr8cSISLYFK_8,2245 -alembic/templates/pylons/script.py.mako,sha256=8_xgA-gm_OhehnO7CiIijWgnm00ZlszEHtIHrAYFJl0,494 -alembic/testing/__init__.py,sha256=sWNfwWdj_gDR2nk08lA8zZtsqLbfYMPT25W4JJu-gIY,1065 -alembic/testing/__pycache__/__init__.cpython-37.pyc,, -alembic/testing/__pycache__/assertions.cpython-37.pyc,, -alembic/testing/__pycache__/env.cpython-37.pyc,, -alembic/testing/__pycache__/exclusions.cpython-37.pyc,, -alembic/testing/__pycache__/fixture_functions.cpython-37.pyc,, -alembic/testing/__pycache__/fixtures.cpython-37.pyc,, -alembic/testing/__pycache__/requirements.cpython-37.pyc,, -alembic/testing/__pycache__/util.cpython-37.pyc,, -alembic/testing/assertions.py,sha256=zgQkrDYUPFixItL3hWr0OmNMDZyKY0lZUTsy9xQ1nzc,3292 
-alembic/testing/env.py,sha256=A--5nnemVi8mmimkNxF_d2dqBjGl2IOVRlUH3JJvWZI,10250 -alembic/testing/exclusions.py,sha256=ppAglvtwGg442E0F6TOmuuTgLT_NkWs3mbQ3Zv8uPeU,14289 -alembic/testing/fixture_functions.py,sha256=FOhaHKaDtNlItGIyOG6uVGB2pj6Aoi4Qlr9H-xPPF1Y,2952 -alembic/testing/fixtures.py,sha256=Q5XxgJAeWXEhCcBmaYRndTG89ZZj4jo0Qu8dn1TMTRM,7869 -alembic/testing/plugin/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -alembic/testing/plugin/__pycache__/__init__.cpython-37.pyc,, -alembic/testing/plugin/__pycache__/bootstrap.cpython-37.pyc,, -alembic/testing/plugin/__pycache__/plugin_base.cpython-37.pyc,, -alembic/testing/plugin/__pycache__/pytestplugin.cpython-37.pyc,, -alembic/testing/plugin/bootstrap.py,sha256=5sN6F4OJ3UUNVUeNBzY-MjN_r3tOH5NrGx4maJuRC9U,857 -alembic/testing/plugin/plugin_base.py,sha256=Hcs-ODRbzUSFvtVQ7eiL_d6bXbGeLut0-Y2XQAEwIMw,3080 -alembic/testing/plugin/pytestplugin.py,sha256=1MF5hq73_Xiu97sVMuuS-uex0mSvvhtFZh_Io6QJnkM,7529 -alembic/testing/requirements.py,sha256=dZUI-PhV2TOPuxFZQEUAAivXynitjWgCkJIipjGz-1w,4136 -alembic/testing/util.py,sha256=sA_4b_EQ0gfz2BozAw-eNYanJYBQh8oO66iLMVIs7tw,2577 -alembic/util/__init__.py,sha256=HCPvV0cpyhdIgyVRKDuYIP_S9EJE0adE97d92zW81p8,1473 -alembic/util/__pycache__/__init__.cpython-37.pyc,, -alembic/util/__pycache__/compat.cpython-37.pyc,, -alembic/util/__pycache__/exc.cpython-37.pyc,, -alembic/util/__pycache__/langhelpers.cpython-37.pyc,, -alembic/util/__pycache__/messaging.cpython-37.pyc,, -alembic/util/__pycache__/pyfiles.cpython-37.pyc,, -alembic/util/__pycache__/sqla_compat.cpython-37.pyc,, -alembic/util/compat.py,sha256=knJEgDHsBcUDzotYEvg23V0Gj2ZvS8i0fj8IpG4kBeM,10563 -alembic/util/exc.py,sha256=GBd-Fw-pvtsUNg6wrub7yhY2venv1MD1eMuJZebJiMY,40 -alembic/util/langhelpers.py,sha256=BXkBYZQxh96Jb2B3GKlZvnhJ0bBVRujatLosBlVeZmk,9246 -alembic/util/messaging.py,sha256=r8nAmwzbvGmO8Rtdxq-a6EJdEJEG1zP4l0eUEMTyItM,2633 -alembic/util/pyfiles.py,sha256=mpcMVT6fVcTSLFxB2fWD6RSExMW6jkv4p5FWDDF7yJs,3028 -alembic/util/sqla_compat.py,sha256=czTo3m1Mrs51BQOQCcKhBhsh9ATIZMplfjqAjoWFp1g,8718 diff --git a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/WHEEL b/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/WHEEL deleted file mode 100644 index 8b701e9..0000000 --- a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.6) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/entry_points.txt b/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/entry_points.txt deleted file mode 100644 index 27ac374..0000000 --- a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[console_scripts] -alembic = alembic.config:main - diff --git a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/top_level.txt deleted file mode 100644 index b5bd98d..0000000 --- a/venv/lib/python3.7/site-packages/alembic-1.4.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -alembic diff --git a/venv/lib/python3.7/site-packages/alembic/__init__.py b/venv/lib/python3.7/site-packages/alembic/__init__.py deleted file mode 100644 index 3bcbd64..0000000 --- a/venv/lib/python3.7/site-packages/alembic/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -import sys - -from . import context # noqa -from . 
import op # noqa -from .runtime import environment -from .runtime import migration - -__version__ = "1.4.2" - -sys.modules["alembic.migration"] = migration -sys.modules["alembic.environment"] = environment diff --git a/venv/lib/python3.7/site-packages/alembic/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 4f6900e..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/__pycache__/command.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/__pycache__/command.cpython-37.pyc deleted file mode 100644 index 7e14a68..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/__pycache__/command.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/__pycache__/config.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/__pycache__/config.cpython-37.pyc deleted file mode 100644 index 38d7149..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/__pycache__/config.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/__pycache__/context.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/__pycache__/context.cpython-37.pyc deleted file mode 100644 index 324c415..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/__pycache__/context.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/__pycache__/op.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/__pycache__/op.cpython-37.pyc deleted file mode 100644 index e8daa77..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/__pycache__/op.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/__init__.py b/venv/lib/python3.7/site-packages/alembic/autogenerate/__init__.py deleted file mode 100644 index a0f8ec2..0000000 --- a/venv/lib/python3.7/site-packages/alembic/autogenerate/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .api import _render_migration_diffs # noqa -from .api import compare_metadata # noqa -from .api import produce_migrations # noqa -from .api import render_python_code # noqa -from .api import RevisionContext # noqa -from .compare import _produce_net_changes # noqa -from .compare import comparators # noqa -from .render import render_op_text # noqa -from .render import renderers # noqa -from .rewriter import Rewriter # noqa diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 7d2ed9f..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/api.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/api.cpython-37.pyc deleted file mode 100644 index 58d9dbe..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/api.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/compare.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/compare.cpython-37.pyc deleted file mode 100644 index 1573a5b..0000000 Binary files 
a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/compare.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/render.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/render.cpython-37.pyc deleted file mode 100644 index a65dc28..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/render.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/rewriter.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/rewriter.cpython-37.pyc deleted file mode 100644 index 3a94a60..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/autogenerate/__pycache__/rewriter.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/api.py b/venv/lib/python3.7/site-packages/alembic/autogenerate/api.py deleted file mode 100644 index 2902f80..0000000 --- a/venv/lib/python3.7/site-packages/alembic/autogenerate/api.py +++ /dev/null @@ -1,513 +0,0 @@ -"""Provide the 'autogenerate' feature which can produce migration operations -automatically.""" - -import contextlib - -from sqlalchemy import inspect - -from . import compare - from . import render - from .. import util - from ..operations import ops - - -def compare_metadata(context, metadata): - """Compare a database schema to that given in a - :class:`~sqlalchemy.schema.MetaData` instance. - - The database connection is presented in the context - of a :class:`.MigrationContext` object, which - provides database connectivity as well as optional - comparison functions to use for datatypes and - server defaults - see the "autogenerate" arguments - at :meth:`.EnvironmentContext.configure` - for details on these. - - The return format is a list of "diff" directives, - each representing individual differences:: - - from alembic.migration import MigrationContext - from alembic.autogenerate import compare_metadata - from sqlalchemy.schema import SchemaItem - from sqlalchemy.types import TypeEngine - from sqlalchemy import (create_engine, MetaData, Column, - Integer, String, Table) - import pprint - - engine = create_engine("sqlite://") - - engine.execute(''' - create table foo ( - id integer not null primary key, - old_data varchar, - x integer - )''') - - engine.execute(''' - create table bar ( - data varchar - )''') - - metadata = MetaData() - Table('foo', metadata, - Column('id', Integer, primary_key=True), - Column('data', Integer), - Column('x', Integer, nullable=False) - ) - Table('bat', metadata, - Column('info', String) - ) - - mc = MigrationContext.configure(engine.connect()) - - diff = compare_metadata(mc, metadata) - pprint.pprint(diff, indent=2, width=20) - - Output:: - - [ ( 'add_table', - Table('bat', MetaData(bind=None), - Column('info', String(), table=<bat>), schema=None)), - ( 'remove_table', - Table(u'bar', MetaData(bind=None), - Column(u'data', VARCHAR(), table=<bar>), schema=None)), - ( 'add_column', - None, - 'foo', - Column('data', Integer(), table=<foo>)), - ( 'remove_column', - None, - 'foo', - Column(u'old_data', VARCHAR(), table=None)), - [ ( 'modify_nullable', - None, - 'foo', - u'x', - { 'existing_server_default': None, - 'existing_type': INTEGER()}, - True, - False)]] - - - :param context: a :class:`.MigrationContext` - instance. - :param metadata: a :class:`~sqlalchemy.schema.MetaData` - instance. - - ..
seealso:: - - :func:`.produce_migrations` - produces a :class:`.MigrationScript` - structure based on metadata comparison. - - """ - - migration_script = produce_migrations(context, metadata) - return migration_script.upgrade_ops.as_diffs() - - -def produce_migrations(context, metadata): - """Produce a :class:`.MigrationScript` structure based on schema - comparison. - - This function does essentially what :func:`.compare_metadata` does, - but then runs the resulting list of diffs to produce the full - :class:`.MigrationScript` object. For an example of what this looks like, - see the example in :ref:`customizing_revision`. - - .. versionadded:: 0.8.0 - - .. seealso:: - - :func:`.compare_metadata` - returns more fundamental "diff" - data from comparing a schema. - - """ - - autogen_context = AutogenContext(context, metadata=metadata) - - migration_script = ops.MigrationScript( - rev_id=None, - upgrade_ops=ops.UpgradeOps([]), - downgrade_ops=ops.DowngradeOps([]), - ) - - compare._populate_migration_script(autogen_context, migration_script) - - return migration_script - - -def render_python_code( - up_or_down_op, - sqlalchemy_module_prefix="sa.", - alembic_module_prefix="op.", - render_as_batch=False, - imports=(), - render_item=None, - migration_context=None, -): - """Render Python code given an :class:`.UpgradeOps` or - :class:`.DowngradeOps` object. - - This is a convenience function that can be used to test the - autogenerate output of a user-defined :class:`.MigrationScript` structure. - - """ - opts = { - "sqlalchemy_module_prefix": sqlalchemy_module_prefix, - "alembic_module_prefix": alembic_module_prefix, - "render_item": render_item, - "render_as_batch": render_as_batch, - } - - if migration_context is None: - from ..runtime.migration import MigrationContext - from sqlalchemy.engine.default import DefaultDialect - - migration_context = MigrationContext.configure( - dialect=DefaultDialect() - ) - - autogen_context = AutogenContext(migration_context, opts=opts) - autogen_context.imports = set(imports) - return render._indent( - render._render_cmd_body(up_or_down_op, autogen_context) - ) - - -def _render_migration_diffs(context, template_args): - """legacy, used by test_autogen_composition at the moment""" - - autogen_context = AutogenContext(context) - - upgrade_ops = ops.UpgradeOps([]) - compare._produce_net_changes(autogen_context, upgrade_ops) - - migration_script = ops.MigrationScript( - rev_id=None, - upgrade_ops=upgrade_ops, - downgrade_ops=upgrade_ops.reverse(), - ) - - render._render_python_into_templatevars( - autogen_context, migration_script, template_args - ) - - -class AutogenContext(object): - """Maintains configuration and state that's specific to an - autogenerate operation.""" - - metadata = None - """The :class:`~sqlalchemy.schema.MetaData` object - representing the destination. - - This object is the one that is passed within ``env.py`` - to the :paramref:`.EnvironmentContext.configure.target_metadata` - parameter. It represents the structure of :class:`.Table` and other - objects as stated in the current database model, and represents the - destination structure for the database being examined. - - While the :class:`~sqlalchemy.schema.MetaData` object is primarily - known as a collection of :class:`~sqlalchemy.schema.Table` objects, - it also has an :attr:`~sqlalchemy.schema.MetaData.info` dictionary - that may be used by end-user schemes to store additional schema-level - objects that are to be compared in custom autogeneration schemes. 
- - """ - - connection = None - """The :class:`~sqlalchemy.engine.base.Connection` object currently - connected to the database backend being compared. - - This is obtained from the :attr:`.MigrationContext.bind` and is - utimately set up in the ``env.py`` script. - - """ - - dialect = None - """The :class:`~sqlalchemy.engine.Dialect` object currently in use. - - This is normally obtained from the - :attr:`~sqlalchemy.engine.base.Connection.dialect` attribute. - - """ - - imports = None - """A ``set()`` which contains string Python import directives. - - The directives are to be rendered into the ``${imports}`` section - of a script template. The set is normally empty and can be modified - within hooks such as the - :paramref:`.EnvironmentContext.configure.render_item` hook. - - .. versionadded:: 0.8.3 - - .. seealso:: - - :ref:`autogen_render_types` - - """ - - migration_context = None - """The :class:`.MigrationContext` established by the ``env.py`` script.""" - - def __init__( - self, migration_context, metadata=None, opts=None, autogenerate=True - ): - - if ( - autogenerate - and migration_context is not None - and migration_context.as_sql - ): - raise util.CommandError( - "autogenerate can't use as_sql=True as it prevents querying " - "the database for schema information" - ) - - if opts is None: - opts = migration_context.opts - - self.metadata = metadata = ( - opts.get("target_metadata", None) if metadata is None else metadata - ) - - if ( - autogenerate - and metadata is None - and migration_context is not None - and migration_context.script is not None - ): - raise util.CommandError( - "Can't proceed with --autogenerate option; environment " - "script %s does not provide " - "a MetaData object or sequence of objects to the context." - % (migration_context.script.env_py_location) - ) - - include_symbol = opts.get("include_symbol", None) - include_object = opts.get("include_object", None) - - object_filters = [] - if include_symbol: - - def include_symbol_filter( - object_, name, type_, reflected, compare_to - ): - if type_ == "table": - return include_symbol(name, object_.schema) - else: - return True - - object_filters.append(include_symbol_filter) - if include_object: - object_filters.append(include_object) - - self._object_filters = object_filters - - self.migration_context = migration_context - if self.migration_context is not None: - self.connection = self.migration_context.bind - self.dialect = self.migration_context.dialect - - self.imports = set() - self.opts = opts - self._has_batch = False - - @util.memoized_property - def inspector(self): - return inspect(self.connection) - - @contextlib.contextmanager - def _within_batch(self): - self._has_batch = True - yield - self._has_batch = False - - def run_filters(self, object_, name, type_, reflected, compare_to): - """Run the context's object filters and return True if the targets - should be part of the autogenerate operation. - - This method should be run for every kind of object encountered within - an autogenerate operation, giving the environment the chance - to filter what objects should be included in the comparison. - The filters here are produced directly via the - :paramref:`.EnvironmentContext.configure.include_object` - and :paramref:`.EnvironmentContext.configure.include_symbol` - functions, if present. 
- - """ - for fn in self._object_filters: - if not fn(object_, name, type_, reflected, compare_to): - return False - else: - return True - - @util.memoized_property - def sorted_tables(self): - """Return an aggregate of the :attr:`.MetaData.sorted_tables` collection(s). - - For a sequence of :class:`.MetaData` objects, this - concatenates the :attr:`.MetaData.sorted_tables` collection - for each individual :class:`.MetaData` in the order of the - sequence. It does **not** collate the sorted tables collections. - - .. versionadded:: 0.9.0 - - """ - result = [] - for m in util.to_list(self.metadata): - result.extend(m.sorted_tables) - return result - - @util.memoized_property - def table_key_to_table(self): - """Return an aggregate of the :attr:`.MetaData.tables` dictionaries. - - The :attr:`.MetaData.tables` collection is a dictionary of table key - to :class:`.Table`; this method aggregates the dictionary across - multiple :class:`.MetaData` objects into one dictionary. - - Duplicate table keys are **not** supported; if two :class:`.MetaData` - objects contain the same table key, an exception is raised. - - .. versionadded:: 0.9.0 - - """ - result = {} - for m in util.to_list(self.metadata): - intersect = set(result).intersection(set(m.tables)) - if intersect: - raise ValueError( - "Duplicate table keys across multiple " - "MetaData objects: %s" - % (", ".join('"%s"' % key for key in sorted(intersect))) - ) - - result.update(m.tables) - return result - - -class RevisionContext(object): - """Maintains configuration and state that's specific to a revision - file generation operation.""" - - def __init__( - self, - config, - script_directory, - command_args, - process_revision_directives=None, - ): - self.config = config - self.script_directory = script_directory - self.command_args = command_args - self.process_revision_directives = process_revision_directives - self.template_args = { - "config": config # Let templates use config for - # e.g. 
multiple databases - } - self.generated_revisions = [self._default_revision()] - - def _to_script(self, migration_script): - template_args = {} - for k, v in self.template_args.items(): - template_args.setdefault(k, v) - - if getattr(migration_script, "_needs_render", False): - autogen_context = self._last_autogen_context - - # clear out existing imports if we are doing multiple - # renders - autogen_context.imports = set() - if migration_script.imports: - autogen_context.imports.update(migration_script.imports) - render._render_python_into_templatevars( - autogen_context, migration_script, template_args - ) - - return self.script_directory.generate_revision( - migration_script.rev_id, - migration_script.message, - refresh=True, - head=migration_script.head, - splice=migration_script.splice, - branch_labels=migration_script.branch_label, - version_path=migration_script.version_path, - depends_on=migration_script.depends_on, - **template_args - ) - - def run_autogenerate(self, rev, migration_context): - self._run_environment(rev, migration_context, True) - - def run_no_autogenerate(self, rev, migration_context): - self._run_environment(rev, migration_context, False) - - def _run_environment(self, rev, migration_context, autogenerate): - if autogenerate: - if self.command_args["sql"]: - raise util.CommandError( - "Using --sql with --autogenerate does not make any sense" - ) - if set(self.script_directory.get_revisions(rev)) != set( - self.script_directory.get_revisions("heads") - ): - raise util.CommandError("Target database is not up to date.") - - upgrade_token = migration_context.opts["upgrade_token"] - downgrade_token = migration_context.opts["downgrade_token"] - - migration_script = self.generated_revisions[-1] - if not getattr(migration_script, "_needs_render", False): - migration_script.upgrade_ops_list[-1].upgrade_token = upgrade_token - migration_script.downgrade_ops_list[ - -1 - ].downgrade_token = downgrade_token - migration_script._needs_render = True - else: - migration_script._upgrade_ops.append( - ops.UpgradeOps([], upgrade_token=upgrade_token) - ) - migration_script._downgrade_ops.append( - ops.DowngradeOps([], downgrade_token=downgrade_token) - ) - - self._last_autogen_context = autogen_context = AutogenContext( - migration_context, autogenerate=autogenerate - ) - - if autogenerate: - compare._populate_migration_script( - autogen_context, migration_script - ) - - if self.process_revision_directives: - self.process_revision_directives( - migration_context, rev, self.generated_revisions - ) - - hook = migration_context.opts["process_revision_directives"] - if hook: - hook(migration_context, rev, self.generated_revisions) - - for migration_script in self.generated_revisions: - migration_script._needs_render = True - - def _default_revision(self): - op = ops.MigrationScript( - rev_id=self.command_args["rev_id"] or util.rev_id(), - message=self.command_args["message"], - upgrade_ops=ops.UpgradeOps([]), - downgrade_ops=ops.DowngradeOps([]), - head=self.command_args["head"], - splice=self.command_args["splice"], - branch_label=self.command_args["branch_label"], - version_path=self.command_args["version_path"], - depends_on=self.command_args["depends_on"], - ) - return op - - def generate_scripts(self): - for generated_revision in self.generated_revisions: - yield self._to_script(generated_revision) diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/compare.py b/venv/lib/python3.7/site-packages/alembic/autogenerate/compare.py deleted file mode 100644 index 
1b8fb52..0000000 --- a/venv/lib/python3.7/site-packages/alembic/autogenerate/compare.py +++ /dev/null @@ -1,1209 +0,0 @@ -import contextlib -import logging -import re - -from sqlalchemy import event -from sqlalchemy import inspect -from sqlalchemy import schema as sa_schema -from sqlalchemy import types as sqltypes -from sqlalchemy.util import OrderedSet - -from alembic.ddl.base import _fk_spec -from .render import _user_defined_render -from .. import util -from ..operations import ops -from ..util import compat -from ..util import sqla_compat - -log = logging.getLogger(__name__) - - -def _populate_migration_script(autogen_context, migration_script): - upgrade_ops = migration_script.upgrade_ops_list[-1] - downgrade_ops = migration_script.downgrade_ops_list[-1] - - _produce_net_changes(autogen_context, upgrade_ops) - upgrade_ops.reverse_into(downgrade_ops) - - -comparators = util.Dispatcher(uselist=True) - - -def _produce_net_changes(autogen_context, upgrade_ops): - - connection = autogen_context.connection - include_schemas = autogen_context.opts.get("include_schemas", False) - - inspector = inspect(connection) - - default_schema = connection.dialect.default_schema_name - if include_schemas: - schemas = set(inspector.get_schema_names()) - # replace default schema name with None - schemas.discard("information_schema") - # replace the "default" schema with None - schemas.discard(default_schema) - schemas.add(None) - else: - schemas = [None] - - comparators.dispatch("schema", autogen_context.dialect.name)( - autogen_context, upgrade_ops, schemas - ) - - -@comparators.dispatch_for("schema") -def _autogen_for_tables(autogen_context, upgrade_ops, schemas): - inspector = autogen_context.inspector - - conn_table_names = set() - - version_table_schema = ( - autogen_context.migration_context.version_table_schema - ) - version_table = autogen_context.migration_context.version_table - - for s in schemas: - tables = set(inspector.get_table_names(schema=s)) - if s == version_table_schema: - tables = tables.difference( - [autogen_context.migration_context.version_table] - ) - conn_table_names.update(zip([s] * len(tables), tables)) - - metadata_table_names = OrderedSet( - [(table.schema, table.name) for table in autogen_context.sorted_tables] - ).difference([(version_table_schema, version_table)]) - - _compare_tables( - conn_table_names, - metadata_table_names, - inspector, - upgrade_ops, - autogen_context, - ) - - -def _compare_tables( - conn_table_names, - metadata_table_names, - inspector, - upgrade_ops, - autogen_context, -): - - default_schema = inspector.bind.dialect.default_schema_name - - # tables coming from the connection will not have "schema" - # set if it matches default_schema_name; so we need a list - # of table names from local metadata that also have "None" if schema - # == default_schema_name. Most setups will be like this anyway but - # some are not (see #170) - metadata_table_names_no_dflt_schema = OrderedSet( - [ - (schema if schema != default_schema else None, tname) - for schema, tname in metadata_table_names - ] - ) - - # to adjust for the MetaData collection storing the tables either - # as "schemaname.tablename" or just "tablename", create a new lookup - # which will match the "non-default-schema" keys to the Table object. 
- tname_to_table = dict( - ( - no_dflt_schema, - autogen_context.table_key_to_table[ - sa_schema._get_table_key(tname, schema) - ], - ) - for no_dflt_schema, (schema, tname) in zip( - metadata_table_names_no_dflt_schema, metadata_table_names - ) - ) - metadata_table_names = metadata_table_names_no_dflt_schema - - for s, tname in metadata_table_names.difference(conn_table_names): - name = "%s.%s" % (s, tname) if s else tname - metadata_table = tname_to_table[(s, tname)] - if autogen_context.run_filters( - metadata_table, tname, "table", False, None - ): - upgrade_ops.ops.append( - ops.CreateTableOp.from_table(metadata_table) - ) - log.info("Detected added table %r", name) - modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) - - comparators.dispatch("table")( - autogen_context, - modify_table_ops, - s, - tname, - None, - metadata_table, - ) - if not modify_table_ops.is_empty(): - upgrade_ops.ops.append(modify_table_ops) - - removal_metadata = sa_schema.MetaData() - for s, tname in conn_table_names.difference(metadata_table_names): - name = sa_schema._get_table_key(tname, s) - exists = name in removal_metadata.tables - t = sa_schema.Table(tname, removal_metadata, schema=s) - - if not exists: - event.listen( - t, - "column_reflect", - # fmt: off - autogen_context.migration_context.impl. - _compat_autogen_column_reflect - (inspector), - # fmt: on - ) - inspector.reflecttable(t, None) - if autogen_context.run_filters(t, tname, "table", True, None): - - modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) - - comparators.dispatch("table")( - autogen_context, modify_table_ops, s, tname, t, None - ) - if not modify_table_ops.is_empty(): - upgrade_ops.ops.append(modify_table_ops) - - upgrade_ops.ops.append(ops.DropTableOp.from_table(t)) - log.info("Detected removed table %r", name) - - existing_tables = conn_table_names.intersection(metadata_table_names) - - existing_metadata = sa_schema.MetaData() - conn_column_info = {} - for s, tname in existing_tables: - name = sa_schema._get_table_key(tname, s) - exists = name in existing_metadata.tables - t = sa_schema.Table(tname, existing_metadata, schema=s) - if not exists: - event.listen( - t, - "column_reflect", - # fmt: off - autogen_context.migration_context.impl. 
- _compat_autogen_column_reflect(inspector), - # fmt: on - ) - inspector.reflecttable(t, None) - conn_column_info[(s, tname)] = t - - for s, tname in sorted(existing_tables, key=lambda x: (x[0] or "", x[1])): - s = s or None - name = "%s.%s" % (s, tname) if s else tname - metadata_table = tname_to_table[(s, tname)] - conn_table = existing_metadata.tables[name] - - if autogen_context.run_filters( - metadata_table, tname, "table", False, conn_table - ): - - modify_table_ops = ops.ModifyTableOps(tname, [], schema=s) - with _compare_columns( - s, - tname, - conn_table, - metadata_table, - modify_table_ops, - autogen_context, - inspector, - ): - - comparators.dispatch("table")( - autogen_context, - modify_table_ops, - s, - tname, - conn_table, - metadata_table, - ) - - if not modify_table_ops.is_empty(): - upgrade_ops.ops.append(modify_table_ops) - - -def _make_index(params, conn_table): - ix = sa_schema.Index( - params["name"], - *[conn_table.c[cname] for cname in params["column_names"]], - unique=params["unique"] - ) - if "duplicates_constraint" in params: - ix.info["duplicates_constraint"] = params["duplicates_constraint"] - return ix - - -def _make_unique_constraint(params, conn_table): - uq = sa_schema.UniqueConstraint( - *[conn_table.c[cname] for cname in params["column_names"]], - name=params["name"] - ) - if "duplicates_index" in params: - uq.info["duplicates_index"] = params["duplicates_index"] - - return uq - - -def _make_foreign_key(params, conn_table): - tname = params["referred_table"] - if params["referred_schema"]: - tname = "%s.%s" % (params["referred_schema"], tname) - - options = params.get("options", {}) - - const = sa_schema.ForeignKeyConstraint( - [conn_table.c[cname] for cname in params["constrained_columns"]], - ["%s.%s" % (tname, n) for n in params["referred_columns"]], - onupdate=options.get("onupdate"), - ondelete=options.get("ondelete"), - deferrable=options.get("deferrable"), - initially=options.get("initially"), - name=params["name"], - ) - # needed by 0.7 - conn_table.append_constraint(const) - return const - - -@contextlib.contextmanager -def _compare_columns( - schema, - tname, - conn_table, - metadata_table, - modify_table_ops, - autogen_context, - inspector, -): - name = "%s.%s" % (schema, tname) if schema else tname - metadata_cols_by_name = dict( - (c.name, c) for c in metadata_table.c if not c.system - ) - conn_col_names = dict((c.name, c) for c in conn_table.c) - metadata_col_names = OrderedSet(sorted(metadata_cols_by_name)) - - for cname in metadata_col_names.difference(conn_col_names): - if autogen_context.run_filters( - metadata_cols_by_name[cname], cname, "column", False, None - ): - modify_table_ops.ops.append( - ops.AddColumnOp.from_column_and_tablename( - schema, tname, metadata_cols_by_name[cname] - ) - ) - log.info("Detected added column '%s.%s'", name, cname) - - for colname in metadata_col_names.intersection(conn_col_names): - metadata_col = metadata_cols_by_name[colname] - conn_col = conn_table.c[colname] - if not autogen_context.run_filters( - metadata_col, colname, "column", False, conn_col - ): - continue - alter_column_op = ops.AlterColumnOp(tname, colname, schema=schema) - - comparators.dispatch("column")( - autogen_context, - alter_column_op, - schema, - tname, - colname, - conn_col, - metadata_col, - ) - - if alter_column_op.has_changes(): - modify_table_ops.ops.append(alter_column_op) - - yield - - for cname in set(conn_col_names).difference(metadata_col_names): - if autogen_context.run_filters( - conn_table.c[cname], cname, "column", 
True, None - ): - modify_table_ops.ops.append( - ops.DropColumnOp.from_column_and_tablename( - schema, tname, conn_table.c[cname] - ) - ) - log.info("Detected removed column '%s.%s'", name, cname) - - -class _constraint_sig(object): - def md_name_to_sql_name(self, context): - return sqla_compat._get_constraint_final_name( - self.const, context.dialect - ) - - def __eq__(self, other): - return self.const == other.const - - def __ne__(self, other): - return self.const != other.const - - def __hash__(self): - return hash(self.const) - - -class _uq_constraint_sig(_constraint_sig): - is_index = False - is_unique = True - - def __init__(self, const): - self.const = const - self.name = const.name - self.sig = tuple(sorted([col.name for col in const.columns])) - - @property - def column_names(self): - return [col.name for col in self.const.columns] - - -class _ix_constraint_sig(_constraint_sig): - is_index = True - - def __init__(self, const): - self.const = const - self.name = const.name - self.sig = tuple(sorted([col.name for col in const.columns])) - self.is_unique = bool(const.unique) - - def md_name_to_sql_name(self, context): - return sqla_compat._get_constraint_final_name( - self.const, context.dialect - ) - - @property - def column_names(self): - return sqla_compat._get_index_column_names(self.const) - - -class _fk_constraint_sig(_constraint_sig): - def __init__(self, const, include_options=False): - self.const = const - self.name = const.name - - ( - self.source_schema, - self.source_table, - self.source_columns, - self.target_schema, - self.target_table, - self.target_columns, - onupdate, - ondelete, - deferrable, - initially, - ) = _fk_spec(const) - - self.sig = ( - self.source_schema, - self.source_table, - tuple(self.source_columns), - self.target_schema, - self.target_table, - tuple(self.target_columns), - ) - if include_options: - self.sig += ( - (None if onupdate.lower() == "no action" else onupdate.lower()) - if onupdate - else None, - (None if ondelete.lower() == "no action" else ondelete.lower()) - if ondelete - else None, - # convert initially + deferrable into one three-state value - "initially_deferrable" - if initially and initially.lower() == "deferred" - else "deferrable" - if deferrable - else "not deferrable", - ) - - -@comparators.dispatch_for("table") -def _compare_indexes_and_uniques( - autogen_context, modify_ops, schema, tname, conn_table, metadata_table -): - - inspector = autogen_context.inspector - is_create_table = conn_table is None - is_drop_table = metadata_table is None - - # 1a. get raw indexes and unique constraints from metadata ... - if metadata_table is not None: - metadata_unique_constraints = set( - uq - for uq in metadata_table.constraints - if isinstance(uq, sa_schema.UniqueConstraint) - ) - metadata_indexes = set(metadata_table.indexes) - else: - metadata_unique_constraints = set() - metadata_indexes = set() - - conn_uniques = conn_indexes = frozenset() - - supports_unique_constraints = False - - unique_constraints_duplicate_unique_indexes = False - - if conn_table is not None: - # 1b. ... 
and from connection, if the table exists - if hasattr(inspector, "get_unique_constraints"): - try: - conn_uniques = inspector.get_unique_constraints( - tname, schema=schema - ) - supports_unique_constraints = True - except NotImplementedError: - pass - except TypeError: - # number of arguments is off for the base - # method in SQLAlchemy due to the cache decorator - # not being present - pass - else: - for uq in conn_uniques: - if uq.get("duplicates_index"): - unique_constraints_duplicate_unique_indexes = True - try: - conn_indexes = inspector.get_indexes(tname, schema=schema) - except NotImplementedError: - pass - - # 2. convert conn-level objects from raw inspector records - # into schema objects - if is_drop_table: - # for DROP TABLE uniques are inline, don't need them - conn_uniques = set() - else: - conn_uniques = set( - _make_unique_constraint(uq_def, conn_table) - for uq_def in conn_uniques - ) - - conn_indexes = set(_make_index(ix, conn_table) for ix in conn_indexes) - - # 2a. if the dialect dupes unique indexes as unique constraints - # (mysql and oracle), correct for that - - if unique_constraints_duplicate_unique_indexes: - _correct_for_uq_duplicates_uix( - conn_uniques, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - autogen_context.dialect, - ) - - # 3. give the dialect a chance to omit indexes and constraints that - # we know are either added implicitly by the DB or that the DB - # can't accurately report on - autogen_context.migration_context.impl.correct_for_autogen_constraints( - conn_uniques, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ) - - # 4. organize the constraints into "signature" collections, the - # _constraint_sig() objects provide a consistent facade over both - # Index and UniqueConstraint so we can easily work with them - # interchangeably - metadata_unique_constraints = set( - _uq_constraint_sig(uq) for uq in metadata_unique_constraints - ) - - metadata_indexes = set(_ix_constraint_sig(ix) for ix in metadata_indexes) - - conn_unique_constraints = set( - _uq_constraint_sig(uq) for uq in conn_uniques - ) - - conn_indexes = set(_ix_constraint_sig(ix) for ix in conn_indexes) - - # 5. index things by name, for those objects that have names - metadata_names = dict( - (c.md_name_to_sql_name(autogen_context), c) - for c in metadata_unique_constraints.union(metadata_indexes) - if isinstance(c, _ix_constraint_sig) - or sqla_compat._constraint_is_named(c.const, autogen_context.dialect) - ) - - conn_uniques_by_name = dict((c.name, c) for c in conn_unique_constraints) - conn_indexes_by_name = dict((c.name, c) for c in conn_indexes) - conn_names = dict( - (c.name, c) - for c in conn_unique_constraints.union(conn_indexes) - if c.name is not None - ) - - doubled_constraints = dict( - (name, (conn_uniques_by_name[name], conn_indexes_by_name[name])) - for name in set(conn_uniques_by_name).intersection( - conn_indexes_by_name - ) - ) - - # 6. index things by "column signature", to help with unnamed unique - # constraints. - conn_uniques_by_sig = dict((uq.sig, uq) for uq in conn_unique_constraints) - metadata_uniques_by_sig = dict( - (uq.sig, uq) for uq in metadata_unique_constraints - ) - metadata_indexes_by_sig = dict((ix.sig, ix) for ix in metadata_indexes) - unnamed_metadata_uniques = dict( - (uq.sig, uq) - for uq in metadata_unique_constraints - if not sqla_compat._constraint_is_named( - uq.const, autogen_context.dialect - ) - ) - - # assumptions: - # 1. 
a unique constraint or an index from the connection *always* - # has a name. - # 2. an index on the metadata side *always* has a name. - # 3. a unique constraint on the metadata side *might* have a name. - # 4. The backend may double up indexes as unique constraints and - # vice versa (e.g. MySQL, Postgresql) - - def obj_added(obj): - if obj.is_index: - if autogen_context.run_filters( - obj.const, obj.name, "index", False, None - ): - modify_ops.ops.append(ops.CreateIndexOp.from_index(obj.const)) - log.info( - "Detected added index '%s' on %s", - obj.name, - ", ".join(["'%s'" % obj.column_names]), - ) - else: - if not supports_unique_constraints: - # can't report unique indexes as added if we don't - # detect them - return - if is_create_table or is_drop_table: - # unique constraints are created inline with table defs - return - if autogen_context.run_filters( - obj.const, obj.name, "unique_constraint", False, None - ): - modify_ops.ops.append( - ops.AddConstraintOp.from_constraint(obj.const) - ) - log.info( - "Detected added unique constraint '%s' on %s", - obj.name, - ", ".join(["'%s'" % obj.column_names]), - ) - - def obj_removed(obj): - if obj.is_index: - if obj.is_unique and not supports_unique_constraints: - # many databases double up unique constraints - # as unique indexes. without that list we can't - # be sure what we're doing here - return - - if autogen_context.run_filters( - obj.const, obj.name, "index", True, None - ): - modify_ops.ops.append(ops.DropIndexOp.from_index(obj.const)) - log.info( - "Detected removed index '%s' on '%s'", obj.name, tname - ) - else: - if is_create_table or is_drop_table: - # if the whole table is being dropped, we don't need to - # consider unique constraint separately - return - if autogen_context.run_filters( - obj.const, obj.name, "unique_constraint", True, None - ): - modify_ops.ops.append( - ops.DropConstraintOp.from_constraint(obj.const) - ) - log.info( - "Detected removed unique constraint '%s' on '%s'", - obj.name, - tname, - ) - - def obj_changed(old, new, msg): - if old.is_index: - if autogen_context.run_filters( - new.const, new.name, "index", False, old.const - ): - log.info( - "Detected changed index '%s' on '%s':%s", - old.name, - tname, - ", ".join(msg), - ) - modify_ops.ops.append(ops.DropIndexOp.from_index(old.const)) - modify_ops.ops.append(ops.CreateIndexOp.from_index(new.const)) - else: - if autogen_context.run_filters( - new.const, new.name, "unique_constraint", False, old.const - ): - log.info( - "Detected changed unique constraint '%s' on '%s':%s", - old.name, - tname, - ", ".join(msg), - ) - modify_ops.ops.append( - ops.DropConstraintOp.from_constraint(old.const) - ) - modify_ops.ops.append( - ops.AddConstraintOp.from_constraint(new.const) - ) - - for added_name in sorted(set(metadata_names).difference(conn_names)): - obj = metadata_names[added_name] - obj_added(obj) - - for existing_name in sorted(set(metadata_names).intersection(conn_names)): - metadata_obj = metadata_names[existing_name] - - if existing_name in doubled_constraints: - conn_uq, conn_idx = doubled_constraints[existing_name] - if metadata_obj.is_index: - conn_obj = conn_idx - else: - conn_obj = conn_uq - else: - conn_obj = conn_names[existing_name] - - if conn_obj.is_index != metadata_obj.is_index: - obj_removed(conn_obj) - obj_added(metadata_obj) - else: - msg = [] - if conn_obj.is_unique != metadata_obj.is_unique: - msg.append( - " unique=%r to unique=%r" - % (conn_obj.is_unique, metadata_obj.is_unique) - ) - if conn_obj.sig != metadata_obj.sig: - 
msg.append( - " columns %r to %r" % (conn_obj.sig, metadata_obj.sig) - ) - - if msg: - obj_changed(conn_obj, metadata_obj, msg) - - for removed_name in sorted(set(conn_names).difference(metadata_names)): - conn_obj = conn_names[removed_name] - if not conn_obj.is_index and conn_obj.sig in unnamed_metadata_uniques: - continue - elif removed_name in doubled_constraints: - if ( - conn_obj.sig not in metadata_indexes_by_sig - and conn_obj.sig not in metadata_uniques_by_sig - ): - conn_uq, conn_idx = doubled_constraints[removed_name] - obj_removed(conn_uq) - obj_removed(conn_idx) - else: - obj_removed(conn_obj) - - for uq_sig in unnamed_metadata_uniques: - if uq_sig not in conn_uniques_by_sig: - obj_added(unnamed_metadata_uniques[uq_sig]) - - -def _correct_for_uq_duplicates_uix( - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - dialect, -): - # dedupe unique indexes vs. constraints, since MySQL / Oracle - # doesn't really have unique constraints as a separate construct. - # but look in the metadata and try to maintain constructs - # that already seem to be defined one way or the other - # on that side. This logic was formerly local to MySQL dialect, - # generalized to Oracle and others. See #276 - - # resolve final rendered name for unique constraints defined in the - # metadata. this includes truncation of long names. naming convention - # names currently should already be set as cons.name, however leave this - # to the sqla_compat to decide. - metadata_cons_names = [ - (sqla_compat._get_constraint_final_name(cons, dialect), cons) - for cons in metadata_unique_constraints - ] - - metadata_uq_names = set( - name for name, cons in metadata_cons_names if name is not None - ) - - unnamed_metadata_uqs = set( - [ - _uq_constraint_sig(cons).sig - for name, cons in metadata_cons_names - if name is None - ] - ) - - metadata_ix_names = set( - [ - sqla_compat._get_constraint_final_name(cons, dialect) - for cons in metadata_indexes - if cons.unique - ] - ) - - # for reflection side, names are in their final database form - # already since they're from the database - conn_ix_names = dict( - (cons.name, cons) for cons in conn_indexes if cons.unique - ) - - uqs_dupe_indexes = dict( - (cons.name, cons) - for cons in conn_unique_constraints - if cons.info["duplicates_index"] - ) - - for overlap in uqs_dupe_indexes: - if overlap not in metadata_uq_names: - if ( - _uq_constraint_sig(uqs_dupe_indexes[overlap]).sig - not in unnamed_metadata_uqs - ): - - conn_unique_constraints.discard(uqs_dupe_indexes[overlap]) - elif overlap not in metadata_ix_names: - conn_indexes.discard(conn_ix_names[overlap]) - - -@comparators.dispatch_for("column") -def _compare_nullable( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - # work around SQLAlchemy issue #3023 - if metadata_col.primary_key: - return - - metadata_col_nullable = metadata_col.nullable - conn_col_nullable = conn_col.nullable - alter_column_op.existing_nullable = conn_col_nullable - - if conn_col_nullable is not metadata_col_nullable: - alter_column_op.modify_nullable = metadata_col_nullable - log.info( - "Detected %s on column '%s.%s'", - "NULL" if metadata_col_nullable else "NOT NULL", - tname, - cname, - ) - - -@comparators.dispatch_for("column") -def _setup_autoincrement( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - if metadata_col.table._autoincrement_column is metadata_col: - 
alter_column_op.kw["autoincrement"] = True - elif metadata_col.autoincrement is True: - alter_column_op.kw["autoincrement"] = True - elif metadata_col.autoincrement is False: - alter_column_op.kw["autoincrement"] = False - - -@comparators.dispatch_for("column") -def _compare_type( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - conn_type = conn_col.type - alter_column_op.existing_type = conn_type - metadata_type = metadata_col.type - if conn_type._type_affinity is sqltypes.NullType: - log.info( - "Couldn't determine database type " "for column '%s.%s'", - tname, - cname, - ) - return - if metadata_type._type_affinity is sqltypes.NullType: - log.info( - "Column '%s.%s' has no type within " "the model; can't compare", - tname, - cname, - ) - return - - isdiff = autogen_context.migration_context._compare_type( - conn_col, metadata_col - ) - - if isdiff: - alter_column_op.modify_type = metadata_type - log.info( - "Detected type change from %r to %r on '%s.%s'", - conn_type, - metadata_type, - tname, - cname, - ) - - -def _render_server_default_for_compare( - metadata_default, metadata_col, autogen_context -): - rendered = _user_defined_render( - "server_default", metadata_default, autogen_context - ) - if rendered is not False: - return rendered - - if isinstance(metadata_default, sa_schema.DefaultClause): - if isinstance(metadata_default.arg, compat.string_types): - metadata_default = metadata_default.arg - else: - metadata_default = str( - metadata_default.arg.compile( - dialect=autogen_context.dialect, - compile_kwargs={"literal_binds": True}, - ) - ) - if isinstance(metadata_default, compat.string_types): - if metadata_col.type._type_affinity is sqltypes.String: - metadata_default = re.sub(r"^'|'$", "", metadata_default) - return repr(metadata_default) - else: - return metadata_default - else: - return None - - -def _normalize_computed_default(sqltext): - """we want to warn if a computed sql expression has changed. however - we don't want false positives and the warning is not that critical. - so filter out most forms of variability from the SQL text. - - """ - - return re.sub(r"[ \(\)'\"`\[\]]", "", sqltext).lower() - - -def _compare_computed_default( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - rendered_metadata_default = str( - metadata_col.server_default.sqltext.compile( - dialect=autogen_context.dialect, - compile_kwargs={"literal_binds": True}, - ) - ) - - # since we cannot change computed columns, we do only a crude comparison - # here where we try to eliminate syntactical differences in order to - # get a minimal comparison just to emit a warning. 
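- # as an illustration: "A + B" and "(a + b)" both normalize to "a+b",
- # so differences in spacing, parenthesization, quoting or case alone
- # will not emit the warning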
- - rendered_metadata_default = _normalize_computed_default( - rendered_metadata_default - ) - - if isinstance(conn_col.server_default, sa_schema.Computed): - rendered_conn_default = str( - conn_col.server_default.sqltext.compile( - dialect=autogen_context.dialect, - compile_kwargs={"literal_binds": True}, - ) - ) - if rendered_conn_default is None: - rendered_conn_default = "" - else: - rendered_conn_default = _normalize_computed_default( - rendered_conn_default - ) - else: - rendered_conn_default = "" - - if rendered_metadata_default != rendered_conn_default: - _warn_computed_not_supported(tname, cname) - - -def _warn_computed_not_supported(tname, cname): - util.warn("Computed default on %s.%s cannot be modified" % (tname, cname)) - - -@comparators.dispatch_for("column") -def _compare_server_default( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - metadata_default = metadata_col.server_default - conn_col_default = conn_col.server_default - if conn_col_default is None and metadata_default is None: - return False - - if sqla_compat.has_computed and isinstance( - metadata_default, sa_schema.Computed - ): - # return False in case of a computed column as the server - # default. Note that DDL for adding or removing "GENERATED AS" from - # an existing column is not currently known for any backend. - # Once SQLAlchemy can reflect "GENERATED" as the "computed" element, - # we would also want to ignore and/or warn for changes vs. the - # metadata (or support backend specific DDL if applicable). - if not sqla_compat.has_computed_reflection: - return False - - else: - return _compare_computed_default( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, - ) - rendered_metadata_default = _render_server_default_for_compare( - metadata_default, metadata_col, autogen_context - ) - - if sqla_compat.has_computed_reflection and isinstance( - conn_col.server_default, sa_schema.Computed - ): - _warn_computed_not_supported(tname, cname) - return False - else: - rendered_conn_default = ( - conn_col.server_default.arg.text - if conn_col.server_default - else None - ) - - alter_column_op.existing_server_default = conn_col_default - - isdiff = autogen_context.migration_context._compare_server_default( - conn_col, - metadata_col, - rendered_metadata_default, - rendered_conn_default, - ) - if isdiff: - alter_column_op.modify_server_default = metadata_default - log.info("Detected server default on column '%s.%s'", tname, cname) - - -@comparators.dispatch_for("column") -def _compare_column_comment( - autogen_context, - alter_column_op, - schema, - tname, - cname, - conn_col, - metadata_col, -): - - if not sqla_compat._dialect_supports_comments(autogen_context.dialect): - return - - metadata_comment = metadata_col.comment - conn_col_comment = conn_col.comment - if conn_col_comment is None and metadata_comment is None: - return False - - alter_column_op.existing_comment = conn_col_comment - - if conn_col_comment != metadata_comment: - alter_column_op.modify_comment = metadata_comment - log.info("Detected column comment '%s.%s'", tname, cname) - - -@comparators.dispatch_for("table") -def _compare_foreign_keys( - autogen_context, - modify_table_ops, - schema, - tname, - conn_table, - metadata_table, -): - - # if we're doing CREATE TABLE, all FKs are created - # inline within the table def - if conn_table is None or metadata_table is None: - return - - inspector = autogen_context.inspector - metadata_fks = set( - fk - for fk in 
metadata_table.constraints - if isinstance(fk, sa_schema.ForeignKeyConstraint) - ) - - conn_fks = inspector.get_foreign_keys(tname, schema=schema) - - backend_reflects_fk_options = conn_fks and "options" in conn_fks[0] - - conn_fks = set(_make_foreign_key(const, conn_table) for const in conn_fks) - - # give the dialect a chance to correct the FKs to match more - # closely - autogen_context.migration_context.impl.correct_for_autogen_foreignkeys( - conn_fks, metadata_fks - ) - - metadata_fks = set( - _fk_constraint_sig(fk, include_options=backend_reflects_fk_options) - for fk in metadata_fks - ) - - conn_fks = set( - _fk_constraint_sig(fk, include_options=backend_reflects_fk_options) - for fk in conn_fks - ) - - conn_fks_by_sig = dict((c.sig, c) for c in conn_fks) - metadata_fks_by_sig = dict((c.sig, c) for c in metadata_fks) - - metadata_fks_by_name = dict( - (c.name, c) for c in metadata_fks if c.name is not None - ) - conn_fks_by_name = dict( - (c.name, c) for c in conn_fks if c.name is not None - ) - - def _add_fk(obj, compare_to): - if autogen_context.run_filters( - obj.const, obj.name, "foreign_key_constraint", False, compare_to - ): - modify_table_ops.ops.append( - ops.CreateForeignKeyOp.from_constraint(const.const) - ) - - log.info( - "Detected added foreign key (%s)(%s) on table %s%s", - ", ".join(obj.source_columns), - ", ".join(obj.target_columns), - "%s." % obj.source_schema if obj.source_schema else "", - obj.source_table, - ) - - def _remove_fk(obj, compare_to): - if autogen_context.run_filters( - obj.const, obj.name, "foreign_key_constraint", True, compare_to - ): - modify_table_ops.ops.append( - ops.DropConstraintOp.from_constraint(obj.const) - ) - log.info( - "Detected removed foreign key (%s)(%s) on table %s%s", - ", ".join(obj.source_columns), - ", ".join(obj.target_columns), - "%s." % obj.source_schema if obj.source_schema else "", - obj.source_table, - ) - - # so far it appears we don't need to do this by name at all. - # SQLite doesn't preserve constraint names anyway - - for removed_sig in set(conn_fks_by_sig).difference(metadata_fks_by_sig): - const = conn_fks_by_sig[removed_sig] - if removed_sig not in metadata_fks_by_sig: - compare_to = ( - metadata_fks_by_name[const.name].const - if const.name in metadata_fks_by_name - else None - ) - _remove_fk(const, compare_to) - - for added_sig in set(metadata_fks_by_sig).difference(conn_fks_by_sig): - const = metadata_fks_by_sig[added_sig] - if added_sig not in conn_fks_by_sig: - compare_to = ( - conn_fks_by_name[const.name].const - if const.name in conn_fks_by_name - else None - ) - _add_fk(const, compare_to) - - -@comparators.dispatch_for("table") -def _compare_table_comment( - autogen_context, - modify_table_ops, - schema, - tname, - conn_table, - metadata_table, -): - - if not sqla_compat._dialect_supports_comments(autogen_context.dialect): - return - - # if we're doing CREATE TABLE, comments will be created inline - # with the create_table op. 
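- # the same applies to DROP TABLE, so comments are only compared when
- # the table is present on both the metadata and the connection side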
- if conn_table is None or metadata_table is None: - return - - if conn_table.comment is None and metadata_table.comment is None: - return - - if metadata_table.comment is None and conn_table.comment is not None: - modify_table_ops.ops.append( - ops.DropTableCommentOp( - tname, existing_comment=conn_table.comment, schema=schema - ) - ) - elif metadata_table.comment != conn_table.comment: - modify_table_ops.ops.append( - ops.CreateTableCommentOp( - tname, - metadata_table.comment, - existing_comment=conn_table.comment, - schema=schema, - ) - ) diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/render.py b/venv/lib/python3.7/site-packages/alembic/autogenerate/render.py deleted file mode 100644 index 99c15c3..0000000 --- a/venv/lib/python3.7/site-packages/alembic/autogenerate/render.py +++ /dev/null @@ -1,933 +0,0 @@ -import re - -from mako.pygen import PythonPrinter -from sqlalchemy import schema as sa_schema -from sqlalchemy import sql -from sqlalchemy import types as sqltypes - -from .. import util -from ..operations import ops -from ..util import compat -from ..util import sqla_compat -from ..util.compat import string_types -from ..util.compat import StringIO - - -MAX_PYTHON_ARGS = 255 - -try: - from sqlalchemy.sql.naming import conv - - def _render_gen_name(autogen_context, name): - if isinstance(name, conv): - return _f_name(_alembic_autogenerate_prefix(autogen_context), name) - else: - return name - - -except ImportError: - - def _render_gen_name(autogen_context, name): - return name - - -def _indent(text): - text = re.compile(r"^", re.M).sub(" ", text).strip() - text = re.compile(r" +$", re.M).sub("", text) - return text - - -def _render_python_into_templatevars( - autogen_context, migration_script, template_args -): - imports = autogen_context.imports - - for upgrade_ops, downgrade_ops in zip( - migration_script.upgrade_ops_list, migration_script.downgrade_ops_list - ): - template_args[upgrade_ops.upgrade_token] = _indent( - _render_cmd_body(upgrade_ops, autogen_context) - ) - template_args[downgrade_ops.downgrade_token] = _indent( - _render_cmd_body(downgrade_ops, autogen_context) - ) - template_args["imports"] = "\n".join(sorted(imports)) - - -default_renderers = renderers = util.Dispatcher() - - -def _render_cmd_body(op_container, autogen_context): - - buf = StringIO() - printer = PythonPrinter(buf) - - printer.writeline( - "# ### commands auto generated by Alembic - please adjust! 
###" - ) - - has_lines = False - for op in op_container.ops: - lines = render_op(autogen_context, op) - has_lines = has_lines or lines - - for line in lines: - printer.writeline(line) - - if not has_lines: - printer.writeline("pass") - - printer.writeline("# ### end Alembic commands ###") - - return buf.getvalue() - - -def render_op(autogen_context, op): - renderer = renderers.dispatch(op) - lines = util.to_list(renderer(autogen_context, op)) - return lines - - -def render_op_text(autogen_context, op): - return "\n".join(render_op(autogen_context, op)) - - -@renderers.dispatch_for(ops.ModifyTableOps) -def _render_modify_table(autogen_context, op): - opts = autogen_context.opts - render_as_batch = opts.get("render_as_batch", False) - - if op.ops: - lines = [] - if render_as_batch: - with autogen_context._within_batch(): - lines.append( - "with op.batch_alter_table(%r, schema=%r) as batch_op:" - % (op.table_name, op.schema) - ) - for t_op in op.ops: - t_lines = render_op(autogen_context, t_op) - lines.extend(t_lines) - lines.append("") - else: - for t_op in op.ops: - t_lines = render_op(autogen_context, t_op) - lines.extend(t_lines) - - return lines - else: - return [] - - -@renderers.dispatch_for(ops.CreateTableCommentOp) -def _render_create_table_comment(autogen_context, op): - - templ = ( - "{prefix}create_table_comment(\n" - "{indent}'{tname}',\n" - "{indent}{comment},\n" - "{indent}existing_comment={existing},\n" - "{indent}schema={schema}\n" - ")" - ) - return templ.format( - prefix=_alembic_autogenerate_prefix(autogen_context), - tname=op.table_name, - comment="%r" % op.comment if op.comment is not None else None, - existing="%r" % op.existing_comment - if op.existing_comment is not None - else None, - schema="'%s'" % op.schema if op.schema is not None else None, - indent=" ", - ) - - -@renderers.dispatch_for(ops.DropTableCommentOp) -def _render_drop_table_comment(autogen_context, op): - - templ = ( - "{prefix}drop_table_comment(\n" - "{indent}'{tname}',\n" - "{indent}existing_comment={existing},\n" - "{indent}schema={schema}\n" - ")" - ) - return templ.format( - prefix=_alembic_autogenerate_prefix(autogen_context), - tname=op.table_name, - existing="%r" % op.existing_comment - if op.existing_comment is not None - else None, - schema="'%s'" % op.schema if op.schema is not None else None, - indent=" ", - ) - - -@renderers.dispatch_for(ops.CreateTableOp) -def _add_table(autogen_context, op): - table = op.to_table() - - args = [ - col - for col in [ - _render_column(col, autogen_context) for col in table.columns - ] - if col - ] + sorted( - [ - rcons - for rcons in [ - _render_constraint(cons, autogen_context) - for cons in table.constraints - ] - if rcons is not None - ] - ) - - if len(args) > MAX_PYTHON_ARGS: - args = "*[" + ",\n".join(args) + "]" - else: - args = ",\n".join(args) - - text = "%(prefix)screate_table(%(tablename)r,\n%(args)s" % { - "tablename": _ident(op.table_name), - "prefix": _alembic_autogenerate_prefix(autogen_context), - "args": args, - } - if op.schema: - text += ",\nschema=%r" % _ident(op.schema) - - comment = sqla_compat._comment_attribute(table) - if comment: - text += ",\ncomment=%r" % _ident(comment) - for k in sorted(op.kw): - text += ",\n%s=%r" % (k.replace(" ", "_"), op.kw[k]) - text += "\n)" - return text - - -@renderers.dispatch_for(ops.DropTableOp) -def _drop_table(autogen_context, op): - text = "%(prefix)sdrop_table(%(tname)r" % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": _ident(op.table_name), - } - if op.schema: - text += 
", schema=%r" % _ident(op.schema) - text += ")" - return text - - -@renderers.dispatch_for(ops.CreateIndexOp) -def _add_index(autogen_context, op): - index = op.to_index() - - has_batch = autogen_context._has_batch - - if has_batch: - tmpl = ( - "%(prefix)screate_index(%(name)r, [%(columns)s], " - "unique=%(unique)r%(kwargs)s)" - ) - else: - tmpl = ( - "%(prefix)screate_index(%(name)r, %(table)r, [%(columns)s], " - "unique=%(unique)r%(schema)s%(kwargs)s)" - ) - - text = tmpl % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "name": _render_gen_name(autogen_context, index.name), - "table": _ident(index.table.name), - "columns": ", ".join( - _get_index_rendered_expressions(index, autogen_context) - ), - "unique": index.unique or False, - "schema": (", schema=%r" % _ident(index.table.schema)) - if index.table.schema - else "", - "kwargs": ( - ", " - + ", ".join( - [ - "%s=%s" - % (key, _render_potential_expr(val, autogen_context)) - for key, val in index.kwargs.items() - ] - ) - ) - if len(index.kwargs) - else "", - } - return text - - -@renderers.dispatch_for(ops.DropIndexOp) -def _drop_index(autogen_context, op): - has_batch = autogen_context._has_batch - - if has_batch: - tmpl = "%(prefix)sdrop_index(%(name)r)" - else: - tmpl = ( - "%(prefix)sdrop_index(%(name)r, " - "table_name=%(table_name)r%(schema)s)" - ) - - text = tmpl % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "name": _render_gen_name(autogen_context, op.index_name), - "table_name": _ident(op.table_name), - "schema": ((", schema=%r" % _ident(op.schema)) if op.schema else ""), - } - return text - - -@renderers.dispatch_for(ops.CreateUniqueConstraintOp) -def _add_unique_constraint(autogen_context, op): - return [_uq_constraint(op.to_constraint(), autogen_context, True)] - - -@renderers.dispatch_for(ops.CreateForeignKeyOp) -def _add_fk_constraint(autogen_context, op): - - args = [repr(_render_gen_name(autogen_context, op.constraint_name))] - if not autogen_context._has_batch: - args.append(repr(_ident(op.source_table))) - - args.extend( - [ - repr(_ident(op.referent_table)), - repr([_ident(col) for col in op.local_cols]), - repr([_ident(col) for col in op.remote_cols]), - ] - ) - - kwargs = [ - "referent_schema", - "onupdate", - "ondelete", - "initially", - "deferrable", - "use_alter", - ] - if not autogen_context._has_batch: - kwargs.insert(0, "source_schema") - - for k in kwargs: - if k in op.kw: - value = op.kw[k] - if value is not None: - args.append("%s=%r" % (k, value)) - - return "%(prefix)screate_foreign_key(%(args)s)" % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - - -@renderers.dispatch_for(ops.CreatePrimaryKeyOp) -def _add_pk_constraint(constraint, autogen_context): - raise NotImplementedError() - - -@renderers.dispatch_for(ops.CreateCheckConstraintOp) -def _add_check_constraint(constraint, autogen_context): - raise NotImplementedError() - - -@renderers.dispatch_for(ops.DropConstraintOp) -def _drop_constraint(autogen_context, op): - - if autogen_context._has_batch: - template = "%(prefix)sdrop_constraint" "(%(name)r, type_=%(type)r)" - else: - template = ( - "%(prefix)sdrop_constraint" - "(%(name)r, '%(table_name)s'%(schema)s, type_=%(type)r)" - ) - - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "name": _render_gen_name(autogen_context, op.constraint_name), - "table_name": _ident(op.table_name), - "type": op.constraint_type, - "schema": (", schema=%r" % _ident(op.schema)) if op.schema else "", - } - return 
text - - -@renderers.dispatch_for(ops.AddColumnOp) -def _add_column(autogen_context, op): - - schema, tname, column = op.schema, op.table_name, op.column - if autogen_context._has_batch: - template = "%(prefix)sadd_column(%(column)s)" - else: - template = "%(prefix)sadd_column(%(tname)r, %(column)s" - if schema: - template += ", schema=%(schema)r" - template += ")" - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": tname, - "column": _render_column(column, autogen_context), - "schema": schema, - } - return text - - -@renderers.dispatch_for(ops.DropColumnOp) -def _drop_column(autogen_context, op): - - schema, tname, column_name = op.schema, op.table_name, op.column_name - - if autogen_context._has_batch: - template = "%(prefix)sdrop_column(%(cname)r)" - else: - template = "%(prefix)sdrop_column(%(tname)r, %(cname)r" - if schema: - template += ", schema=%(schema)r" - template += ")" - - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": _ident(tname), - "cname": _ident(column_name), - "schema": _ident(schema), - } - return text - - -@renderers.dispatch_for(ops.AlterColumnOp) -def _alter_column(autogen_context, op): - - tname = op.table_name - cname = op.column_name - server_default = op.modify_server_default - type_ = op.modify_type - nullable = op.modify_nullable - comment = op.modify_comment - autoincrement = op.kw.get("autoincrement", None) - existing_type = op.existing_type - existing_nullable = op.existing_nullable - existing_comment = op.existing_comment - existing_server_default = op.existing_server_default - schema = op.schema - - indent = " " * 11 - - if autogen_context._has_batch: - template = "%(prefix)salter_column(%(cname)r" - else: - template = "%(prefix)salter_column(%(tname)r, %(cname)r" - - text = template % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "tname": tname, - "cname": cname, - } - if existing_type is not None: - text += ",\n%sexisting_type=%s" % ( - indent, - _repr_type(existing_type, autogen_context), - ) - if server_default is not False: - rendered = _render_server_default(server_default, autogen_context) - text += ",\n%sserver_default=%s" % (indent, rendered) - - if type_ is not None: - text += ",\n%stype_=%s" % (indent, _repr_type(type_, autogen_context)) - if nullable is not None: - text += ",\n%snullable=%r" % (indent, nullable) - if comment is not False: - text += ",\n%scomment=%r" % (indent, comment) - if existing_comment is not None: - text += ",\n%sexisting_comment=%r" % (indent, existing_comment) - if nullable is None and existing_nullable is not None: - text += ",\n%sexisting_nullable=%r" % (indent, existing_nullable) - if autoincrement is not None: - text += ",\n%sautoincrement=%r" % (indent, autoincrement) - if server_default is False and existing_server_default: - rendered = _render_server_default( - existing_server_default, autogen_context - ) - text += ",\n%sexisting_server_default=%s" % (indent, rendered) - if schema and not autogen_context._has_batch: - text += ",\n%sschema=%r" % (indent, schema) - text += ")" - return text - - -class _f_name(object): - def __init__(self, prefix, name): - self.prefix = prefix - self.name = name - - def __repr__(self): - return "%sf(%r)" % (self.prefix, _ident(self.name)) - - -def _ident(name): - """produce a __repr__() object for a string identifier that may - use quoted_name() in SQLAlchemy 0.9 and greater. 
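- (e.g. on Python 3 a quoted_name such as quoted_name("user", True)
- comes back as the plain string "user")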
- - The issue worked around here is that quoted_name() doesn't have - very good repr() behavior by itself when unicode is involved. - - """ - if name is None: - return name - elif isinstance(name, sql.elements.quoted_name): - if compat.py2k: - # the attempt to encode to ascii here isn't super ideal, - # however we are trying to cut down on an explosion of - # u'' literals only when py2k + SQLA 0.9, in particular - # makes unit tests testing code generation very difficult - try: - return name.encode("ascii") - except UnicodeError: - return compat.text_type(name) - else: - return compat.text_type(name) - elif isinstance(name, compat.string_types): - return name - - -def _render_potential_expr( - value, autogen_context, wrap_in_text=True, is_server_default=False -): - if isinstance(value, sql.ClauseElement): - - if wrap_in_text: - template = "%(prefix)stext(%(sql)r)" - else: - template = "%(sql)r" - - return template % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "sql": autogen_context.migration_context.impl.render_ddl_sql_expr( - value, is_server_default=is_server_default - ), - } - - else: - return repr(value) - - -def _get_index_rendered_expressions(idx, autogen_context): - return [ - repr(_ident(getattr(exp, "name", None))) - if isinstance(exp, sa_schema.Column) - else _render_potential_expr(exp, autogen_context) - for exp in idx.expressions - ] - - -def _uq_constraint(constraint, autogen_context, alter): - opts = [] - - has_batch = autogen_context._has_batch - - if constraint.deferrable: - opts.append(("deferrable", str(constraint.deferrable))) - if constraint.initially: - opts.append(("initially", str(constraint.initially))) - if not has_batch and alter and constraint.table.schema: - opts.append(("schema", _ident(constraint.table.schema))) - if not alter and constraint.name: - opts.append( - ("name", _render_gen_name(autogen_context, constraint.name)) - ) - - if alter: - args = [repr(_render_gen_name(autogen_context, constraint.name))] - if not has_batch: - args += [repr(_ident(constraint.table.name))] - args.append(repr([_ident(col.name) for col in constraint.columns])) - args.extend(["%s=%r" % (k, v) for k, v in opts]) - return "%(prefix)screate_unique_constraint(%(args)s)" % { - "prefix": _alembic_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - else: - args = [repr(_ident(col.name)) for col in constraint.columns] - args.extend(["%s=%r" % (k, v) for k, v in opts]) - return "%(prefix)sUniqueConstraint(%(args)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - - -def _user_autogenerate_prefix(autogen_context, target): - prefix = autogen_context.opts["user_module_prefix"] - if prefix is None: - return "%s." % target.__module__ - else: - return prefix - - -def _sqlalchemy_autogenerate_prefix(autogen_context): - return autogen_context.opts["sqlalchemy_module_prefix"] or "" - - -def _alembic_autogenerate_prefix(autogen_context): - if autogen_context._has_batch: - return "batch_op." 
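- # outside of batch mode, fall back to the configured
- # alembic_module_prefix (normally "op.")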
- else: - return autogen_context.opts["alembic_module_prefix"] or "" - - -def _user_defined_render(type_, object_, autogen_context): - if "render_item" in autogen_context.opts: - render = autogen_context.opts["render_item"] - if render: - rendered = render(type_, object_, autogen_context) - if rendered is not False: - return rendered - return False - - -def _render_column(column, autogen_context): - rendered = _user_defined_render("column", column, autogen_context) - if rendered is not False: - return rendered - - args = [] - opts = [] - - if column.server_default: - if sqla_compat._server_default_is_computed(column): - rendered = _render_computed(column.computed, autogen_context) - if rendered: - args.append(rendered) - else: - rendered = _render_server_default( - column.server_default, autogen_context - ) - if rendered: - opts.append(("server_default", rendered)) - - if ( - column.autoincrement is not None - and column.autoincrement != sqla_compat.AUTOINCREMENT_DEFAULT - ): - opts.append(("autoincrement", column.autoincrement)) - - if column.nullable is not None: - opts.append(("nullable", column.nullable)) - - if column.system: - opts.append(("system", column.system)) - - comment = sqla_compat._comment_attribute(column) - if comment: - opts.append(("comment", "%r" % comment)) - - # TODO: for non-ascii colname, assign a "key" - return "%(prefix)sColumn(%(name)r, %(type)s, %(args)s%(kwargs)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "name": _ident(column.name), - "type": _repr_type(column.type, autogen_context), - "args": ", ".join([str(arg) for arg in args]) + ", " if args else "", - "kwargs": ( - ", ".join( - ["%s=%s" % (kwname, val) for kwname, val in opts] - + [ - "%s=%s" - % (key, _render_potential_expr(val, autogen_context)) - for key, val in sqla_compat._column_kwargs(column).items() - ] - ) - ), - } - - -def _render_server_default(default, autogen_context, repr_=True): - rendered = _user_defined_render("server_default", default, autogen_context) - if rendered is not False: - return rendered - - if sqla_compat.has_computed and isinstance(default, sa_schema.Computed): - return _render_computed(default, autogen_context) - elif isinstance(default, sa_schema.DefaultClause): - if isinstance(default.arg, compat.string_types): - default = default.arg - else: - return _render_potential_expr( - default.arg, autogen_context, is_server_default=True - ) - - if isinstance(default, string_types) and repr_: - default = repr(re.sub(r"^'|'$", "", default)) - - return default - - -def _render_computed(computed, autogen_context): - text = _render_potential_expr( - computed.sqltext, autogen_context, wrap_in_text=False - ) - - kwargs = {} - if computed.persisted is not None: - kwargs["persisted"] = computed.persisted - return "%(prefix)sComputed(%(text)s, %(kwargs)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "text": text, - "kwargs": (", ".join("%s=%s" % pair for pair in kwargs.items())), - } - - -def _repr_type(type_, autogen_context): - rendered = _user_defined_render("type", type_, autogen_context) - if rendered is not False: - return rendered - - if hasattr(autogen_context.migration_context, "impl"): - impl_rt = autogen_context.migration_context.impl.render_type( - type_, autogen_context - ) - else: - impl_rt = None - - mod = type(type_).__module__ - imports = autogen_context.imports - if mod.startswith("sqlalchemy.dialects"): - dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1) - if imports is not None: - imports.add("from 
sqlalchemy.dialects import %s" % dname) - if impl_rt: - return impl_rt - else: - return "%s.%r" % (dname, type_) - elif impl_rt: - return impl_rt - elif mod.startswith("sqlalchemy."): - if type(type_) is sqltypes.Variant: - return _render_Variant_type(type_, autogen_context) - if "_render_%s_type" % type_.__visit_name__ in globals(): - fn = globals()["_render_%s_type" % type_.__visit_name__] - return fn(type_, autogen_context) - else: - prefix = _sqlalchemy_autogenerate_prefix(autogen_context) - return "%s%r" % (prefix, type_) - else: - prefix = _user_autogenerate_prefix(autogen_context, type_) - return "%s%r" % (prefix, type_) - - -def _render_ARRAY_type(type_, autogen_context): - return _render_type_w_subtype( - type_, autogen_context, "item_type", r"(.+?\()" - ) - - -def _render_Variant_type(type_, autogen_context): - base = _repr_type(type_.impl, autogen_context) - for dialect in sorted(type_.mapping): - typ = type_.mapping[dialect] - base += ".with_variant(%s, %r)" % ( - _repr_type(typ, autogen_context), - dialect, - ) - return base - - -def _render_type_w_subtype( - type_, autogen_context, attrname, regexp, prefix=None -): - outer_repr = repr(type_) - inner_type = getattr(type_, attrname, None) - if inner_type is None: - return False - - inner_repr = repr(inner_type) - - inner_repr = re.sub(r"([\(\)])", r"\\\1", inner_repr) - sub_type = _repr_type(getattr(type_, attrname), autogen_context) - outer_type = re.sub(regexp + inner_repr, r"\1%s" % sub_type, outer_repr) - - if prefix: - return "%s%s" % (prefix, outer_type) - - mod = type(type_).__module__ - if mod.startswith("sqlalchemy.dialects"): - dname = re.match(r"sqlalchemy\.dialects\.(\w+)", mod).group(1) - return "%s.%s" % (dname, outer_type) - elif mod.startswith("sqlalchemy"): - prefix = _sqlalchemy_autogenerate_prefix(autogen_context) - return "%s%s" % (prefix, outer_type) - else: - return None - - -_constraint_renderers = util.Dispatcher() - - -def _render_constraint(constraint, autogen_context): - try: - renderer = _constraint_renderers.dispatch(constraint) - except ValueError: - util.warn("No renderer is established for object %r" % constraint) - return "[Unknown Python object %r]" % constraint - else: - return renderer(constraint, autogen_context) - - -@_constraint_renderers.dispatch_for(sa_schema.PrimaryKeyConstraint) -def _render_primary_key(constraint, autogen_context): - rendered = _user_defined_render("primary_key", constraint, autogen_context) - if rendered is not False: - return rendered - - if not constraint.columns: - return None - - opts = [] - if constraint.name: - opts.append( - ("name", repr(_render_gen_name(autogen_context, constraint.name))) - ) - return "%(prefix)sPrimaryKeyConstraint(%(args)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "args": ", ".join( - [repr(c.name) for c in constraint.columns] - + ["%s=%s" % (kwname, val) for kwname, val in opts] - ), - } - - -def _fk_colspec(fk, metadata_schema): - """Implement a 'safe' version of ForeignKey._get_colspec() that - won't fail if the remote table can't be resolved. - - """ - colspec = fk._get_colspec() - tokens = colspec.split(".") - tname, colname = tokens[-2:] - - if metadata_schema is not None and len(tokens) == 2: - table_fullname = "%s.%s" % (metadata_schema, tname) - else: - table_fullname = ".".join(tokens[0:-1]) - - if ( - not fk.link_to_name - and fk.parent is not None - and fk.parent.table is not None - ): - # try to resolve the remote table in order to adjust for column.key. 
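- # (e.g. for a hypothetical Column("name_in_db", key="other"), the
- # colspec may carry the key "other" while DDL must reference
- # "name_in_db")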
- # the FK constraint needs to be rendered in terms of the column - # name. - parent_metadata = fk.parent.table.metadata - if table_fullname in parent_metadata.tables: - col = parent_metadata.tables[table_fullname].c.get(colname) - if col is not None: - colname = _ident(col.name) - - colspec = "%s.%s" % (table_fullname, colname) - - return colspec - - -def _populate_render_fk_opts(constraint, opts): - - if constraint.onupdate: - opts.append(("onupdate", repr(constraint.onupdate))) - if constraint.ondelete: - opts.append(("ondelete", repr(constraint.ondelete))) - if constraint.initially: - opts.append(("initially", repr(constraint.initially))) - if constraint.deferrable: - opts.append(("deferrable", repr(constraint.deferrable))) - if constraint.use_alter: - opts.append(("use_alter", repr(constraint.use_alter))) - - -@_constraint_renderers.dispatch_for(sa_schema.ForeignKeyConstraint) -def _render_foreign_key(constraint, autogen_context): - rendered = _user_defined_render("foreign_key", constraint, autogen_context) - if rendered is not False: - return rendered - - opts = [] - if constraint.name: - opts.append( - ("name", repr(_render_gen_name(autogen_context, constraint.name))) - ) - - _populate_render_fk_opts(constraint, opts) - - apply_metadata_schema = constraint.parent.metadata.schema - return ( - "%(prefix)sForeignKeyConstraint([%(cols)s], " - "[%(refcols)s], %(args)s)" - % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "cols": ", ".join( - "%r" % _ident(f.parent.name) for f in constraint.elements - ), - "refcols": ", ".join( - repr(_fk_colspec(f, apply_metadata_schema)) - for f in constraint.elements - ), - "args": ", ".join( - ["%s=%s" % (kwname, val) for kwname, val in opts] - ), - } - ) - - -@_constraint_renderers.dispatch_for(sa_schema.UniqueConstraint) -def _render_unique_constraint(constraint, autogen_context): - rendered = _user_defined_render("unique", constraint, autogen_context) - if rendered is not False: - return rendered - - return _uq_constraint(constraint, autogen_context, False) - - -@_constraint_renderers.dispatch_for(sa_schema.CheckConstraint) -def _render_check_constraint(constraint, autogen_context): - rendered = _user_defined_render("check", constraint, autogen_context) - if rendered is not False: - return rendered - - # detect the constraint being part of - # a parent type which is probably in the Table already. - # ideally SQLAlchemy would give us more of a first class - # way to detect this. 
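- # e.g. the CHECK constraint that a Boolean or Enum type generates on
- # backends without native support; its _create_rule targets the owning
- # TypeEngine, which is what the test below looks for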
- if ( - constraint._create_rule - and hasattr(constraint._create_rule, "target") - and isinstance(constraint._create_rule.target, sqltypes.TypeEngine) - ): - return None - opts = [] - if constraint.name: - opts.append( - ("name", repr(_render_gen_name(autogen_context, constraint.name))) - ) - return "%(prefix)sCheckConstraint(%(sqltext)s%(opts)s)" % { - "prefix": _sqlalchemy_autogenerate_prefix(autogen_context), - "opts": ", " + (", ".join("%s=%s" % (k, v) for k, v in opts)) - if opts - else "", - "sqltext": _render_potential_expr( - constraint.sqltext, autogen_context, wrap_in_text=False - ), - } - - -@renderers.dispatch_for(ops.ExecuteSQLOp) -def _execute_sql(autogen_context, op): - if not isinstance(op.sqltext, string_types): - raise NotImplementedError( - "Autogenerate rendering of SQL Expression language constructs " - "not supported here; please use a plain SQL string" - ) - return "op.execute(%r)" % op.sqltext - - -renderers = default_renderers.branch() diff --git a/venv/lib/python3.7/site-packages/alembic/autogenerate/rewriter.py b/venv/lib/python3.7/site-packages/alembic/autogenerate/rewriter.py deleted file mode 100644 index 90a931f..0000000 --- a/venv/lib/python3.7/site-packages/alembic/autogenerate/rewriter.py +++ /dev/null @@ -1,158 +0,0 @@ -from alembic import util -from alembic.operations import ops - - -class Rewriter(object): - """A helper object that allows easy 'rewriting' of ops streams. - - The :class:`.Rewriter` object is intended to be passed along - to the - :paramref:`.EnvironmentContext.configure.process_revision_directives` - parameter in an ``env.py`` script. Once constructed, any number - of "rewrites" functions can be associated with it, which will be given - the opportunity to modify the structure without having to have explicit - knowledge of the overall structure. - - The function is passed the :class:`.MigrationContext` object and - ``revision`` tuple that are passed to the :paramref:`.Environment - Context.configure.process_revision_directives` function normally, - and the third argument is an individual directive of the type - noted in the decorator. The function has the choice of returning - a single op directive, which normally can be the directive that - was actually passed, or a new directive to replace it, or a list - of zero or more directives to replace it. - - .. seealso:: - - :ref:`autogen_rewriter` - usage example - - .. versionadded:: 0.8 - - """ - - _traverse = util.Dispatcher() - - _chained = None - - def __init__(self): - self.dispatch = util.Dispatcher() - - def chain(self, other): - """Produce a "chain" of this :class:`.Rewriter` to another. - - This allows two rewriters to operate serially on a stream, - e.g.:: - - writer1 = autogenerate.Rewriter() - writer2 = autogenerate.Rewriter() - - @writer1.rewrites(ops.AddColumnOp) - def add_column_nullable(context, revision, op): - op.column.nullable = True - return op - - @writer2.rewrites(ops.AddColumnOp) - def add_column_idx(context, revision, op): - idx_op = ops.CreateIndexOp( - 'ixc', op.table_name, [op.column.name]) - return [ - op, - idx_op - ] - - writer = writer1.chain(writer2) - - :param other: a :class:`.Rewriter` instance - :return: a new :class:`.Rewriter` that will run the operations - of this writer, then the "other" writer, in succession. - - """ - wr = self.__class__.__new__(self.__class__) - wr.__dict__.update(self.__dict__) - wr._chained = other - return wr - - def rewrites(self, operator): - """Register a function as rewriter for a given type. 
- - The function should receive three arguments, which are - the :class:`.MigrationContext`, a ``revision`` tuple, and - an op directive of the type indicated. E.g.:: - - @writer1.rewrites(ops.AddColumnOp) - def add_column_nullable(context, revision, op): - op.column.nullable = True - return op - - """ - return self.dispatch.dispatch_for(operator) - - def _rewrite(self, context, revision, directive): - try: - _rewriter = self.dispatch.dispatch(directive) - except ValueError: - _rewriter = None - yield directive - else: - if self in directive._mutations: - yield directive - else: - for r_directive in util.to_list( - _rewriter(context, revision, directive) - ): - r_directive._mutations = r_directive._mutations.union( - [self] - ) - yield r_directive - - def __call__(self, context, revision, directives): - self.process_revision_directives(context, revision, directives) - if self._chained: - self._chained(context, revision, directives) - - @_traverse.dispatch_for(ops.MigrationScript) - def _traverse_script(self, context, revision, directive): - upgrade_ops_list = [] - for upgrade_ops in directive.upgrade_ops_list: - ret = self._traverse_for(context, revision, upgrade_ops) - if len(ret) != 1: - raise ValueError( - "Can only return single object for UpgradeOps traverse" - ) - upgrade_ops_list.append(ret[0]) - directive.upgrade_ops = upgrade_ops_list - - downgrade_ops_list = [] - for downgrade_ops in directive.downgrade_ops_list: - ret = self._traverse_for(context, revision, downgrade_ops) - if len(ret) != 1: - raise ValueError( - "Can only return single object for DowngradeOps traverse" - ) - downgrade_ops_list.append(ret[0]) - directive.downgrade_ops = downgrade_ops_list - - @_traverse.dispatch_for(ops.OpContainer) - def _traverse_op_container(self, context, revision, directive): - self._traverse_list(context, revision, directive.ops) - - @_traverse.dispatch_for(ops.MigrateOperation) - def _traverse_any_directive(self, context, revision, directive): - pass - - def _traverse_for(self, context, revision, directive): - directives = list(self._rewrite(context, revision, directive)) - for directive in directives: - traverser = self._traverse.dispatch(directive) - traverser(self, context, revision, directive) - return directives - - def _traverse_list(self, context, revision, directives): - dest = [] - for directive in directives: - dest.extend(self._traverse_for(context, revision, directive)) - - directives[:] = dest - - def process_revision_directives(self, context, revision, directives): - self._traverse_list(context, revision, directives) diff --git a/venv/lib/python3.7/site-packages/alembic/command.py b/venv/lib/python3.7/site-packages/alembic/command.py deleted file mode 100644 index 7d19e3c..0000000 --- a/venv/lib/python3.7/site-packages/alembic/command.py +++ /dev/null @@ -1,611 +0,0 @@ -import os - -from . import autogenerate as autogen -from . import util -from .runtime.environment import EnvironmentContext -from .script import ScriptDirectory - - -def list_templates(config): - """List available templates. - - :param config: a :class:`.Config` object. 
- - """ - - config.print_stdout("Available templates:\n") - for tempname in os.listdir(config.get_template_directory()): - with open( - os.path.join(config.get_template_directory(), tempname, "README") - ) as readme: - synopsis = next(readme) - config.print_stdout("%s - %s", tempname, synopsis) - - config.print_stdout("\nTemplates are used via the 'init' command, e.g.:") - config.print_stdout("\n alembic init --template generic ./scripts") - - -def init(config, directory, template="generic", package=False): - """Initialize a new scripts directory. - - :param config: a :class:`.Config` object. - - :param directory: string path of the target directory - - :param template: string name of the migration environment template to - use. - - :param package: when True, write ``__init__.py`` files into the - environment location as well as the versions/ location. - - .. versionadded:: 1.2 - - - """ - - if os.access(directory, os.F_OK) and os.listdir(directory): - raise util.CommandError( - "Directory %s already exists and is not empty" % directory - ) - - template_dir = os.path.join(config.get_template_directory(), template) - if not os.access(template_dir, os.F_OK): - raise util.CommandError("No such template %r" % template) - - if not os.access(directory, os.F_OK): - util.status( - "Creating directory %s" % os.path.abspath(directory), - os.makedirs, - directory, - ) - - versions = os.path.join(directory, "versions") - util.status( - "Creating directory %s" % os.path.abspath(versions), - os.makedirs, - versions, - ) - - script = ScriptDirectory(directory) - - for file_ in os.listdir(template_dir): - file_path = os.path.join(template_dir, file_) - if file_ == "alembic.ini.mako": - config_file = os.path.abspath(config.config_file_name) - if os.access(config_file, os.F_OK): - util.msg("File %s already exists, skipping" % config_file) - else: - script._generate_template( - file_path, config_file, script_location=directory - ) - elif os.path.isfile(file_path): - output_file = os.path.join(directory, file_) - script._copy_file(file_path, output_file) - - if package: - for path in [ - os.path.join(os.path.abspath(directory), "__init__.py"), - os.path.join(os.path.abspath(versions), "__init__.py"), - ]: - file_ = util.status("Adding %s" % path, open, path, "w") - file_.close() - - util.msg( - "Please edit configuration/connection/logging " - "settings in %r before proceeding." % config_file - ) - - -def revision( - config, - message=None, - autogenerate=False, - sql=False, - head="head", - splice=False, - branch_label=None, - version_path=None, - rev_id=None, - depends_on=None, - process_revision_directives=None, -): - """Create a new revision file. - - :param config: a :class:`.Config` object. - - :param message: string message to apply to the revision; this is the - ``-m`` option to ``alembic revision``. - - :param autogenerate: whether or not to autogenerate the script from - the database; this is the ``--autogenerate`` option to - ``alembic revision``. - - :param sql: whether to dump the script out as a SQL string; when specified, - the script is dumped to stdout. This is the ``--sql`` option to - ``alembic revision``. - - :param head: head revision to build the new revision upon as a parent; - this is the ``--head`` option to ``alembic revision``. - - :param splice: whether or not the new revision should be made into a - new head of its own; is required when the given ``head`` is not itself - a head. This is the ``--splice`` option to ``alembic revision``. 
- - :param branch_label: string label to apply to the branch; this is the - ``--branch-label`` option to ``alembic revision``. - - :param version_path: string symbol identifying a specific version path - from the configuration; this is the ``--version-path`` option to - ``alembic revision``. - - :param rev_id: optional revision identifier to use instead of having - one generated; this is the ``--rev-id`` option to ``alembic revision``. - - :param depends_on: optional list of "depends on" identifiers; this is the - ``--depends-on`` option to ``alembic revision``. - - :param process_revision_directives: this is a callable that takes the - same form as the callable described at - :paramref:`.EnvironmentContext.configure.process_revision_directives`; - will be applied to the structure generated by the revision process - where it can be altered programmatically. Note that unlike all - the other parameters, this option is only available via programmatic - use of :func:`.command.revision` - - .. versionadded:: 0.9.0 - - """ - - script_directory = ScriptDirectory.from_config(config) - - command_args = dict( - message=message, - autogenerate=autogenerate, - sql=sql, - head=head, - splice=splice, - branch_label=branch_label, - version_path=version_path, - rev_id=rev_id, - depends_on=depends_on, - ) - revision_context = autogen.RevisionContext( - config, - script_directory, - command_args, - process_revision_directives=process_revision_directives, - ) - - environment = util.asbool(config.get_main_option("revision_environment")) - - if autogenerate: - environment = True - - if sql: - raise util.CommandError( - "Using --sql with --autogenerate does not make any sense" - ) - - def retrieve_migrations(rev, context): - revision_context.run_autogenerate(rev, context) - return [] - - elif environment: - - def retrieve_migrations(rev, context): - revision_context.run_no_autogenerate(rev, context) - return [] - - elif sql: - raise util.CommandError( - "Using --sql with the revision command when " - "revision_environment is not configured does not make any sense" - ) - - if environment: - with EnvironmentContext( - config, - script_directory, - fn=retrieve_migrations, - as_sql=sql, - template_args=revision_context.template_args, - revision_context=revision_context, - ): - script_directory.run_env() - - # the revision_context now has MigrationScript structure(s) present. - # these could theoretically be further processed / rewritten *here*, - # in addition to the hooks present within each run_migrations() call, - # or at the end of env.py run_migrations_online(). - - scripts = [script for script in revision_context.generate_scripts()] - if len(scripts) == 1: - return scripts[0] - else: - return scripts - - -def merge(config, revisions, message=None, branch_label=None, rev_id=None): - """Merge two revisions together. Creates a new migration file. - - .. versionadded:: 0.7.0 - - :param config: a :class:`.Config` instance - - :param message: string message to apply to the revision - - :param branch_label: string label name to apply to the new revision - - :param rev_id: hardcoded revision identifier instead of generating a new - one. - - .. seealso:: - - :ref:`branches` - - """ - - script = ScriptDirectory.from_config(config) - template_args = { - "config": config # Let templates use config for - # e.g. 
multiple databases - } - return script.generate_revision( - rev_id or util.rev_id(), - message, - refresh=True, - head=revisions, - branch_labels=branch_label, - **template_args - ) - - -def upgrade(config, revision, sql=False, tag=None): - """Upgrade to a later version. - - :param config: a :class:`.Config` instance. - - :param revision: string revision target or range for --sql mode - - :param sql: if True, use ``--sql`` mode - - :param tag: an arbitrary "tag" that can be intercepted by custom - ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` - method. - - """ - - script = ScriptDirectory.from_config(config) - - starting_rev = None - if ":" in revision: - if not sql: - raise util.CommandError("Range revision not allowed") - starting_rev, revision = revision.split(":", 2) - - def upgrade(rev, context): - return script._upgrade_revs(revision, rev) - - with EnvironmentContext( - config, - script, - fn=upgrade, - as_sql=sql, - starting_rev=starting_rev, - destination_rev=revision, - tag=tag, - ): - script.run_env() - - -def downgrade(config, revision, sql=False, tag=None): - """Revert to a previous version. - - :param config: a :class:`.Config` instance. - - :param revision: string revision target or range for --sql mode - - :param sql: if True, use ``--sql`` mode - - :param tag: an arbitrary "tag" that can be intercepted by custom - ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` - method. - - """ - - script = ScriptDirectory.from_config(config) - starting_rev = None - if ":" in revision: - if not sql: - raise util.CommandError("Range revision not allowed") - starting_rev, revision = revision.split(":", 2) - elif sql: - raise util.CommandError( - "downgrade with --sql requires :" - ) - - def downgrade(rev, context): - return script._downgrade_revs(revision, rev) - - with EnvironmentContext( - config, - script, - fn=downgrade, - as_sql=sql, - starting_rev=starting_rev, - destination_rev=revision, - tag=tag, - ): - script.run_env() - - -def show(config, rev): - """Show the revision(s) denoted by the given symbol. - - :param config: a :class:`.Config` instance. - - :param revision: string revision target - - """ - - script = ScriptDirectory.from_config(config) - - if rev == "current": - - def show_current(rev, context): - for sc in script.get_revisions(rev): - config.print_stdout(sc.log_entry) - return [] - - with EnvironmentContext(config, script, fn=show_current): - script.run_env() - else: - for sc in script.get_revisions(rev): - config.print_stdout(sc.log_entry) - - -def history(config, rev_range=None, verbose=False, indicate_current=False): - """List changeset scripts in chronological order. - - :param config: a :class:`.Config` instance. - - :param rev_range: string revision range - - :param verbose: output in verbose mode. - - :param indicate_current: indicate current revision. 
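A short sketch of driving these directives from Python rather than the CLI; note that, per the checks above, a `start:end` revision range is only accepted together with `sql=True` (offline mode). The config path is a placeholder:

```
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # hypothetical path

command.upgrade(cfg, "head")        # apply all pending migrations
command.downgrade(cfg, "-1")        # step back one revision
command.history(cfg, verbose=True)  # print changesets to stdout

# offline mode: dump the SQL for a revision range instead of executing it
command.upgrade(cfg, "base:head", sql=True)
```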
- - ..versionadded:: 0.9.9 - - """ - - script = ScriptDirectory.from_config(config) - if rev_range is not None: - if ":" not in rev_range: - raise util.CommandError( - "History range requires [start]:[end], " "[start]:, or :[end]" - ) - base, head = rev_range.strip().split(":") - else: - base = head = None - - environment = ( - util.asbool(config.get_main_option("revision_environment")) - or indicate_current - ) - - def _display_history(config, script, base, head, currents=()): - for sc in script.walk_revisions( - base=base or "base", head=head or "heads" - ): - - if indicate_current: - sc._db_current_indicator = sc.revision in currents - - config.print_stdout( - sc.cmd_format( - verbose=verbose, - include_branches=True, - include_doc=True, - include_parents=True, - ) - ) - - def _display_history_w_current(config, script, base, head): - def _display_current_history(rev, context): - if head == "current": - _display_history(config, script, base, rev, rev) - elif base == "current": - _display_history(config, script, rev, head, rev) - else: - _display_history(config, script, base, head, rev) - return [] - - with EnvironmentContext(config, script, fn=_display_current_history): - script.run_env() - - if base == "current" or head == "current" or environment: - _display_history_w_current(config, script, base, head) - else: - _display_history(config, script, base, head) - - -def heads(config, verbose=False, resolve_dependencies=False): - """Show current available heads in the script directory. - - :param config: a :class:`.Config` instance. - - :param verbose: output in verbose mode. - - :param resolve_dependencies: treat dependency version as down revisions. - - """ - - script = ScriptDirectory.from_config(config) - if resolve_dependencies: - heads = script.get_revisions("heads") - else: - heads = script.get_revisions(script.get_heads()) - - for rev in heads: - config.print_stdout( - rev.cmd_format( - verbose, include_branches=True, tree_indicators=False - ) - ) - - -def branches(config, verbose=False): - """Show current branch points. - - :param config: a :class:`.Config` instance. - - :param verbose: output in verbose mode. - - """ - script = ScriptDirectory.from_config(config) - for sc in script.walk_revisions(): - if sc.is_branch_point: - config.print_stdout( - "%s\n%s\n", - sc.cmd_format(verbose, include_branches=True), - "\n".join( - "%s -> %s" - % ( - " " * len(str(sc.revision)), - rev_obj.cmd_format( - False, include_branches=True, include_doc=verbose - ), - ) - for rev_obj in ( - script.get_revision(rev) for rev in sc.nextrev - ) - ), - ) - - -def current(config, verbose=False, head_only=False): - """Display the current revision for a database. - - :param config: a :class:`.Config` instance. - - :param verbose: output in verbose mode. - - :param head_only: deprecated; use ``verbose`` for additional output. - - """ - - script = ScriptDirectory.from_config(config) - - if head_only: - util.warn("--head-only is deprecated", stacklevel=3) - - def display_version(rev, context): - if verbose: - config.print_stdout( - "Current revision(s) for %s:", - util.obfuscate_url_pw(context.connection.engine.url), - ) - for rev in script.get_all_current(rev): - config.print_stdout(rev.cmd_format(verbose)) - - return [] - - with EnvironmentContext(config, script, fn=display_version): - script.run_env() - - -def stamp(config, revision, sql=False, tag=None, purge=False): - """'stamp' the revision table with the given revision; don't - run any migrations. - - :param config: a :class:`.Config` instance. 
- - :param revision: target revision or list of revisions. May be a list - to indicate stamping of multiple branch heads. - - .. note:: this parameter is called "revisions" in the command line - interface. - - .. versionchanged:: 1.2 The revision may be a single revision or - list of revisions when stamping multiple branch heads. - - :param sql: use ``--sql`` mode - - :param tag: an arbitrary "tag" that can be intercepted by custom - ``env.py`` scripts via the :class:`.EnvironmentContext.get_tag_argument` - method. - - :param purge: delete all entries in the version table before stamping. - - .. versionadded:: 1.2 - - """ - - script = ScriptDirectory.from_config(config) - - if sql: - destination_revs = [] - starting_rev = None - for _revision in util.to_list(revision): - if ":" in _revision: - srev, _revision = _revision.split(":", 2) - - if starting_rev != srev: - if starting_rev is None: - starting_rev = srev - else: - raise util.CommandError( - "Stamp operation with --sql only supports a " - "single starting revision at a time" - ) - destination_revs.append(_revision) - else: - destination_revs = util.to_list(revision) - - def do_stamp(rev, context): - return script._stamp_revs(util.to_tuple(destination_revs), rev) - - with EnvironmentContext( - config, - script, - fn=do_stamp, - as_sql=sql, - starting_rev=starting_rev if sql else None, - destination_rev=util.to_tuple(destination_revs), - tag=tag, - purge=purge, - ): - script.run_env() - - -def edit(config, rev): - """Edit revision script(s) using $EDITOR. - - :param config: a :class:`.Config` instance. - - :param rev: target revision. - - """ - - script = ScriptDirectory.from_config(config) - - if rev == "current": - - def edit_current(rev, context): - if not rev: - raise util.CommandError("No current revisions") - for sc in script.get_revisions(rev): - util.edit(sc.path) - return [] - - with EnvironmentContext(config, script, fn=edit_current): - script.run_env() - else: - revs = script.get_revisions(rev) - if not revs: - raise util.CommandError( - "No revision files indicated by symbol '%s'" % rev - ) - for sc in revs: - util.edit(sc.path) diff --git a/venv/lib/python3.7/site-packages/alembic/config.py b/venv/lib/python3.7/site-packages/alembic/config.py deleted file mode 100644 index f2c6ef9..0000000 --- a/venv/lib/python3.7/site-packages/alembic/config.py +++ /dev/null @@ -1,581 +0,0 @@ -from argparse import ArgumentParser -import inspect -import os -import sys - -from . import command -from . import util -from .util import compat -from .util.compat import SafeConfigParser - - -class Config(object): - - r"""Represent an Alembic configuration. - - Within an ``env.py`` script, this is available - via the :attr:`.EnvironmentContext.config` attribute, - which in turn is available at ``alembic.context``:: - - from alembic import context - - some_param = context.config.get_main_option("my option") - - When invoking Alembic programatically, a new - :class:`.Config` can be created by passing - the name of an .ini file to the constructor:: - - from alembic.config import Config - alembic_cfg = Config("/path/to/yourapp/alembic.ini") - - With a :class:`.Config` object, you can then - run Alembic commands programmatically using the directives - in :mod:`alembic.command`. - - The :class:`.Config` object can also be constructed without - a filename. 
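`stamp` and `edit` round out the command set. A minimal sketch, again with a hypothetical `alembic.ini`:

```
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # hypothetical path

# write "head" into the version table without running any migrations
command.stamp(cfg, "head")

# or emit the equivalent SQL in offline mode
command.stamp(cfg, "head", sql=True)

# open the head revision script(s) in $EDITOR
command.edit(cfg, "heads")
```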
Values can be set programmatically, and - new sections will be created as needed:: - - from alembic.config import Config - alembic_cfg = Config() - alembic_cfg.set_main_option("script_location", "myapp:migrations") - alembic_cfg.set_main_option("sqlalchemy.url", "postgresql://foo/bar") - alembic_cfg.set_section_option("mysection", "foo", "bar") - - .. warning:: - - When using programmatic configuration, make sure the - ``env.py`` file in use is compatible with the target configuration; - including that the call to Python ``logging.fileConfig()`` is - omitted if the programmatic configuration doesn't actually include - logging directives. - - For passing non-string values to environments, such as connections and - engines, use the :attr:`.Config.attributes` dictionary:: - - with engine.begin() as connection: - alembic_cfg.attributes['connection'] = connection - command.upgrade(alembic_cfg, "head") - - :param file\_: name of the .ini file to open. - :param ini_section: name of the main Alembic section within the - .ini file - :param output_buffer: optional file-like input buffer which - will be passed to the :class:`.MigrationContext` - used to redirect - the output of "offline generation" when using Alembic programmatically. - :param stdout: buffer where the "print" output of commands will be sent. - Defaults to ``sys.stdout``. - - .. versionadded:: 0.4 - - :param config_args: A dictionary of keys and values that will be used - for substitution in the alembic config file. The dictionary as given - is **copied** to a new one, stored locally as the attribute - ``.config_args``. When the :attr:`.Config.file_config` attribute is - first invoked, the replacement variable ``here`` will be added to this - dictionary before the dictionary is passed to ``SafeConfigParser()`` - to parse the .ini file. - - .. versionadded:: 0.7.0 - - :param attributes: optional dictionary of arbitrary Python keys/values, - which will be populated into the :attr:`.Config.attributes` dictionary. - - .. versionadded:: 0.7.5 - - .. seealso:: - - :ref:`connection_sharing` - - """ - - def __init__( - self, - file_=None, - ini_section="alembic", - output_buffer=None, - stdout=sys.stdout, - cmd_opts=None, - config_args=util.immutabledict(), - attributes=None, - ): - """Construct a new :class:`.Config` - - """ - self.config_file_name = file_ - self.config_ini_section = ini_section - self.output_buffer = output_buffer - self.stdout = stdout - self.cmd_opts = cmd_opts - self.config_args = dict(config_args) - if attributes: - self.attributes.update(attributes) - - cmd_opts = None - """The command-line options passed to the ``alembic`` script. - - Within an ``env.py`` script this can be accessed via the - :attr:`.EnvironmentContext.config` attribute. - - .. versionadded:: 0.6.0 - - .. seealso:: - - :meth:`.EnvironmentContext.get_x_argument` - - """ - - config_file_name = None - """Filesystem path to the .ini file in use.""" - - config_ini_section = None - """Name of the config file section to read basic configuration - from. Defaults to ``alembic``, that is the ``[alembic]`` section - of the .ini file. This value is modified using the ``-n/--name`` - option to the Alembic runnier. - - """ - - @util.memoized_property - def attributes(self): - """A Python dictionary for storage of additional state. - - - This is a utility dictionary which can include not just strings but - engines, connections, schema objects, or anything else. 
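Combining the two idioms from this docstring, file-less construction and the `attributes` dictionary for non-string values, might look like the sketch below; the URL and script location are placeholder values, and the `env.py` in use must consume `attributes["connection"]`:

```
from alembic import command
from alembic.config import Config
from sqlalchemy import create_engine

# no .ini file on disk: everything is set programmatically
cfg = Config()
cfg.set_main_option("script_location", "migrations")
cfg.set_main_option("sqlalchemy.url", "sqlite:///app.db")  # hypothetical URL

# share one connection/transaction with the migrations; env.py must
# look for cfg.attributes["connection"] instead of creating an engine
engine = create_engine("sqlite:///app.db")
with engine.begin() as connection:
    cfg.attributes["connection"] = connection
    command.upgrade(cfg, "head")
```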
- Use this to pass objects into an env.py script, such as passing - a :class:`sqlalchemy.engine.base.Connection` when calling - commands from :mod:`alembic.command` programmatically. - - .. versionadded:: 0.7.5 - - .. seealso:: - - :ref:`connection_sharing` - - :paramref:`.Config.attributes` - - """ - return {} - - def print_stdout(self, text, *arg): - """Render a message to standard out. - - When :meth:`.Config.print_stdout` is called with additional args - those arguments will formatted against the provided text, - otherwise we simply output the provided text verbatim. - - e.g.:: - - >>> config.print_stdout('Some text %s', 'arg') - Some Text arg - - """ - - if arg: - output = compat.text_type(text) % arg - else: - output = compat.text_type(text) - - util.write_outstream(self.stdout, output, "\n") - - @util.memoized_property - def file_config(self): - """Return the underlying ``ConfigParser`` object. - - Direct access to the .ini file is available here, - though the :meth:`.Config.get_section` and - :meth:`.Config.get_main_option` - methods provide a possibly simpler interface. - - """ - - if self.config_file_name: - here = os.path.abspath(os.path.dirname(self.config_file_name)) - else: - here = "" - self.config_args["here"] = here - file_config = SafeConfigParser(self.config_args) - if self.config_file_name: - file_config.read([self.config_file_name]) - else: - file_config.add_section(self.config_ini_section) - return file_config - - def get_template_directory(self): - """Return the directory where Alembic setup templates are found. - - This method is used by the alembic ``init`` and ``list_templates`` - commands. - - """ - import alembic - - package_dir = os.path.abspath(os.path.dirname(alembic.__file__)) - return os.path.join(package_dir, "templates") - - def get_section(self, name, default=None): - """Return all the configuration options from a given .ini file section - as a dictionary. - - """ - if not self.file_config.has_section(name): - return default - - return dict(self.file_config.items(name)) - - def set_main_option(self, name, value): - """Set an option programmatically within the 'main' section. - - This overrides whatever was in the .ini file. - - :param name: name of the value - - :param value: the value. Note that this value is passed to - ``ConfigParser.set``, which supports variable interpolation using - pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of - an interpolation symbol must therefore be escaped, e.g. ``%%``. - The given value may refer to another value already in the file - using the interpolation format. - - """ - self.set_section_option(self.config_ini_section, name, value) - - def remove_main_option(self, name): - self.file_config.remove_option(self.config_ini_section, name) - - def set_section_option(self, section, name, value): - """Set an option programmatically within the given section. - - The section is created if it doesn't exist already. - The value here will override whatever was in the .ini - file. - - :param section: name of the section - - :param name: name of the value - - :param value: the value. Note that this value is passed to - ``ConfigParser.set``, which supports variable interpolation using - pyformat (e.g. ``%(some_value)s``). A raw percent sign not part of - an interpolation symbol must therefore be escaped, e.g. ``%%``. - The given value may refer to another value already in the file - using the interpolation format. 
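Because values pass through `ConfigParser`, pyformat interpolation applies and a literal percent sign must be doubled. A small sketch:

```
from alembic.config import Config

cfg = Config()  # file-less config; sections are created on demand

cfg.set_section_option("mysection", "base_dir", "/srv/app")  # hypothetical
cfg.set_section_option("mysection", "log_file", "%(base_dir)s/app.log")
cfg.set_main_option("pct_done", "100%%")  # escaped literal percent

print(cfg.get_section_option("mysection", "log_file"))  # /srv/app/app.log
print(cfg.get_main_option("pct_done", default="n/a"))   # 100%
```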
- - """ - - if not self.file_config.has_section(section): - self.file_config.add_section(section) - self.file_config.set(section, name, value) - - def get_section_option(self, section, name, default=None): - """Return an option from the given section of the .ini file. - - """ - if not self.file_config.has_section(section): - raise util.CommandError( - "No config file %r found, or file has no " - "'[%s]' section" % (self.config_file_name, section) - ) - if self.file_config.has_option(section, name): - return self.file_config.get(section, name) - else: - return default - - def get_main_option(self, name, default=None): - """Return an option from the 'main' section of the .ini file. - - This defaults to being a key from the ``[alembic]`` - section, unless the ``-n/--name`` flag were used to - indicate a different section. - - """ - return self.get_section_option(self.config_ini_section, name, default) - - -class CommandLine(object): - def __init__(self, prog=None): - self._generate_args(prog) - - def _generate_args(self, prog): - def add_options(fn, parser, positional, kwargs): - kwargs_opts = { - "template": ( - "-t", - "--template", - dict( - default="generic", - type=str, - help="Setup template for use with 'init'", - ), - ), - "message": ( - "-m", - "--message", - dict( - type=str, help="Message string to use with 'revision'" - ), - ), - "sql": ( - "--sql", - dict( - action="store_true", - help="Don't emit SQL to database - dump to " - "standard output/file instead. See docs on " - "offline mode.", - ), - ), - "tag": ( - "--tag", - dict( - type=str, - help="Arbitrary 'tag' name - can be used by " - "custom env.py scripts.", - ), - ), - "head": ( - "--head", - dict( - type=str, - help="Specify head revision or @head " - "to base new revision on.", - ), - ), - "splice": ( - "--splice", - dict( - action="store_true", - help="Allow a non-head revision as the " - "'head' to splice onto", - ), - ), - "depends_on": ( - "--depends-on", - dict( - action="append", - help="Specify one or more revision identifiers " - "which this revision should depend on.", - ), - ), - "rev_id": ( - "--rev-id", - dict( - type=str, - help="Specify a hardcoded revision id instead of " - "generating one", - ), - ), - "version_path": ( - "--version-path", - dict( - type=str, - help="Specify specific path from config for " - "version file", - ), - ), - "branch_label": ( - "--branch-label", - dict( - type=str, - help="Specify a branch label to apply to the " - "new revision", - ), - ), - "verbose": ( - "-v", - "--verbose", - dict(action="store_true", help="Use more verbose output"), - ), - "resolve_dependencies": ( - "--resolve-dependencies", - dict( - action="store_true", - help="Treat dependency versions as down revisions", - ), - ), - "autogenerate": ( - "--autogenerate", - dict( - action="store_true", - help="Populate revision script with candidate " - "migration operations, based on comparison " - "of database to model.", - ), - ), - "head_only": ( - "--head-only", - dict( - action="store_true", - help="Deprecated. 
Use --verbose for " - "additional output", - ), - ), - "rev_range": ( - "-r", - "--rev-range", - dict( - action="store", - help="Specify a revision range; " - "format is [start]:[end]", - ), - ), - "indicate_current": ( - "-i", - "--indicate-current", - dict( - action="store_true", - help="Indicate the current revision", - ), - ), - "purge": ( - "--purge", - dict( - action="store_true", - help="Unconditionally erase the version table " - "before stamping", - ), - ), - "package": ( - "--package", - dict( - action="store_true", - help="Write empty __init__.py files to the " - "environment and version locations", - ), - ), - } - positional_help = { - "directory": "location of scripts directory", - "revision": "revision identifier", - "revisions": "one or more revisions, or 'heads' for all heads", - } - for arg in kwargs: - if arg in kwargs_opts: - args = kwargs_opts[arg] - args, kw = args[0:-1], args[-1] - parser.add_argument(*args, **kw) - - for arg in positional: - if ( - arg == "revisions" - or fn in positional_translations - and positional_translations[fn][arg] == "revisions" - ): - subparser.add_argument( - "revisions", - nargs="+", - help=positional_help.get("revisions"), - ) - else: - subparser.add_argument(arg, help=positional_help.get(arg)) - - parser = ArgumentParser(prog=prog) - - parser.add_argument( - "-c", - "--config", - type=str, - default=os.environ.get("ALEMBIC_CONFIG", "alembic.ini"), - help="Alternate config file; defaults to value of " - 'ALEMBIC_CONFIG environment variable, or "alembic.ini"', - ) - parser.add_argument( - "-n", - "--name", - type=str, - default="alembic", - help="Name of section in .ini file to " "use for Alembic config", - ) - parser.add_argument( - "-x", - action="append", - help="Additional arguments consumed by " - "custom env.py scripts, e.g. 
-x " - "setting1=somesetting -x setting2=somesetting", - ) - parser.add_argument( - "--raiseerr", - action="store_true", - help="Raise a full stack trace on error", - ) - subparsers = parser.add_subparsers() - - positional_translations = {command.stamp: {"revision": "revisions"}} - - for fn in [getattr(command, n) for n in dir(command)]: - if ( - inspect.isfunction(fn) - and fn.__name__[0] != "_" - and fn.__module__ == "alembic.command" - ): - - spec = compat.inspect_getargspec(fn) - if spec[3]: - positional = spec[0][1 : -len(spec[3])] - kwarg = spec[0][-len(spec[3]) :] - else: - positional = spec[0][1:] - kwarg = [] - - if fn in positional_translations: - positional = [ - positional_translations[fn].get(name, name) - for name in positional - ] - - # parse first line(s) of helptext without a line break - help_ = fn.__doc__ - if help_: - help_text = [] - for line in help_.split("\n"): - if not line.strip(): - break - else: - help_text.append(line.strip()) - else: - help_text = "" - subparser = subparsers.add_parser( - fn.__name__, help=" ".join(help_text) - ) - add_options(fn, subparser, positional, kwarg) - subparser.set_defaults(cmd=(fn, positional, kwarg)) - self.parser = parser - - def run_cmd(self, config, options): - fn, positional, kwarg = options.cmd - - try: - fn( - config, - *[getattr(options, k, None) for k in positional], - **dict((k, getattr(options, k, None)) for k in kwarg) - ) - except util.CommandError as e: - if options.raiseerr: - raise - else: - util.err(str(e)) - - def main(self, argv=None): - options = self.parser.parse_args(argv) - if not hasattr(options, "cmd"): - # see http://bugs.python.org/issue9253, argparse - # behavior changed incompatibly in py3.3 - self.parser.error("too few arguments") - else: - cfg = Config( - file_=options.config, - ini_section=options.name, - cmd_opts=options, - ) - self.run_cmd(cfg, options) - - -def main(argv=None, prog=None, **kwargs): - """The console runner function for Alembic.""" - - CommandLine(prog=prog).main(argv=argv) - - -if __name__ == "__main__": - main() diff --git a/venv/lib/python3.7/site-packages/alembic/context.py b/venv/lib/python3.7/site-packages/alembic/context.py deleted file mode 100644 index 758fca8..0000000 --- a/venv/lib/python3.7/site-packages/alembic/context.py +++ /dev/null @@ -1,5 +0,0 @@ -from .runtime.environment import EnvironmentContext - -# create proxy functions for -# each method on the EnvironmentContext class. -EnvironmentContext.create_module_class_proxy(globals(), locals()) diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__init__.py b/venv/lib/python3.7/site-packages/alembic/ddl/__init__.py deleted file mode 100644 index 7d50ba0..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from . import mssql # noqa -from . import mysql # noqa -from . import oracle # noqa -from . import postgresql # noqa -from . 
import sqlite # noqa -from .impl import DefaultImpl # noqa diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 95ed311..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/base.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/base.cpython-37.pyc deleted file mode 100644 index cfc480c..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/base.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/impl.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/impl.cpython-37.pyc deleted file mode 100644 index afcf940..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/impl.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/mssql.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/mssql.cpython-37.pyc deleted file mode 100644 index 407a60b..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/mssql.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/mysql.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/mysql.cpython-37.pyc deleted file mode 100644 index 67f709b..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/mysql.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/oracle.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/oracle.cpython-37.pyc deleted file mode 100644 index 6fd6407..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/oracle.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/postgresql.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/postgresql.cpython-37.pyc deleted file mode 100644 index 78026ae..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/postgresql.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/sqlite.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/sqlite.cpython-37.pyc deleted file mode 100644 index 955ed75..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/ddl/__pycache__/sqlite.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/base.py b/venv/lib/python3.7/site-packages/alembic/ddl/base.py deleted file mode 100644 index b8d9dce..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/base.py +++ /dev/null @@ -1,227 +0,0 @@ -import functools - -from sqlalchemy import exc -from sqlalchemy import Integer -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.schema import Column -from sqlalchemy.schema import DDLElement -from sqlalchemy.sql.elements import quoted_name - -from ..util import sqla_compat -from ..util.sqla_compat import _columns_for_constraint # noqa -from ..util.sqla_compat import _find_columns # noqa -from ..util.sqla_compat import _fk_spec # noqa -from ..util.sqla_compat import _is_type_bound # 
noqa -from ..util.sqla_compat import _table_for_constraint # noqa - - -class AlterTable(DDLElement): - - """Represent an ALTER TABLE statement. - - Only the string name and optional schema name of the table - is required, not a full Table object. - - """ - - def __init__(self, table_name, schema=None): - self.table_name = table_name - self.schema = schema - - -class RenameTable(AlterTable): - def __init__(self, old_table_name, new_table_name, schema=None): - super(RenameTable, self).__init__(old_table_name, schema=schema) - self.new_table_name = new_table_name - - -class AlterColumn(AlterTable): - def __init__( - self, - name, - column_name, - schema=None, - existing_type=None, - existing_nullable=None, - existing_server_default=None, - existing_comment=None, - ): - super(AlterColumn, self).__init__(name, schema=schema) - self.column_name = column_name - self.existing_type = ( - sqltypes.to_instance(existing_type) - if existing_type is not None - else None - ) - self.existing_nullable = existing_nullable - self.existing_server_default = existing_server_default - self.existing_comment = existing_comment - - -class ColumnNullable(AlterColumn): - def __init__(self, name, column_name, nullable, **kw): - super(ColumnNullable, self).__init__(name, column_name, **kw) - self.nullable = nullable - - -class ColumnType(AlterColumn): - def __init__(self, name, column_name, type_, **kw): - super(ColumnType, self).__init__(name, column_name, **kw) - self.type_ = sqltypes.to_instance(type_) - - -class ColumnName(AlterColumn): - def __init__(self, name, column_name, newname, **kw): - super(ColumnName, self).__init__(name, column_name, **kw) - self.newname = newname - - -class ColumnDefault(AlterColumn): - def __init__(self, name, column_name, default, **kw): - super(ColumnDefault, self).__init__(name, column_name, **kw) - self.default = default - - -class AddColumn(AlterTable): - def __init__(self, name, column, schema=None): - super(AddColumn, self).__init__(name, schema=schema) - self.column = column - - -class DropColumn(AlterTable): - def __init__(self, name, column, schema=None): - super(DropColumn, self).__init__(name, schema=schema) - self.column = column - - -class ColumnComment(AlterColumn): - def __init__(self, name, column_name, comment, **kw): - super(ColumnComment, self).__init__(name, column_name, **kw) - self.comment = comment - - -@compiles(RenameTable) -def visit_rename_table(element, compiler, **kw): - return "%s RENAME TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_table_name(compiler, element.new_table_name, element.schema), - ) - - -@compiles(AddColumn) -def visit_add_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - add_column(compiler, element.column, **kw), - ) - - -@compiles(DropColumn) -def visit_drop_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - drop_column(compiler, element.column.name, **kw), - ) - - -@compiles(ColumnNullable) -def visit_column_nullable(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "DROP NOT NULL" if element.nullable else "SET NOT NULL", - ) - - -@compiles(ColumnType) -def visit_column_type(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "TYPE %s" % 
format_type(compiler, element.type_), - ) - - -@compiles(ColumnName) -def visit_column_name(element, compiler, **kw): - return "%s RENAME %s TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - ) - - -@compiles(ColumnDefault) -def visit_column_default(element, compiler, **kw): - if sqla_compat.has_computed and ( - isinstance(element.default, sqla_compat.Computed) - or isinstance(element.existing_server_default, sqla_compat.Computed) - ): - raise exc.CompileError( - 'Adding or removing a "computed" construct, e.g. GENERATED ' - "ALWAYS AS, to or from an existing column is not supported." - ) - - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "SET DEFAULT %s" % format_server_default(compiler, element.default) - if element.default is not None - else "DROP DEFAULT", - ) - - -def quote_dotted(name, quote): - """quote the elements of a dotted name""" - - if isinstance(name, quoted_name): - return quote(name) - result = ".".join([quote(x) for x in name.split(".")]) - return result - - -def format_table_name(compiler, name, schema): - quote = functools.partial(compiler.preparer.quote) - if schema: - return quote_dotted(schema, quote) + "." + quote(name) - else: - return quote(name) - - -def format_column_name(compiler, name): - return compiler.preparer.quote(name) - - -def format_server_default(compiler, default): - return compiler.get_column_default_string( - Column("x", Integer, server_default=default) - ) - - -def format_type(compiler, type_): - return compiler.dialect.type_compiler.process(type_) - - -def alter_table(compiler, name, schema): - return "ALTER TABLE %s" % format_table_name(compiler, name, schema) - - -def drop_column(compiler, name): - return "DROP COLUMN %s" % format_column_name(compiler, name) - - -def alter_column(compiler, name): - return "ALTER COLUMN %s" % format_column_name(compiler, name) - - -def add_column(compiler, column, **kw): - text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) - - const = " ".join( - compiler.process(constraint) for constraint in column.constraints - ) - if const: - text += " " + const - - return text diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/impl.py b/venv/lib/python3.7/site-packages/alembic/ddl/impl.py deleted file mode 100644 index 9df2a72..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/impl.py +++ /dev/null @@ -1,509 +0,0 @@ -from collections import namedtuple -import re - -from sqlalchemy import schema -from sqlalchemy import text - -from . import base -from .. import util -from ..util import sqla_compat -from ..util.compat import string_types -from ..util.compat import text_type -from ..util.compat import with_metaclass - - -class ImplMeta(type): - def __init__(cls, classname, bases, dict_): - newtype = type.__init__(cls, classname, bases, dict_) - if "__dialect__" in dict_: - _impls[dict_["__dialect__"]] = cls - return newtype - - -_impls = {} - -Params = namedtuple("Params", ["token0", "tokens", "args", "kwargs"]) - - -class DefaultImpl(with_metaclass(ImplMeta)): - - """Provide the entrypoint for major migration operations, - including database-specific behavioral variances. 
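As `ImplMeta` above shows, a dialect implementation registers itself simply by declaring `__dialect__`. A minimal sketch with a made-up dialect name:

```
from alembic.ddl.impl import DefaultImpl

class MyDialectImpl(DefaultImpl):
    # "mydialect" is a placeholder; ImplMeta adds this class to the
    # _impls registry under that key as a side effect of class creation
    __dialect__ = "mydialect"
    transactional_ddl = True  # this backend can run DDL inside a transaction
    command_terminator = ";"
```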
- - While individual SQL/DDL constructs already provide - for database-specific implementations, variances here - allow for entirely different sequences of operations - to take place for a particular migration, such as - SQL Server's special 'IDENTITY INSERT' step for - bulk inserts. - - """ - - __dialect__ = "default" - - transactional_ddl = False - command_terminator = ";" - type_synonyms = ({"NUMERIC", "DECIMAL"},) - type_arg_extract = () - - def __init__( - self, - dialect, - connection, - as_sql, - transactional_ddl, - output_buffer, - context_opts, - ): - self.dialect = dialect - self.connection = connection - self.as_sql = as_sql - self.literal_binds = context_opts.get("literal_binds", False) - - self.output_buffer = output_buffer - self.memo = {} - self.context_opts = context_opts - if transactional_ddl is not None: - self.transactional_ddl = transactional_ddl - - if self.literal_binds: - if not self.as_sql: - raise util.CommandError( - "Can't use literal_binds setting without as_sql mode" - ) - - @classmethod - def get_by_dialect(cls, dialect): - return _impls[dialect.name] - - def static_output(self, text): - self.output_buffer.write(text_type(text + "\n\n")) - self.output_buffer.flush() - - def requires_recreate_in_batch(self, batch_op): - """Return True if the given :class:`.BatchOperationsImpl` - would need the table to be recreated and copied in order to - proceed. - - Normally, only returns True on SQLite when operations other - than add_column are present. - - """ - return False - - def prep_table_for_batch(self, table): - """perform any operations needed on a table before a new - one is created to replace it in batch mode. - - the PG dialect uses this to drop constraints on the table - before the new one uses those same names. - - """ - - @property - def bind(self): - return self.connection - - def _exec( - self, - construct, - execution_options=None, - multiparams=(), - params=util.immutabledict(), - ): - if isinstance(construct, string_types): - construct = text(construct) - if self.as_sql: - if multiparams or params: - # TODO: coverage - raise Exception("Execution arguments not allowed with as_sql") - - if self.literal_binds and not isinstance( - construct, schema.DDLElement - ): - compile_kw = dict(compile_kwargs={"literal_binds": True}) - else: - compile_kw = {} - - self.static_output( - text_type( - construct.compile(dialect=self.dialect, **compile_kw) - ) - .replace("\t", " ") - .strip() - + self.command_terminator - ) - else: - conn = self.connection - if execution_options: - conn = conn.execution_options(**execution_options) - return conn.execute(construct, *multiparams, **params) - - def execute(self, sql, execution_options=None): - self._exec(sql, execution_options) - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - autoincrement=None, - comment=False, - existing_comment=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - existing_autoincrement=None, - ): - if autoincrement is not None or existing_autoincrement is not None: - util.warn( - "autoincrement and existing_autoincrement " - "only make sense for MySQL", - stacklevel=3, - ) - if nullable is not None: - self._exec( - base.ColumnNullable( - table_name, - column_name, - nullable, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - if server_default 
is not False: - self._exec( - base.ColumnDefault( - table_name, - column_name, - server_default, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - if type_ is not None: - self._exec( - base.ColumnType( - table_name, - column_name, - type_, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - - if comment is not False: - self._exec( - base.ColumnComment( - table_name, - column_name, - comment, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - ) - ) - - # do the new name last ;) - if name is not None: - self._exec( - base.ColumnName( - table_name, - column_name, - name, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - ) - ) - - def add_column(self, table_name, column, schema=None): - self._exec(base.AddColumn(table_name, column, schema=schema)) - - def drop_column(self, table_name, column, schema=None, **kw): - self._exec(base.DropColumn(table_name, column, schema=schema)) - - def add_constraint(self, const): - if const._create_rule is None or const._create_rule(self): - self._exec(schema.AddConstraint(const)) - - def drop_constraint(self, const): - self._exec(schema.DropConstraint(const)) - - def rename_table(self, old_table_name, new_table_name, schema=None): - self._exec( - base.RenameTable(old_table_name, new_table_name, schema=schema) - ) - - def create_table(self, table): - table.dispatch.before_create( - table, self.connection, checkfirst=False, _ddl_runner=self - ) - self._exec(schema.CreateTable(table)) - table.dispatch.after_create( - table, self.connection, checkfirst=False, _ddl_runner=self - ) - for index in table.indexes: - self._exec(schema.CreateIndex(index)) - - with_comment = ( - sqla_compat._dialect_supports_comments(self.dialect) - and not self.dialect.inline_comments - ) - comment = sqla_compat._comment_attribute(table) - if comment and with_comment: - self.create_table_comment(table) - - for column in table.columns: - comment = sqla_compat._comment_attribute(column) - if comment and with_comment: - self.create_column_comment(column) - - def drop_table(self, table): - self._exec(schema.DropTable(table)) - - def create_index(self, index): - self._exec(schema.CreateIndex(index)) - - def create_table_comment(self, table): - self._exec(schema.SetTableComment(table)) - - def drop_table_comment(self, table): - self._exec(schema.DropTableComment(table)) - - def create_column_comment(self, column): - self._exec(schema.SetColumnComment(column)) - - def drop_index(self, index): - self._exec(schema.DropIndex(index)) - - def bulk_insert(self, table, rows, multiinsert=True): - if not isinstance(rows, list): - raise TypeError("List expected") - elif rows and not isinstance(rows[0], dict): - raise TypeError("List of dictionaries expected") - if self.as_sql: - for row in rows: - self._exec( - table.insert(inline=True).values( - **dict( - ( - k, - sqla_compat._literal_bindparam( - k, v, type_=table.c[k].type - ) - if not isinstance( - v, sqla_compat._literal_bindparam - ) - else v, - ) - for k, v in row.items() - ) - ) - ) - else: - # work around http://www.sqlalchemy.org/trac/ticket/2461 - if not hasattr(table, 
"_autoincrement_column"): - table._autoincrement_column = None - if rows: - if multiinsert: - self._exec(table.insert(inline=True), multiparams=rows) - else: - for row in rows: - self._exec(table.insert(inline=True).values(**row)) - - def _tokenize_column_type(self, column): - definition = self.dialect.type_compiler.process(column.type).lower() - - # tokenize the SQLAlchemy-generated version of a type, so that - # the two can be compared. - # - # examples: - # NUMERIC(10, 5) - # TIMESTAMP WITH TIMEZONE - # INTEGER UNSIGNED - # INTEGER (10) UNSIGNED - # INTEGER(10) UNSIGNED - # varchar character set utf8 - # - - tokens = re.findall(r"[\w\-_]+|\(.+?\)", definition) - - term_tokens = [] - paren_term = None - - for token in tokens: - if re.match(r"^\(.*\)$", token): - paren_term = token - else: - term_tokens.append(token) - - params = Params(term_tokens[0], term_tokens[1:], [], {}) - - if paren_term: - for term in re.findall("[^(),]+", paren_term): - if "=" in term: - key, val = term.split("=") - params.kwargs[key.strip()] = val.strip() - else: - params.args.append(term.strip()) - - return params - - def _column_types_match(self, inspector_params, metadata_params): - if inspector_params.token0 == metadata_params.token0: - return True - - synonyms = [{t.lower() for t in batch} for batch in self.type_synonyms] - inspector_all_terms = " ".join( - [inspector_params.token0] + inspector_params.tokens - ) - metadata_all_terms = " ".join( - [metadata_params.token0] + metadata_params.tokens - ) - - for batch in synonyms: - if {inspector_all_terms, metadata_all_terms}.issubset(batch) or { - inspector_params.token0, - metadata_params.token0, - }.issubset(batch): - return True - return False - - def _column_args_match(self, inspected_params, meta_params): - """We want to compare column parameters. However, we only want - to compare parameters that are set. If they both have `collation`, - we want to make sure they are the same. However, if only one - specifies it, dont flag it for being less specific - """ - - if ( - len(meta_params.tokens) == len(inspected_params.tokens) - and meta_params.tokens != inspected_params.tokens - ): - return False - - if ( - len(meta_params.args) == len(inspected_params.args) - and meta_params.args != inspected_params.args - ): - return False - - insp = " ".join(inspected_params.tokens).lower() - meta = " ".join(meta_params.tokens).lower() - - for reg in self.type_arg_extract: - mi = re.search(reg, insp) - mm = re.search(reg, meta) - - if mi and mm and mi.group(1) != mm.group(1): - return False - - return True - - def compare_type(self, inspector_column, metadata_column): - """Returns True if there ARE differences between the types of the two - columns. 
Takes impl.type_synonyms into account between retrospected - and metadata types - """ - inspector_params = self._tokenize_column_type(inspector_column) - metadata_params = self._tokenize_column_type(metadata_column) - - if not self._column_types_match(inspector_params, metadata_params,): - return True - if not self._column_args_match(inspector_params, metadata_params): - return True - return False - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - return rendered_inspector_default != rendered_metadata_default - - def correct_for_autogen_constraints( - self, - conn_uniques, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - pass - - def render_ddl_sql_expr(self, expr, is_server_default=False, **kw): - """Render a SQL expression that is typically a server default, - index expression, etc. - - .. versionadded:: 1.0.11 - - """ - - compile_kw = dict( - compile_kwargs={"literal_binds": True, "include_table": False} - ) - return text_type(expr.compile(dialect=self.dialect, **compile_kw)) - - def _compat_autogen_column_reflect(self, inspector): - return self.autogen_column_reflect - - def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks): - pass - - def autogen_column_reflect(self, inspector, table, column_info): - """A hook that is attached to the 'column_reflect' event for when - a Table is reflected from the database during the autogenerate - process. - - Dialects can elect to modify the information gathered here. - - """ - - def start_migrations(self): - """A hook called when :meth:`.EnvironmentContext.run_migrations` - is called. - - Implementations can set up per-migration-run state here. - - """ - - def emit_begin(self): - """Emit the string ``BEGIN``, or the backend-specific - equivalent, on the current connection context. - - This is used in offline mode and typically - via :meth:`.EnvironmentContext.begin_transaction`. - - """ - self.static_output("BEGIN" + self.command_terminator) - - def emit_commit(self): - """Emit the string ``COMMIT``, or the backend-specific - equivalent, on the current connection context. - - This is used in offline mode and typically - via :meth:`.EnvironmentContext.begin_transaction`. - - """ - self.static_output("COMMIT" + self.command_terminator) - - def render_type(self, type_obj, autogen_context): - return False diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/mssql.py b/venv/lib/python3.7/site-packages/alembic/ddl/mssql.py deleted file mode 100644 index 78a7eb6..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/mssql.py +++ /dev/null @@ -1,273 +0,0 @@ -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.schema import Column -from sqlalchemy.schema import CreateIndex -from sqlalchemy.sql.expression import ClauseElement -from sqlalchemy.sql.expression import Executable - -from .base import AddColumn -from .base import alter_column -from .base import alter_table -from .base import ColumnDefault -from .base import ColumnName -from .base import ColumnNullable -from .base import ColumnType -from .base import format_column_name -from .base import format_server_default -from .base import format_table_name -from .base import format_type -from .base import RenameTable -from .impl import DefaultImpl -from .. 
import util - - -class MSSQLImpl(DefaultImpl): - __dialect__ = "mssql" - transactional_ddl = True - batch_separator = "GO" - - type_synonyms = DefaultImpl.type_synonyms + ({"VARCHAR", "NVARCHAR"},) - - def __init__(self, *arg, **kw): - super(MSSQLImpl, self).__init__(*arg, **kw) - self.batch_separator = self.context_opts.get( - "mssql_batch_separator", self.batch_separator - ) - - def _exec(self, construct, *args, **kw): - result = super(MSSQLImpl, self)._exec(construct, *args, **kw) - if self.as_sql and self.batch_separator: - self.static_output(self.batch_separator) - return result - - def emit_begin(self): - self.static_output("BEGIN TRANSACTION" + self.command_terminator) - - def emit_commit(self): - super(MSSQLImpl, self).emit_commit() - if self.as_sql and self.batch_separator: - self.static_output(self.batch_separator) - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - **kw - ): - - if nullable is not None and existing_type is None: - if type_ is not None: - existing_type = type_ - # the NULL/NOT NULL alter will handle - # the type alteration - type_ = None - else: - raise util.CommandError( - "MS-SQL ALTER COLUMN operations " - "with NULL or NOT NULL require the " - "existing_type or a new type_ be passed." - ) - - super(MSSQLImpl, self).alter_column( - table_name, - column_name, - nullable=nullable, - type_=type_, - schema=schema, - existing_type=existing_type, - existing_nullable=existing_nullable, - **kw - ) - - if server_default is not False: - if existing_server_default is not False or server_default is None: - self._exec( - _ExecDropConstraint( - table_name, - column_name, - "sys.default_constraints", - schema, - ) - ) - if server_default is not None: - super(MSSQLImpl, self).alter_column( - table_name, - column_name, - schema=schema, - server_default=server_default, - ) - - if name is not None: - super(MSSQLImpl, self).alter_column( - table_name, column_name, schema=schema, name=name - ) - - def create_index(self, index): - # this likely defaults to None if not present, so get() - # should normally not return the default value. 
being - # defensive in any case - mssql_include = index.kwargs.get("mssql_include", None) or () - for col in mssql_include: - if col not in index.table.c: - index.table.append_column(Column(col, sqltypes.NullType)) - self._exec(CreateIndex(index)) - - def bulk_insert(self, table, rows, **kw): - if self.as_sql: - self._exec( - "SET IDENTITY_INSERT %s ON" - % self.dialect.identifier_preparer.format_table(table) - ) - super(MSSQLImpl, self).bulk_insert(table, rows, **kw) - self._exec( - "SET IDENTITY_INSERT %s OFF" - % self.dialect.identifier_preparer.format_table(table) - ) - else: - super(MSSQLImpl, self).bulk_insert(table, rows, **kw) - - def drop_column(self, table_name, column, schema=None, **kw): - drop_default = kw.pop("mssql_drop_default", False) - if drop_default: - self._exec( - _ExecDropConstraint( - table_name, column, "sys.default_constraints", schema - ) - ) - drop_check = kw.pop("mssql_drop_check", False) - if drop_check: - self._exec( - _ExecDropConstraint( - table_name, column, "sys.check_constraints", schema - ) - ) - drop_fks = kw.pop("mssql_drop_foreign_key", False) - if drop_fks: - self._exec(_ExecDropFKConstraint(table_name, column, schema)) - super(MSSQLImpl, self).drop_column( - table_name, column, schema=schema, **kw - ) - - -class _ExecDropConstraint(Executable, ClauseElement): - def __init__(self, tname, colname, type_, schema): - self.tname = tname - self.colname = colname - self.type_ = type_ - self.schema = schema - - -class _ExecDropFKConstraint(Executable, ClauseElement): - def __init__(self, tname, colname, schema): - self.tname = tname - self.colname = colname - self.schema = schema - - -@compiles(_ExecDropConstraint, "mssql") -def _exec_drop_col_constraint(element, compiler, **kw): - schema, tname, colname, type_ = ( - element.schema, - element.tname, - element.colname, - element.type_, - ) - # from http://www.mssqltips.com/sqlservertip/1425/\ - # working-with-default-constraints-in-sql-server/ - # TODO: needs table formatting, etc. - return """declare @const_name varchar(256) -select @const_name = [name] from %(type)s -where parent_object_id = object_id('%(schema_dot)s%(tname)s') -and col_name(parent_object_id, parent_column_id) = '%(colname)s' -exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { - "type": type_, - "tname": tname, - "colname": colname, - "tname_quoted": format_table_name(compiler, tname, schema), - "schema_dot": schema + "." if schema else "", - } - - -@compiles(_ExecDropFKConstraint, "mssql") -def _exec_drop_col_fk_constraint(element, compiler, **kw): - schema, tname, colname = element.schema, element.tname, element.colname - - return """declare @const_name varchar(256) -select @const_name = [name] from -sys.foreign_keys fk join sys.foreign_key_columns fkc -on fk.object_id=fkc.constraint_object_id -where fkc.parent_object_id = object_id('%(schema_dot)s%(tname)s') -`and col_name(fkc.parent_object_id, fkc.parent_column_id) = '%(colname)s' -exec('alter table %(tname_quoted)s drop constraint ' + @const_name)""" % { - "tname": tname, - "colname": colname, - "tname_quoted": format_table_name(compiler, tname, schema), - "schema_dot": schema + "." 
if schema else "", - } - - -@compiles(AddColumn, "mssql") -def visit_add_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - mssql_add_column(compiler, element.column, **kw), - ) - - -def mssql_add_column(compiler, column, **kw): - return "ADD %s" % compiler.get_column_specification(column, **kw) - - -@compiles(ColumnNullable, "mssql") -def visit_column_nullable(element, compiler, **kw): - return "%s %s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - format_type(compiler, element.existing_type), - "NULL" if element.nullable else "NOT NULL", - ) - - -@compiles(ColumnDefault, "mssql") -def visit_column_default(element, compiler, **kw): - # TODO: there can also be a named constraint - # with ADD CONSTRAINT here - return "%s ADD DEFAULT %s FOR %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_server_default(compiler, element.default), - format_column_name(compiler, element.column_name), - ) - - -@compiles(ColumnName, "mssql") -def visit_rename_column(element, compiler, **kw): - return "EXEC sp_rename '%s.%s', %s, 'COLUMN'" % ( - format_table_name(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - ) - - -@compiles(ColumnType, "mssql") -def visit_column_type(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - format_type(compiler, element.type_), - ) - - -@compiles(RenameTable, "mssql") -def visit_rename_table(element, compiler, **kw): - return "EXEC sp_rename '%s', %s" % ( - format_table_name(compiler, element.table_name, element.schema), - format_table_name(compiler, element.new_table_name, None), - ) diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/mysql.py b/venv/lib/python3.7/site-packages/alembic/ddl/mysql.py deleted file mode 100644 index 02ce253..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/mysql.py +++ /dev/null @@ -1,400 +0,0 @@ -import re - -from sqlalchemy import schema -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles - -from .base import alter_table -from .base import AlterColumn -from .base import ColumnDefault -from .base import ColumnName -from .base import ColumnNullable -from .base import ColumnType -from .base import format_column_name -from .base import format_server_default -from .impl import DefaultImpl -from .. 
import util -from ..autogenerate import compare -from ..util.compat import string_types -from ..util.sqla_compat import _is_mariadb -from ..util.sqla_compat import _is_type_bound - - -class MySQLImpl(DefaultImpl): - __dialect__ = "mysql" - - transactional_ddl = False - type_synonyms = DefaultImpl.type_synonyms + ({"BOOL", "TINYINT"},) - type_arg_extract = [r"character set ([\w\-_]+)", r"collate ([\w\-_]+)"] - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - autoincrement=None, - existing_autoincrement=None, - comment=False, - existing_comment=None, - **kw - ): - if name is not None or self._is_mysql_allowed_functional_default( - type_ if type_ is not None else existing_type, server_default - ): - self._exec( - MySQLChangeColumn( - table_name, - column_name, - schema=schema, - newname=name if name is not None else column_name, - nullable=nullable - if nullable is not None - else existing_nullable - if existing_nullable is not None - else True, - type_=type_ if type_ is not None else existing_type, - default=server_default - if server_default is not False - else existing_server_default, - autoincrement=autoincrement - if autoincrement is not None - else existing_autoincrement, - comment=comment - if comment is not False - else existing_comment, - ) - ) - elif ( - nullable is not None - or type_ is not None - or autoincrement is not None - or comment is not False - ): - self._exec( - MySQLModifyColumn( - table_name, - column_name, - schema=schema, - newname=name if name is not None else column_name, - nullable=nullable - if nullable is not None - else existing_nullable - if existing_nullable is not None - else True, - type_=type_ if type_ is not None else existing_type, - default=server_default - if server_default is not False - else existing_server_default, - autoincrement=autoincrement - if autoincrement is not None - else existing_autoincrement, - comment=comment - if comment is not False - else existing_comment, - ) - ) - elif server_default is not False: - self._exec( - MySQLAlterDefault( - table_name, column_name, server_default, schema=schema - ) - ) - - def drop_constraint(self, const): - if isinstance(const, schema.CheckConstraint) and _is_type_bound(const): - return - - super(MySQLImpl, self).drop_constraint(const) - - def _is_mysql_allowed_functional_default(self, type_, server_default): - return ( - type_ is not None - and type_._type_affinity is sqltypes.DateTime - and server_default is not None - ) - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - # partially a workaround for SQLAlchemy issue #3023; if the - # column were created without "NOT NULL", MySQL may have added - # an implicit default of '0' which we need to skip - # TODO: this is not really covered anymore ? 
- if ( - metadata_column.type._type_affinity is sqltypes.Integer - and inspector_column.primary_key - and not inspector_column.autoincrement - and not rendered_metadata_default - and rendered_inspector_default == "'0'" - ): - return False - elif inspector_column.type._type_affinity is sqltypes.Integer: - rendered_inspector_default = ( - re.sub(r"^'|'$", "", rendered_inspector_default) - if rendered_inspector_default is not None - else None - ) - return rendered_inspector_default != rendered_metadata_default - elif rendered_inspector_default and rendered_metadata_default: - # adjust for "function()" vs. "FUNCTION" as can occur particularly - # for the CURRENT_TIMESTAMP function on newer MariaDB versions - - # SQLAlchemy MySQL dialect bundles ON UPDATE into the server - # default; adjust for this possibly being present. - onupdate_ins = re.match( - r"(.*) (on update.*?)(?:\(\))?$", - rendered_inspector_default.lower(), - ) - onupdate_met = re.match( - r"(.*) (on update.*?)(?:\(\))?$", - rendered_metadata_default.lower(), - ) - - if onupdate_ins: - if not onupdate_met: - return True - elif onupdate_ins.group(2) != onupdate_met.group(2): - return True - - rendered_inspector_default = onupdate_ins.group(1) - rendered_metadata_default = onupdate_met.group(1) - - return re.sub( - r"(.*?)(?:\(\))?$", r"\1", rendered_inspector_default.lower() - ) != re.sub( - r"(.*?)(?:\(\))?$", r"\1", rendered_metadata_default.lower() - ) - else: - return rendered_inspector_default != rendered_metadata_default - - def correct_for_autogen_constraints( - self, - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - - # TODO: if SQLA 1.0, make use of "duplicates_index" - # metadata - removed = set() - for idx in list(conn_indexes): - if idx.unique: - continue - # MySQL puts implicit indexes on FK columns, even if - # composite and even if MyISAM, so can't check this too easily. - # the name of the index may be the column name or it may - # be the name of the FK constraint. - for col in idx.columns: - if idx.name == col.name: - conn_indexes.remove(idx) - removed.add(idx.name) - break - for fk in col.foreign_keys: - if fk.name == idx.name: - conn_indexes.remove(idx) - removed.add(idx.name) - break - if idx.name in removed: - break - - # then remove indexes from the "metadata_indexes" - # that we've removed from reflected, otherwise they come out - # as adds (see #202) - for idx in list(metadata_indexes): - if idx.name in removed: - metadata_indexes.remove(idx) - - def correct_for_autogen_foreignkeys(self, conn_fks, metadata_fks): - conn_fk_by_sig = dict( - (compare._fk_constraint_sig(fk).sig, fk) for fk in conn_fks - ) - metadata_fk_by_sig = dict( - (compare._fk_constraint_sig(fk).sig, fk) for fk in metadata_fks - ) - - for sig in set(conn_fk_by_sig).intersection(metadata_fk_by_sig): - mdfk = metadata_fk_by_sig[sig] - cnfk = conn_fk_by_sig[sig] - # MySQL considers RESTRICT to be the default and doesn't - # report on it. 
if the model has explicit RESTRICT and - # the conn FK has None, set it to RESTRICT - if ( - mdfk.ondelete is not None - and mdfk.ondelete.lower() == "restrict" - and cnfk.ondelete is None - ): - cnfk.ondelete = "RESTRICT" - if ( - mdfk.onupdate is not None - and mdfk.onupdate.lower() == "restrict" - and cnfk.onupdate is None - ): - cnfk.onupdate = "RESTRICT" - - -class MySQLAlterDefault(AlterColumn): - def __init__(self, name, column_name, default, schema=None): - super(AlterColumn, self).__init__(name, schema=schema) - self.column_name = column_name - self.default = default - - -class MySQLChangeColumn(AlterColumn): - def __init__( - self, - name, - column_name, - schema=None, - newname=None, - type_=None, - nullable=None, - default=False, - autoincrement=None, - comment=False, - ): - super(AlterColumn, self).__init__(name, schema=schema) - self.column_name = column_name - self.nullable = nullable - self.newname = newname - self.default = default - self.autoincrement = autoincrement - self.comment = comment - if type_ is None: - raise util.CommandError( - "All MySQL CHANGE/MODIFY COLUMN operations " - "require the existing type." - ) - - self.type_ = sqltypes.to_instance(type_) - - -class MySQLModifyColumn(MySQLChangeColumn): - pass - - -@compiles(ColumnNullable, "mysql") -@compiles(ColumnName, "mysql") -@compiles(ColumnDefault, "mysql") -@compiles(ColumnType, "mysql") -def _mysql_doesnt_support_individual(element, compiler, **kw): - raise NotImplementedError( - "Individual alter column constructs not supported by MySQL" - ) - - -@compiles(MySQLAlterDefault, "mysql") -def _mysql_alter_default(element, compiler, **kw): - return "%s ALTER COLUMN %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - "SET DEFAULT %s" % format_server_default(compiler, element.default) - if element.default is not None - else "DROP DEFAULT", - ) - - -@compiles(MySQLModifyColumn, "mysql") -def _mysql_modify_column(element, compiler, **kw): - return "%s MODIFY %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - _mysql_colspec( - compiler, - nullable=element.nullable, - server_default=element.default, - type_=element.type_, - autoincrement=element.autoincrement, - comment=element.comment, - ), - ) - - -@compiles(MySQLChangeColumn, "mysql") -def _mysql_change_column(element, compiler, **kw): - return "%s CHANGE %s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - _mysql_colspec( - compiler, - nullable=element.nullable, - server_default=element.default, - type_=element.type_, - autoincrement=element.autoincrement, - comment=element.comment, - ), - ) - - -def _render_value(compiler, expr): - if isinstance(expr, string_types): - return "'%s'" % expr - else: - return compiler.sql_compiler.process(expr) - - -def _mysql_colspec( - compiler, nullable, server_default, type_, autoincrement, comment -): - spec = "%s %s" % ( - compiler.dialect.type_compiler.process(type_), - "NULL" if nullable else "NOT NULL", - ) - if autoincrement: - spec += " AUTO_INCREMENT" - if server_default is not False and server_default is not None: - spec += " DEFAULT %s" % _render_value(compiler, server_default) - if comment: - spec += " COMMENT %s" % compiler.sql_compiler.render_literal_value( - comment, sqltypes.String() - ) - - return spec - - -@compiles(schema.DropConstraint, 
"mysql") -def _mysql_drop_constraint(element, compiler, **kw): - """Redefine SQLAlchemy's drop constraint to - raise errors for invalid constraint type.""" - - constraint = element.element - if isinstance( - constraint, - ( - schema.ForeignKeyConstraint, - schema.PrimaryKeyConstraint, - schema.UniqueConstraint, - ), - ): - return compiler.visit_drop_constraint(element, **kw) - elif isinstance(constraint, schema.CheckConstraint): - # note that SQLAlchemy as of 1.2 does not yet support - # DROP CONSTRAINT for MySQL/MariaDB, so we implement fully - # here. - if _is_mariadb(compiler.dialect): - return "ALTER TABLE %s DROP CONSTRAINT %s" % ( - compiler.preparer.format_table(constraint.table), - compiler.preparer.format_constraint(constraint), - ) - else: - return "ALTER TABLE %s DROP CHECK %s" % ( - compiler.preparer.format_table(constraint.table), - compiler.preparer.format_constraint(constraint), - ) - else: - raise NotImplementedError( - "No generic 'DROP CONSTRAINT' in MySQL - " - "please specify constraint type" - ) diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/oracle.py b/venv/lib/python3.7/site-packages/alembic/ddl/oracle.py deleted file mode 100644 index aaf759a..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/oracle.py +++ /dev/null @@ -1,123 +0,0 @@ -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.sql import sqltypes - -from .base import AddColumn -from .base import alter_table -from .base import ColumnComment -from .base import ColumnDefault -from .base import ColumnName -from .base import ColumnNullable -from .base import ColumnType -from .base import format_column_name -from .base import format_server_default -from .base import format_table_name -from .base import format_type -from .base import RenameTable -from .impl import DefaultImpl - - -class OracleImpl(DefaultImpl): - __dialect__ = "oracle" - transactional_ddl = False - batch_separator = "/" - command_terminator = "" - type_synonyms = DefaultImpl.type_synonyms + ( - {"VARCHAR", "VARCHAR2"}, - {"BIGINT", "INTEGER", "SMALLINT", "DECIMAL", "NUMERIC", "NUMBER"}, - ) - - def __init__(self, *arg, **kw): - super(OracleImpl, self).__init__(*arg, **kw) - self.batch_separator = self.context_opts.get( - "oracle_batch_separator", self.batch_separator - ) - - def _exec(self, construct, *args, **kw): - result = super(OracleImpl, self)._exec(construct, *args, **kw) - if self.as_sql and self.batch_separator: - self.static_output(self.batch_separator) - return result - - def emit_begin(self): - self._exec("SET TRANSACTION READ WRITE") - - def emit_commit(self): - self._exec("COMMIT") - - -@compiles(AddColumn, "oracle") -def visit_add_column(element, compiler, **kw): - return "%s %s" % ( - alter_table(compiler, element.table_name, element.schema), - add_column(compiler, element.column, **kw), - ) - - -@compiles(ColumnNullable, "oracle") -def visit_column_nullable(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "NULL" if element.nullable else "NOT NULL", - ) - - -@compiles(ColumnType, "oracle") -def visit_column_type(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "%s" % format_type(compiler, element.type_), - ) - - -@compiles(ColumnName, "oracle") -def visit_column_name(element, compiler, **kw): - return "%s RENAME COLUMN %s TO %s" % ( - alter_table(compiler, element.table_name, 
element.schema), - format_column_name(compiler, element.column_name), - format_column_name(compiler, element.newname), - ) - - -@compiles(ColumnDefault, "oracle") -def visit_column_default(element, compiler, **kw): - return "%s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "DEFAULT %s" % format_server_default(compiler, element.default) - if element.default is not None - else "DEFAULT NULL", - ) - - -@compiles(ColumnComment, "oracle") -def visit_column_comment(element, compiler, **kw): - ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" - - comment = compiler.sql_compiler.render_literal_value( - (element.comment if element.comment is not None else ""), - sqltypes.String(), - ) - - return ddl.format( - table_name=element.table_name, - column_name=element.column_name, - comment=comment, - ) - - -@compiles(RenameTable, "oracle") -def visit_rename_table(element, compiler, **kw): - return "%s RENAME TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_table_name(compiler, element.new_table_name, None), - ) - - -def alter_column(compiler, name): - return "MODIFY %s" % format_column_name(compiler, name) - - -def add_column(compiler, column, **kw): - return "ADD %s" % compiler.get_column_specification(column, **kw) diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/postgresql.py b/venv/lib/python3.7/site-packages/alembic/ddl/postgresql.py deleted file mode 100644 index 4ddc0ed..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/postgresql.py +++ /dev/null @@ -1,528 +0,0 @@ -import logging -import re - -from sqlalchemy import Column -from sqlalchemy import Numeric -from sqlalchemy import text -from sqlalchemy import types as sqltypes -from sqlalchemy.dialects.postgresql import BIGINT -from sqlalchemy.dialects.postgresql import ExcludeConstraint -from sqlalchemy.dialects.postgresql import INTEGER -from sqlalchemy.sql.expression import ColumnClause -from sqlalchemy.sql.expression import UnaryExpression -from sqlalchemy.types import NULLTYPE - -from .base import alter_column -from .base import alter_table -from .base import AlterColumn -from .base import ColumnComment -from .base import compiles -from .base import format_column_name -from .base import format_table_name -from .base import format_type -from .base import RenameTable -from .impl import DefaultImpl -from .. 
import util -from ..autogenerate import render -from ..operations import ops -from ..operations import schemaobj -from ..operations.base import BatchOperations -from ..operations.base import Operations -from ..util import compat -from ..util import sqla_compat - - -log = logging.getLogger(__name__) - - -class PostgresqlImpl(DefaultImpl): - __dialect__ = "postgresql" - transactional_ddl = True - type_synonyms = DefaultImpl.type_synonyms + ( - {"FLOAT", "DOUBLE PRECISION"}, - ) - - def prep_table_for_batch(self, table): - for constraint in table.constraints: - if constraint.name is not None: - self.drop_constraint(constraint) - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - # don't do defaults for SERIAL columns - if ( - metadata_column.primary_key - and metadata_column is metadata_column.table._autoincrement_column - ): - return False - - conn_col_default = rendered_inspector_default - - defaults_equal = conn_col_default == rendered_metadata_default - if defaults_equal: - return False - - if None in (conn_col_default, rendered_metadata_default): - return not defaults_equal - - if compat.py2k: - # look for a python 2 "u''" string and filter - m = re.match(r"^u'(.*)'$", rendered_metadata_default) - if m: - rendered_metadata_default = "'%s'" % m.group(1) - - # check for unquoted string and quote for PG String types - if ( - not isinstance(inspector_column.type, Numeric) - and metadata_column.server_default is not None - and isinstance( - metadata_column.server_default.arg, compat.string_types - ) - and not re.match(r"^'.*'$", rendered_metadata_default) - ): - rendered_metadata_default = "'%s'" % rendered_metadata_default - - return not self.connection.scalar( - text( - "SELECT %s = %s" - % (conn_col_default, rendered_metadata_default) - ) - ) - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - schema=None, - autoincrement=None, - existing_type=None, - existing_server_default=None, - existing_nullable=None, - existing_autoincrement=None, - **kw - ): - - using = kw.pop("postgresql_using", None) - - if using is not None and type_ is None: - raise util.CommandError( - "postgresql_using must be used with the type_ parameter" - ) - - if type_ is not None: - self._exec( - PostgresqlColumnType( - table_name, - column_name, - type_, - schema=schema, - using=using, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - ) - ) - - super(PostgresqlImpl, self).alter_column( - table_name, - column_name, - nullable=nullable, - server_default=server_default, - name=name, - schema=schema, - autoincrement=autoincrement, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_autoincrement=existing_autoincrement, - **kw - ) - - def autogen_column_reflect(self, inspector, table, column_info): - if column_info.get("default") and isinstance( - column_info["type"], (INTEGER, BIGINT) - ): - seq_match = re.match( - r"nextval\('(.+?)'::regclass\)", column_info["default"] - ) - if seq_match: - info = sqla_compat._exec_on_inspector( - inspector, - text( - "select c.relname, a.attname " - "from pg_class as c join " - "pg_depend d on d.objid=c.oid and " - "d.classid='pg_class'::regclass and " - "d.refclassid='pg_class'::regclass " - "join pg_class t on t.oid=d.refobjid " - "join pg_attribute a on a.attrelid=t.oid 
and " - "a.attnum=d.refobjsubid " - "where c.relkind='S' and c.relname=:seqname" - ), - seqname=seq_match.group(1), - ).first() - if info: - seqname, colname = info - if colname == column_info["name"]: - log.info( - "Detected sequence named '%s' as " - "owned by integer column '%s(%s)', " - "assuming SERIAL and omitting", - seqname, - table.name, - colname, - ) - # sequence, and the owner is this column, - # its a SERIAL - whack it! - del column_info["default"] - - def correct_for_autogen_constraints( - self, - conn_unique_constraints, - conn_indexes, - metadata_unique_constraints, - metadata_indexes, - ): - - conn_indexes_by_name = dict((c.name, c) for c in conn_indexes) - - doubled_constraints = set( - index - for index in conn_indexes - if index.info.get("duplicates_constraint") - ) - - for ix in doubled_constraints: - conn_indexes.remove(ix) - - for idx in list(metadata_indexes): - if idx.name in conn_indexes_by_name: - continue - exprs = idx.expressions - for expr in exprs: - while isinstance(expr, UnaryExpression): - expr = expr.element - if not isinstance(expr, Column): - util.warn( - "autogenerate skipping functional index %s; " - "not supported by SQLAlchemy reflection" % idx.name - ) - metadata_indexes.discard(idx) - - def render_type(self, type_, autogen_context): - mod = type(type_).__module__ - if not mod.startswith("sqlalchemy.dialects.postgresql"): - return False - - if hasattr(self, "_render_%s_type" % type_.__visit_name__): - meth = getattr(self, "_render_%s_type" % type_.__visit_name__) - return meth(type_, autogen_context) - - return False - - def _render_HSTORE_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "text_type", r"(.+?\(.*text_type=)" - ) - - def _render_ARRAY_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "item_type", r"(.+?\()" - ) - - def _render_JSON_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" - ) - - def _render_JSONB_type(self, type_, autogen_context): - return render._render_type_w_subtype( - type_, autogen_context, "astext_type", r"(.+?\(.*astext_type=)" - ) - - -class PostgresqlColumnType(AlterColumn): - def __init__(self, name, column_name, type_, **kw): - using = kw.pop("using", None) - super(PostgresqlColumnType, self).__init__(name, column_name, **kw) - self.type_ = sqltypes.to_instance(type_) - self.using = using - - -@compiles(RenameTable, "postgresql") -def visit_rename_table(element, compiler, **kw): - return "%s RENAME TO %s" % ( - alter_table(compiler, element.table_name, element.schema), - format_table_name(compiler, element.new_table_name, None), - ) - - -@compiles(PostgresqlColumnType, "postgresql") -def visit_column_type(element, compiler, **kw): - return "%s %s %s %s" % ( - alter_table(compiler, element.table_name, element.schema), - alter_column(compiler, element.column_name), - "TYPE %s" % format_type(compiler, element.type_), - "USING %s" % element.using if element.using else "", - ) - - -@compiles(ColumnComment, "postgresql") -def visit_column_comment(element, compiler, **kw): - ddl = "COMMENT ON COLUMN {table_name}.{column_name} IS {comment}" - comment = ( - compiler.sql_compiler.render_literal_value( - element.comment, sqltypes.String() - ) - if element.comment is not None - else "NULL" - ) - - return ddl.format( - table_name=format_table_name( - compiler, element.table_name, element.schema - ), - 
column_name=format_column_name(compiler, element.column_name), - comment=comment, - ) - - -@Operations.register_operation("create_exclude_constraint") -@BatchOperations.register_operation( - "create_exclude_constraint", "batch_create_exclude_constraint" -) -@ops.AddConstraintOp.register_add_constraint("exclude_constraint") -class CreateExcludeConstraintOp(ops.AddConstraintOp): - """Represent a create exclude constraint operation.""" - - constraint_type = "exclude" - - def __init__( - self, - constraint_name, - table_name, - elements, - where=None, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.elements = elements - self.where = where - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - return cls( - constraint.name, - constraint_table.name, - [(expr, op) for expr, name, op in constraint._render_exprs], - where=constraint.where, - schema=constraint_table.schema, - _orig_constraint=constraint, - deferrable=constraint.deferrable, - initially=constraint.initially, - using=constraint.using, - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - schema_obj = schemaobj.SchemaObjects(migration_context) - t = schema_obj.table(self.table_name, schema=self.schema) - excl = ExcludeConstraint( - *self.elements, - name=self.constraint_name, - where=self.where, - **self.kw - ) - for expr, name, oper in excl._render_exprs: - t.append_column(Column(name, NULLTYPE)) - t.append_constraint(excl) - return excl - - @classmethod - def create_exclude_constraint( - cls, operations, constraint_name, table_name, *elements, **kw - ): - """Issue an alter to create an EXCLUDE constraint using the - current migration context. - - .. note:: This method is Postgresql specific, and additionally - requires at least SQLAlchemy 1.0. - - e.g.:: - - from alembic import op - - op.create_exclude_constraint( - "user_excl", - "user", - - ("period", '&&'), - ("group", '='), - where=("group != 'some group'") - - ) - - Note that the expressions work the same way as that of - the ``ExcludeConstraint`` object itself; if plain strings are - passed, quoting rules must be applied manually. - - :param name: Name of the constraint. - :param table_name: String name of the source table. - :param elements: exclude conditions. - :param where: SQL expression or SQL string with optional WHERE - clause. - :param deferrable: optional bool. If set, emit DEFERRABLE or - NOT DEFERRABLE when issuing DDL for this constraint. - :param initially: optional string. If set, emit INITIALLY - when issuing DDL for this constraint. - :param schema: Optional schema name to operate within. - - .. versionadded:: 0.9.0 - - """ - op = cls(constraint_name, table_name, elements, **kw) - return operations.invoke(op) - - @classmethod - def batch_create_exclude_constraint( - cls, operations, constraint_name, *elements, **kw - ): - """Issue a "create exclude constraint" instruction using the - current batch migration context. - - .. note:: This method is Postgresql specific, and additionally - requires at least SQLAlchemy 1.0. - - .. versionadded:: 0.9.0 - - .. 
seealso:: - - :meth:`.Operations.create_exclude_constraint` - - """ - kw["schema"] = operations.impl.schema - op = cls(constraint_name, operations.impl.table_name, elements, **kw) - return operations.invoke(op) - - -@render.renderers.dispatch_for(CreateExcludeConstraintOp) -def _add_exclude_constraint(autogen_context, op): - return _exclude_constraint(op.to_constraint(), autogen_context, alter=True) - - -@render._constraint_renderers.dispatch_for(ExcludeConstraint) -def _render_inline_exclude_constraint(constraint, autogen_context): - rendered = render._user_defined_render( - "exclude", constraint, autogen_context - ) - if rendered is not False: - return rendered - - return _exclude_constraint(constraint, autogen_context, False) - - -def _postgresql_autogenerate_prefix(autogen_context): - - imports = autogen_context.imports - if imports is not None: - imports.add("from sqlalchemy.dialects import postgresql") - return "postgresql." - - -def _exclude_constraint(constraint, autogen_context, alter): - opts = [] - - has_batch = autogen_context._has_batch - - if constraint.deferrable: - opts.append(("deferrable", str(constraint.deferrable))) - if constraint.initially: - opts.append(("initially", str(constraint.initially))) - if constraint.using: - opts.append(("using", str(constraint.using))) - if not has_batch and alter and constraint.table.schema: - opts.append(("schema", render._ident(constraint.table.schema))) - if not alter and constraint.name: - opts.append( - ("name", render._render_gen_name(autogen_context, constraint.name)) - ) - - if alter: - args = [ - repr(render._render_gen_name(autogen_context, constraint.name)) - ] - if not has_batch: - args += [repr(render._ident(constraint.table.name))] - args.extend( - [ - "(%s, %r)" - % ( - _render_potential_column(sqltext, autogen_context), - opstring, - ) - for sqltext, name, opstring in constraint._render_exprs - ] - ) - if constraint.where is not None: - args.append( - "where=%s" - % render._render_potential_expr( - constraint.where, autogen_context - ) - ) - args.extend(["%s=%r" % (k, v) for k, v in opts]) - return "%(prefix)screate_exclude_constraint(%(args)s)" % { - "prefix": render._alembic_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - else: - args = [ - "(%s, %r)" - % (_render_potential_column(sqltext, autogen_context), opstring) - for sqltext, name, opstring in constraint._render_exprs - ] - if constraint.where is not None: - args.append( - "where=%s" - % render._render_potential_expr( - constraint.where, autogen_context - ) - ) - args.extend(["%s=%r" % (k, v) for k, v in opts]) - return "%(prefix)sExcludeConstraint(%(args)s)" % { - "prefix": _postgresql_autogenerate_prefix(autogen_context), - "args": ", ".join(args), - } - - -def _render_potential_column(value, autogen_context): - if isinstance(value, ColumnClause): - template = "%(prefix)scolumn(%(name)r)" - - return template % { - "prefix": render._sqlalchemy_autogenerate_prefix(autogen_context), - "name": value.name, - } - - else: - return render._render_potential_expr( - value, autogen_context, wrap_in_text=False - ) diff --git a/venv/lib/python3.7/site-packages/alembic/ddl/sqlite.py b/venv/lib/python3.7/site-packages/alembic/ddl/sqlite.py deleted file mode 100644 index 84e9dd8..0000000 --- a/venv/lib/python3.7/site-packages/alembic/ddl/sqlite.py +++ /dev/null @@ -1,136 +0,0 @@ -import re - -from .impl import DefaultImpl -from .. 
import util - - -class SQLiteImpl(DefaultImpl): - __dialect__ = "sqlite" - - transactional_ddl = False - """SQLite supports transactional DDL, but pysqlite does not: - see: http://bugs.python.org/issue10740 - """ - - def requires_recreate_in_batch(self, batch_op): - """Return True if the given :class:`.BatchOperationsImpl` - would need the table to be recreated and copied in order to - proceed. - - Normally, only returns True on SQLite when operations other - than add_column are present. - - """ - for op in batch_op.batch: - if op[0] not in ("add_column", "create_index", "drop_index"): - return True - else: - return False - - def add_constraint(self, const): - # attempt to distinguish between an - # auto-gen constraint and an explicit one - if const._create_rule is None: - raise NotImplementedError( - "No support for ALTER of constraints in SQLite dialect" - "Please refer to the batch mode feature which allows for " - "SQLite migrations using a copy-and-move strategy." - ) - elif const._create_rule(self): - util.warn( - "Skipping unsupported ALTER for " - "creation of implicit constraint" - "Please refer to the batch mode feature which allows for " - "SQLite migrations using a copy-and-move strategy." - ) - - def drop_constraint(self, const): - if const._create_rule is None: - raise NotImplementedError( - "No support for ALTER of constraints in SQLite dialect" - "Please refer to the batch mode feature which allows for " - "SQLite migrations using a copy-and-move strategy." - ) - - def compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_inspector_default, - ): - - if rendered_metadata_default is not None: - rendered_metadata_default = re.sub( - r"^\((.+)\)$", r"\1", rendered_metadata_default - ) - - rendered_metadata_default = re.sub( - r"^\"?'(.+)'\"?$", r"\1", rendered_metadata_default - ) - - if rendered_inspector_default is not None: - rendered_inspector_default = re.sub( - r"^\"?'(.+)'\"?$", r"\1", rendered_inspector_default - ) - - return rendered_inspector_default != rendered_metadata_default - - def _guess_if_default_is_unparenthesized_sql_expr(self, expr): - """Determine if a server default is a SQL expression or a constant. - - There are too many assertions that expect server defaults to round-trip - identically without parenthesis added so we will add parens only in - very specific cases. 
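For example, ``0``, ``'some string'`` and ``(lower(x))`` are assumed
to already round-trip as written, while a bare expression such as
``CURRENT_TIMESTAMP`` is treated as unparenthesized SQL and will be
wrapped in parentheses by the callers below.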
- - """ - if not expr: - return False - elif re.match(r"^[0-9\.]$", expr): - return False - elif re.match(r"^'.+'$", expr): - return False - elif re.match(r"^\(.+\)$", expr): - return False - else: - return True - - def autogen_column_reflect(self, inspector, table, column_info): - # SQLite expression defaults require parenthesis when sent - # as DDL - if self._guess_if_default_is_unparenthesized_sql_expr( - column_info.get("default", None) - ): - column_info["default"] = "(%s)" % (column_info["default"],) - - def render_ddl_sql_expr(self, expr, is_server_default=False, **kw): - # SQLite expression defaults require parenthesis when sent - # as DDL - str_expr = super(SQLiteImpl, self).render_ddl_sql_expr( - expr, is_server_default=is_server_default, **kw - ) - - if ( - is_server_default - and self._guess_if_default_is_unparenthesized_sql_expr(str_expr) - ): - str_expr = "(%s)" % (str_expr,) - return str_expr - - -# @compiles(AddColumn, 'sqlite') -# def visit_add_column(element, compiler, **kw): -# return "%s %s" % ( -# alter_table(compiler, element.table_name, element.schema), -# add_column(compiler, element.column, **kw) -# ) - - -# def add_column(compiler, column, **kw): -# text = "ADD COLUMN %s" % compiler.get_column_specification(column, **kw) -# need to modify SQLAlchemy so that the CHECK associated with a Boolean -# or Enum gets placed as part of the column constraints, not the Table -# see ticket 98 -# for const in column.constraints: -# text += compiler.process(AddConstraint(const)) -# return text diff --git a/venv/lib/python3.7/site-packages/alembic/op.py b/venv/lib/python3.7/site-packages/alembic/op.py deleted file mode 100644 index f3f5fac..0000000 --- a/venv/lib/python3.7/site-packages/alembic/op.py +++ /dev/null @@ -1,5 +0,0 @@ -from .operations.base import Operations - -# create proxy functions for -# each method on the Operations class. -Operations.create_module_class_proxy(globals(), locals()) diff --git a/venv/lib/python3.7/site-packages/alembic/operations/__init__.py b/venv/lib/python3.7/site-packages/alembic/operations/__init__.py deleted file mode 100644 index dc2d3a4..0000000 --- a/venv/lib/python3.7/site-packages/alembic/operations/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from . 
import toimpl # noqa -from .base import BatchOperations -from .base import Operations -from .ops import MigrateOperation - - -__all__ = ["Operations", "BatchOperations", "MigrateOperation"] diff --git a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 11685c5..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/base.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/base.cpython-37.pyc deleted file mode 100644 index e96a890..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/base.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/batch.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/batch.cpython-37.pyc deleted file mode 100644 index 7ea2f47..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/batch.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/ops.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/ops.cpython-37.pyc deleted file mode 100644 index c10f809..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/ops.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/schemaobj.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/schemaobj.cpython-37.pyc deleted file mode 100644 index 6a3d3ea..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/schemaobj.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/toimpl.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/toimpl.cpython-37.pyc deleted file mode 100644 index f3c6676..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/operations/__pycache__/toimpl.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/operations/base.py b/venv/lib/python3.7/site-packages/alembic/operations/base.py deleted file mode 100644 index 602b7c7..0000000 --- a/venv/lib/python3.7/site-packages/alembic/operations/base.py +++ /dev/null @@ -1,502 +0,0 @@ -from contextlib import contextmanager -import textwrap - -from . import batch -from . import schemaobj -from .. import util -from ..util import sqla_compat -from ..util.compat import exec_ -from ..util.compat import inspect_formatargspec -from ..util.compat import inspect_getargspec - -__all__ = ("Operations", "BatchOperations") - -try: - from sqlalchemy.sql.naming import conv -except: - conv = None - - -class Operations(util.ModuleClsProxy): - - """Define high level migration operations. - - Each operation corresponds to some schema migration operation, - executed against a particular :class:`.MigrationContext` - which in turn represents connectivity to a database, - or a file output stream. 
- - While :class:`.Operations` is normally configured as - part of the :meth:`.EnvironmentContext.run_migrations` - method called from an ``env.py`` script, a standalone - :class:`.Operations` instance can be - made for use cases external to regular Alembic - migrations by passing in a :class:`.MigrationContext`:: - - from alembic.migration import MigrationContext - from alembic.operations import Operations - - conn = myengine.connect() - ctx = MigrationContext.configure(conn) - op = Operations(ctx) - - op.alter_column("t", "c", nullable=True) - - Note that as of 0.8, most of the methods on this class are produced - dynamically using the :meth:`.Operations.register_operation` - method. - - """ - - _to_impl = util.Dispatcher() - - def __init__(self, migration_context, impl=None): - """Construct a new :class:`.Operations` - - :param migration_context: a :class:`.MigrationContext` - instance. - - """ - self.migration_context = migration_context - if impl is None: - self.impl = migration_context.impl - else: - self.impl = impl - - self.schema_obj = schemaobj.SchemaObjects(migration_context) - - @classmethod - def register_operation(cls, name, sourcename=None): - """Register a new operation for this class. - - This method is normally used to add new operations - to the :class:`.Operations` class, and possibly the - :class:`.BatchOperations` class as well. All Alembic migration - operations are implemented via this system, however the system - is also available as a public API to facilitate adding custom - operations. - - .. versionadded:: 0.8.0 - - .. seealso:: - - :ref:`operation_plugins` - - - """ - - def register(op_cls): - if sourcename is None: - fn = getattr(op_cls, name) - source_name = fn.__name__ - else: - fn = getattr(op_cls, sourcename) - source_name = fn.__name__ - - spec = inspect_getargspec(fn) - - name_args = spec[0] - assert name_args[0:2] == ["cls", "operations"] - - name_args[0:2] = ["self"] - - args = inspect_formatargspec(*spec) - num_defaults = len(spec[3]) if spec[3] else 0 - if num_defaults: - defaulted_vals = name_args[0 - num_defaults :] - else: - defaulted_vals = () - - apply_kw = inspect_formatargspec( - name_args, - spec[1], - spec[2], - defaulted_vals, - formatvalue=lambda x: "=" + x, - ) - - func_text = textwrap.dedent( - """\ - def %(name)s%(args)s: - %(doc)r - return op_cls.%(source_name)s%(apply_kw)s - """ - % { - "name": name, - "source_name": source_name, - "args": args, - "apply_kw": apply_kw, - "doc": fn.__doc__, - "meth": fn.__name__, - } - ) - globals_ = {"op_cls": op_cls} - lcl = {} - exec_(func_text, globals_, lcl) - setattr(cls, name, lcl[name]) - fn.__func__.__doc__ = ( - "This method is proxied on " - "the :class:`.%s` class, via the :meth:`.%s.%s` method." - % (cls.__name__, cls.__name__, name) - ) - if hasattr(fn, "_legacy_translations"): - lcl[name]._legacy_translations = fn._legacy_translations - return op_cls - - return register - - @classmethod - def implementation_for(cls, op_cls): - """Register an implementation for a given :class:`.MigrateOperation`. - - This is part of the operation extensibility API. - - .. 
seealso:: - - :ref:`operation_plugins` - example of use - - """ - - def decorate(fn): - cls._to_impl.dispatch_for(op_cls)(fn) - return fn - - return decorate - - @classmethod - @contextmanager - def context(cls, migration_context): - op = Operations(migration_context) - op._install_proxy() - yield op - op._remove_proxy() - - @contextmanager - def batch_alter_table( - self, - table_name, - schema=None, - recreate="auto", - partial_reordering=None, - copy_from=None, - table_args=(), - table_kwargs=util.immutabledict(), - reflect_args=(), - reflect_kwargs=util.immutabledict(), - naming_convention=None, - ): - """Invoke a series of per-table migrations in batch. - - Batch mode allows a series of operations specific to a table - to be syntactically grouped together, and allows for alternate - modes of table migration, in particular the "recreate" style of - migration required by SQLite. - - "recreate" style is as follows: - - 1. A new table is created with the new specification, based on the - migration directives within the batch, using a temporary name. - - 2. the data copied from the existing table to the new table. - - 3. the existing table is dropped. - - 4. the new table is renamed to the existing table name. - - The directive by default will only use "recreate" style on the - SQLite backend, and only if directives are present which require - this form, e.g. anything other than ``add_column()``. The batch - operation on other backends will proceed using standard ALTER TABLE - operations. - - The method is used as a context manager, which returns an instance - of :class:`.BatchOperations`; this object is the same as - :class:`.Operations` except that table names and schema names - are omitted. E.g.:: - - with op.batch_alter_table("some_table") as batch_op: - batch_op.add_column(Column('foo', Integer)) - batch_op.drop_column('bar') - - The operations within the context manager are invoked at once - when the context is ended. When run against SQLite, if the - migrations include operations not supported by SQLite's ALTER TABLE, - the entire table will be copied to a new one with the new - specification, moving all data across as well. - - The copy operation by default uses reflection to retrieve the current - structure of the table, and therefore :meth:`.batch_alter_table` - in this mode requires that the migration is run in "online" mode. - The ``copy_from`` parameter may be passed which refers to an existing - :class:`.Table` object, which will bypass this reflection step. - - .. note:: The table copy operation will currently not copy - CHECK constraints, and may not copy UNIQUE constraints that are - unnamed, as is possible on SQLite. See the section - :ref:`sqlite_batch_constraints` for workarounds. - - :param table_name: name of table - :param schema: optional schema name. - :param recreate: under what circumstances the table should be - recreated. At its default of ``"auto"``, the SQLite dialect will - recreate the table if any operations other than ``add_column()``, - ``create_index()``, or ``drop_index()`` are - present. Other options include ``"always"`` and ``"never"``. - :param copy_from: optional :class:`~sqlalchemy.schema.Table` object - that will act as the structure of the table being copied. If omitted, - table reflection is used to retrieve the structure of the table. - - .. versionadded:: 0.7.6 Fully implemented the - :paramref:`~.Operations.batch_alter_table.copy_from` - parameter. - - .. 
seealso:: - - :ref:`batch_offline_mode` - - :paramref:`~.Operations.batch_alter_table.reflect_args` - - :paramref:`~.Operations.batch_alter_table.reflect_kwargs` - - :param reflect_args: a sequence of additional positional arguments that - will be applied to the table structure being reflected / copied; - this may be used to pass column and constraint overrides to the - table that will be reflected, in lieu of passing the whole - :class:`~sqlalchemy.schema.Table` using - :paramref:`~.Operations.batch_alter_table.copy_from`. - - .. versionadded:: 0.7.1 - - :param reflect_kwargs: a dictionary of additional keyword arguments - that will be applied to the table structure being copied; this may be - used to pass additional table and reflection options to the table that - will be reflected, in lieu of passing the whole - :class:`~sqlalchemy.schema.Table` using - :paramref:`~.Operations.batch_alter_table.copy_from`. - - .. versionadded:: 0.7.1 - - :param table_args: a sequence of additional positional arguments that - will be applied to the new :class:`~sqlalchemy.schema.Table` when - created, in addition to those copied from the source table. - This may be used to provide additional constraints such as CHECK - constraints that may not be reflected. - :param table_kwargs: a dictionary of additional keyword arguments - that will be applied to the new :class:`~sqlalchemy.schema.Table` - when created, in addition to those copied from the source table. - This may be used to provide for additional table options that may - not be reflected. - - .. versionadded:: 0.7.0 - - :param naming_convention: a naming convention dictionary of the form - described at :ref:`autogen_naming_conventions` which will be applied - to the :class:`~sqlalchemy.schema.MetaData` during the reflection - process. This is typically required if one wants to drop SQLite - constraints, as these constraints will not have names when - reflected on this backend. Requires SQLAlchemy **0.9.4** or greater. - - .. seealso:: - - :ref:`dropping_sqlite_foreign_keys` - - .. versionadded:: 0.7.1 - - :param partial_reordering: a list of tuples, each suggesting a desired - ordering of two or more columns in the newly created table. Requires - that :paramref:`.batch_alter_table.recreate` is set to ``"always"``. - Examples, given a table with columns "a", "b", "c", and "d": - - Specify the order of all columns:: - - with op.batch_alter_table( - "some_table", recreate="always", - partial_reordering=[("c", "d", "a", "b")] - ) as batch_op: - pass - - Ensure "d" appears before "c", and "b", appears before "a":: - - with op.batch_alter_table( - "some_table", recreate="always", - partial_reordering=[("d", "c"), ("b", "a")] - ) as batch_op: - pass - - The ordering of columns not included in the partial_reordering - set is undefined. Therefore it is best to specify the complete - ordering of all columns for best results. - - .. versionadded:: 1.4.0 - - .. note:: batch mode requires SQLAlchemy 0.8 or above. - - .. seealso:: - - :ref:`batch_migrations` - - """ - impl = batch.BatchOperationsImpl( - self, - table_name, - schema, - recreate, - copy_from, - table_args, - table_kwargs, - reflect_args, - reflect_kwargs, - naming_convention, - partial_reordering, - ) - batch_op = BatchOperations(self.migration_context, impl=impl) - yield batch_op - impl.flush() - - def get_context(self): - """Return the :class:`.MigrationContext` object that's - currently in use. 
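E.g. ``context = op.get_context()`` from within a migration script
returns the same :class:`.MigrationContext` that was used to
construct this :class:`.Operations` instance.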
- - """ - - return self.migration_context - - def invoke(self, operation): - """Given a :class:`.MigrateOperation`, invoke it in terms of - this :class:`.Operations` instance. - - .. versionadded:: 0.8.0 - - """ - fn = self._to_impl.dispatch( - operation, self.migration_context.impl.__dialect__ - ) - return fn(self, operation) - - def f(self, name): - """Indicate a string name that has already had a naming convention - applied to it. - - This feature combines with the SQLAlchemy ``naming_convention`` feature - to disambiguate constraint names that have already had naming - conventions applied to them, versus those that have not. This is - necessary in the case that the ``"%(constraint_name)s"`` token - is used within a naming convention, so that it can be identified - that this particular name should remain fixed. - - If the :meth:`.Operations.f` is used on a constraint, the naming - convention will not take effect:: - - op.add_column('t', 'x', Boolean(name=op.f('ck_bool_t_x'))) - - Above, the CHECK constraint generated will have the name - ``ck_bool_t_x`` regardless of whether or not a naming convention is - in use. - - Alternatively, if a naming convention is in use, and 'f' is not used, - names will be converted along conventions. If the ``target_metadata`` - contains the naming convention - ``{"ck": "ck_bool_%(table_name)s_%(constraint_name)s"}``, then the - output of the following: - - op.add_column('t', 'x', Boolean(name='x')) - - will be:: - - CONSTRAINT ck_bool_t_x CHECK (x in (1, 0))) - - The function is rendered in the output of autogenerate when - a particular constraint name is already converted, for SQLAlchemy - version **0.9.4 and greater only**. Even though ``naming_convention`` - was introduced in 0.9.2, the string disambiguation service is new - as of 0.9.4. - - .. versionadded:: 0.6.4 - - """ - if conv: - return conv(name) - else: - raise NotImplementedError( - "op.f() feature requires SQLAlchemy 0.9.4 or greater." - ) - - def inline_literal(self, value, type_=None): - r"""Produce an 'inline literal' expression, suitable for - using in an INSERT, UPDATE, or DELETE statement. - - When using Alembic in "offline" mode, CRUD operations - aren't compatible with SQLAlchemy's default behavior surrounding - literal values, - which is that they are converted into bound values and passed - separately into the ``execute()`` method of the DBAPI cursor. - An offline SQL - script needs to have these rendered inline. While it should - always be noted that inline literal values are an **enormous** - security hole in an application that handles untrusted input, - a schema migration is not run in this context, so - literals are safe to render inline, with the caveat that - advanced types like dates may not be supported directly - by SQLAlchemy. - - See :meth:`.execute` for an example usage of - :meth:`.inline_literal`. - - The environment can also be configured to attempt to render - "literal" values inline automatically, for those simple types - that are supported by the dialect; see - :paramref:`.EnvironmentContext.configure.literal_binds` for this - more recently added feature. - - :param value: The value to render. Strings, integers, and simple - numerics should be supported. Other types like boolean, - dates, etc. may or may not be supported yet by various - backends. - :param type\_: optional - a :class:`sqlalchemy.types.TypeEngine` - subclass stating the type of this value. 
In SQLAlchemy - expressions, this is usually derived automatically - from the Python type of the value itself, as well as - based on the context in which the value is used. - - .. seealso:: - - :paramref:`.EnvironmentContext.configure.literal_binds` - - """ - return sqla_compat._literal_bindparam(None, value, type_=type_) - - def get_bind(self): - """Return the current 'bind'. - - Under normal circumstances, this is the - :class:`~sqlalchemy.engine.Connection` currently being used - to emit SQL to the database. - - In a SQL script context, this value is ``None``. [TODO: verify this] - - """ - return self.migration_context.impl.bind - - -class BatchOperations(Operations): - """Modifies the interface :class:`.Operations` for batch mode. - - This basically omits the ``table_name`` and ``schema`` parameters - from associated methods, as these are a given when running under batch - mode. - - .. seealso:: - - :meth:`.Operations.batch_alter_table` - - Note that as of 0.8, most of the methods on this class are produced - dynamically using the :meth:`.Operations.register_operation` - method. - - """ - - def _noop(self, operation): - raise NotImplementedError( - "The %s method does not apply to a batch table alter operation." - % operation - ) diff --git a/venv/lib/python3.7/site-packages/alembic/operations/batch.py b/venv/lib/python3.7/site-packages/alembic/operations/batch.py deleted file mode 100644 index 6ca6f90..0000000 --- a/venv/lib/python3.7/site-packages/alembic/operations/batch.py +++ /dev/null @@ -1,532 +0,0 @@ -from sqlalchemy import cast -from sqlalchemy import CheckConstraint -from sqlalchemy import Column -from sqlalchemy import ForeignKeyConstraint -from sqlalchemy import Index -from sqlalchemy import MetaData -from sqlalchemy import PrimaryKeyConstraint -from sqlalchemy import schema as sql_schema -from sqlalchemy import select -from sqlalchemy import Table -from sqlalchemy import types as sqltypes -from sqlalchemy.events import SchemaEventTarget -from sqlalchemy.util import OrderedDict -from sqlalchemy.util import topological - -from ..util import exc -from ..util.sqla_compat import _columns_for_constraint -from ..util.sqla_compat import _fk_is_self_referential -from ..util.sqla_compat import _is_type_bound -from ..util.sqla_compat import _remove_column_from_collection - - -class BatchOperationsImpl(object): - def __init__( - self, - operations, - table_name, - schema, - recreate, - copy_from, - table_args, - table_kwargs, - reflect_args, - reflect_kwargs, - naming_convention, - partial_reordering, - ): - self.operations = operations - self.table_name = table_name - self.schema = schema - if recreate not in ("auto", "always", "never"): - raise ValueError( - "recreate may be one of 'auto', 'always', or 'never'." 
- ) - self.recreate = recreate - self.copy_from = copy_from - self.table_args = table_args - self.table_kwargs = dict(table_kwargs) - self.reflect_args = reflect_args - self.reflect_kwargs = dict(reflect_kwargs) - self.reflect_kwargs.setdefault( - "listeners", list(self.reflect_kwargs.get("listeners", ())) - ) - self.reflect_kwargs["listeners"].append( - ("column_reflect", operations.impl.autogen_column_reflect) - ) - self.naming_convention = naming_convention - self.partial_reordering = partial_reordering - self.batch = [] - - @property - def dialect(self): - return self.operations.impl.dialect - - @property - def impl(self): - return self.operations.impl - - def _should_recreate(self): - if self.recreate == "auto": - return self.operations.impl.requires_recreate_in_batch(self) - elif self.recreate == "always": - return True - else: - return False - - def flush(self): - should_recreate = self._should_recreate() - - if not should_recreate: - for opname, arg, kw in self.batch: - fn = getattr(self.operations.impl, opname) - fn(*arg, **kw) - else: - if self.naming_convention: - m1 = MetaData(naming_convention=self.naming_convention) - else: - m1 = MetaData() - - if self.copy_from is not None: - existing_table = self.copy_from - reflected = False - else: - existing_table = Table( - self.table_name, - m1, - schema=self.schema, - autoload=True, - autoload_with=self.operations.get_bind(), - *self.reflect_args, - **self.reflect_kwargs - ) - reflected = True - - batch_impl = ApplyBatchImpl( - existing_table, - self.table_args, - self.table_kwargs, - reflected, - partial_reordering=self.partial_reordering, - ) - for opname, arg, kw in self.batch: - fn = getattr(batch_impl, opname) - fn(*arg, **kw) - - batch_impl._create(self.impl) - - def alter_column(self, *arg, **kw): - self.batch.append(("alter_column", arg, kw)) - - def add_column(self, *arg, **kw): - if ( - "insert_before" in kw or "insert_after" in kw - ) and not self._should_recreate(): - raise exc.CommandError( - "Can't specify insert_before or insert_after when using " - "ALTER; please specify recreate='always'" - ) - self.batch.append(("add_column", arg, kw)) - - def drop_column(self, *arg, **kw): - self.batch.append(("drop_column", arg, kw)) - - def add_constraint(self, const): - self.batch.append(("add_constraint", (const,), {})) - - def drop_constraint(self, const): - self.batch.append(("drop_constraint", (const,), {})) - - def rename_table(self, *arg, **kw): - self.batch.append(("rename_table", arg, kw)) - - def create_index(self, idx): - self.batch.append(("create_index", (idx,), {})) - - def drop_index(self, idx): - self.batch.append(("drop_index", (idx,), {})) - - def create_table(self, table): - raise NotImplementedError("Can't create table in batch mode") - - def drop_table(self, table): - raise NotImplementedError("Can't drop table in batch mode") - - -class ApplyBatchImpl(object): - def __init__( - self, table, table_args, table_kwargs, reflected, partial_reordering=() - ): - self.table = table # this is a Table object - self.table_args = table_args - self.table_kwargs = table_kwargs - self.temp_table_name = self._calc_temp_name(table.name) - self.new_table = None - - self.partial_reordering = partial_reordering # tuple of tuples - self.add_col_ordering = () # tuple of tuples - - self.column_transfers = OrderedDict( - (c.name, {"expr": c}) for c in self.table.c - ) - self.existing_ordering = list(self.column_transfers) - - self.reflected = reflected - self._grab_table_elements() - - @classmethod - def _calc_temp_name(cls, 
tablename): - return ("_alembic_tmp_%s" % tablename)[0:50] - - def _grab_table_elements(self): - schema = self.table.schema - self.columns = OrderedDict() - for c in self.table.c: - c_copy = c.copy(schema=schema) - c_copy.unique = c_copy.index = False - # ensure that the type object was copied, - # as we may need to modify it in-place - if isinstance(c.type, SchemaEventTarget): - assert c_copy.type is not c.type - self.columns[c.name] = c_copy - self.named_constraints = {} - self.unnamed_constraints = [] - self.indexes = {} - self.new_indexes = {} - for const in self.table.constraints: - if _is_type_bound(const): - continue - elif self.reflected and isinstance(const, CheckConstraint): - # TODO: we are skipping reflected CheckConstraint because - # we have no way to determine _is_type_bound() for these. - pass - elif const.name: - self.named_constraints[const.name] = const - else: - self.unnamed_constraints.append(const) - - for idx in self.table.indexes: - self.indexes[idx.name] = idx - - for k in self.table.kwargs: - self.table_kwargs.setdefault(k, self.table.kwargs[k]) - - def _adjust_self_columns_for_partial_reordering(self): - pairs = set() - - col_by_idx = list(self.columns) - - if self.partial_reordering: - for tuple_ in self.partial_reordering: - for index, elem in enumerate(tuple_): - if index > 0: - pairs.add((tuple_[index - 1], elem)) - else: - for index, elem in enumerate(self.existing_ordering): - if index > 0: - pairs.add((col_by_idx[index - 1], elem)) - - pairs.update(self.add_col_ordering) - - # this can happen if some columns were dropped and not removed - # from existing_ordering. this should be prevented already, but - # conservatively making sure this didn't happen - pairs = [p for p in pairs if p[0] != p[1]] - - sorted_ = list( - topological.sort(pairs, col_by_idx, deterministic_order=True) - ) - self.columns = OrderedDict((k, self.columns[k]) for k in sorted_) - self.column_transfers = OrderedDict( - (k, self.column_transfers[k]) for k in sorted_ - ) - - def _transfer_elements_to_new_table(self): - assert self.new_table is None, "Can only create new table once" - - m = MetaData() - schema = self.table.schema - - if self.partial_reordering or self.add_col_ordering: - self._adjust_self_columns_for_partial_reordering() - - self.new_table = new_table = Table( - self.temp_table_name, - m, - *(list(self.columns.values()) + list(self.table_args)), - schema=schema, - **self.table_kwargs - ) - - for const in ( - list(self.named_constraints.values()) + self.unnamed_constraints - ): - - const_columns = set( - [c.key for c in _columns_for_constraint(const)] - ) - - if not const_columns.issubset(self.column_transfers): - continue - - if isinstance(const, ForeignKeyConstraint): - if _fk_is_self_referential(const): - # for self-referential constraint, refer to the - # *original* table name, and not _alembic_batch_temp. - # This is consistent with how we're handling - # FK constraints from other tables; we assume SQLite - # no foreign keys just keeps the names unchanged, so - # when we rename back, they match again. - const_copy = const.copy( - schema=schema, target_table=self.table - ) - else: - # "target_table" for ForeignKeyConstraint.copy() is - # only used if the FK is detected as being - # self-referential, which we are handling above. 
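# e.g. a (hypothetical) FK user.group_id -> group.id is copied
# as-is here, whereas a self-referential user.parent_id -> user.id
# above must keep referring to the original table name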
- const_copy = const.copy(schema=schema) - else: - const_copy = const.copy(schema=schema, target_table=new_table) - if isinstance(const, ForeignKeyConstraint): - self._setup_referent(m, const) - new_table.append_constraint(const_copy) - - def _gather_indexes_from_both_tables(self): - idx = [] - idx.extend(self.indexes.values()) - for index in self.new_indexes.values(): - idx.append( - Index( - index.name, - unique=index.unique, - *[self.new_table.c[col] for col in index.columns.keys()], - **index.kwargs - ) - ) - return idx - - def _setup_referent(self, metadata, constraint): - spec = constraint.elements[0]._get_colspec() - parts = spec.split(".") - tname = parts[-2] - if len(parts) == 3: - referent_schema = parts[0] - else: - referent_schema = None - - if tname != self.temp_table_name: - key = sql_schema._get_table_key(tname, referent_schema) - if key in metadata.tables: - t = metadata.tables[key] - for elem in constraint.elements: - colname = elem._get_colspec().split(".")[-1] - if not t.c.contains_column(colname): - t.append_column(Column(colname, sqltypes.NULLTYPE)) - else: - Table( - tname, - metadata, - *[ - Column(n, sqltypes.NULLTYPE) - for n in [ - elem._get_colspec().split(".")[-1] - for elem in constraint.elements - ] - ], - schema=referent_schema - ) - - def _create(self, op_impl): - self._transfer_elements_to_new_table() - - op_impl.prep_table_for_batch(self.table) - op_impl.create_table(self.new_table) - - try: - op_impl._exec( - self.new_table.insert(inline=True).from_select( - list( - k - for k, transfer in self.column_transfers.items() - if "expr" in transfer - ), - select( - [ - transfer["expr"] - for transfer in self.column_transfers.values() - if "expr" in transfer - ] - ), - ) - ) - op_impl.drop_table(self.table) - except: - op_impl.drop_table(self.new_table) - raise - else: - op_impl.rename_table( - self.temp_table_name, self.table.name, schema=self.table.schema - ) - self.new_table.name = self.table.name - try: - for idx in self._gather_indexes_from_both_tables(): - op_impl.create_index(idx) - finally: - self.new_table.name = self.temp_table_name - - def alter_column( - self, - table_name, - column_name, - nullable=None, - server_default=False, - name=None, - type_=None, - autoincrement=None, - **kw - ): - existing = self.columns[column_name] - existing_transfer = self.column_transfers[column_name] - if name is not None and name != column_name: - # note that we don't change '.key' - we keep referring - # to the renamed column by its old key in _create(). neat! - existing.name = name - existing_transfer["name"] = name - - if type_ is not None: - type_ = sqltypes.to_instance(type_) - # old type is being discarded so turn off eventing - # rules. Alternatively we can - # erase the events set up by this type, but this is simpler. 
- # we also ignore the drop_constraint that will come here from - # Operations.implementation_for(alter_column) - if isinstance(existing.type, SchemaEventTarget): - existing.type._create_events = ( - existing.type.create_constraint - ) = False - - if existing.type._type_affinity is not type_._type_affinity: - existing_transfer["expr"] = cast( - existing_transfer["expr"], type_ - ) - - existing.type = type_ - - # we *dont* however set events for the new type, because - # alter_column is invoked from - # Operations.implementation_for(alter_column) which already - # will emit an add_constraint() - - if nullable is not None: - existing.nullable = nullable - if server_default is not False: - if server_default is None: - existing.server_default = None - else: - sql_schema.DefaultClause(server_default)._set_parent(existing) - if autoincrement is not None: - existing.autoincrement = bool(autoincrement) - - def _setup_dependencies_for_add_column( - self, colname, insert_before, insert_after - ): - index_cols = self.existing_ordering - col_indexes = {name: i for i, name in enumerate(index_cols)} - - if not self.partial_reordering: - if insert_after: - if not insert_before: - if insert_after in col_indexes: - # insert after an existing column - idx = col_indexes[insert_after] + 1 - if idx < len(index_cols): - insert_before = index_cols[idx] - else: - # insert after a column that is also new - insert_before = dict(self.add_col_ordering)[ - insert_after - ] - if insert_before: - if not insert_after: - if insert_before in col_indexes: - # insert before an existing column - idx = col_indexes[insert_before] - 1 - if idx >= 0: - insert_after = index_cols[idx] - else: - # insert before a column that is also new - insert_after = dict( - (b, a) for a, b in self.add_col_ordering - )[insert_before] - - if insert_before: - self.add_col_ordering += ((colname, insert_before),) - if insert_after: - self.add_col_ordering += ((insert_after, colname),) - - if ( - not self.partial_reordering - and not insert_before - and not insert_after - and col_indexes - ): - self.add_col_ordering += ((index_cols[-1], colname),) - - def add_column( - self, table_name, column, insert_before=None, insert_after=None, **kw - ): - self._setup_dependencies_for_add_column( - column.name, insert_before, insert_after - ) - # we copy the column because operations.add_column() - # gives us a Column that is part of a Table already. 
- self.columns[column.name] = column.copy(schema=self.table.schema) - self.column_transfers[column.name] = {} - - def drop_column(self, table_name, column, **kw): - if column.name in self.table.primary_key.columns: - _remove_column_from_collection( - self.table.primary_key.columns, column - ) - del self.columns[column.name] - del self.column_transfers[column.name] - self.existing_ordering.remove(column.name) - - def add_constraint(self, const): - if not const.name: - raise ValueError("Constraint must have a name") - if isinstance(const, sql_schema.PrimaryKeyConstraint): - if self.table.primary_key in self.unnamed_constraints: - self.unnamed_constraints.remove(self.table.primary_key) - - self.named_constraints[const.name] = const - - def drop_constraint(self, const): - if not const.name: - raise ValueError("Constraint must have a name") - try: - const = self.named_constraints.pop(const.name) - except KeyError: - if _is_type_bound(const): - # type-bound constraints are only included in the new - # table via their type object in any case, so ignore the - # drop_constraint() that comes here via the - # Operations.implementation_for(alter_column) - return - raise ValueError("No such constraint: '%s'" % const.name) - else: - if isinstance(const, PrimaryKeyConstraint): - for col in const.columns: - self.columns[col.name].primary_key = False - - def create_index(self, idx): - self.new_indexes[idx.name] = idx - - def drop_index(self, idx): - try: - del self.indexes[idx.name] - except KeyError: - raise ValueError("No such index: '%s'" % idx.name) - - def rename_table(self, *arg, **kw): - raise NotImplementedError("TODO") diff --git a/venv/lib/python3.7/site-packages/alembic/operations/ops.py b/venv/lib/python3.7/site-packages/alembic/operations/ops.py deleted file mode 100644 index 7129472..0000000 --- a/venv/lib/python3.7/site-packages/alembic/operations/ops.py +++ /dev/null @@ -1,2472 +0,0 @@ -import re - -from sqlalchemy.types import NULLTYPE - -from . import schemaobj -from .base import BatchOperations -from .base import Operations -from .. import util -from ..util import sqla_compat - - -class MigrateOperation(object): - """base class for migration command and organization objects. - - This system is part of the operation extensibility API. - - .. versionadded:: 0.8.0 - - .. seealso:: - - :ref:`operation_objects` - - :ref:`operation_plugins` - - :ref:`customizing_revision` - - """ - - @util.memoized_property - def info(self): - """A dictionary that may be used to store arbitrary information - along with this :class:`.MigrateOperation` object. 
- - """ - return {} - - _mutations = frozenset() - - -class AddConstraintOp(MigrateOperation): - """Represent an add constraint operation.""" - - add_constraint_ops = util.Dispatcher() - - @property - def constraint_type(self): - raise NotImplementedError() - - @classmethod - def register_add_constraint(cls, type_): - def go(klass): - cls.add_constraint_ops.dispatch_for(type_)(klass.from_constraint) - return klass - - return go - - @classmethod - def from_constraint(cls, constraint): - return cls.add_constraint_ops.dispatch(constraint.__visit_name__)( - constraint - ) - - def reverse(self): - return DropConstraintOp.from_constraint(self.to_constraint()) - - def to_diff_tuple(self): - return ("add_constraint", self.to_constraint()) - - -@Operations.register_operation("drop_constraint") -@BatchOperations.register_operation("drop_constraint", "batch_drop_constraint") -class DropConstraintOp(MigrateOperation): - """Represent a drop constraint operation.""" - - def __init__( - self, - constraint_name, - table_name, - type_=None, - schema=None, - _orig_constraint=None, - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.constraint_type = type_ - self.schema = schema - self._orig_constraint = _orig_constraint - - def reverse(self): - if self._orig_constraint is None: - raise ValueError( - "operation is not reversible; " - "original constraint is not present" - ) - return AddConstraintOp.from_constraint(self._orig_constraint) - - def to_diff_tuple(self): - if self.constraint_type == "foreignkey": - return ("remove_fk", self.to_constraint()) - else: - return ("remove_constraint", self.to_constraint()) - - @classmethod - def from_constraint(cls, constraint): - types = { - "unique_constraint": "unique", - "foreign_key_constraint": "foreignkey", - "primary_key_constraint": "primary", - "check_constraint": "check", - "column_check_constraint": "check", - "table_or_column_check_constraint": "check", - } - - constraint_table = sqla_compat._table_for_constraint(constraint) - return cls( - constraint.name, - constraint_table.name, - schema=constraint_table.schema, - type_=types[constraint.__visit_name__], - _orig_constraint=constraint, - ) - - def to_constraint(self): - if self._orig_constraint is not None: - return self._orig_constraint - else: - raise ValueError( - "constraint cannot be produced; " - "original constraint is not present" - ) - - @classmethod - @util._with_legacy_names([("type", "type_"), ("name", "constraint_name")]) - def drop_constraint( - cls, operations, constraint_name, table_name, type_=None, schema=None - ): - r"""Drop a constraint of the given name, typically via DROP CONSTRAINT. - - :param constraint_name: name of the constraint. - :param table_name: table name. - :param type\_: optional, required on MySQL. can be - 'foreignkey', 'primary', 'unique', or 'check'. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. 
versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - - op = cls(constraint_name, table_name, type_=type_, schema=schema) - return operations.invoke(op) - - @classmethod - def batch_drop_constraint(cls, operations, constraint_name, type_=None): - """Issue a "drop constraint" instruction using the - current batch migration context. - - The batch form of this call omits the ``table_name`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.drop_constraint` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - op = cls( - constraint_name, - operations.impl.table_name, - type_=type_, - schema=operations.impl.schema, - ) - return operations.invoke(op) - - -@Operations.register_operation("create_primary_key") -@BatchOperations.register_operation( - "create_primary_key", "batch_create_primary_key" -) -@AddConstraintOp.register_add_constraint("primary_key_constraint") -class CreatePrimaryKeyOp(AddConstraintOp): - """Represent a create primary key operation.""" - - constraint_type = "primarykey" - - def __init__( - self, - constraint_name, - table_name, - columns, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.columns = columns - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - return cls( - constraint.name, - constraint_table.name, - constraint.columns, - schema=constraint_table.schema, - _orig_constraint=constraint, - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.primary_key_constraint( - self.constraint_name, - self.table_name, - self.columns, - schema=self.schema, - ) - - @classmethod - @util._with_legacy_names( - [("name", "constraint_name"), ("cols", "columns")] - ) - def create_primary_key( - cls, operations, constraint_name, table_name, columns, schema=None - ): - """Issue a "create primary key" instruction using the current - migration context. - - e.g.:: - - from alembic import op - op.create_primary_key( - "pk_my_table", "my_table", - ["id", "version"] - ) - - This internally generates a :class:`~sqlalchemy.schema.Table` object - containing the necessary columns, then generates a new - :class:`~sqlalchemy.schema.PrimaryKeyConstraint` - object which it then associates with the - :class:`~sqlalchemy.schema.Table`. - Any event listeners associated with this action will be fired - off normally. The :class:`~sqlalchemy.schema.AddConstraint` - construct is ultimately used to generate the ALTER statement. - - :param name: Name of the primary key constraint. The name is necessary - so that an ALTER statement can be emitted. For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions` - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param table_name: String name of the target table. - :param columns: a list of string column names to be applied to the - primary key constraint. - :param schema: Optional schema name to operate within. 
To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * cols -> columns - - """ - op = cls(constraint_name, table_name, columns, schema) - return operations.invoke(op) - - @classmethod - def batch_create_primary_key(cls, operations, constraint_name, columns): - """Issue a "create primary key" instruction using the - current batch migration context. - - The batch form of this call omits the ``table_name`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.create_primary_key` - - """ - op = cls( - constraint_name, - operations.impl.table_name, - columns, - schema=operations.impl.schema, - ) - return operations.invoke(op) - - -@Operations.register_operation("create_unique_constraint") -@BatchOperations.register_operation( - "create_unique_constraint", "batch_create_unique_constraint" -) -@AddConstraintOp.register_add_constraint("unique_constraint") -class CreateUniqueConstraintOp(AddConstraintOp): - """Represent a create unique constraint operation.""" - - constraint_type = "unique" - - def __init__( - self, - constraint_name, - table_name, - columns, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.columns = columns - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - kw = {} - if constraint.deferrable: - kw["deferrable"] = constraint.deferrable - if constraint.initially: - kw["initially"] = constraint.initially - - return cls( - constraint.name, - constraint_table.name, - [c.name for c in constraint.columns], - schema=constraint_table.schema, - _orig_constraint=constraint, - **kw - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.unique_constraint( - self.constraint_name, - self.table_name, - self.columns, - schema=self.schema, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [ - ("name", "constraint_name"), - ("source", "table_name"), - ("local_cols", "columns"), - ] - ) - def create_unique_constraint( - cls, - operations, - constraint_name, - table_name, - columns, - schema=None, - **kw - ): - """Issue a "create unique constraint" instruction using the - current migration context. - - e.g.:: - - from alembic import op - op.create_unique_constraint("uq_user_name", "user", ["name"]) - - This internally generates a :class:`~sqlalchemy.schema.Table` object - containing the necessary columns, then generates a new - :class:`~sqlalchemy.schema.UniqueConstraint` - object which it then associates with the - :class:`~sqlalchemy.schema.Table`. - Any event listeners associated with this action will be fired - off normally. The :class:`~sqlalchemy.schema.AddConstraint` - construct is ultimately used to generate the ALTER statement. - - :param name: Name of the unique constraint. The name is necessary - so that an ALTER statement can be emitted. 
For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions`, - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param table_name: String name of the source table. - :param columns: a list of string column names in the - source table. - :param deferrable: optional bool. If set, emit DEFERRABLE or - NOT DEFERRABLE when issuing DDL for this constraint. - :param initially: optional string. If set, emit INITIALLY - when issuing DDL for this constraint. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * source -> table_name - * local_cols -> columns - - """ - - op = cls(constraint_name, table_name, columns, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names([("name", "constraint_name")]) - def batch_create_unique_constraint( - cls, operations, constraint_name, columns, **kw - ): - """Issue a "create unique constraint" instruction using the - current batch migration context. - - The batch form of this call omits the ``source`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.create_unique_constraint` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - kw["schema"] = operations.impl.schema - op = cls(constraint_name, operations.impl.table_name, columns, **kw) - return operations.invoke(op) - - -@Operations.register_operation("create_foreign_key") -@BatchOperations.register_operation( - "create_foreign_key", "batch_create_foreign_key" -) -@AddConstraintOp.register_add_constraint("foreign_key_constraint") -class CreateForeignKeyOp(AddConstraintOp): - """Represent a create foreign key constraint operation.""" - - constraint_type = "foreignkey" - - def __init__( - self, - constraint_name, - source_table, - referent_table, - local_cols, - remote_cols, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.source_table = source_table - self.referent_table = referent_table - self.local_cols = local_cols - self.remote_cols = remote_cols - self._orig_constraint = _orig_constraint - self.kw = kw - - def to_diff_tuple(self): - return ("add_fk", self.to_constraint()) - - @classmethod - def from_constraint(cls, constraint): - kw = {} - if constraint.onupdate: - kw["onupdate"] = constraint.onupdate - if constraint.ondelete: - kw["ondelete"] = constraint.ondelete - if constraint.initially: - kw["initially"] = constraint.initially - if constraint.deferrable: - kw["deferrable"] = constraint.deferrable - if constraint.use_alter: - kw["use_alter"] = constraint.use_alter - - ( - source_schema, - source_table, - source_columns, - target_schema, - target_table, - target_columns, - onupdate, - ondelete, - deferrable, - initially, - ) = sqla_compat._fk_spec(constraint) - - kw["source_schema"] = source_schema - kw["referent_schema"] = target_schema - - return cls( - constraint.name, - source_table, - target_table, - source_columns, - target_columns, - _orig_constraint=constraint, - **kw - ) - - def 
to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.foreign_key_constraint( - self.constraint_name, - self.source_table, - self.referent_table, - self.local_cols, - self.remote_cols, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [ - ("name", "constraint_name"), - ("source", "source_table"), - ("referent", "referent_table"), - ] - ) - def create_foreign_key( - cls, - operations, - constraint_name, - source_table, - referent_table, - local_cols, - remote_cols, - onupdate=None, - ondelete=None, - deferrable=None, - initially=None, - match=None, - source_schema=None, - referent_schema=None, - **dialect_kw - ): - """Issue a "create foreign key" instruction using the - current migration context. - - e.g.:: - - from alembic import op - op.create_foreign_key( - "fk_user_address", "address", - "user", ["user_id"], ["id"]) - - This internally generates a :class:`~sqlalchemy.schema.Table` object - containing the necessary columns, then generates a new - :class:`~sqlalchemy.schema.ForeignKeyConstraint` - object which it then associates with the - :class:`~sqlalchemy.schema.Table`. - Any event listeners associated with this action will be fired - off normally. The :class:`~sqlalchemy.schema.AddConstraint` - construct is ultimately used to generate the ALTER statement. - - :param name: Name of the foreign key constraint. The name is necessary - so that an ALTER statement can be emitted. For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions`, - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param source_table: String name of the source table. - :param referent_table: String name of the destination table. - :param local_cols: a list of string column names in the - source table. - :param remote_cols: a list of string column names in the - remote table. - :param onupdate: Optional string. If set, emit ON UPDATE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - :param ondelete: Optional string. If set, emit ON DELETE when - issuing DDL for this constraint. Typical values include CASCADE, - DELETE and RESTRICT. - :param deferrable: optional bool. If set, emit DEFERRABLE or NOT - DEFERRABLE when issuing DDL for this constraint. - :param source_schema: Optional schema name of the source table. - :param referent_schema: Optional schema name of the destination table. - - .. 
versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * source -> source_table - * referent -> referent_table - - """ - - op = cls( - constraint_name, - source_table, - referent_table, - local_cols, - remote_cols, - onupdate=onupdate, - ondelete=ondelete, - deferrable=deferrable, - source_schema=source_schema, - referent_schema=referent_schema, - initially=initially, - match=match, - **dialect_kw - ) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names( - [("name", "constraint_name"), ("referent", "referent_table")] - ) - def batch_create_foreign_key( - cls, - operations, - constraint_name, - referent_table, - local_cols, - remote_cols, - referent_schema=None, - onupdate=None, - ondelete=None, - deferrable=None, - initially=None, - match=None, - **dialect_kw - ): - """Issue a "create foreign key" instruction using the - current batch migration context. - - The batch form of this call omits the ``source`` and ``source_schema`` - arguments from the call. - - e.g.:: - - with batch_alter_table("address") as batch_op: - batch_op.create_foreign_key( - "fk_user_address", - "user", ["user_id"], ["id"]) - - .. seealso:: - - :meth:`.Operations.create_foreign_key` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * referent -> referent_table - - """ - op = cls( - constraint_name, - operations.impl.table_name, - referent_table, - local_cols, - remote_cols, - onupdate=onupdate, - ondelete=ondelete, - deferrable=deferrable, - source_schema=operations.impl.schema, - referent_schema=referent_schema, - initially=initially, - match=match, - **dialect_kw - ) - return operations.invoke(op) - - -@Operations.register_operation("create_check_constraint") -@BatchOperations.register_operation( - "create_check_constraint", "batch_create_check_constraint" -) -@AddConstraintOp.register_add_constraint("check_constraint") -@AddConstraintOp.register_add_constraint("table_or_column_check_constraint") -@AddConstraintOp.register_add_constraint("column_check_constraint") -class CreateCheckConstraintOp(AddConstraintOp): - """Represent a create check constraint operation.""" - - constraint_type = "check" - - def __init__( - self, - constraint_name, - table_name, - condition, - schema=None, - _orig_constraint=None, - **kw - ): - self.constraint_name = constraint_name - self.table_name = table_name - self.condition = condition - self.schema = schema - self._orig_constraint = _orig_constraint - self.kw = kw - - @classmethod - def from_constraint(cls, constraint): - constraint_table = sqla_compat._table_for_constraint(constraint) - - return cls( - constraint.name, - constraint_table.name, - constraint.sqltext, - schema=constraint_table.schema, - _orig_constraint=constraint, - ) - - def to_constraint(self, migration_context=None): - if self._orig_constraint is not None: - return self._orig_constraint - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.check_constraint( - self.constraint_name, - self.table_name, - self.condition, - schema=self.schema, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [("name", "constraint_name"), ("source", "table_name")] - ) - def create_check_constraint( - cls, - operations, - constraint_name, - table_name, - condition, - schema=None, - **kw - ): - """Issue a "create check constraint" instruction using the - current migration context. 
- - e.g.:: - - from alembic import op - from sqlalchemy.sql import column, func - - op.create_check_constraint( - "ck_user_name_len", - "user", - func.len(column('name')) > 5 - ) - - CHECK constraints are usually against a SQL expression, so ad-hoc - table metadata is usually needed. The function will convert the given - arguments into a :class:`sqlalchemy.schema.CheckConstraint` bound - to an anonymous table in order to emit the CREATE statement. - - :param name: Name of the check constraint. The name is necessary - so that an ALTER statement can be emitted. For setups that - use an automated naming scheme such as that described at - :ref:`sqla:constraint_naming_conventions`, - ``name`` here can be ``None``, as the event listener will - apply the name to the constraint object when it is associated - with the table. - :param table_name: String name of the source table. - :param condition: SQL expression that's the condition of the - constraint. Can be a string or SQLAlchemy expression language - structure. - :param deferrable: optional bool. If set, emit DEFERRABLE or - NOT DEFERRABLE when issuing DDL for this constraint. - :param initially: optional string. If set, emit INITIALLY - when issuing DDL for this constraint. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - * source -> table_name - - """ - op = cls(constraint_name, table_name, condition, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names([("name", "constraint_name")]) - def batch_create_check_constraint( - cls, operations, constraint_name, condition, **kw - ): - """Issue a "create check constraint" instruction using the - current batch migration context. - - The batch form of this call omits the ``source`` and ``schema`` - arguments from the call. - - .. seealso:: - - :meth:`.Operations.create_check_constraint` - - .. 
versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> constraint_name - - """ - op = cls( - constraint_name, - operations.impl.table_name, - condition, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("create_index") -@BatchOperations.register_operation("create_index", "batch_create_index") -class CreateIndexOp(MigrateOperation): - """Represent a create index operation.""" - - def __init__( - self, - index_name, - table_name, - columns, - schema=None, - unique=False, - _orig_index=None, - **kw - ): - self.index_name = index_name - self.table_name = table_name - self.columns = columns - self.schema = schema - self.unique = unique - self.kw = kw - self._orig_index = _orig_index - - def reverse(self): - return DropIndexOp.from_index(self.to_index()) - - def to_diff_tuple(self): - return ("add_index", self.to_index()) - - @classmethod - def from_index(cls, index): - return cls( - index.name, - index.table.name, - sqla_compat._get_index_expressions(index), - schema=index.table.schema, - unique=index.unique, - _orig_index=index, - **index.kwargs - ) - - def to_index(self, migration_context=None): - if self._orig_index: - return self._orig_index - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.index( - self.index_name, - self.table_name, - self.columns, - schema=self.schema, - unique=self.unique, - **self.kw - ) - - @classmethod - @util._with_legacy_names([("name", "index_name")]) - def create_index( - cls, - operations, - index_name, - table_name, - columns, - schema=None, - unique=False, - **kw - ): - r"""Issue a "create index" instruction using the current - migration context. - - e.g.:: - - from alembic import op - op.create_index('ik_test', 't1', ['foo', 'bar']) - - Functional indexes can be produced by using the - :func:`sqlalchemy.sql.expression.text` construct:: - - from alembic import op - from sqlalchemy import text - op.create_index('ik_test', 't1', [text('lower(foo)')]) - - .. versionadded:: 0.6.7 support for making use of the - :func:`~sqlalchemy.sql.expression.text` construct in - conjunction with - :meth:`.Operations.create_index` in - order to produce functional expressions within CREATE INDEX. - - :param index_name: name of the index. - :param table_name: name of the owning table. - :param columns: a list consisting of string column names and/or - :func:`~sqlalchemy.sql.expression.text` constructs. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param unique: If True, create a unique index. - - :param quote: - Force quoting of this column's name on or off, corresponding - to ``True`` or ``False``. When left at its default - of ``None``, the column identifier will be quoted according to - whether the name is case sensitive (identifiers with at least one - upper case character are treated as case sensitive), or if it's a - reserved word. This flag is only needed to force quoting of a - reserved word which is not known by the SQLAlchemy dialect. - - :param \**kw: Additional keyword arguments not mentioned above are - dialect specific, and passed in the form - ``<dialectname>_<argname>``. 
- See the documentation regarding an individual dialect at - :ref:`dialect_toplevel` for detail on documented arguments. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> index_name - - """ - op = cls( - index_name, table_name, columns, schema=schema, unique=unique, **kw - ) - return operations.invoke(op) - - @classmethod - def batch_create_index(cls, operations, index_name, columns, **kw): - """Issue a "create index" instruction using the - current batch migration context. - - .. seealso:: - - :meth:`.Operations.create_index` - - """ - - op = cls( - index_name, - operations.impl.table_name, - columns, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("drop_index") -@BatchOperations.register_operation("drop_index", "batch_drop_index") -class DropIndexOp(MigrateOperation): - """Represent a drop index operation.""" - - def __init__( - self, index_name, table_name=None, schema=None, _orig_index=None, **kw - ): - self.index_name = index_name - self.table_name = table_name - self.schema = schema - self._orig_index = _orig_index - self.kw = kw - - def to_diff_tuple(self): - return ("remove_index", self.to_index()) - - def reverse(self): - if self._orig_index is None: - raise ValueError( - "operation is not reversible; " "original index is not present" - ) - return CreateIndexOp.from_index(self._orig_index) - - @classmethod - def from_index(cls, index): - return cls( - index.name, - index.table.name, - schema=index.table.schema, - _orig_index=index, - **index.kwargs - ) - - def to_index(self, migration_context=None): - if self._orig_index is not None: - return self._orig_index - - schema_obj = schemaobj.SchemaObjects(migration_context) - - # need a dummy column name here since SQLAlchemy - # 0.7.6 and further raises on Index with no columns - return schema_obj.index( - self.index_name, - self.table_name, - ["x"], - schema=self.schema, - **self.kw - ) - - @classmethod - @util._with_legacy_names( - [("name", "index_name"), ("tablename", "table_name")] - ) - def drop_index( - cls, operations, index_name, table_name=None, schema=None, **kw - ): - r"""Issue a "drop index" instruction using the current - migration context. - - e.g.:: - - drop_index("accounts") - - :param index_name: name of the index. - :param table_name: name of the owning table. Some - backends such as Microsoft SQL Server require this. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param \**kw: Additional keyword arguments not mentioned above are - dialect specific, and passed in the form - ``<dialectname>_<argname>``. - See the documentation regarding an individual dialect at - :ref:`dialect_toplevel` for detail on documented arguments. - - .. versionadded:: 0.9.5 Support for dialect-specific keyword - arguments for DROP INDEX - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> index_name - - """ - op = cls(index_name, table_name=table_name, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - @util._with_legacy_names([("name", "index_name")]) - def batch_drop_index(cls, operations, index_name, **kw): - """Issue a "drop index" instruction using the - current batch migration context. - - .. 
seealso:: - - :meth:`.Operations.drop_index` - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> index_name - - """ - - op = cls( - index_name, - table_name=operations.impl.table_name, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("create_table") -class CreateTableOp(MigrateOperation): - """Represent a create table operation.""" - - def __init__( - self, table_name, columns, schema=None, _orig_table=None, **kw - ): - self.table_name = table_name - self.columns = columns - self.schema = schema - self.kw = kw - self._orig_table = _orig_table - - def reverse(self): - return DropTableOp.from_table(self.to_table()) - - def to_diff_tuple(self): - return ("add_table", self.to_table()) - - @classmethod - def from_table(cls, table): - return cls( - table.name, - list(table.c) + list(table.constraints), - schema=table.schema, - _orig_table=table, - **table.kwargs - ) - - def to_table(self, migration_context=None): - if self._orig_table is not None: - return self._orig_table - schema_obj = schemaobj.SchemaObjects(migration_context) - - return schema_obj.table( - self.table_name, *self.columns, schema=self.schema, **self.kw - ) - - @classmethod - @util._with_legacy_names([("name", "table_name")]) - def create_table(cls, operations, table_name, *columns, **kw): - r"""Issue a "create table" instruction using the current migration - context. - - This directive receives an argument list similar to that of the - traditional :class:`sqlalchemy.schema.Table` construct, but without the - metadata:: - - from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column - from alembic import op - - op.create_table( - 'account', - Column('id', INTEGER, primary_key=True), - Column('name', VARCHAR(50), nullable=False), - Column('description', NVARCHAR(200)), - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - Note that :meth:`.create_table` accepts - :class:`~sqlalchemy.schema.Column` - constructs directly from the SQLAlchemy library. In particular, - default values to be created on the database side are - specified using the ``server_default`` parameter, and not - ``default`` which only specifies Python-side defaults:: - - from alembic import op - from sqlalchemy import Column, TIMESTAMP, func - - # specify "DEFAULT NOW" along with the "timestamp" column - op.create_table('account', - Column('id', INTEGER, primary_key=True), - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - The function also returns a newly created - :class:`~sqlalchemy.schema.Table` object, corresponding to the table - specification given, which is suitable for - immediate SQL operations, in particular - :meth:`.Operations.bulk_insert`:: - - from sqlalchemy import INTEGER, VARCHAR, NVARCHAR, Column - from alembic import op - - account_table = op.create_table( - 'account', - Column('id', INTEGER, primary_key=True), - Column('name', VARCHAR(50), nullable=False), - Column('description', NVARCHAR(200)), - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - op.bulk_insert( - account_table, - [ - {"name": "A1", "description": "account 1"}, - {"name": "A2", "description": "account 2"}, - ] - ) - - .. versionadded:: 0.7.0 - - :param table_name: Name of the table - :param \*columns: collection of :class:`~sqlalchemy.schema.Column` - objects within - the table, as well as optional :class:`~sqlalchemy.schema.Constraint` - objects - and :class:`~.sqlalchemy.schema.Index` objects. 
- :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - :param \**kw: Other keyword arguments are passed to the underlying - :class:`sqlalchemy.schema.Table` object created for the command. - - :return: the :class:`~sqlalchemy.schema.Table` object corresponding - to the parameters given. - - .. versionadded:: 0.7.0 - the :class:`~sqlalchemy.schema.Table` - object is returned. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> table_name - - """ - op = cls(table_name, columns, **kw) - return operations.invoke(op) - - -@Operations.register_operation("drop_table") -class DropTableOp(MigrateOperation): - """Represent a drop table operation.""" - - def __init__( - self, table_name, schema=None, table_kw=None, _orig_table=None - ): - self.table_name = table_name - self.schema = schema - self.table_kw = table_kw or {} - self._orig_table = _orig_table - - def to_diff_tuple(self): - return ("remove_table", self.to_table()) - - def reverse(self): - if self._orig_table is None: - raise ValueError( - "operation is not reversible; " "original table is not present" - ) - return CreateTableOp.from_table(self._orig_table) - - @classmethod - def from_table(cls, table): - return cls(table.name, schema=table.schema, _orig_table=table) - - def to_table(self, migration_context=None): - if self._orig_table is not None: - return self._orig_table - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.table( - self.table_name, schema=self.schema, **self.table_kw - ) - - @classmethod - @util._with_legacy_names([("name", "table_name")]) - def drop_table(cls, operations, table_name, schema=None, **kw): - r"""Issue a "drop table" instruction using the current - migration context. - - - e.g.:: - - drop_table("accounts") - - :param table_name: Name of the table - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param \**kw: Other keyword arguments are passed to the underlying - :class:`sqlalchemy.schema.Table` object created for the command. - - .. versionchanged:: 0.8.0 The following positional argument names - have been changed: - - * name -> table_name - - """ - op = cls(table_name, schema=schema, table_kw=kw) - operations.invoke(op) - - -class AlterTableOp(MigrateOperation): - """Represent an alter table operation.""" - - def __init__(self, table_name, schema=None): - self.table_name = table_name - self.schema = schema - - -@Operations.register_operation("rename_table") -class RenameTableOp(AlterTableOp): - """Represent a rename table operation.""" - - def __init__(self, old_table_name, new_table_name, schema=None): - super(RenameTableOp, self).__init__(old_table_name, schema=schema) - self.new_table_name = new_table_name - - @classmethod - def rename_table( - cls, operations, old_table_name, new_table_name, schema=None - ): - """Emit an ALTER TABLE to rename a table. - - :param old_table_name: old name. - :param new_table_name: new name. - :param schema: Optional schema name to operate within. 
To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - """ - op = cls(old_table_name, new_table_name, schema=schema) - return operations.invoke(op) - - -@Operations.register_operation("create_table_comment") -class CreateTableCommentOp(AlterTableOp): - """Represent a COMMENT ON `table` operation. - """ - - def __init__( - self, table_name, comment, schema=None, existing_comment=None - ): - self.table_name = table_name - self.comment = comment - self.existing_comment = existing_comment - self.schema = schema - - @classmethod - def create_table_comment( - cls, - operations, - table_name, - comment, - existing_comment=None, - schema=None, - ): - """Emit a COMMENT ON operation to set the comment for a table. - - .. versionadded:: 1.0.6 - - :param table_name: string name of the target table. - :param comment: string value of the comment being registered against - the specified table. - :param existing_comment: String value of a comment - already registered on the specified table, used within autogenerate - so that the operation is reversible, but not required for direct - use. - - .. seealso:: - - :meth:`.Operations.drop_table_comment` - - :paramref:`.Operations.alter_column.comment` - - """ - - op = cls( - table_name, - comment, - existing_comment=existing_comment, - schema=schema, - ) - return operations.invoke(op) - - def reverse(self): - """Reverses the COMMENT ON operation against a table. - """ - if self.existing_comment is None: - return DropTableCommentOp( - self.table_name, - existing_comment=self.comment, - schema=self.schema, - ) - else: - return CreateTableCommentOp( - self.table_name, - self.existing_comment, - existing_comment=self.comment, - schema=self.schema, - ) - - def to_table(self, migration_context=None): - schema_obj = schemaobj.SchemaObjects(migration_context) - - return schema_obj.table( - self.table_name, schema=self.schema, comment=self.comment - ) - - def to_diff_tuple(self): - return ("add_table_comment", self.to_table(), self.existing_comment) - - -@Operations.register_operation("drop_table_comment") -class DropTableCommentOp(AlterTableOp): - """Represent an operation to remove the comment from a table. - """ - - def __init__(self, table_name, schema=None, existing_comment=None): - self.table_name = table_name - self.existing_comment = existing_comment - self.schema = schema - - @classmethod - def drop_table_comment( - cls, operations, table_name, existing_comment=None, schema=None - ): - """Issue a "drop table comment" operation to - remove an existing comment set on a table. - - .. versionadded:: 1.0.6 - - :param table_name: string name of the target table. - :param existing_comment: An optional string value of a comment already - registered on the specified table. - - .. seealso:: - - :meth:`.Operations.create_table_comment` - - :paramref:`.Operations.alter_column.comment` - - """ - - op = cls(table_name, existing_comment=existing_comment, schema=schema) - return operations.invoke(op) - - def reverse(self): - """Reverses the COMMENT ON operation against a table. 
- """ - return CreateTableCommentOp( - self.table_name, self.existing_comment, schema=self.schema - ) - - def to_table(self, migration_context=None): - schema_obj = schemaobj.SchemaObjects(migration_context) - - return schema_obj.table(self.table_name, schema=self.schema) - - def to_diff_tuple(self): - return ("remove_table_comment", self.to_table()) - - -@Operations.register_operation("alter_column") -@BatchOperations.register_operation("alter_column", "batch_alter_column") -class AlterColumnOp(AlterTableOp): - """Represent an alter column operation.""" - - def __init__( - self, - table_name, - column_name, - schema=None, - existing_type=None, - existing_server_default=False, - existing_nullable=None, - existing_comment=None, - modify_nullable=None, - modify_comment=False, - modify_server_default=False, - modify_name=None, - modify_type=None, - **kw - ): - super(AlterColumnOp, self).__init__(table_name, schema=schema) - self.column_name = column_name - self.existing_type = existing_type - self.existing_server_default = existing_server_default - self.existing_nullable = existing_nullable - self.existing_comment = existing_comment - self.modify_nullable = modify_nullable - self.modify_comment = modify_comment - self.modify_server_default = modify_server_default - self.modify_name = modify_name - self.modify_type = modify_type - self.kw = kw - - def to_diff_tuple(self): - col_diff = [] - schema, tname, cname = self.schema, self.table_name, self.column_name - - if self.modify_type is not None: - col_diff.append( - ( - "modify_type", - schema, - tname, - cname, - { - "existing_nullable": self.existing_nullable, - "existing_server_default": ( - self.existing_server_default - ), - "existing_comment": self.existing_comment, - }, - self.existing_type, - self.modify_type, - ) - ) - - if self.modify_nullable is not None: - col_diff.append( - ( - "modify_nullable", - schema, - tname, - cname, - { - "existing_type": self.existing_type, - "existing_server_default": ( - self.existing_server_default - ), - "existing_comment": self.existing_comment, - }, - self.existing_nullable, - self.modify_nullable, - ) - ) - - if self.modify_server_default is not False: - col_diff.append( - ( - "modify_default", - schema, - tname, - cname, - { - "existing_nullable": self.existing_nullable, - "existing_type": self.existing_type, - "existing_comment": self.existing_comment, - }, - self.existing_server_default, - self.modify_server_default, - ) - ) - - if self.modify_comment is not False: - col_diff.append( - ( - "modify_comment", - schema, - tname, - cname, - { - "existing_nullable": self.existing_nullable, - "existing_type": self.existing_type, - "existing_server_default": ( - self.existing_server_default - ), - }, - self.existing_comment, - self.modify_comment, - ) - ) - - return col_diff - - def has_changes(self): - hc1 = ( - self.modify_nullable is not None - or self.modify_server_default is not False - or self.modify_type is not None - or self.modify_comment is not False - ) - if hc1: - return True - for kw in self.kw: - if kw.startswith("modify_"): - return True - else: - return False - - def reverse(self): - - kw = self.kw.copy() - kw["existing_type"] = self.existing_type - kw["existing_nullable"] = self.existing_nullable - kw["existing_server_default"] = self.existing_server_default - kw["existing_comment"] = self.existing_comment - if self.modify_type is not None: - kw["modify_type"] = self.modify_type - if self.modify_nullable is not None: - kw["modify_nullable"] = self.modify_nullable - if 
self.modify_server_default is not False: - kw["modify_server_default"] = self.modify_server_default - if self.modify_comment is not False: - kw["modify_comment"] = self.modify_comment - - # TODO: make this a little simpler - all_keys = set( - m.group(1) - for m in [re.match(r"^(?:existing_|modify_)(.+)$", k) for k in kw] - if m - ) - - for k in all_keys: - if "modify_%s" % k in kw: - swap = kw["existing_%s" % k] - kw["existing_%s" % k] = kw["modify_%s" % k] - kw["modify_%s" % k] = swap - - return self.__class__( - self.table_name, self.column_name, schema=self.schema, **kw - ) - - @classmethod - @util._with_legacy_names([("name", "new_column_name")]) - def alter_column( - cls, - operations, - table_name, - column_name, - nullable=None, - comment=False, - server_default=False, - new_column_name=None, - type_=None, - existing_type=None, - existing_server_default=False, - existing_nullable=None, - existing_comment=None, - schema=None, - **kw - ): - r"""Issue an "alter column" instruction using the - current migration context. - - Generally, only that aspect of the column which - is being changed, i.e. name, type, nullability, - default, needs to be specified. Multiple changes - can also be specified at once and the backend should - "do the right thing", emitting each change either - separately or together as the backend allows. - - MySQL has special requirements here, since MySQL - cannot ALTER a column without a full specification. - When producing MySQL-compatible migration files, - it is recommended that the ``existing_type``, - ``existing_server_default``, and ``existing_nullable`` - parameters be present, if not being altered. - - Type changes which are against the SQLAlchemy - "schema" types :class:`~sqlalchemy.types.Boolean` - and :class:`~sqlalchemy.types.Enum` may also - add or drop constraints which accompany those - types on backends that don't support them natively. - The ``existing_type`` argument is - used in this case to identify and remove a previous - constraint that was bound to the type object. - - :param table_name: string name of the target table. - :param column_name: string name of the target column, - as it exists before the operation begins. - :param nullable: Optional; specify ``True`` or ``False`` - to alter the column's nullability. - :param server_default: Optional; specify a string - SQL expression, :func:`~sqlalchemy.sql.expression.text`, - or :class:`~sqlalchemy.schema.DefaultClause` to indicate - an alteration to the column's default value. - Set to ``None`` to have the default removed. - :param comment: optional string text of a new comment to add to the - column. - - .. versionadded:: 1.0.6 - - :param new_column_name: Optional; specify a string name here to - indicate the new name within a column rename operation. - :param type\_: Optional; a :class:`~sqlalchemy.types.TypeEngine` - type object to specify a change to the column's type. - For SQLAlchemy types that also indicate a constraint (i.e. - :class:`~sqlalchemy.types.Boolean`, :class:`~sqlalchemy.types.Enum`), - the constraint is also generated. - :param autoincrement: set the ``AUTO_INCREMENT`` flag of the column; - currently understood by the MySQL dialect. - :param existing_type: Optional; a - :class:`~sqlalchemy.types.TypeEngine` - type object to specify the previous type. This - is required for all MySQL column alter operations that - don't otherwise specify a new type, as well as for - when nullability is being changed on a SQL Server - column. 
It is also used if the type is a so-called - SQLAlchemy "schema" type which may define a constraint (i.e. - :class:`~sqlalchemy.types.Boolean`, - :class:`~sqlalchemy.types.Enum`), - so that the constraint can be dropped. - :param existing_server_default: Optional; The existing - default value of the column. Required on MySQL if - an existing default is not being changed; else MySQL - removes the default. - :param existing_nullable: Optional; the existing nullability - of the column. Required on MySQL if the existing nullability - is not being changed; else MySQL sets this to NULL. - :param existing_autoincrement: Optional; the existing autoincrement - of the column. Used for MySQL's system of altering a column - that specifies ``AUTO_INCREMENT``. - :param existing_comment: string text of the existing comment on the - column to be maintained. Required on MySQL if the existing comment - on the column is not being changed. - - .. versionadded:: 1.0.6 - - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param postgresql_using: String argument which will indicate a - SQL expression to render within the Postgresql-specific USING clause - within ALTER COLUMN. This string is taken directly as raw SQL which - must explicitly include any necessary quoting or escaping of tokens - within the expression. - - .. versionadded:: 0.8.8 - - """ - - alt = cls( - table_name, - column_name, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - modify_name=new_column_name, - modify_type=type_, - modify_server_default=server_default, - modify_nullable=nullable, - modify_comment=comment, - **kw - ) - - return operations.invoke(alt) - - @classmethod - def batch_alter_column( - cls, - operations, - column_name, - nullable=None, - comment=False, - server_default=False, - new_column_name=None, - type_=None, - existing_type=None, - existing_server_default=False, - existing_nullable=None, - existing_comment=None, - insert_before=None, - insert_after=None, - **kw - ): - """Issue an "alter column" instruction using the current - batch migration context. - - Parameters are the same as that of :meth:`.Operations.alter_column`, - as well as the following option(s): - - :param insert_before: String name of an existing column which this - column should be placed before, when creating the new table. - - .. versionadded:: 1.4.0 - - :param insert_after: String name of an existing column which this - column should be placed after, when creating the new table. If - both :paramref:`.BatchOperations.alter_column.insert_before` - and :paramref:`.BatchOperations.alter_column.insert_after` are - omitted, the column is inserted after the last existing column - in the table. - - .. versionadded:: 1.4.0 - - .. 
seealso:: - - :meth:`.Operations.alter_column` - - - """ - alt = cls( - operations.impl.table_name, - column_name, - schema=operations.impl.schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - existing_comment=existing_comment, - modify_name=new_column_name, - modify_type=type_, - modify_server_default=server_default, - modify_nullable=nullable, - modify_comment=comment, - **kw - ) - - return operations.invoke(alt) - - -@Operations.register_operation("add_column") -@BatchOperations.register_operation("add_column", "batch_add_column") -class AddColumnOp(AlterTableOp): - """Represent an add column operation.""" - - def __init__(self, table_name, column, schema=None, **kw): - super(AddColumnOp, self).__init__(table_name, schema=schema) - self.column = column - self.kw = kw - - def reverse(self): - return DropColumnOp.from_column_and_tablename( - self.schema, self.table_name, self.column - ) - - def to_diff_tuple(self): - return ("add_column", self.schema, self.table_name, self.column) - - def to_column(self): - return self.column - - @classmethod - def from_column(cls, col): - return cls(col.table.name, col, schema=col.table.schema) - - @classmethod - def from_column_and_tablename(cls, schema, tname, col): - return cls(tname, col, schema=schema) - - @classmethod - def add_column(cls, operations, table_name, column, schema=None): - """Issue an "add column" instruction using the current - migration context. - - e.g.:: - - from alembic import op - from sqlalchemy import Column, String - - op.add_column('organization', - Column('name', String()) - ) - - The provided :class:`~sqlalchemy.schema.Column` object can also - specify a :class:`~sqlalchemy.schema.ForeignKey`, referencing - a remote table name. Alembic will automatically generate a stub - "referenced" table and emit a second ALTER statement in order - to add the constraint separately:: - - from alembic import op - from sqlalchemy import Column, INTEGER, ForeignKey - - op.add_column('organization', - Column('account_id', INTEGER, ForeignKey('accounts.id')) - ) - - Note that this statement uses the :class:`~sqlalchemy.schema.Column` - construct as is from the SQLAlchemy library. In particular, - default values to be created on the database side are - specified using the ``server_default`` parameter, and not - ``default`` which only specifies Python-side defaults:: - - from alembic import op - from sqlalchemy import Column, TIMESTAMP, func - - # specify "DEFAULT NOW" along with the column add - op.add_column('account', - Column('timestamp', TIMESTAMP, server_default=func.now()) - ) - - :param table_name: String name of the parent table. - :param column: a :class:`sqlalchemy.schema.Column` object - representing the new column. - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - - """ - - op = cls(table_name, column, schema=schema) - return operations.invoke(op) - - @classmethod - def batch_add_column( - cls, operations, column, insert_before=None, insert_after=None - ): - """Issue an "add column" instruction using the current - batch migration context. - - .. 
seealso:: - - :meth:`.Operations.add_column` - - """ - - kw = {} - if insert_before: - kw["insert_before"] = insert_before - if insert_after: - kw["insert_after"] = insert_after - - op = cls( - operations.impl.table_name, - column, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("drop_column") -@BatchOperations.register_operation("drop_column", "batch_drop_column") -class DropColumnOp(AlterTableOp): - """Represent a drop column operation.""" - - def __init__( - self, table_name, column_name, schema=None, _orig_column=None, **kw - ): - super(DropColumnOp, self).__init__(table_name, schema=schema) - self.column_name = column_name - self.kw = kw - self._orig_column = _orig_column - - def to_diff_tuple(self): - return ( - "remove_column", - self.schema, - self.table_name, - self.to_column(), - ) - - def reverse(self): - if self._orig_column is None: - raise ValueError( - "operation is not reversible; " - "original column is not present" - ) - - return AddColumnOp.from_column_and_tablename( - self.schema, self.table_name, self._orig_column - ) - - @classmethod - def from_column_and_tablename(cls, schema, tname, col): - return cls(tname, col.name, schema=schema, _orig_column=col) - - def to_column(self, migration_context=None): - if self._orig_column is not None: - return self._orig_column - schema_obj = schemaobj.SchemaObjects(migration_context) - return schema_obj.column(self.column_name, NULLTYPE) - - @classmethod - def drop_column( - cls, operations, table_name, column_name, schema=None, **kw - ): - """Issue a "drop column" instruction using the current - migration context. - - e.g.:: - - op.drop_column('organization', 'account_id') - - :param table_name: name of table - :param column_name: name of column - :param schema: Optional schema name to operate within. To control - quoting of the schema outside of the default behavior, use - the SQLAlchemy construct - :class:`~sqlalchemy.sql.elements.quoted_name`. - - .. versionadded:: 0.7.0 'schema' can now accept a - :class:`~sqlalchemy.sql.elements.quoted_name` construct. - - :param mssql_drop_check: Optional boolean. When ``True``, on - Microsoft SQL Server only, first - drop the CHECK constraint on the column using a - SQL-script-compatible - block that selects into a @variable from sys.check_constraints, - then exec's a separate DROP CONSTRAINT for that constraint. - :param mssql_drop_default: Optional boolean. When ``True``, on - Microsoft SQL Server only, first - drop the DEFAULT constraint on the column using a - SQL-script-compatible - block that selects into a @variable from sys.default_constraints, - then exec's a separate DROP CONSTRAINT for that default. - :param mssql_drop_foreign_key: Optional boolean. When ``True``, on - Microsoft SQL Server only, first - drop a single FOREIGN KEY constraint on the column using a - SQL-script-compatible - block that selects into a @variable from - sys.foreign_keys/sys.foreign_key_columns, - then exec's a separate DROP CONSTRAINT for that constraint. Only - works if the column has exactly one FK constraint which refers to - it, at the moment. - - .. versionadded:: 0.6.2 - - """ - - op = cls(table_name, column_name, schema=schema, **kw) - return operations.invoke(op) - - @classmethod - def batch_drop_column(cls, operations, column_name, **kw): - """Issue a "drop column" instruction using the current - batch migration context. - - .. 
seealso:: - - :meth:`.Operations.drop_column` - - """ - op = cls( - operations.impl.table_name, - column_name, - schema=operations.impl.schema, - **kw - ) - return operations.invoke(op) - - -@Operations.register_operation("bulk_insert") -class BulkInsertOp(MigrateOperation): - """Represent a bulk insert operation.""" - - def __init__(self, table, rows, multiinsert=True): - self.table = table - self.rows = rows - self.multiinsert = multiinsert - - @classmethod - def bulk_insert(cls, operations, table, rows, multiinsert=True): - """Issue a "bulk insert" operation using the current - migration context. - - This provides a means of representing an INSERT of multiple rows - which works equally well in the context of executing on a live - connection as well as that of generating a SQL script. In the - case of a SQL script, the values are rendered inline into the - statement. - - e.g.:: - - from alembic import op - from datetime import date - from sqlalchemy.sql import table, column - from sqlalchemy import String, Integer, Date - - # Create an ad-hoc table to use for the insert statement. - accounts_table = table('account', - column('id', Integer), - column('name', String), - column('create_date', Date) - ) - - op.bulk_insert(accounts_table, - [ - {'id':1, 'name':'John Smith', - 'create_date':date(2010, 10, 5)}, - {'id':2, 'name':'Ed Williams', - 'create_date':date(2007, 5, 27)}, - {'id':3, 'name':'Wendy Jones', - 'create_date':date(2008, 8, 15)}, - ] - ) - - When using --sql mode, some datatypes may not render inline - automatically, such as dates and other special types. When this - issue is present, :meth:`.Operations.inline_literal` may be used:: - - op.bulk_insert(accounts_table, - [ - {'id':1, 'name':'John Smith', - 'create_date':op.inline_literal("2010-10-05")}, - {'id':2, 'name':'Ed Williams', - 'create_date':op.inline_literal("2007-05-27")}, - {'id':3, 'name':'Wendy Jones', - 'create_date':op.inline_literal("2008-08-15")}, - ], - multiinsert=False - ) - - When using :meth:`.Operations.inline_literal` in conjunction with - :meth:`.Operations.bulk_insert`, in order for the statement to work - in "online" (e.g. non --sql) mode, the - :paramref:`~.Operations.bulk_insert.multiinsert` - flag should be set to ``False``, which will have the effect of - individual INSERT statements being emitted to the database, each - with a distinct VALUES clause, so that the "inline" values can - still be rendered, rather than attempting to pass the values - as bound parameters. - - .. versionadded:: 0.6.4 :meth:`.Operations.inline_literal` can now - be used with :meth:`.Operations.bulk_insert`, and the - :paramref:`~.Operations.bulk_insert.multiinsert` flag has - been added to assist in this usage when running in "online" - mode. - - :param table: a table object which represents the target of the INSERT. - - :param rows: a list of dictionaries indicating rows. - - :param multiinsert: when at its default of True and --sql mode is not - enabled, the INSERT statement will be executed using - "executemany()" style, where all elements in the list of - dictionaries are passed as bound parameters in a single - list. Setting this to False results in individual INSERT - statements being emitted per parameter set, and is needed - in those cases where non-literal values are present in the - parameter sets. - - .. 
versionadded:: 0.6.4 - - """ - - op = cls(table, rows, multiinsert=multiinsert) - operations.invoke(op) - - -@Operations.register_operation("execute") -class ExecuteSQLOp(MigrateOperation): - """Represent an execute SQL operation.""" - - def __init__(self, sqltext, execution_options=None): - self.sqltext = sqltext - self.execution_options = execution_options - - @classmethod - def execute(cls, operations, sqltext, execution_options=None): - r"""Execute the given SQL using the current migration context. - - The given SQL can be a plain string, e.g.:: - - op.execute("INSERT INTO table (foo) VALUES ('some value')") - - Or it can be any kind of Core SQL Expression construct, such as - below where we use an update construct:: - - from sqlalchemy.sql import table, column - from sqlalchemy import String - from alembic import op - - account = table('account', - column('name', String) - ) - op.execute( - account.update().\\ - where(account.c.name==op.inline_literal('account 1')).\\ - values({'name':op.inline_literal('account 2')}) - ) - - Above, we made use of the SQLAlchemy - :func:`sqlalchemy.sql.expression.table` and - :func:`sqlalchemy.sql.expression.column` constructs to make a brief, - ad-hoc table construct just for our UPDATE statement. A full - :class:`~sqlalchemy.schema.Table` construct of course works perfectly - fine as well, though note it's a recommended practice to at least - ensure the definition of a table is self-contained within the migration - script, rather than imported from a module that may break compatibility - with older migrations. - - In a SQL script context, the statement is emitted directly to the - output stream. There is *no* return result, however, as this - function is oriented towards generating a change script - that can run in "offline" mode. Additionally, parameterized - statements are discouraged here, as they *will not work* in offline - mode. Above, we use :meth:`.inline_literal` where parameters are - to be used. - - For full interaction with a connected database where parameters can - also be used normally, use the "bind" available from the context:: - - from alembic import op - connection = op.get_bind() - - connection.execute( - account.update().where(account.c.name=='account 1'). - values({"name": "account 2"}) - ) - - Additionally, when passing the statement as a plain string, it is first - coerced into a :func:`sqlalchemy.sql.expression.text` construct - before being passed along. In the less likely case that the - literal SQL string contains a colon, it must be escaped with a - backslash, as:: - - op.execute("INSERT INTO table (foo) VALUES ('\:colon_value')") - - - :param sqltext: Any legal SQLAlchemy expression, including: - - * a string - * a :func:`sqlalchemy.sql.expression.text` construct. - * a :func:`sqlalchemy.sql.expression.insert`, - :func:`sqlalchemy.sql.expression.update`, - or :func:`sqlalchemy.sql.expression.delete` construct. - * Pretty much anything that's "executable" as described - in :ref:`sqlexpression_toplevel`. - - .. note:: when passing a plain string, the statement is coerced into - a :func:`sqlalchemy.sql.expression.text` construct. This construct - considers symbols with colons, e.g. ``:foo`` to be bound parameters. - To avoid this, ensure that colon symbols are escaped, e.g. - ``\:foo``. - - :param execution_options: Optional dictionary of - execution options, will be passed to - :meth:`sqlalchemy.engine.Connection.execution_options`. 
- """ - op = cls(sqltext, execution_options=execution_options) - return operations.invoke(op) - - -class OpContainer(MigrateOperation): - """Represent a sequence of operations operation.""" - - def __init__(self, ops=()): - self.ops = ops - - def is_empty(self): - return not self.ops - - def as_diffs(self): - return list(OpContainer._ops_as_diffs(self)) - - @classmethod - def _ops_as_diffs(cls, migrations): - for op in migrations.ops: - if hasattr(op, "ops"): - for sub_op in cls._ops_as_diffs(op): - yield sub_op - else: - yield op.to_diff_tuple() - - -class ModifyTableOps(OpContainer): - """Contains a sequence of operations that all apply to a single Table.""" - - def __init__(self, table_name, ops, schema=None): - super(ModifyTableOps, self).__init__(ops) - self.table_name = table_name - self.schema = schema - - def reverse(self): - return ModifyTableOps( - self.table_name, - ops=list(reversed([op.reverse() for op in self.ops])), - schema=self.schema, - ) - - -class UpgradeOps(OpContainer): - """contains a sequence of operations that would apply to the - 'upgrade' stream of a script. - - .. seealso:: - - :ref:`customizing_revision` - - """ - - def __init__(self, ops=(), upgrade_token="upgrades"): - super(UpgradeOps, self).__init__(ops=ops) - self.upgrade_token = upgrade_token - - def reverse_into(self, downgrade_ops): - downgrade_ops.ops[:] = list( - reversed([op.reverse() for op in self.ops]) - ) - return downgrade_ops - - def reverse(self): - return self.reverse_into(DowngradeOps(ops=[])) - - -class DowngradeOps(OpContainer): - """contains a sequence of operations that would apply to the - 'downgrade' stream of a script. - - .. seealso:: - - :ref:`customizing_revision` - - """ - - def __init__(self, ops=(), downgrade_token="downgrades"): - super(DowngradeOps, self).__init__(ops=ops) - self.downgrade_token = downgrade_token - - def reverse(self): - return UpgradeOps( - ops=list(reversed([op.reverse() for op in self.ops])) - ) - - -class MigrationScript(MigrateOperation): - """represents a migration script. - - E.g. when autogenerate encounters this object, this corresponds to the - production of an actual script file. - - A normal :class:`.MigrationScript` object would contain a single - :class:`.UpgradeOps` and a single :class:`.DowngradeOps` directive. - These are accessible via the ``.upgrade_ops`` and ``.downgrade_ops`` - attributes. - - In the case of an autogenerate operation that runs multiple times, - such as the multiple database example in the "multidb" template, - the ``.upgrade_ops`` and ``.downgrade_ops`` attributes are disabled, - and instead these objects should be accessed via the ``.upgrade_ops_list`` - and ``.downgrade_ops_list`` list-based attributes. These latter - attributes are always available at the very least as single-element lists. - - .. versionchanged:: 0.8.1 the ``.upgrade_ops`` and ``.downgrade_ops`` - attributes should be accessed via the ``.upgrade_ops_list`` - and ``.downgrade_ops_list`` attributes if multiple autogenerate - passes proceed on the same :class:`.MigrationScript` object. - - .. 
seealso:: - - :ref:`customizing_revision` - - """ - - def __init__( - self, - rev_id, - upgrade_ops, - downgrade_ops, - message=None, - imports=set(), - head=None, - splice=None, - branch_label=None, - version_path=None, - depends_on=None, - ): - self.rev_id = rev_id - self.message = message - self.imports = imports - self.head = head - self.splice = splice - self.branch_label = branch_label - self.version_path = version_path - self.depends_on = depends_on - self.upgrade_ops = upgrade_ops - self.downgrade_ops = downgrade_ops - - @property - def upgrade_ops(self): - """An instance of :class:`.UpgradeOps`. - - .. seealso:: - - :attr:`.MigrationScript.upgrade_ops_list` - """ - if len(self._upgrade_ops) > 1: - raise ValueError( - "This MigrationScript instance has a multiple-entry " - "list for UpgradeOps; please use the " - "upgrade_ops_list attribute." - ) - elif not self._upgrade_ops: - return None - else: - return self._upgrade_ops[0] - - @upgrade_ops.setter - def upgrade_ops(self, upgrade_ops): - self._upgrade_ops = util.to_list(upgrade_ops) - for elem in self._upgrade_ops: - assert isinstance(elem, UpgradeOps) - - @property - def downgrade_ops(self): - """An instance of :class:`.DowngradeOps`. - - .. seealso:: - - :attr:`.MigrationScript.downgrade_ops_list` - """ - if len(self._downgrade_ops) > 1: - raise ValueError( - "This MigrationScript instance has a multiple-entry " - "list for DowngradeOps; please use the " - "downgrade_ops_list attribute." - ) - elif not self._downgrade_ops: - return None - else: - return self._downgrade_ops[0] - - @downgrade_ops.setter - def downgrade_ops(self, downgrade_ops): - self._downgrade_ops = util.to_list(downgrade_ops) - for elem in self._downgrade_ops: - assert isinstance(elem, DowngradeOps) - - @property - def upgrade_ops_list(self): - """A list of :class:`.UpgradeOps` instances. - - This is used in place of the :attr:`.MigrationScript.upgrade_ops` - attribute when dealing with a revision operation that does - multiple autogenerate passes. - - .. versionadded:: 0.8.1 - - """ - return self._upgrade_ops - - @property - def downgrade_ops_list(self): - """A list of :class:`.DowngradeOps` instances. - - This is used in place of the :attr:`.MigrationScript.downgrade_ops` - attribute when dealing with a revision operation that does - multiple autogenerate passes. - - .. versionadded:: 0.8.1 - - """ - return self._downgrade_ops diff --git a/venv/lib/python3.7/site-packages/alembic/operations/schemaobj.py b/venv/lib/python3.7/site-packages/alembic/operations/schemaobj.py deleted file mode 100644 index d90b5e6..0000000 --- a/venv/lib/python3.7/site-packages/alembic/operations/schemaobj.py +++ /dev/null @@ -1,181 +0,0 @@ -from sqlalchemy import schema as sa_schema -from sqlalchemy.types import Integer -from sqlalchemy.types import NULLTYPE - -from .. 
import util -from ..util.compat import string_types - - -class SchemaObjects(object): - def __init__(self, migration_context=None): - self.migration_context = migration_context - - def primary_key_constraint(self, name, table_name, cols, schema=None): - m = self.metadata() - columns = [sa_schema.Column(n, NULLTYPE) for n in cols] - t = sa_schema.Table(table_name, m, *columns, schema=schema) - p = sa_schema.PrimaryKeyConstraint(*[t.c[n] for n in cols], name=name) - t.append_constraint(p) - return p - - def foreign_key_constraint( - self, - name, - source, - referent, - local_cols, - remote_cols, - onupdate=None, - ondelete=None, - deferrable=None, - source_schema=None, - referent_schema=None, - initially=None, - match=None, - **dialect_kw - ): - m = self.metadata() - if source == referent and source_schema == referent_schema: - t1_cols = local_cols + remote_cols - else: - t1_cols = local_cols - sa_schema.Table( - referent, - m, - *[sa_schema.Column(n, NULLTYPE) for n in remote_cols], - schema=referent_schema - ) - - t1 = sa_schema.Table( - source, - m, - *[sa_schema.Column(n, NULLTYPE) for n in t1_cols], - schema=source_schema - ) - - tname = ( - "%s.%s" % (referent_schema, referent) - if referent_schema - else referent - ) - - dialect_kw["match"] = match - - f = sa_schema.ForeignKeyConstraint( - local_cols, - ["%s.%s" % (tname, n) for n in remote_cols], - name=name, - onupdate=onupdate, - ondelete=ondelete, - deferrable=deferrable, - initially=initially, - **dialect_kw - ) - t1.append_constraint(f) - - return f - - def unique_constraint(self, name, source, local_cols, schema=None, **kw): - t = sa_schema.Table( - source, - self.metadata(), - *[sa_schema.Column(n, NULLTYPE) for n in local_cols], - schema=schema - ) - kw["name"] = name - uq = sa_schema.UniqueConstraint(*[t.c[n] for n in local_cols], **kw) - # TODO: need event tests to ensure the event - # is fired off here - t.append_constraint(uq) - return uq - - def check_constraint(self, name, source, condition, schema=None, **kw): - t = sa_schema.Table( - source, - self.metadata(), - sa_schema.Column("x", Integer), - schema=schema, - ) - ck = sa_schema.CheckConstraint(condition, name=name, **kw) - t.append_constraint(ck) - return ck - - def generic_constraint(self, name, table_name, type_, schema=None, **kw): - t = self.table(table_name, schema=schema) - types = { - "foreignkey": lambda name: sa_schema.ForeignKeyConstraint( - [], [], name=name - ), - "primary": sa_schema.PrimaryKeyConstraint, - "unique": sa_schema.UniqueConstraint, - "check": lambda name: sa_schema.CheckConstraint("", name=name), - None: sa_schema.Constraint, - } - try: - const = types[type_] - except KeyError: - raise TypeError( - "'type' can be one of %s" - % ", ".join(sorted(repr(x) for x in types)) - ) - else: - const = const(name=name) - t.append_constraint(const) - return const - - def metadata(self): - kw = {} - if ( - self.migration_context is not None - and "target_metadata" in self.migration_context.opts - ): - mt = self.migration_context.opts["target_metadata"] - if hasattr(mt, "naming_convention"): - kw["naming_convention"] = mt.naming_convention - return sa_schema.MetaData(**kw) - - def table(self, name, *columns, **kw): - m = self.metadata() - t = sa_schema.Table(name, m, *columns, **kw) - for f in t.foreign_keys: - self._ensure_table_for_fk(m, f) - return t - - def column(self, name, type_, **kw): - return sa_schema.Column(name, type_, **kw) - - def index(self, name, tablename, columns, schema=None, **kw): - t = sa_schema.Table( - tablename or "no_table", 
self.metadata(), schema=schema - ) - idx = sa_schema.Index( - name, - *[util.sqla_compat._textual_index_column(t, n) for n in columns], - **kw - ) - return idx - - def _parse_table_key(self, table_key): - if "." in table_key: - tokens = table_key.split(".") - sname = ".".join(tokens[0:-1]) - tname = tokens[-1] - else: - tname = table_key - sname = None - return (sname, tname) - - def _ensure_table_for_fk(self, metadata, fk): - """create a placeholder Table object for the referent of a - ForeignKey. - - """ - if isinstance(fk._colspec, string_types): - table_key, cname = fk._colspec.rsplit(".", 1) - sname, tname = self._parse_table_key(table_key) - if table_key not in metadata.tables: - rel_t = sa_schema.Table(tname, metadata, schema=sname) - else: - rel_t = metadata.tables[table_key] - if cname not in rel_t.c: - rel_t.append_column(sa_schema.Column(cname, NULLTYPE)) diff --git a/venv/lib/python3.7/site-packages/alembic/operations/toimpl.py b/venv/lib/python3.7/site-packages/alembic/operations/toimpl.py deleted file mode 100644 index 3114a66..0000000 --- a/venv/lib/python3.7/site-packages/alembic/operations/toimpl.py +++ /dev/null @@ -1,179 +0,0 @@ -from sqlalchemy import schema as sa_schema - -from . import ops -from .base import Operations -from ..util import sqla_compat - - -@Operations.implementation_for(ops.AlterColumnOp) -def alter_column(operations, operation): - - compiler = operations.impl.dialect.statement_compiler( - operations.impl.dialect, None - ) - - existing_type = operation.existing_type - existing_nullable = operation.existing_nullable - existing_server_default = operation.existing_server_default - type_ = operation.modify_type - column_name = operation.column_name - table_name = operation.table_name - schema = operation.schema - server_default = operation.modify_server_default - new_column_name = operation.modify_name - nullable = operation.modify_nullable - comment = operation.modify_comment - existing_comment = operation.existing_comment - - def _count_constraint(constraint): - return not isinstance(constraint, sa_schema.PrimaryKeyConstraint) and ( - not constraint._create_rule or constraint._create_rule(compiler) - ) - - if existing_type and type_: - t = operations.schema_obj.table( - table_name, - sa_schema.Column(column_name, existing_type), - schema=schema, - ) - for constraint in t.constraints: - if _count_constraint(constraint): - operations.impl.drop_constraint(constraint) - - operations.impl.alter_column( - table_name, - column_name, - nullable=nullable, - server_default=server_default, - name=new_column_name, - type_=type_, - schema=schema, - existing_type=existing_type, - existing_server_default=existing_server_default, - existing_nullable=existing_nullable, - comment=comment, - existing_comment=existing_comment, - **operation.kw - ) - - if type_: - t = operations.schema_obj.table( - table_name, - operations.schema_obj.column(column_name, type_), - schema=schema, - ) - for constraint in t.constraints: - if _count_constraint(constraint): - operations.impl.add_constraint(constraint) - - -@Operations.implementation_for(ops.DropTableOp) -def drop_table(operations, operation): - operations.impl.drop_table( - operation.to_table(operations.migration_context) - ) - - -@Operations.implementation_for(ops.DropColumnOp) -def drop_column(operations, operation): - column = operation.to_column(operations.migration_context) - operations.impl.drop_column( - operation.table_name, column, schema=operation.schema, **operation.kw - ) - - 
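The ``@Operations.implementation_for`` functions above and below follow one dispatch pattern: an operation object carries the parameters, and a registered function executes it. As a hedged sketch, the same pair of decorators can register a custom operation; the ``GrantOp`` class and the SQL it emits are hypothetical:

```
# Hedged sketch of the register_operation / implementation_for
# pattern used throughout this module; GrantOp and its SQL are
# hypothetical.
from alembic.operations import Operations, MigrateOperation

@Operations.register_operation("grant_select")
class GrantOp(MigrateOperation):
    def __init__(self, table_name, role):
        self.table_name = table_name
        self.role = role

    @classmethod
    def grant_select(cls, operations, table_name, role):
        # exposed as op.grant_select(...) inside migration scripts
        return operations.invoke(cls(table_name, role))

@Operations.implementation_for(GrantOp)
def grant_select(operations, operation):
    # the executor receives the Operations facade plus the op object
    operations.execute(
        "GRANT SELECT ON %s TO %s" % (operation.table_name, operation.role)
    )
```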
-@Operations.implementation_for(ops.CreateIndexOp) -def create_index(operations, operation): - idx = operation.to_index(operations.migration_context) - operations.impl.create_index(idx) - - -@Operations.implementation_for(ops.DropIndexOp) -def drop_index(operations, operation): - operations.impl.drop_index( - operation.to_index(operations.migration_context) - ) - - -@Operations.implementation_for(ops.CreateTableOp) -def create_table(operations, operation): - table = operation.to_table(operations.migration_context) - operations.impl.create_table(table) - return table - - -@Operations.implementation_for(ops.RenameTableOp) -def rename_table(operations, operation): - operations.impl.rename_table( - operation.table_name, operation.new_table_name, schema=operation.schema - ) - - -@Operations.implementation_for(ops.CreateTableCommentOp) -def create_table_comment(operations, operation): - table = operation.to_table(operations.migration_context) - operations.impl.create_table_comment(table) - - -@Operations.implementation_for(ops.DropTableCommentOp) -def drop_table_comment(operations, operation): - table = operation.to_table(operations.migration_context) - operations.impl.drop_table_comment(table) - - -@Operations.implementation_for(ops.AddColumnOp) -def add_column(operations, operation): - table_name = operation.table_name - column = operation.column - schema = operation.schema - kw = operation.kw - - t = operations.schema_obj.table(table_name, column, schema=schema) - operations.impl.add_column(table_name, column, schema=schema, **kw) - - for constraint in t.constraints: - if not isinstance(constraint, sa_schema.PrimaryKeyConstraint): - operations.impl.add_constraint(constraint) - for index in t.indexes: - operations.impl.create_index(index) - - with_comment = ( - sqla_compat._dialect_supports_comments(operations.impl.dialect) - and not operations.impl.dialect.inline_comments - ) - comment = sqla_compat._comment_attribute(column) - if comment and with_comment: - operations.impl.create_column_comment(column) - - -@Operations.implementation_for(ops.AddConstraintOp) -def create_constraint(operations, operation): - operations.impl.add_constraint( - operation.to_constraint(operations.migration_context) - ) - - -@Operations.implementation_for(ops.DropConstraintOp) -def drop_constraint(operations, operation): - operations.impl.drop_constraint( - operations.schema_obj.generic_constraint( - operation.constraint_name, - operation.table_name, - operation.constraint_type, - schema=operation.schema, - ) - ) - - -@Operations.implementation_for(ops.BulkInsertOp) -def bulk_insert(operations, operation): - operations.impl.bulk_insert( - operation.table, operation.rows, multiinsert=operation.multiinsert - ) - - -@Operations.implementation_for(ops.ExecuteSQLOp) -def execute_sql(operations, operation): - operations.migration_context.impl.execute( - operation.sqltext, execution_options=operation.execution_options - ) diff --git a/venv/lib/python3.7/site-packages/alembic/runtime/__init__.py b/venv/lib/python3.7/site-packages/alembic/runtime/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index bd9c975..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git 
a/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/environment.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/environment.cpython-37.pyc deleted file mode 100644 index 9526672..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/environment.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/migration.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/migration.cpython-37.pyc deleted file mode 100644 index ebcba25..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/runtime/__pycache__/migration.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/runtime/environment.py b/venv/lib/python3.7/site-packages/alembic/runtime/environment.py deleted file mode 100644 index c9d623c..0000000 --- a/venv/lib/python3.7/site-packages/alembic/runtime/environment.py +++ /dev/null @@ -1,945 +0,0 @@ -from .migration import MigrationContext -from .. import util -from ..operations import Operations - - -class EnvironmentContext(util.ModuleClsProxy): - - """A configurational facade made available in an ``env.py`` script. - - The :class:`.EnvironmentContext` acts as a *facade* to the more - nuts-and-bolts objects of :class:`.MigrationContext` as well as certain - aspects of :class:`.Config`, - within the context of the ``env.py`` script that is invoked by - most Alembic commands. - - :class:`.EnvironmentContext` is normally instantiated - when a command in :mod:`alembic.command` is run. It then makes - itself available in the ``alembic.context`` module for the scope - of the command. From within an ``env.py`` script, the current - :class:`.EnvironmentContext` is available by importing this module. - - :class:`.EnvironmentContext` also supports programmatic usage. - At this level, it acts as a Python context manager, that is, it is - intended to be used with the - ``with:`` statement. A typical use of :class:`.EnvironmentContext`:: - - from alembic.config import Config - from alembic.script import ScriptDirectory - - config = Config() - config.set_main_option("script_location", "myapp:migrations") - script = ScriptDirectory.from_config(config) - - def my_function(rev, context): - '''do something with revision "rev", which - will be the current database revision, - and "context", which is the MigrationContext - that the env.py will create''' - - with EnvironmentContext( - config, - script, - fn = my_function, - as_sql = False, - starting_rev = 'base', - destination_rev = 'head', - tag = "sometag" - ): - script.run_env() - - The above script will invoke the ``env.py`` script - within the migration environment. If and when ``env.py`` - calls :meth:`.MigrationContext.run_migrations`, the - ``my_function()`` function above will be called - by the :class:`.MigrationContext`, given the context - itself as well as the current revision in the database. - - .. note:: - - For most API usages other than full blown - invocation of migration scripts, the :class:`.MigrationContext` - and :class:`.ScriptDirectory` objects can be created and - used directly. The :class:`.EnvironmentContext` object - is *only* needed when you need to actually invoke the - ``env.py`` module present in the migration environment. 
- - """ - - _migration_context = None - - config = None - """An instance of :class:`.Config` representing the - configuration file contents as well as other variables - set programmatically within it.""" - - script = None - """An instance of :class:`.ScriptDirectory` which provides - programmatic access to version files within the ``versions/`` - directory. - - """ - - def __init__(self, config, script, **kw): - r"""Construct a new :class:`.EnvironmentContext`. - - :param config: a :class:`.Config` instance. - :param script: a :class:`.ScriptDirectory` instance. - :param \**kw: keyword options that will be ultimately - passed along to the :class:`.MigrationContext` when - :meth:`.EnvironmentContext.configure` is called. - - """ - self.config = config - self.script = script - self.context_opts = kw - - def __enter__(self): - """Establish a context which provides a - :class:`.EnvironmentContext` object to - env.py scripts. - - The :class:`.EnvironmentContext` will - be made available as ``from alembic import context``. - - """ - self._install_proxy() - return self - - def __exit__(self, *arg, **kw): - self._remove_proxy() - - def is_offline_mode(self): - """Return True if the current migrations environment - is running in "offline mode". - - This is ``True`` or ``False`` depending - on the the ``--sql`` flag passed. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - """ - return self.context_opts.get("as_sql", False) - - def is_transactional_ddl(self): - """Return True if the context is configured to expect a - transactional DDL capable backend. - - This defaults to the type of database in use, and - can be overridden by the ``transactional_ddl`` argument - to :meth:`.configure` - - This function requires that a :class:`.MigrationContext` - has first been made available via :meth:`.configure`. - - """ - return self.get_context().impl.transactional_ddl - - def requires_connection(self): - return not self.is_offline_mode() - - def get_head_revision(self): - """Return the hex identifier of the 'head' script revision. - - If the script directory has multiple heads, this - method raises a :class:`.CommandError`; - :meth:`.EnvironmentContext.get_head_revisions` should be preferred. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. seealso:: :meth:`.EnvironmentContext.get_head_revisions` - - """ - return self.script.as_revision_number("head") - - def get_head_revisions(self): - """Return the hex identifier of the 'heads' script revision(s). - - This returns a tuple containing the version number of all - heads in the script directory. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. versionadded:: 0.7.0 - - """ - return self.script.as_revision_number("heads") - - def get_starting_revision_argument(self): - """Return the 'starting revision' argument, - if the revision was passed using ``start:end``. - - This is only meaningful in "offline" mode. - Returns ``None`` if no value is available - or was configured. - - This function does not require that the :class:`.MigrationContext` - has been configured. 
- - """ - if self._migration_context is not None: - return self.script.as_revision_number( - self.get_context()._start_from_rev - ) - elif "starting_rev" in self.context_opts: - return self.script.as_revision_number( - self.context_opts["starting_rev"] - ) - else: - # this should raise only in the case that a command - # is being run where the "starting rev" is never applicable; - # this is to catch scripts which rely upon this in - # non-sql mode or similar - raise util.CommandError( - "No starting revision argument is available." - ) - - def get_revision_argument(self): - """Get the 'destination' revision argument. - - This is typically the argument passed to the - ``upgrade`` or ``downgrade`` command. - - If it was specified as ``head``, the actual - version number is returned; if specified - as ``base``, ``None`` is returned. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - """ - return self.script.as_revision_number( - self.context_opts["destination_rev"] - ) - - def get_tag_argument(self): - """Return the value passed for the ``--tag`` argument, if any. - - The ``--tag`` argument is not used directly by Alembic, - but is available for custom ``env.py`` configurations that - wish to use it; particularly for offline generation scripts - that wish to generate tagged filenames. - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. seealso:: - - :meth:`.EnvironmentContext.get_x_argument` - a newer and more - open ended system of extending ``env.py`` scripts via the command - line. - - """ - return self.context_opts.get("tag", None) - - def get_x_argument(self, as_dictionary=False): - """Return the value(s) passed for the ``-x`` argument, if any. - - The ``-x`` argument is an open ended flag that allows any user-defined - value or values to be passed on the command line, then available - here for consumption by a custom ``env.py`` script. - - The return value is a list, returned directly from the ``argparse`` - structure. If ``as_dictionary=True`` is passed, the ``x`` arguments - are parsed using ``key=value`` format into a dictionary that is - then returned. - - For example, to support passing a database URL on the command line, - the standard ``env.py`` script can be modified like this:: - - cmd_line_url = context.get_x_argument( - as_dictionary=True).get('dbname') - if cmd_line_url: - engine = create_engine(cmd_line_url) - else: - engine = engine_from_config( - config.get_section(config.config_ini_section), - prefix='sqlalchemy.', - poolclass=pool.NullPool) - - This then takes effect by running the ``alembic`` script as:: - - alembic -x dbname=postgresql://user:pass@host/dbname upgrade head - - This function does not require that the :class:`.MigrationContext` - has been configured. - - .. versionadded:: 0.6.0 - - .. 
seealso:: - - :meth:`.EnvironmentContext.get_tag_argument` - - :attr:`.Config.cmd_opts` - - """ - if self.config.cmd_opts is not None: - value = self.config.cmd_opts.x or [] - else: - value = [] - if as_dictionary: - value = dict(arg.split("=", 1) for arg in value) - return value - - def configure( - self, - connection=None, - url=None, - dialect_name=None, - dialect_opts=None, - transactional_ddl=None, - transaction_per_migration=False, - output_buffer=None, - starting_rev=None, - tag=None, - template_args=None, - render_as_batch=False, - target_metadata=None, - include_symbol=None, - include_object=None, - include_schemas=False, - process_revision_directives=None, - compare_type=False, - compare_server_default=False, - render_item=None, - literal_binds=False, - upgrade_token="upgrades", - downgrade_token="downgrades", - alembic_module_prefix="op.", - sqlalchemy_module_prefix="sa.", - user_module_prefix=None, - on_version_apply=None, - **kw - ): - """Configure a :class:`.MigrationContext` within this - :class:`.EnvironmentContext` which will provide database - connectivity and other configuration to a series of - migration scripts. - - Many methods on :class:`.EnvironmentContext` require that - this method has been called in order to function, as they - ultimately need to have database access or at least access - to the dialect in use. Those which do are documented as such. - - The important thing needed by :meth:`.configure` is a - means to determine what kind of database dialect is in use. - An actual connection to that database is needed only if - the :class:`.MigrationContext` is to be used in - "online" mode. - - If the :meth:`.is_offline_mode` function returns ``True``, - then no connection is needed here. Otherwise, the - ``connection`` parameter should be present as an - instance of :class:`sqlalchemy.engine.Connection`. - - This function is typically called from the ``env.py`` - script within a migration environment. It can be called - multiple times for an invocation. The most recent - :class:`~sqlalchemy.engine.Connection` - for which it was called is the one that will be operated upon - by the next call to :meth:`.run_migrations`. - - General parameters: - - :param connection: a :class:`~sqlalchemy.engine.Connection` - to use - for SQL execution in "online" mode. When present, is also - used to determine the type of dialect in use. - :param url: a string database url, or a - :class:`sqlalchemy.engine.url.URL` object. - The type of dialect to be used will be derived from this if - ``connection`` is not passed. - :param dialect_name: string name of a dialect, such as - "postgresql", "mssql", etc. - The type of dialect to be used will be derived from this if - ``connection`` and ``url`` are not passed. - :param dialect_opts: dictionary of options to be passed to dialect - constructor. - - .. versionadded:: 1.0.12 - - :param transactional_ddl: Force the usage of "transactional" - DDL on or off; - this otherwise defaults to whether or not the dialect in - use supports it. - :param transaction_per_migration: if True, nest each migration script - in a transaction rather than the full series of migrations to - run. - - .. versionadded:: 0.6.5 - - :param output_buffer: a file-like object that will be used - for textual output - when the ``--sql`` option is used to generate SQL scripts. - Defaults to - ``sys.stdout`` if not passed here and also not present on - the :class:`.Config` - object. The value here overrides that of the :class:`.Config` - object. 
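Tying the general parameters together, a hedged sketch of the offline branch of a typical ``env.py``: in ``--sql`` mode no live connection is required, so a URL plus an output buffer is enough (the URL below is a placeholder):

```
# Hedged sketch: offline (--sql) configuration. Only the URL (or a
# dialect name) is needed to pick the dialect; the URL below is a
# placeholder.
import io
from alembic import context

def run_migrations_offline():
    buf = io.StringIO()  # could equally be an open file
    context.configure(
        url="postgresql://scott:tiger@localhost/mydb",
        literal_binds=True,   # render values inline in the script
        output_buffer=buf,
    )
    with context.begin_transaction():
        context.run_migrations()
```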
- :param output_encoding: when using ``--sql`` to generate SQL - scripts, apply this encoding to the string output. - :param literal_binds: when using ``--sql`` to generate SQL - scripts, pass through the ``literal_binds`` flag to the compiler - so that any literal values that would ordinarily be bound - parameters are converted to plain strings. - - .. warning:: Dialects can typically only handle simple datatypes - like strings and numbers for auto-literal generation. Datatypes - like dates, intervals, and others may still require manual - formatting, typically using :meth:`.Operations.inline_literal`. - - .. note:: the ``literal_binds`` flag is ignored on SQLAlchemy - versions prior to 0.8 where this feature is not supported. - - .. versionadded:: 0.7.6 - - .. seealso:: - - :meth:`.Operations.inline_literal` - - :param starting_rev: Override the "starting revision" argument - when using ``--sql`` mode. - :param tag: a string tag for usage by custom ``env.py`` scripts. - Set via the ``--tag`` option, can be overridden here. - :param template_args: dictionary of template arguments which - will be added to the template argument environment when - running the "revision" command. Note that the script environment - is only run within the "revision" command if the --autogenerate - option is used, or if the option "revision_environment=true" - is present in the alembic.ini file. - - :param version_table: The name of the Alembic version table. - The default is ``'alembic_version'``. - :param version_table_schema: Optional schema to place version - table within. - :param version_table_pk: boolean, whether the Alembic version table - should use a primary key constraint for the "value" column; this - only takes effect when the table is first created. - Defaults to True; setting to False should not be necessary and is - here for backwards compatibility reasons. - - .. versionadded:: 0.8.10 Added the - :paramref:`.EnvironmentContext.configure.version_table_pk` - flag and additionally established that the Alembic version table - has a primary key constraint by default. - - :param on_version_apply: a callable or collection of callables to be - run for each migration step. - The callables will be run in the order they are given, once for - each migration step, after the respective operation has been - applied but before its transaction is finalized. - Each callable accepts no positional arguments and the following - keyword arguments: - - * ``ctx``: the :class:`.MigrationContext` running the migration, - * ``step``: a :class:`.MigrationInfo` representing the - step currently being applied, - * ``heads``: a collection of version strings representing the - current heads, - * ``run_args``: the ``**kwargs`` passed to :meth:`.run_migrations`. - - .. versionadded:: 0.9.3 - - - Parameters specific to the autogenerate feature, when - ``alembic revision`` is run with the ``--autogenerate`` feature: - - :param target_metadata: a :class:`sqlalchemy.schema.MetaData` - object, or a sequence of :class:`~sqlalchemy.schema.MetaData` - objects, that will be consulted during autogeneration. - The tables present in each :class:`~sqlalchemy.schema.MetaData` - will be compared against - what is locally available on the target - :class:`~sqlalchemy.engine.Connection` - to produce candidate upgrade/downgrade operations. - - .. 
versionchanged:: 0.9.0 the - :paramref:`.EnvironmentContext.configure.target_metadata` - parameter may now be passed a sequence of - :class:`~sqlalchemy.schema.MetaData` objects to support - autogeneration of multiple :class:`~sqlalchemy.schema.MetaData` - collections. - - :param compare_type: Indicates type comparison behavior during - an autogenerate - operation. Defaults to ``False`` which disables type - comparison. Set to - ``True`` to turn on default type comparison, which has varied - accuracy depending on backend. See :ref:`compare_types` - for an example as well as information on other type - comparison options. - - .. seealso:: - - :ref:`compare_types` - - :paramref:`.EnvironmentContext.configure.compare_server_default` - - :param compare_server_default: Indicates server default comparison - behavior during - an autogenerate operation. Defaults to ``False`` which disables - server default - comparison. Set to ``True`` to turn on server default comparison, - which has - varied accuracy depending on backend. - - To customize server default comparison behavior, a callable may - be specified - which can filter server default comparisons during an - autogenerate operation. The format of this - callable is:: - - def my_compare_server_default(context, inspected_column, - metadata_column, inspected_default, metadata_default, - rendered_metadata_default): - # return True if the defaults are different, - # False if not, or None to allow the default implementation - # to compare these defaults - return None - - context.configure( - # ... - compare_server_default = my_compare_server_default - ) - - ``inspected_column`` is a dictionary structure as returned by - :meth:`sqlalchemy.engine.reflection.Inspector.get_columns`, whereas - ``metadata_column`` is a :class:`sqlalchemy.schema.Column` from - the local model environment. - - A return value of ``None`` indicates to allow default server default - comparison - to proceed. Note that some backends such as Postgresql actually - execute - the two defaults on the database side to compare for equivalence. - - .. seealso:: - - :paramref:`.EnvironmentContext.configure.compare_type` - - :param include_object: A callable function which is given - the chance to return ``True`` or ``False`` for any object, - indicating if the given object should be considered in the - autogenerate sweep. - - The function accepts the following positional arguments: - - * ``object``: a :class:`~sqlalchemy.schema.SchemaItem` object such - as a :class:`~sqlalchemy.schema.Table`, - :class:`~sqlalchemy.schema.Column`, - :class:`~sqlalchemy.schema.Index`, - :class:`~sqlalchemy.schema.UniqueConstraint`, - or :class:`~sqlalchemy.schema.ForeignKeyConstraint` object - * ``name``: the name of the object. This is typically available - via ``object.name``. - * ``type``: a string describing the type of object; currently - ``"table"``, ``"column"``, ``"index"``, ``"unique_constraint"``, - or ``"foreign_key_constraint"`` - - .. versionadded:: 0.7.0 Support for indexes and unique constraints - within the - :paramref:`~.EnvironmentContext.configure.include_object` hook. - - .. versionadded:: 0.7.1 Support for foreign keys within the - :paramref:`~.EnvironmentContext.configure.include_object` hook. - - * ``reflected``: ``True`` if the given object was produced based on - table reflection, ``False`` if it's from a local :class:`.MetaData` - object. - * ``compare_to``: the object being compared against, if available, - else ``None``. 
- - E.g.:: - - def include_object(object, name, type_, reflected, compare_to): - if (type_ == "column" and - not reflected and - object.info.get("skip_autogenerate", False)): - return False - else: - return True - - context.configure( - # ... - include_object = include_object - ) - - :paramref:`.EnvironmentContext.configure.include_object` can also - be used to filter on specific schemas to include or omit, when - the :paramref:`.EnvironmentContext.configure.include_schemas` - flag is set to ``True``. The :attr:`.Table.schema` attribute - on each :class:`.Table` object reflected will indicate the name of the - schema from which the :class:`.Table` originates. - - .. versionadded:: 0.6.0 - - .. seealso:: - - :paramref:`.EnvironmentContext.configure.include_schemas` - - :param include_symbol: A callable function which, given a table name - and schema name (may be ``None``), returns ``True`` or ``False``, - indicating if the given table should be considered in the - autogenerate sweep. - - .. deprecated:: 0.6.0 - :paramref:`.EnvironmentContext.configure.include_symbol` - is superseded by the more generic - :paramref:`.EnvironmentContext.configure.include_object` - parameter. - - E.g.:: - - def include_symbol(tablename, schema): - return tablename not in ("skip_table_one", "skip_table_two") - - context.configure( - # ... - include_symbol = include_symbol - ) - - .. seealso:: - - :paramref:`.EnvironmentContext.configure.include_schemas` - - :paramref:`.EnvironmentContext.configure.include_object` - - :param render_as_batch: if True, commands which alter elements - within a table will be placed under a ``with batch_alter_table():`` - directive, so that batch migrations will take place. - - .. versionadded:: 0.7.0 - - .. seealso:: - - :ref:`batch_migrations` - - :param include_schemas: If True, autogenerate will scan across - all schemas located by the SQLAlchemy - :meth:`~sqlalchemy.engine.reflection.Inspector.get_schema_names` - method, and include all differences in tables found across all - those schemas. When using this option, you may want to also - use the :paramref:`.EnvironmentContext.configure.include_object` - option to specify a callable which - can filter the tables/schemas that get included. - - .. seealso:: - - :paramref:`.EnvironmentContext.configure.include_object` - - :param render_item: Callable that can be used to override how - any schema item, i.e. column, constraint, type, - etc., is rendered for autogenerate. The callable receives a - string describing the type of object, the object, and - the autogen context. If it returns False, the - default rendering method will be used. If it returns None, - the item will not be rendered in the context of a Table - construct, that is, can be used to skip columns or constraints - within op.create_table():: - - def my_render_column(type_, col, autogen_context): - if type_ == "column" and isinstance(col, MySpecialCol): - return repr(col) - else: - return False - - context.configure( - # ... - render_item = my_render_column - ) - - Available values for the type string include: ``"column"``, - ``"primary_key"``, ``"foreign_key"``, ``"unique"``, ``"check"``, - ``"type"``, ``"server_default"``. - - .. seealso:: - - :ref:`autogen_render_types` - - :param upgrade_token: When autogenerate completes, the text of the - candidate upgrade operations will be present in this template - variable when ``script.py.mako`` is rendered. Defaults to - ``upgrades``. 
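If the token is renamed, the ``script.py.mako`` template must reference the matching variable; a hedged sketch (``connection`` and ``target_metadata`` are assumed to be set up earlier in ``env.py``):

```
# Hedged sketch: custom template tokens. script.py.mako must then
# render ${custom_upgrades} / ${custom_downgrades} in place of the
# defaults. connection and target_metadata are assumed to exist.
from alembic import context

context.configure(
    connection=connection,
    target_metadata=target_metadata,
    upgrade_token="custom_upgrades",
    downgrade_token="custom_downgrades",
)
```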
- :param downgrade_token: When autogenerate completes, the text of the - candidate downgrade operations will be present in this - template variable when ``script.py.mako`` is rendered. Defaults to - ``downgrades``. - - :param alembic_module_prefix: When autogenerate refers to Alembic - :mod:`alembic.operations` constructs, this prefix will be used - (i.e. ``op.create_table``) Defaults to "``op.``". - Can be ``None`` to indicate no prefix. - - :param sqlalchemy_module_prefix: When autogenerate refers to - SQLAlchemy - :class:`~sqlalchemy.schema.Column` or type classes, this prefix - will be used - (i.e. ``sa.Column("somename", sa.Integer)``) Defaults to "``sa.``". - Can be ``None`` to indicate no prefix. - Note that when dialect-specific types are rendered, autogenerate - will render them using the dialect module name, i.e. ``mssql.BIT()``, - ``postgresql.UUID()``. - - :param user_module_prefix: When autogenerate refers to a SQLAlchemy - type (e.g. :class:`.TypeEngine`) where the module name is not - under the ``sqlalchemy`` namespace, this prefix will be used - within autogenerate. If left at its default of - ``None``, the ``__module__`` attribute of the type is used to - render the import module. It's a good practice to set this - and to have all custom types be available from a fixed module space, - in order to future-proof migration files against reorganizations - in modules. - - .. versionchanged:: 0.7.0 - :paramref:`.EnvironmentContext.configure.user_module_prefix` - no longer defaults to the value of - :paramref:`.EnvironmentContext.configure.sqlalchemy_module_prefix` - when left at ``None``; the ``__module__`` attribute is now used. - - .. versionadded:: 0.6.3 added - :paramref:`.EnvironmentContext.configure.user_module_prefix` - - .. seealso:: - - :ref:`autogen_module_prefix` - - :param process_revision_directives: a callable function that will - be passed a structure representing the end result of an autogenerate - or plain "revision" operation, which can be manipulated to affect - how the ``alembic revision`` command ultimately outputs new - revision scripts. The structure of the callable is:: - - def process_revision_directives(context, revision, directives): - pass - - The ``directives`` parameter is a Python list containing - a single :class:`.MigrationScript` directive, which represents - the revision file to be generated. This list as well as its - contents may be freely modified to produce any set of commands. - The section :ref:`customizing_revision` shows an example of - doing this. The ``context`` parameter is the - :class:`.MigrationContext` in use, - and ``revision`` is a tuple of revision identifiers representing the - current revision of the database. - - The callable is invoked at all times when the ``--autogenerate`` - option is passed to ``alembic revision``. If ``--autogenerate`` - is not passed, the callable is invoked only if the - ``revision_environment`` variable is set to True in the Alembic - configuration, in which case the given ``directives`` collection - will contain empty :class:`.UpgradeOps` and :class:`.DowngradeOps` - collections for ``.upgrade_ops`` and ``.downgrade_ops``. The - ``--autogenerate`` option itself can be inferred by inspecting - ``context.config.cmd_opts.autogenerate``. - - The callable function may optionally be an instance of - a :class:`.Rewriter` object. This is a helper object that - assists in the production of autogenerate-stream rewriter functions. - - - .. versionadded:: 0.8.0 - - .. 
versionchanged:: 0.8.1 - The - :paramref:`.EnvironmentContext.configure.process_revision_directives` - hook can append op directives into :class:`.UpgradeOps` and - :class:`.DowngradeOps` which will be rendered in Python regardless - of whether the ``--autogenerate`` option is in use or not; - the ``revision_environment`` configuration variable should be - set to "true" in the config to enable this. - - - .. seealso:: - - :ref:`customizing_revision` - - :ref:`autogen_rewriter` - - :paramref:`.command.revision.process_revision_directives` - - Parameters specific to individual backends: - - :param mssql_batch_separator: The "batch separator" which will - be placed between each statement when generating offline SQL Server - migrations. Defaults to ``GO``. Note this is in addition to the - customary semicolon ``;`` at the end of each statement; SQL Server - considers the "batch separator" to denote the end of an - individual statement execution, and cannot group certain - dependent operations in one step. - :param oracle_batch_separator: The "batch separator" which will - be placed between each statement when generating offline - Oracle migrations. Defaults to ``/``. Oracle doesn't add a - semicolon between statements like most other backends. - - """ - opts = self.context_opts - if transactional_ddl is not None: - opts["transactional_ddl"] = transactional_ddl - if output_buffer is not None: - opts["output_buffer"] = output_buffer - elif self.config.output_buffer is not None: - opts["output_buffer"] = self.config.output_buffer - if starting_rev: - opts["starting_rev"] = starting_rev - if tag: - opts["tag"] = tag - if template_args and "template_args" in opts: - opts["template_args"].update(template_args) - opts["transaction_per_migration"] = transaction_per_migration - opts["target_metadata"] = target_metadata - opts["include_symbol"] = include_symbol - opts["include_object"] = include_object - opts["include_schemas"] = include_schemas - opts["render_as_batch"] = render_as_batch - opts["upgrade_token"] = upgrade_token - opts["downgrade_token"] = downgrade_token - opts["sqlalchemy_module_prefix"] = sqlalchemy_module_prefix - opts["alembic_module_prefix"] = alembic_module_prefix - opts["user_module_prefix"] = user_module_prefix - opts["literal_binds"] = literal_binds - opts["process_revision_directives"] = process_revision_directives - opts["on_version_apply"] = util.to_tuple(on_version_apply, default=()) - - if render_item is not None: - opts["render_item"] = render_item - if compare_type is not None: - opts["compare_type"] = compare_type - if compare_server_default is not None: - opts["compare_server_default"] = compare_server_default - opts["script"] = self.script - - opts.update(kw) - - self._migration_context = MigrationContext.configure( - connection=connection, - url=url, - dialect_name=dialect_name, - environment_context=self, - dialect_opts=dialect_opts, - opts=opts, - ) - - def run_migrations(self, **kw): - """Run migrations as determined by the current command line - configuration - as well as versioning information present (or not) in the current - database connection (if one is present). - - The function accepts optional ``**kw`` arguments. If these are - passed, they are sent directly to the ``upgrade()`` and - ``downgrade()`` - functions within each target revision file. 
By modifying the - ``script.py.mako`` file so that the ``upgrade()`` and ``downgrade()`` - functions accept arguments, parameters can be passed here so that - contextual information, usually information to identify a particular - database in use, can be passed from a custom ``env.py`` script - to the migration functions. - - This function requires that a :class:`.MigrationContext` has - first been made available via :meth:`.configure`. - - """ - with Operations.context(self._migration_context): - self.get_context().run_migrations(**kw) - - def execute(self, sql, execution_options=None): - """Execute the given SQL using the current change context. - - The behavior of :meth:`.execute` is the same - as that of :meth:`.Operations.execute`. Please see that - function's documentation for full detail including - caveats and limitations. - - This function requires that a :class:`.MigrationContext` has - first been made available via :meth:`.configure`. - - """ - self.get_context().execute(sql, execution_options=execution_options) - - def static_output(self, text): - """Emit text directly to the "offline" SQL stream. - - Typically this is for emitting comments that - start with --. The statement is not treated - as a SQL execution, no ; or batch separator - is added, etc. - - """ - self.get_context().impl.static_output(text) - - def begin_transaction(self): - """Return a context manager that will - enclose an operation within a "transaction", - as defined by the environment's offline - and transactional DDL settings. - - e.g.:: - - with context.begin_transaction(): - context.run_migrations() - - :meth:`.begin_transaction` is intended to - "do the right thing" regardless of - calling context: - - * If :meth:`.is_transactional_ddl` is ``False``, - returns a "do nothing" context manager - which otherwise produces no transactional - state or directives. - * If :meth:`.is_offline_mode` is ``True``, - returns a context manager that will - invoke the :meth:`.DefaultImpl.emit_begin` - and :meth:`.DefaultImpl.emit_commit` - methods, which will produce the string - directives ``BEGIN`` and ``COMMIT`` on - the output stream, as rendered by the - target backend (e.g. SQL Server would - emit ``BEGIN TRANSACTION``). - * Otherwise, calls :meth:`sqlalchemy.engine.Connection.begin` - on the current online connection, which - returns a :class:`sqlalchemy.engine.Transaction` - object. This object demarcates a real - transaction and is itself a context manager, - which will roll back if an exception - is raised. - - Note that a custom ``env.py`` script which - has more specific transactional needs can of course - manipulate the :class:`~sqlalchemy.engine.Connection` - directly to produce transactional state in "online" - mode. - - """ - - return self.get_context().begin_transaction() - - def get_context(self): - """Return the current :class:`.MigrationContext` object. - - If :meth:`.EnvironmentContext.configure` has not been - called yet, raises an exception. - - """ - - if self._migration_context is None: - raise Exception("No context has been configured yet.") - return self._migration_context - - def get_bind(self): - """Return the current 'bind'. - - In "online" mode, this is the - :class:`sqlalchemy.engine.Connection` currently being used - to emit SQL to the database. - - This function requires that a :class:`.MigrationContext` - has first been made available via :meth:`.configure`. 
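In practice the methods above are driven together from ``env.py``; the
canonical online pattern is sketched below, assuming the stock ``env.py``
module-level names ``config`` and ``target_metadata`` and a
``sqlalchemy.url`` entry in ``alembic.ini``::

    from alembic import context
    from sqlalchemy import create_engine

    def run_migrations_online():
        connectable = create_engine(config.get_main_option("sqlalchemy.url"))
        with connectable.connect() as connection:
            context.configure(
                connection=connection,
                target_metadata=target_metadata,
            )
            # begin_transaction() adapts to offline/transactional settings
            with context.begin_transaction():
                context.run_migrations()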
- - """ - return self.get_context().bind - - def get_impl(self): - return self.get_context().impl diff --git a/venv/lib/python3.7/site-packages/alembic/runtime/migration.py b/venv/lib/python3.7/site-packages/alembic/runtime/migration.py deleted file mode 100644 index 48408a4..0000000 --- a/venv/lib/python3.7/site-packages/alembic/runtime/migration.py +++ /dev/null @@ -1,1239 +0,0 @@ -from contextlib import contextmanager -import logging -import sys - -from sqlalchemy import Column -from sqlalchemy import literal_column -from sqlalchemy import MetaData -from sqlalchemy import PrimaryKeyConstraint -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy.engine import Connection -from sqlalchemy.engine import url as sqla_url -from sqlalchemy.engine.strategies import MockEngineStrategy - -from .. import ddl -from .. import util -from ..util import sqla_compat -from ..util.compat import callable -from ..util.compat import EncodedIO - -log = logging.getLogger(__name__) - - -class _ProxyTransaction(object): - def __init__(self, migration_context): - self.migration_context = migration_context - - @property - def _proxied_transaction(self): - return self.migration_context._transaction - - def rollback(self): - self._proxied_transaction.rollback() - - def commit(self): - self._proxied_transaction.commit() - - def __enter__(self): - return self - - def __exit__(self, type_, value, traceback): - self._proxied_transaction.__exit__(type_, value, traceback) - - -class MigrationContext(object): - - """Represent the database state made available to a migration - script. - - :class:`.MigrationContext` is the front end to an actual - database connection, or alternatively a string output - stream given a particular database dialect, - from an Alembic perspective. - - When inside the ``env.py`` script, the :class:`.MigrationContext` - is available via the - :meth:`.EnvironmentContext.get_context` method, - which is available at ``alembic.context``:: - - # from within env.py script - from alembic import context - migration_context = context.get_context() - - For usage outside of an ``env.py`` script, such as for - utility routines that want to check the current version - in the database, the :meth:`.MigrationContext.configure` - method to create new :class:`.MigrationContext` objects. 
- For example, to get at the current revision in the - database using :meth:`.MigrationContext.get_current_revision`:: - - # in any application, outside of an env.py script - from alembic.migration import MigrationContext - from sqlalchemy import create_engine - - engine = create_engine("postgresql://mydatabase") - conn = engine.connect() - - context = MigrationContext.configure(conn) - current_rev = context.get_current_revision() - - The above context can also be used to produce - Alembic migration operations with an :class:`.Operations` - instance:: - - # in any application, outside of the normal Alembic environment - from alembic.operations import Operations - op = Operations(context) - op.alter_column("mytable", "somecolumn", nullable=True) - - """ - - def __init__(self, dialect, connection, opts, environment_context=None): - self.environment_context = environment_context - self.opts = opts - self.dialect = dialect - self.script = opts.get("script") - as_sql = opts.get("as_sql", False) - transactional_ddl = opts.get("transactional_ddl") - self._transaction_per_migration = opts.get( - "transaction_per_migration", False - ) - self.on_version_apply_callbacks = opts.get("on_version_apply", ()) - self._transaction = None - - if as_sql: - self.connection = self._stdout_connection(connection) - assert self.connection is not None - else: - self.connection = connection - self._migrations_fn = opts.get("fn") - self.as_sql = as_sql - - self.purge = opts.get("purge", False) - - if "output_encoding" in opts: - self.output_buffer = EncodedIO( - opts.get("output_buffer") or sys.stdout, - opts["output_encoding"], - ) - else: - self.output_buffer = opts.get("output_buffer", sys.stdout) - - self._user_compare_type = opts.get("compare_type", False) - self._user_compare_server_default = opts.get( - "compare_server_default", False - ) - self.version_table = version_table = opts.get( - "version_table", "alembic_version" - ) - self.version_table_schema = version_table_schema = opts.get( - "version_table_schema", None - ) - self._version = Table( - version_table, - MetaData(), - Column("version_num", String(32), nullable=False), - schema=version_table_schema, - ) - if opts.get("version_table_pk", True): - self._version.append_constraint( - PrimaryKeyConstraint( - "version_num", name="%s_pkc" % version_table - ) - ) - - self._start_from_rev = opts.get("starting_rev") - self.impl = ddl.DefaultImpl.get_by_dialect(dialect)( - dialect, - self.connection, - self.as_sql, - transactional_ddl, - self.output_buffer, - opts, - ) - log.info("Context impl %s.", self.impl.__class__.__name__) - if self.as_sql: - log.info("Generating static SQL") - log.info( - "Will assume %s DDL.", - "transactional" - if self.impl.transactional_ddl - else "non-transactional", - ) - - @classmethod - def configure( - cls, - connection=None, - url=None, - dialect_name=None, - dialect=None, - environment_context=None, - dialect_opts=None, - opts=None, - ): - """Create a new :class:`.MigrationContext`. - - This is a factory method usually called - by :meth:`.EnvironmentContext.configure`. - - :param connection: a :class:`~sqlalchemy.engine.Connection` - to use for SQL execution in "online" mode. When present, - is also used to determine the type of dialect in use. - :param url: a string database url, or a - :class:`sqlalchemy.engine.url.URL` object. - The type of dialect to be used will be derived from this if - ``connection`` is not passed. - :param dialect_name: string name of a dialect, such as - "postgresql", "mssql", etc. 
The type of dialect to be used will be - derived from this if ``connection`` and ``url`` are not passed. - :param opts: dictionary of options. Most other options - accepted by :meth:`.EnvironmentContext.configure` are passed via - this dictionary. - - """ - if opts is None: - opts = {} - if dialect_opts is None: - dialect_opts = {} - - if connection: - if not isinstance(connection, Connection): - util.warn( - "'connection' argument to configure() is expected " - "to be a sqlalchemy.engine.Connection instance, " - "got %r" % connection, - stacklevel=3, - ) - - dialect = connection.dialect - elif url: - url = sqla_url.make_url(url) - dialect = url.get_dialect()(**dialect_opts) - elif dialect_name: - url = sqla_url.make_url("%s://" % dialect_name) - dialect = url.get_dialect()(**dialect_opts) - elif not dialect: - raise Exception("Connection, url, or dialect_name is required.") - - return MigrationContext(dialect, connection, opts, environment_context) - - @contextmanager - def autocommit_block(self): - """Enter an "autocommit" block, for databases that support AUTOCOMMIT - isolation levels. - - This special directive is intended to support the occasional database - DDL or system operation that specifically has to be run outside of - any kind of transaction block. The PostgreSQL database platform - is the most common target for this style of operation, as many - of its DDL operations must be run outside of transaction blocks, even - though the database overall supports transactional DDL. - - The method is used as a context manager within a migration script, by - calling on :meth:`.Operations.get_context` to retrieve the - :class:`.MigrationContext`, then invoking - :meth:`.MigrationContext.autocommit_block` using the ``with:`` - statement:: - - def upgrade(): - with op.get_context().autocommit_block(): - op.execute("ALTER TYPE mood ADD VALUE 'soso'") - - Above, a PostgreSQL "ALTER TYPE..ADD VALUE" directive is emitted, - which must be run outside of a transaction block at the database level. - The :meth:`.MigrationContext.autocommit_block` method makes use of the - SQLAlchemy ``AUTOCOMMIT`` isolation level setting, which against the - psycogp2 DBAPI corresponds to the ``connection.autocommit`` setting, - to ensure that the database driver is not inside of a DBAPI level - transaction block. - - .. warning:: - - As is necessary, **the database transaction preceding the block is - unconditionally committed**. This means that the run of migrations - preceding the operation will be committed, before the overall - migration operation is complete. - - It is recommended that when an application includes migrations with - "autocommit" blocks, that - :paramref:`.EnvironmentContext.transaction_per_migration` be used - so that the calling environment is tuned to expect short per-file - migrations whether or not one of them has an autocommit block. - - - .. 
versionadded:: 1.2.0 - - """ - _in_connection_transaction = self._in_connection_transaction() - - if self.impl.transactional_ddl: - if self.as_sql: - self.impl.emit_commit() - - elif _in_connection_transaction: - assert self._transaction is not None - - self._transaction.commit() - self._transaction = None - - if not self.as_sql: - current_level = self.connection.get_isolation_level() - self.connection.execution_options(isolation_level="AUTOCOMMIT") - try: - yield - finally: - if not self.as_sql: - self.connection.execution_options( - isolation_level=current_level - ) - - if self.impl.transactional_ddl: - if self.as_sql: - self.impl.emit_begin() - - elif _in_connection_transaction: - self._transaction = self.bind.begin() - - def begin_transaction(self, _per_migration=False): - """Begin a logical transaction for migration operations. - - This method is used within an ``env.py`` script to demarcate where - the outer "transaction" for a series of migrations begins. Example:: - - def run_migrations_online(): - connectable = create_engine(...) - - with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - Above, :meth:`.MigrationContext.begin_transaction` is used to demarcate - where the outer logical transaction occurs around the - :meth:`.MigrationContext.run_migrations` operation. - - A "Logical" transaction means that the operation may or may not - correspond to a real database transaction. If the target database - supports transactional DDL (or - :paramref:`.EnvironmentContext.configure.transactional_ddl` is true), - the :paramref:`.EnvironmentContext.configure.transaction_per_migration` - flag is not set, and the migration is against a real database - connection (as opposed to using "offline" ``--sql`` mode), a real - transaction will be started. If ``--sql`` mode is in effect, the - operation would instead correspond to a string such as "BEGIN" being - emitted to the string output. - - The returned object is a Python context manager that should only be - used in the context of a ``with:`` statement as indicated above. - The object has no other guaranteed API features present. - - .. seealso:: - - :meth:`.MigrationContext.autocommit_block` - - """ - transaction_now = _per_migration == self._transaction_per_migration - - if not transaction_now: - - @contextmanager - def do_nothing(): - yield - - return do_nothing() - - elif not self.impl.transactional_ddl: - - @contextmanager - def do_nothing(): - yield - - return do_nothing() - elif self.as_sql: - - @contextmanager - def begin_commit(): - self.impl.emit_begin() - yield - self.impl.emit_commit() - - return begin_commit() - else: - self._transaction = self.bind.begin() - return _ProxyTransaction(self) - - def get_current_revision(self): - """Return the current revision, usually that which is present - in the ``alembic_version`` table in the database. - - This method intends to be used only for a migration stream that - does not contain unmerged branches in the target database; - if there are multiple branches present, an exception is raised. - The :meth:`.MigrationContext.get_current_heads` should be preferred - over this method going forward in order to be compatible with - branch migration support. - - If this :class:`.MigrationContext` was configured in "offline" - mode, that is with ``as_sql=True``, the ``starting_rev`` - parameter is returned instead, if any. 
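Outside of ``env.py``, the branch-safe way to read the version table is the
tuple form; a short sketch, assuming ``conn`` is an open SQLAlchemy
connection to the target database::

    from alembic.migration import MigrationContext

    context = MigrationContext.configure(conn)
    heads = context.get_current_heads()
    if not heads:
        print("database is unversioned")
    elif len(heads) == 1:
        # equivalent to context.get_current_revision()
        print("current revision:", heads[0])
    else:
        # unmerged branches; get_current_revision() would raise here
        print("multiple heads present:", heads)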
- - """ - heads = self.get_current_heads() - if len(heads) == 0: - return None - elif len(heads) > 1: - raise util.CommandError( - "Version table '%s' has more than one head present; " - "please use get_current_heads()" % self.version_table - ) - else: - return heads[0] - - def get_current_heads(self): - """Return a tuple of the current 'head versions' that are represented - in the target database. - - For a migration stream without branches, this will be a single - value, synonymous with that of - :meth:`.MigrationContext.get_current_revision`. However when multiple - unmerged branches exist within the target database, the returned tuple - will contain a value for each head. - - If this :class:`.MigrationContext` was configured in "offline" - mode, that is with ``as_sql=True``, the ``starting_rev`` - parameter is returned in a one-length tuple. - - If no version table is present, or if there are no revisions - present, an empty tuple is returned. - - .. versionadded:: 0.7.0 - - """ - if self.as_sql: - start_from_rev = self._start_from_rev - if start_from_rev == "base": - start_from_rev = None - elif start_from_rev is not None and self.script: - - start_from_rev = [ - self.script.get_revision(sfr).revision - for sfr in util.to_list(start_from_rev) - if sfr not in (None, "base") - ] - return util.to_tuple(start_from_rev, default=()) - else: - if self._start_from_rev: - raise util.CommandError( - "Can't specify current_rev to context " - "when using a database connection" - ) - if not self._has_version_table(): - return () - return tuple( - row[0] for row in self.connection.execute(self._version.select()) - ) - - def _ensure_version_table(self, purge=False): - self._version.create(self.connection, checkfirst=True) - if purge: - self.connection.execute(self._version.delete()) - - def _has_version_table(self): - return sqla_compat._connectable_has_table( - self.connection, self.version_table, self.version_table_schema - ) - - def stamp(self, script_directory, revision): - """Stamp the version table with a specific revision. - - This method calculates those branches to which the given revision - can apply, and updates those branches as though they were migrated - towards that revision (either up or down). If no current branches - include the revision, it is added as a new branch head. - - .. versionadded:: 0.7.0 - - """ - heads = self.get_current_heads() - if not self.as_sql and not heads: - self._ensure_version_table() - head_maintainer = HeadMaintainer(self, heads) - for step in script_directory._stamp_revs(revision, heads): - head_maintainer.update_to_step(step) - - def run_migrations(self, **kw): - r"""Run the migration scripts established for this - :class:`.MigrationContext`, if any. - - The commands in :mod:`alembic.command` will set up a function - that is ultimately passed to the :class:`.MigrationContext` - as the ``fn`` argument. This function represents the "work" - that will be done when :meth:`.MigrationContext.run_migrations` - is called, typically from within the ``env.py`` script of the - migration environment. The "work function" then provides an iterable - of version callables and other version information which - in the case of the ``upgrade`` or ``downgrade`` commands are the - list of version scripts to invoke. Other commands yield nothing, - in the case that a command wants to run some other operation - against the database such as the ``current`` or ``stamp`` commands. 
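For orientation, the "work function" handed to this method as the ``fn``
opt is wired up by the command layer; roughly, and much simplified from the
real ``alembic.command.upgrade`` (note that ``_upgrade_revs`` is private
API)::

    from alembic.runtime.environment import EnvironmentContext
    from alembic.script import ScriptDirectory

    def upgrade(config, revision):
        script = ScriptDirectory.from_config(config)

        def do_upgrade(rev, context):
            # rev is the current head(s); the return value is the list
            # of MigrationStep objects that run_migrations() invokes
            return script._upgrade_revs(revision, rev)

        with EnvironmentContext(
            config, script, fn=do_upgrade, destination_rev=revision
        ):
            script.run_env()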
- - :param \**kw: keyword arguments here will be passed to each - migration callable, that is the ``upgrade()`` or ``downgrade()`` - method within revision scripts. - - """ - self.impl.start_migrations() - - if self.purge: - if self.as_sql: - raise util.CommandError("Can't use --purge with --sql mode") - self._ensure_version_table(purge=True) - heads = () - else: - heads = self.get_current_heads() - - if not self.as_sql and not heads: - self._ensure_version_table() - - head_maintainer = HeadMaintainer(self, heads) - - starting_in_transaction = ( - not self.as_sql and self._in_connection_transaction() - ) - - for step in self._migrations_fn(heads, self): - with self.begin_transaction(_per_migration=True): - if self.as_sql and not head_maintainer.heads: - # for offline mode, include a CREATE TABLE from - # the base - self._version.create(self.connection) - log.info("Running %s", step) - if self.as_sql: - self.impl.static_output( - "-- Running %s" % (step.short_log,) - ) - step.migration_fn(**kw) - - # previously, we wouldn't stamp per migration - # if we were in a transaction, however given the more - # complex model that involves any number of inserts - # and row-targeted updates and deletes, it's simpler for now - # just to run the operations on every version - head_maintainer.update_to_step(step) - for callback in self.on_version_apply_callbacks: - callback( - ctx=self, - step=step.info, - heads=set(head_maintainer.heads), - run_args=kw, - ) - - if ( - not starting_in_transaction - and not self.as_sql - and not self.impl.transactional_ddl - and self._in_connection_transaction() - ): - raise util.CommandError( - 'Migration "%s" has left an uncommitted ' - "transaction opened; transactional_ddl is False so " - "Alembic is not committing transactions" % step - ) - - if self.as_sql and not head_maintainer.heads: - self._version.drop(self.connection) - - def _in_connection_transaction(self): - try: - meth = self.connection.in_transaction - except AttributeError: - return False - else: - return meth() - - def execute(self, sql, execution_options=None): - """Execute a SQL construct or string statement. - - The underlying execution mechanics are used, that is - if this is "offline mode" the SQL is written to the - output buffer, otherwise the SQL is emitted on - the current SQLAlchemy connection. - - """ - self.impl._exec(sql, execution_options) - - def _stdout_connection(self, connection): - def dump(construct, *multiparams, **params): - self.impl._exec(construct) - - return MockEngineStrategy.MockConnection(self.dialect, dump) - - @property - def bind(self): - """Return the current "bind". - - In online mode, this is an instance of - :class:`sqlalchemy.engine.Connection`, and is suitable - for ad-hoc execution of any kind of usage described - in :ref:`sqlexpression_toplevel` as well as - for usage with the :meth:`sqlalchemy.schema.Table.create` - and :meth:`sqlalchemy.schema.MetaData.create_all` methods - of :class:`~sqlalchemy.schema.Table`, - :class:`~sqlalchemy.schema.MetaData`. - - Note that when "standard output" mode is enabled, - this bind will be a "mock" connection handler that cannot - return results and is only appropriate for a very limited - subset of commands. - - """ - return self.connection - - @property - def config(self): - """Return the :class:`.Config` used by the current environment, if any. - - .. 
versionadded:: 0.6.6 - - """ - if self.environment_context: - return self.environment_context.config - else: - return None - - def _compare_type(self, inspector_column, metadata_column): - if self._user_compare_type is False: - return False - - if callable(self._user_compare_type): - user_value = self._user_compare_type( - self, - inspector_column, - metadata_column, - inspector_column.type, - metadata_column.type, - ) - if user_value is not None: - return user_value - - return self.impl.compare_type(inspector_column, metadata_column) - - def _compare_server_default( - self, - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_column_default, - ): - - if self._user_compare_server_default is False: - return False - - if callable(self._user_compare_server_default): - user_value = self._user_compare_server_default( - self, - inspector_column, - metadata_column, - rendered_column_default, - metadata_column.server_default, - rendered_metadata_default, - ) - if user_value is not None: - return user_value - - return self.impl.compare_server_default( - inspector_column, - metadata_column, - rendered_metadata_default, - rendered_column_default, - ) - - -class HeadMaintainer(object): - def __init__(self, context, heads): - self.context = context - self.heads = set(heads) - - def _insert_version(self, version): - assert version not in self.heads - self.heads.add(version) - - self.context.impl._exec( - self.context._version.insert().values( - version_num=literal_column("'%s'" % version) - ) - ) - - def _delete_version(self, version): - self.heads.remove(version) - - ret = self.context.impl._exec( - self.context._version.delete().where( - self.context._version.c.version_num - == literal_column("'%s'" % version) - ) - ) - if ( - not self.context.as_sql - and self.context.dialect.supports_sane_rowcount - and ret.rowcount != 1 - ): - raise util.CommandError( - "Online migration expected to match one " - "row when deleting '%s' in '%s'; " - "%d found" - % (version, self.context.version_table, ret.rowcount) - ) - - def _update_version(self, from_, to_): - assert to_ not in self.heads - self.heads.remove(from_) - self.heads.add(to_) - - ret = self.context.impl._exec( - self.context._version.update() - .values(version_num=literal_column("'%s'" % to_)) - .where( - self.context._version.c.version_num - == literal_column("'%s'" % from_) - ) - ) - if ( - not self.context.as_sql - and self.context.dialect.supports_sane_rowcount - and ret.rowcount != 1 - ): - raise util.CommandError( - "Online migration expected to match one " - "row when updating '%s' to '%s' in '%s'; " - "%d found" - % (from_, to_, self.context.version_table, ret.rowcount) - ) - - def update_to_step(self, step): - if step.should_delete_branch(self.heads): - vers = step.delete_version_num - log.debug("branch delete %s", vers) - self._delete_version(vers) - elif step.should_create_branch(self.heads): - vers = step.insert_version_num - log.debug("new branch insert %s", vers) - self._insert_version(vers) - elif step.should_merge_branches(self.heads): - # delete revs, update from rev, update to rev - ( - delete_revs, - update_from_rev, - update_to_rev, - ) = step.merge_branch_idents(self.heads) - log.debug( - "merge, delete %s, update %s to %s", - delete_revs, - update_from_rev, - update_to_rev, - ) - for delrev in delete_revs: - self._delete_version(delrev) - self._update_version(update_from_rev, update_to_rev) - elif step.should_unmerge_branches(self.heads): - ( - update_from_rev, - update_to_rev, - insert_revs, - ) = 
step.unmerge_branch_idents(self.heads) - log.debug( - "unmerge, insert %s, update %s to %s", - insert_revs, - update_from_rev, - update_to_rev, - ) - for insrev in insert_revs: - self._insert_version(insrev) - self._update_version(update_from_rev, update_to_rev) - else: - from_, to_ = step.update_version_num(self.heads) - log.debug("update %s to %s", from_, to_) - self._update_version(from_, to_) - - -class MigrationInfo(object): - """Exposes information about a migration step to a callback listener. - - The :class:`.MigrationInfo` object is available exclusively for the - benefit of the :paramref:`.EnvironmentContext.on_version_apply` - callback hook. - - .. versionadded:: 0.9.3 - - """ - - is_upgrade = None - """True/False: indicates whether this operation ascends or descends the - version tree.""" - - is_stamp = None - """True/False: indicates whether this operation is a stamp (i.e. whether - it results in any actual database operations).""" - - up_revision_id = None - """Version string corresponding to :attr:`.Revision.revision`. - - In the case of a stamp operation, it is advised to use the - :attr:`.MigrationInfo.up_revision_ids` tuple as a stamp operation can - make a single movement from one or more branches down to a single - branchpoint, in which case there will be multiple "up" revisions. - - .. seealso:: - - :attr:`.MigrationInfo.up_revision_ids` - - """ - - up_revision_ids = None - """Tuple of version strings corresponding to :attr:`.Revision.revision`. - - In the majority of cases, this tuple will be a single value, synonomous - with the scalar value of :attr:`.MigrationInfo.up_revision_id`. - It can be multiple revision identifiers only in the case of an - ``alembic stamp`` operation which is moving downwards from multiple - branches down to their common branch point. - - .. versionadded:: 0.9.4 - - """ - - down_revision_ids = None - """Tuple of strings representing the base revisions of this migration step. - - If empty, this represents a root revision; otherwise, the first item - corresponds to :attr:`.Revision.down_revision`, and the rest are inferred - from dependencies. - """ - - revision_map = None - """The revision map inside of which this operation occurs.""" - - def __init__( - self, revision_map, is_upgrade, is_stamp, up_revisions, down_revisions - ): - self.revision_map = revision_map - self.is_upgrade = is_upgrade - self.is_stamp = is_stamp - self.up_revision_ids = util.to_tuple(up_revisions, default=()) - if self.up_revision_ids: - self.up_revision_id = self.up_revision_ids[0] - else: - # this should never be the case with - # "upgrade", "downgrade", or "stamp" as we are always - # measuring movement in terms of at least one upgrade version - self.up_revision_id = None - self.down_revision_ids = util.to_tuple(down_revisions, default=()) - - @property - def is_migration(self): - """True/False: indicates whether this operation is a migration. - - At present this is true if and only the migration is not a stamp. - If other operation types are added in the future, both this attribute - and :attr:`~.MigrationInfo.is_stamp` will be false. 
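A sketch of an ``on_version_apply`` hook consuming :class:`.MigrationInfo`;
the keyword names match the ``callback(ctx=..., step=..., heads=...,
run_args=...)`` invocation in ``run_migrations()`` above, and the logger
name is illustrative::

    import logging

    audit_log = logging.getLogger("migration_audit")

    def on_version_apply(ctx, step, heads, run_args):
        # `step` is the MigrationInfo instance described here
        audit_log.info(
            "%s %s -> %s",
            "stamp" if step.is_stamp else "migration",
            step.source_revision_ids,
            step.destination_revision_ids,
        )

    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        on_version_apply=on_version_apply,
    )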
- """ - return not self.is_stamp - - @property - def source_revision_ids(self): - """Active revisions before this migration step is applied.""" - return ( - self.down_revision_ids if self.is_upgrade else self.up_revision_ids - ) - - @property - def destination_revision_ids(self): - """Active revisions after this migration step is applied.""" - return ( - self.up_revision_ids if self.is_upgrade else self.down_revision_ids - ) - - @property - def up_revision(self): - """Get :attr:`~.MigrationInfo.up_revision_id` as - a :class:`.Revision`. - - """ - return self.revision_map.get_revision(self.up_revision_id) - - @property - def up_revisions(self): - """Get :attr:`~.MigrationInfo.up_revision_ids` as a :class:`.Revision`. - - .. versionadded:: 0.9.4 - - """ - return self.revision_map.get_revisions(self.up_revision_ids) - - @property - def down_revisions(self): - """Get :attr:`~.MigrationInfo.down_revision_ids` as a tuple of - :class:`Revisions <.Revision>`.""" - return self.revision_map.get_revisions(self.down_revision_ids) - - @property - def source_revisions(self): - """Get :attr:`~MigrationInfo.source_revision_ids` as a tuple of - :class:`Revisions <.Revision>`.""" - return self.revision_map.get_revisions(self.source_revision_ids) - - @property - def destination_revisions(self): - """Get :attr:`~MigrationInfo.destination_revision_ids` as a tuple of - :class:`Revisions <.Revision>`.""" - return self.revision_map.get_revisions(self.destination_revision_ids) - - -class MigrationStep(object): - @property - def name(self): - return self.migration_fn.__name__ - - @classmethod - def upgrade_from_script(cls, revision_map, script): - return RevisionStep(revision_map, script, True) - - @classmethod - def downgrade_from_script(cls, revision_map, script): - return RevisionStep(revision_map, script, False) - - @property - def is_downgrade(self): - return not self.is_upgrade - - @property - def short_log(self): - return "%s %s -> %s" % ( - self.name, - util.format_as_comma(self.from_revisions_no_deps), - util.format_as_comma(self.to_revisions_no_deps), - ) - - def __str__(self): - if self.doc: - return "%s %s -> %s, %s" % ( - self.name, - util.format_as_comma(self.from_revisions_no_deps), - util.format_as_comma(self.to_revisions_no_deps), - self.doc, - ) - else: - return self.short_log - - -class RevisionStep(MigrationStep): - def __init__(self, revision_map, revision, is_upgrade): - self.revision_map = revision_map - self.revision = revision - self.is_upgrade = is_upgrade - if is_upgrade: - self.migration_fn = revision.module.upgrade - else: - self.migration_fn = revision.module.downgrade - - def __repr__(self): - return "RevisionStep(%r, is_upgrade=%r)" % ( - self.revision.revision, - self.is_upgrade, - ) - - def __eq__(self, other): - return ( - isinstance(other, RevisionStep) - and other.revision == self.revision - and self.is_upgrade == other.is_upgrade - ) - - @property - def doc(self): - return self.revision.doc - - @property - def from_revisions(self): - if self.is_upgrade: - return self.revision._all_down_revisions - else: - return (self.revision.revision,) - - @property - def from_revisions_no_deps(self): - if self.is_upgrade: - return self.revision._versioned_down_revisions - else: - return (self.revision.revision,) - - @property - def to_revisions(self): - if self.is_upgrade: - return (self.revision.revision,) - else: - return self.revision._all_down_revisions - - @property - def to_revisions_no_deps(self): - if self.is_upgrade: - return (self.revision.revision,) - else: - return 
self.revision._versioned_down_revisions - - @property - def _has_scalar_down_revision(self): - return len(self.revision._all_down_revisions) == 1 - - def should_delete_branch(self, heads): - """A delete is when we are a. in a downgrade and b. - we are going to the "base" or we are going to a version that - is implied as a dependency on another version that is remaining. - - """ - if not self.is_downgrade: - return False - - if self.revision.revision not in heads: - return False - - downrevs = self.revision._all_down_revisions - - if not downrevs: - # is a base - return True - else: - # determine what the ultimate "to_revisions" for an - # unmerge would be. If there are none, then we're a delete. - to_revisions = self._unmerge_to_revisions(heads) - return not to_revisions - - def merge_branch_idents(self, heads): - other_heads = set(heads).difference(self.from_revisions) - - if other_heads: - ancestors = set( - r.revision - for r in self.revision_map._get_ancestor_nodes( - self.revision_map.get_revisions(other_heads), check=False - ) - ) - from_revisions = list( - set(self.from_revisions).difference(ancestors) - ) - else: - from_revisions = list(self.from_revisions) - - return ( - # delete revs, update from rev, update to rev - list(from_revisions[0:-1]), - from_revisions[-1], - self.to_revisions[0], - ) - - def _unmerge_to_revisions(self, heads): - other_heads = set(heads).difference([self.revision.revision]) - if other_heads: - ancestors = set( - r.revision - for r in self.revision_map._get_ancestor_nodes( - self.revision_map.get_revisions(other_heads), check=False - ) - ) - return list(set(self.to_revisions).difference(ancestors)) - else: - return self.to_revisions - - def unmerge_branch_idents(self, heads): - to_revisions = self._unmerge_to_revisions(heads) - - return ( - # update from rev, update to rev, insert revs - self.from_revisions[0], - to_revisions[-1], - to_revisions[0:-1], - ) - - def should_create_branch(self, heads): - if not self.is_upgrade: - return False - - downrevs = self.revision._all_down_revisions - - if not downrevs: - # is a base - return True - else: - # none of our downrevs are present, so... - # we have to insert our version. This is true whether - # or not there is only one downrev, or multiple (in the latter - # case, we're a merge point.) 
- if not heads.intersection(downrevs): - return True - else: - return False - - def should_merge_branches(self, heads): - if not self.is_upgrade: - return False - - downrevs = self.revision._all_down_revisions - - if len(downrevs) > 1 and len(heads.intersection(downrevs)) > 1: - return True - - return False - - def should_unmerge_branches(self, heads): - if not self.is_downgrade: - return False - - downrevs = self.revision._all_down_revisions - - if self.revision.revision in heads and len(downrevs) > 1: - return True - - return False - - def update_version_num(self, heads): - if not self._has_scalar_down_revision: - downrev = heads.intersection(self.revision._all_down_revisions) - assert ( - len(downrev) == 1 - ), "Can't do an UPDATE because downrevision is ambiguous" - down_revision = list(downrev)[0] - else: - down_revision = self.revision._all_down_revisions[0] - - if self.is_upgrade: - return down_revision, self.revision.revision - else: - return self.revision.revision, down_revision - - @property - def delete_version_num(self): - return self.revision.revision - - @property - def insert_version_num(self): - return self.revision.revision - - @property - def info(self): - return MigrationInfo( - revision_map=self.revision_map, - up_revisions=self.revision.revision, - down_revisions=self.revision._all_down_revisions, - is_upgrade=self.is_upgrade, - is_stamp=False, - ) - - -class StampStep(MigrationStep): - def __init__(self, from_, to_, is_upgrade, branch_move, revision_map=None): - self.from_ = util.to_tuple(from_, default=()) - self.to_ = util.to_tuple(to_, default=()) - self.is_upgrade = is_upgrade - self.branch_move = branch_move - self.migration_fn = self.stamp_revision - self.revision_map = revision_map - - doc = None - - def stamp_revision(self, **kw): - return None - - def __eq__(self, other): - return ( - isinstance(other, StampStep) - and other.from_revisions == self.revisions - and other.to_revisions == self.to_revisions - and other.branch_move == self.branch_move - and self.is_upgrade == other.is_upgrade - ) - - @property - def from_revisions(self): - return self.from_ - - @property - def to_revisions(self): - return self.to_ - - @property - def from_revisions_no_deps(self): - return self.from_ - - @property - def to_revisions_no_deps(self): - return self.to_ - - @property - def delete_version_num(self): - assert len(self.from_) == 1 - return self.from_[0] - - @property - def insert_version_num(self): - assert len(self.to_) == 1 - return self.to_[0] - - def update_version_num(self, heads): - assert len(self.from_) == 1 - assert len(self.to_) == 1 - return self.from_[0], self.to_[0] - - def merge_branch_idents(self, heads): - return ( - # delete revs, update from rev, update to rev - list(self.from_[0:-1]), - self.from_[-1], - self.to_[0], - ) - - def unmerge_branch_idents(self, heads): - return ( - # update from rev, update to rev, insert revs - self.from_[0], - self.to_[-1], - list(self.to_[0:-1]), - ) - - def should_delete_branch(self, heads): - # TODO: we probably need to look for self.to_ inside of heads, - # in a similar manner as should_create_branch, however we have - # no tests for this yet (stamp downgrades w/ branches) - return self.is_downgrade and self.branch_move - - def should_create_branch(self, heads): - return ( - self.is_upgrade - and (self.branch_move or set(self.from_).difference(heads)) - and set(self.to_).difference(heads) - ) - - def should_merge_branches(self, heads): - return len(self.from_) > 1 - - def should_unmerge_branches(self, heads): - return 
len(self.to_) > 1 - - @property - def info(self): - up, down = ( - (self.to_, self.from_) - if self.is_upgrade - else (self.from_, self.to_) - ) - return MigrationInfo( - revision_map=self.revision_map, - up_revisions=up, - down_revisions=down, - is_upgrade=self.is_upgrade, - is_stamp=True, - ) diff --git a/venv/lib/python3.7/site-packages/alembic/script/__init__.py b/venv/lib/python3.7/site-packages/alembic/script/__init__.py deleted file mode 100644 index 540d627..0000000 --- a/venv/lib/python3.7/site-packages/alembic/script/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .base import Script # noqa -from .base import ScriptDirectory # noqa - -__all__ = ["ScriptDirectory", "Script"] diff --git a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/script/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 2a86a45..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/base.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/script/__pycache__/base.cpython-37.pyc deleted file mode 100644 index 2399e1e..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/base.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/revision.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/script/__pycache__/revision.cpython-37.pyc deleted file mode 100644 index e9e63d7..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/revision.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/write_hooks.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/script/__pycache__/write_hooks.cpython-37.pyc deleted file mode 100644 index 385ba1c..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/script/__pycache__/write_hooks.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/script/base.py b/venv/lib/python3.7/site-packages/alembic/script/base.py deleted file mode 100644 index fea9e87..0000000 --- a/venv/lib/python3.7/site-packages/alembic/script/base.py +++ /dev/null @@ -1,924 +0,0 @@ -from contextlib import contextmanager -import datetime -import os -import re -import shutil - -from dateutil import tz - -from . import revision -from . import write_hooks -from .. import util -from ..runtime import migration -from ..util import compat - -_sourceless_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)(c|o)?$") -_only_source_rev_file = re.compile(r"(?!\.\#|__init__)(.*\.py)$") -_legacy_rev = re.compile(r"([a-f0-9]+)\.py$") -_mod_def_re = re.compile(r"(upgrade|downgrade)_([a-z0-9]+)") -_slug_re = re.compile(r"\w+") -_default_file_template = "%(rev)s_%(slug)s" -_split_on_space_comma = re.compile(r", *|(?: +)") - - -class ScriptDirectory(object): - - """Provides operations upon an Alembic script directory. 
- - This object is useful to get information as to current revisions, - most notably being able to get at the "head" revision, for schemes - that want to test if the current revision in the database is the most - recent:: - - from alembic.script import ScriptDirectory - from alembic.config import Config - config = Config() - config.set_main_option("script_location", "myapp:migrations") - script = ScriptDirectory.from_config(config) - - head_revision = script.get_current_head() - - - - """ - - def __init__( - self, - dir, # noqa - file_template=_default_file_template, - truncate_slug_length=40, - version_locations=None, - sourceless=False, - output_encoding="utf-8", - timezone=None, - hook_config=None, - ): - self.dir = dir - self.file_template = file_template - self.version_locations = version_locations - self.truncate_slug_length = truncate_slug_length or 40 - self.sourceless = sourceless - self.output_encoding = output_encoding - self.revision_map = revision.RevisionMap(self._load_revisions) - self.timezone = timezone - self.hook_config = hook_config - - if not os.access(dir, os.F_OK): - raise util.CommandError( - "Path doesn't exist: %r. Please use " - "the 'init' command to create a new " - "scripts folder." % os.path.abspath(dir) - ) - - @property - def versions(self): - loc = self._version_locations - if len(loc) > 1: - raise util.CommandError("Multiple version_locations present") - else: - return loc[0] - - @util.memoized_property - def _version_locations(self): - if self.version_locations: - return [ - os.path.abspath(util.coerce_resource_to_filename(location)) - for location in self.version_locations - ] - else: - return (os.path.abspath(os.path.join(self.dir, "versions")),) - - def _load_revisions(self): - if self.version_locations: - paths = [ - vers - for vers in self._version_locations - if os.path.exists(vers) - ] - else: - paths = [self.versions] - - dupes = set() - for vers in paths: - for file_ in Script._list_py_dir(self, vers): - path = os.path.realpath(os.path.join(vers, file_)) - if path in dupes: - util.warn( - "File %s loaded twice! ignoring. Please ensure " - "version_locations is unique." % path - ) - continue - dupes.add(path) - script = Script._from_filename(self, vers, file_) - if script is None: - continue - yield script - - @classmethod - def from_config(cls, config): - """Produce a new :class:`.ScriptDirectory` given a :class:`.Config` - instance. - - The :class:`.Config` need only have the ``script_location`` key - present. - - """ - script_location = config.get_main_option("script_location") - if script_location is None: - raise util.CommandError( - "No 'script_location' key " "found in configuration." 
- ) - truncate_slug_length = config.get_main_option("truncate_slug_length") - if truncate_slug_length is not None: - truncate_slug_length = int(truncate_slug_length) - - version_locations = config.get_main_option("version_locations") - if version_locations: - version_locations = _split_on_space_comma.split(version_locations) - - return ScriptDirectory( - util.coerce_resource_to_filename(script_location), - file_template=config.get_main_option( - "file_template", _default_file_template - ), - truncate_slug_length=truncate_slug_length, - sourceless=config.get_main_option("sourceless") == "true", - output_encoding=config.get_main_option("output_encoding", "utf-8"), - version_locations=version_locations, - timezone=config.get_main_option("timezone"), - hook_config=config.get_section("post_write_hooks", {}), - ) - - @contextmanager - def _catch_revision_errors( - self, - ancestor=None, - multiple_heads=None, - start=None, - end=None, - resolution=None, - ): - try: - yield - except revision.RangeNotAncestorError as rna: - if start is None: - start = rna.lower - if end is None: - end = rna.upper - if not ancestor: - ancestor = ( - "Requested range %(start)s:%(end)s does not refer to " - "ancestor/descendant revisions along the same branch" - ) - ancestor = ancestor % {"start": start, "end": end} - compat.raise_from_cause(util.CommandError(ancestor)) - except revision.MultipleHeads as mh: - if not multiple_heads: - multiple_heads = ( - "Multiple head revisions are present for given " - "argument '%(head_arg)s'; please " - "specify a specific target revision, " - "'@%(head_arg)s' to " - "narrow to a specific head, or 'heads' for all heads" - ) - multiple_heads = multiple_heads % { - "head_arg": end or mh.argument, - "heads": util.format_as_comma(mh.heads), - } - compat.raise_from_cause(util.CommandError(multiple_heads)) - except revision.ResolutionError as re: - if resolution is None: - resolution = "Can't locate revision identified by '%s'" % ( - re.argument - ) - compat.raise_from_cause(util.CommandError(resolution)) - except revision.RevisionError as err: - compat.raise_from_cause(util.CommandError(err.args[0])) - - def walk_revisions(self, base="base", head="heads"): - """Iterate through all revisions. - - :param base: the base revision, or "base" to start from the - empty revision. - - :param head: the head revision; defaults to "heads" to indicate - all head revisions. May also be "head" to indicate a single - head revision. - - .. versionchanged:: 0.7.0 the "head" identifier now refers to - the head of a non-branched repository only; use "heads" to - refer to the set of all head branches simultaneously. - - """ - with self._catch_revision_errors(start=base, end=head): - for rev in self.revision_map.iterate_revisions( - head, base, inclusive=True, assert_relative_length=False - ): - yield rev - - def get_revisions(self, id_): - """Return the :class:`.Script` instance with the given rev identifier, - symbolic name, or sequence of identifiers. - - .. versionadded:: 0.7.0 - - """ - with self._catch_revision_errors(): - return self.revision_map.get_revisions(id_) - - def get_all_current(self, id_): - with self._catch_revision_errors(): - top_revs = set(self.revision_map.get_revisions(id_)) - top_revs.update( - self.revision_map._get_ancestor_nodes( - list(top_revs), include_dependencies=True - ) - ) - top_revs = self.revision_map._filter_into_branch_heads(top_revs) - return top_revs - - def get_revision(self, id_): - """Return the :class:`.Script` instance with the given rev id. - - .. 
seealso:: - - :meth:`.ScriptDirectory.get_revisions` - - """ - - with self._catch_revision_errors(): - return self.revision_map.get_revision(id_) - - def as_revision_number(self, id_): - """Convert a symbolic revision, i.e. 'head' or 'base', into - an actual revision number.""" - - with self._catch_revision_errors(): - rev, branch_name = self.revision_map._resolve_revision_number(id_) - - if not rev: - # convert () to None - return None - elif id_ == "heads": - return rev - else: - return rev[0] - - def iterate_revisions(self, upper, lower): - """Iterate through script revisions, starting at the given - upper revision identifier and ending at the lower. - - The traversal uses strictly the `down_revision` - marker inside each migration script, so - it is a requirement that upper >= lower, - else you'll get nothing back. - - The iterator yields :class:`.Script` objects. - - .. seealso:: - - :meth:`.RevisionMap.iterate_revisions` - - """ - return self.revision_map.iterate_revisions(upper, lower) - - def get_current_head(self): - """Return the current head revision. - - If the script directory has multiple heads - due to branching, an error is raised; - :meth:`.ScriptDirectory.get_heads` should be - preferred. - - :return: a string revision number. - - .. seealso:: - - :meth:`.ScriptDirectory.get_heads` - - """ - with self._catch_revision_errors( - multiple_heads=( - "The script directory has multiple heads (due to branching)." - "Please use get_heads(), or merge the branches using " - "alembic merge." - ) - ): - return self.revision_map.get_current_head() - - def get_heads(self): - """Return all "versioned head" revisions as strings. - - This is normally a list of length one, - unless branches are present. The - :meth:`.ScriptDirectory.get_current_head()` method - can be used normally when a script directory - has only one head. - - :return: a tuple of string revision numbers. - """ - return list(self.revision_map.heads) - - def get_base(self): - """Return the "base" revision as a string. - - This is the revision number of the script that - has a ``down_revision`` of None. - - If the script directory has multiple bases, an error is raised; - :meth:`.ScriptDirectory.get_bases` should be - preferred. - - """ - bases = self.get_bases() - if len(bases) > 1: - raise util.CommandError( - "The script directory has multiple bases. " - "Please use get_bases()." - ) - elif bases: - return bases[0] - else: - return None - - def get_bases(self): - """return all "base" revisions as strings. - - This is the revision number of all scripts that - have a ``down_revision`` of None. - - .. 
versionadded:: 0.7.0 - - """ - return list(self.revision_map.bases) - - def _upgrade_revs(self, destination, current_rev): - with self._catch_revision_errors( - ancestor="Destination %(end)s is not a valid upgrade " - "target from current head(s)", - end=destination, - ): - revs = self.revision_map.iterate_revisions( - destination, current_rev, implicit_base=True - ) - revs = list(revs) - return [ - migration.MigrationStep.upgrade_from_script( - self.revision_map, script - ) - for script in reversed(list(revs)) - ] - - def _downgrade_revs(self, destination, current_rev): - with self._catch_revision_errors( - ancestor="Destination %(end)s is not a valid downgrade " - "target from current head(s)", - end=destination, - ): - revs = self.revision_map.iterate_revisions( - current_rev, destination, select_for_downgrade=True - ) - return [ - migration.MigrationStep.downgrade_from_script( - self.revision_map, script - ) - for script in revs - ] - - def _stamp_revs(self, revision, heads): - with self._catch_revision_errors( - multiple_heads="Multiple heads are present; please specify a " - "single target revision" - ): - - heads = self.get_revisions(heads) - - steps = [] - - if not revision: - revision = "base" - - filtered_heads = [] - for rev in util.to_tuple(revision): - if rev: - filtered_heads.extend( - self.revision_map.filter_for_lineage( - heads, rev, include_dependencies=True - ) - ) - filtered_heads = util.unique_list(filtered_heads) - - dests = self.get_revisions(revision) or [None] - - for dest in dests: - - if dest is None: - # dest is 'base'. Return a "delete branch" migration - # for all applicable heads. - steps.extend( - [ - migration.StampStep( - head.revision, - None, - False, - True, - self.revision_map, - ) - for head in filtered_heads - ] - ) - continue - elif dest in filtered_heads: - # the dest is already in the version table, do nothing. - continue - - # figure out if the dest is a descendant or an - # ancestor of the selected nodes - descendants = set( - self.revision_map._get_descendant_nodes([dest]) - ) - ancestors = set(self.revision_map._get_ancestor_nodes([dest])) - - if descendants.intersection(filtered_heads): - # heads are above the target, so this is a downgrade. - # we can treat them as a "merge", single step. - assert not ancestors.intersection(filtered_heads) - todo_heads = [head.revision for head in filtered_heads] - step = migration.StampStep( - todo_heads, - dest.revision, - False, - False, - self.revision_map, - ) - steps.append(step) - continue - elif ancestors.intersection(filtered_heads): - # heads are below the target, so this is an upgrade. - # we can treat them as a "merge", single step. - todo_heads = [head.revision for head in filtered_heads] - step = migration.StampStep( - todo_heads, - dest.revision, - True, - False, - self.revision_map, - ) - steps.append(step) - continue - else: - # destination is in a branch not represented, - # treat it as new branch - step = migration.StampStep( - (), dest.revision, True, True, self.revision_map - ) - steps.append(step) - continue - - return steps - - def run_env(self): - """Run the script environment. - - This basically runs the ``env.py`` script present - in the migration environment. It is called exclusively - by the command functions in :mod:`alembic.command`. 
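Together, the head-inspection methods above support the common "is the
database up to date?" check; a sketch, assuming an ``alembic.ini`` next to
the application and an existing SQLAlchemy ``engine``::

    from alembic.config import Config
    from alembic.migration import MigrationContext
    from alembic.script import ScriptDirectory

    config = Config("alembic.ini")
    script = ScriptDirectory.from_config(config)

    with engine.connect() as conn:
        db_heads = set(MigrationContext.configure(conn).get_current_heads())
        if db_heads != set(script.get_heads()):
            raise RuntimeError("database is not on the latest revision(s)")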
- - - """ - util.load_python_file(self.dir, "env.py") - - @property - def env_py_location(self): - return os.path.abspath(os.path.join(self.dir, "env.py")) - - def _generate_template(self, src, dest, **kw): - util.status( - "Generating %s" % os.path.abspath(dest), - util.template_to_file, - src, - dest, - self.output_encoding, - **kw - ) - - def _copy_file(self, src, dest): - util.status( - "Generating %s" % os.path.abspath(dest), shutil.copy, src, dest - ) - - def _ensure_directory(self, path): - path = os.path.abspath(path) - if not os.path.exists(path): - util.status("Creating directory %s" % path, os.makedirs, path) - - def _generate_create_date(self): - if self.timezone is not None: - # First, assume correct capitalization - tzinfo = tz.gettz(self.timezone) - if tzinfo is None: - # Fall back to uppercase - tzinfo = tz.gettz(self.timezone.upper()) - if tzinfo is None: - raise util.CommandError( - "Can't locate timezone: %s" % self.timezone - ) - create_date = ( - datetime.datetime.utcnow() - .replace(tzinfo=tz.tzutc()) - .astimezone(tzinfo) - ) - else: - create_date = datetime.datetime.now() - return create_date - - def generate_revision( - self, - revid, - message, - head=None, - refresh=False, - splice=False, - branch_labels=None, - version_path=None, - depends_on=None, - **kw - ): - """Generate a new revision file. - - This runs the ``script.py.mako`` template, given - template arguments, and creates a new file. - - :param revid: String revision id. Typically this - comes from ``alembic.util.rev_id()``. - :param message: the revision message, the one passed - by the -m argument to the ``revision`` command. - :param head: the head revision to generate against. Defaults - to the current "head" if no branches are present, else raises - an exception. - - .. versionadded:: 0.7.0 - - :param splice: if True, allow the "head" version to not be an - actual head; otherwise, the selected head must be a head - (e.g. endpoint) revision. - :param refresh: deprecated. - - """ - if head is None: - head = "head" - - try: - Script.verify_rev_id(revid) - except revision.RevisionError as err: - compat.raise_from_cause(util.CommandError(err.args[0])) - - with self._catch_revision_errors( - multiple_heads=( - "Multiple heads are present; please specify the head " - "revision on which the new revision should be based, " - "or perform a merge." 
- ) - ): - heads = self.revision_map.get_revisions(head) - - if len(set(heads)) != len(heads): - raise util.CommandError("Duplicate head revisions specified") - - create_date = self._generate_create_date() - - if version_path is None: - if len(self._version_locations) > 1: - for head in heads: - if head is not None: - version_path = os.path.dirname(head.path) - break - else: - raise util.CommandError( - "Multiple version locations present, " - "please specify --version-path" - ) - else: - version_path = self.versions - - norm_path = os.path.normpath(os.path.abspath(version_path)) - for vers_path in self._version_locations: - if os.path.normpath(vers_path) == norm_path: - break - else: - raise util.CommandError( - "Path %s is not represented in current " - "version locations" % version_path - ) - - if self.version_locations: - self._ensure_directory(version_path) - - path = self._rev_path(version_path, revid, message, create_date) - - if not splice: - for head in heads: - if head is not None and not head.is_head: - raise util.CommandError( - "Revision %s is not a head revision; please specify " - "--splice to create a new branch from this revision" - % head.revision - ) - - if depends_on: - with self._catch_revision_errors(): - depends_on = [ - dep - if dep in rev.branch_labels # maintain branch labels - else rev.revision # resolve partial revision identifiers - for rev, dep in [ - (self.revision_map.get_revision(dep), dep) - for dep in util.to_list(depends_on) - ] - ] - - self._generate_template( - os.path.join(self.dir, "script.py.mako"), - path, - up_revision=str(revid), - down_revision=revision.tuple_rev_as_scalar( - tuple(h.revision if h is not None else None for h in heads) - ), - branch_labels=util.to_tuple(branch_labels), - depends_on=revision.tuple_rev_as_scalar(depends_on), - create_date=create_date, - comma=util.format_as_comma, - message=message if message is not None else ("empty message"), - **kw - ) - - post_write_hooks = self.hook_config - if post_write_hooks: - write_hooks._run_hooks(path, post_write_hooks) - - try: - script = Script._from_path(self, path) - except revision.RevisionError as err: - compat.raise_from_cause(util.CommandError(err.args[0])) - if branch_labels and not script.branch_labels: - raise util.CommandError( - "Version %s specified branch_labels %s, however the " - "migration file %s does not have them; have you upgraded " - "your script.py.mako to include the " - "'branch_labels' section?" - % (script.revision, branch_labels, script.path) - ) - - self.revision_map.add_revision(script) - return script - - def _rev_path(self, path, rev_id, message, create_date): - slug = "_".join(_slug_re.findall(message or "")).lower() - if len(slug) > self.truncate_slug_length: - slug = slug[: self.truncate_slug_length].rsplit("_", 1)[0] + "_" - filename = "%s.py" % ( - self.file_template - % { - "rev": rev_id, - "slug": slug, - "year": create_date.year, - "month": create_date.month, - "day": create_date.day, - "hour": create_date.hour, - "minute": create_date.minute, - "second": create_date.second, - } - ) - return os.path.join(path, filename) - - -class Script(revision.Revision): - - """Represent a single revision file in a ``versions/`` directory. - - The :class:`.Script` instance is returned by methods - such as :meth:`.ScriptDirectory.iterate_revisions`. 
- - """ - - def __init__(self, module, rev_id, path): - self.module = module - self.path = path - super(Script, self).__init__( - rev_id, - module.down_revision, - branch_labels=util.to_tuple( - getattr(module, "branch_labels", None), default=() - ), - dependencies=util.to_tuple( - getattr(module, "depends_on", None), default=() - ), - ) - - module = None - """The Python module representing the actual script itself.""" - - path = None - """Filesystem path of the script.""" - - _db_current_indicator = None - """Utility variable which when set will cause string output to indicate - this is a "current" version in some database""" - - @property - def doc(self): - """Return the docstring given in the script.""" - - return re.split("\n\n", self.longdoc)[0] - - @property - def longdoc(self): - """Return the docstring given in the script.""" - - doc = self.module.__doc__ - if doc: - if hasattr(self.module, "_alembic_source_encoding"): - doc = doc.decode(self.module._alembic_source_encoding) - return doc.strip() - else: - return "" - - @property - def log_entry(self): - entry = "Rev: %s%s%s%s%s\n" % ( - self.revision, - " (head)" if self.is_head else "", - " (branchpoint)" if self.is_branch_point else "", - " (mergepoint)" if self.is_merge_point else "", - " (current)" if self._db_current_indicator else "", - ) - if self.is_merge_point: - entry += "Merges: %s\n" % (self._format_down_revision(),) - else: - entry += "Parent: %s\n" % (self._format_down_revision(),) - - if self.dependencies: - entry += "Also depends on: %s\n" % ( - util.format_as_comma(self.dependencies) - ) - - if self.is_branch_point: - entry += "Branches into: %s\n" % ( - util.format_as_comma(self.nextrev) - ) - - if self.branch_labels: - entry += "Branch names: %s\n" % ( - util.format_as_comma(self.branch_labels), - ) - - entry += "Path: %s\n" % (self.path,) - - entry += "\n%s\n" % ( - "\n".join(" %s" % para for para in self.longdoc.splitlines()) - ) - return entry - - def __str__(self): - return "%s -> %s%s%s%s, %s" % ( - self._format_down_revision(), - self.revision, - " (head)" if self.is_head else "", - " (branchpoint)" if self.is_branch_point else "", - " (mergepoint)" if self.is_merge_point else "", - self.doc, - ) - - def _head_only( - self, - include_branches=False, - include_doc=False, - include_parents=False, - tree_indicators=True, - head_indicators=True, - ): - text = self.revision - if include_parents: - if self.dependencies: - text = "%s (%s) -> %s" % ( - self._format_down_revision(), - util.format_as_comma(self.dependencies), - text, - ) - else: - text = "%s -> %s" % (self._format_down_revision(), text) - if include_branches and self.branch_labels: - text += " (%s)" % util.format_as_comma(self.branch_labels) - if head_indicators or tree_indicators: - text += "%s%s%s" % ( - " (head)" if self._is_real_head else "", - " (effective head)" - if self.is_head and not self._is_real_head - else "", - " (current)" if self._db_current_indicator else "", - ) - if tree_indicators: - text += "%s%s" % ( - " (branchpoint)" if self.is_branch_point else "", - " (mergepoint)" if self.is_merge_point else "", - ) - if include_doc: - text += ", %s" % self.doc - return text - - def cmd_format( - self, - verbose, - include_branches=False, - include_doc=False, - include_parents=False, - tree_indicators=True, - ): - if verbose: - return self.log_entry - else: - return self._head_only( - include_branches, include_doc, include_parents, tree_indicators - ) - - def _format_down_revision(self): - if not self.down_revision: - return "" - else: - 
return util.format_as_comma(self._versioned_down_revisions) - - @classmethod - def _from_path(cls, scriptdir, path): - dir_, filename = os.path.split(path) - return cls._from_filename(scriptdir, dir_, filename) - - @classmethod - def _list_py_dir(cls, scriptdir, path): - if scriptdir.sourceless: - # read files in version path, e.g. pyc or pyo files - # in the immediate path - paths = os.listdir(path) - - names = set(fname.split(".")[0] for fname in paths) - - # look for __pycache__ - if os.path.exists(os.path.join(path, "__pycache__")): - # add all files from __pycache__ whose filename is not - # already in the names we got from the version directory. - # add as relative paths including __pycache__ token - paths.extend( - os.path.join("__pycache__", pyc) - for pyc in os.listdir(os.path.join(path, "__pycache__")) - if pyc.split(".")[0] not in names - ) - return paths - else: - return os.listdir(path) - - @classmethod - def _from_filename(cls, scriptdir, dir_, filename): - if scriptdir.sourceless: - py_match = _sourceless_rev_file.match(filename) - else: - py_match = _only_source_rev_file.match(filename) - - if not py_match: - return None - - py_filename = py_match.group(1) - - if scriptdir.sourceless: - is_c = py_match.group(2) == "c" - is_o = py_match.group(2) == "o" - else: - is_c = is_o = False - - if is_o or is_c: - py_exists = os.path.exists(os.path.join(dir_, py_filename)) - pyc_exists = os.path.exists(os.path.join(dir_, py_filename + "c")) - - # prefer .py over .pyc because we'd like to get the - # source encoding; prefer .pyc over .pyo because we'd like to - # have the docstrings which a -OO file would not have - if py_exists or is_o and pyc_exists: - return None - - module = util.load_python_file(dir_, filename) - - if not hasattr(module, "revision"): - # attempt to get the revision id from the script name, - # this for legacy only - m = _legacy_rev.match(filename) - if not m: - raise util.CommandError( - "Could not determine revision id from filename %s. " - "Be sure the 'revision' variable is " - "declared inside the script (please see 'Upgrading " - "from Alembic 0.1 to 0.2' in the documentation)." - % filename - ) - else: - revision = m.group(1) - else: - revision = module.revision - return Script(module, revision, os.path.join(dir_, filename)) diff --git a/venv/lib/python3.7/site-packages/alembic/script/revision.py b/venv/lib/python3.7/site-packages/alembic/script/revision.py deleted file mode 100644 index 22481a0..0000000 --- a/venv/lib/python3.7/site-packages/alembic/script/revision.py +++ /dev/null @@ -1,1063 +0,0 @@ -import collections -import re - -from sqlalchemy import util as sqlautil - -from .. 
import util -from ..util import compat - -_relative_destination = re.compile(r"(?:(.+?)@)?(\w+)?((?:\+|-)\d+)") -_revision_illegal_chars = ["@", "-", "+"] - - -class RevisionError(Exception): - pass - - -class RangeNotAncestorError(RevisionError): - def __init__(self, lower, upper): - self.lower = lower - self.upper = upper - super(RangeNotAncestorError, self).__init__( - "Revision %s is not an ancestor of revision %s" - % (lower or "base", upper or "base") - ) - - -class MultipleHeads(RevisionError): - def __init__(self, heads, argument): - self.heads = heads - self.argument = argument - super(MultipleHeads, self).__init__( - "Multiple heads are present for given argument '%s'; " - "%s" % (argument, ", ".join(heads)) - ) - - -class ResolutionError(RevisionError): - def __init__(self, message, argument): - super(ResolutionError, self).__init__(message) - self.argument = argument - - -class RevisionMap(object): - """Maintains a map of :class:`.Revision` objects. - - :class:`.RevisionMap` is used by :class:`.ScriptDirectory` to maintain - and traverse the collection of :class:`.Script` objects, which are - themselves instances of :class:`.Revision`. - - """ - - def __init__(self, generator): - """Construct a new :class:`.RevisionMap`. - - :param generator: a zero-arg callable that will generate an iterable - of :class:`.Revision` instances to be used. These are typically - :class:`.Script` subclasses within regular Alembic use. - - """ - self._generator = generator - - @util.memoized_property - def heads(self): - """All "head" revisions as strings. - - This is normally a tuple of length one, - unless unmerged branches are present. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self.heads - - @util.memoized_property - def bases(self): - """All "base" revisions as strings. - - These are revisions that have a ``down_revision`` of None, - or empty tuple. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self.bases - - @util.memoized_property - def _real_heads(self): - """All "real" head revisions as strings. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self._real_heads - - @util.memoized_property - def _real_bases(self): - """All "real" base revisions as strings. - - :return: a tuple of string revision numbers. - - """ - self._revision_map - return self._real_bases - - @util.memoized_property - def _revision_map(self): - """memoized attribute, initializes the revision map from the - initial collection. - - """ - map_ = {} - - heads = sqlautil.OrderedSet() - _real_heads = sqlautil.OrderedSet() - self.bases = () - self._real_bases = () - - has_branch_labels = set() - has_depends_on = set() - for revision in self._generator(): - - if revision.revision in map_: - util.warn( - "Revision %s is present more than once" % revision.revision - ) - map_[revision.revision] = revision - if revision.branch_labels: - has_branch_labels.add(revision) - if revision.dependencies: - has_depends_on.add(revision) - heads.add(revision.revision) - _real_heads.add(revision.revision) - if revision.is_base: - self.bases += (revision.revision,) - if revision._is_real_base: - self._real_bases += (revision.revision,) - - # add the branch_labels to the map_. We'll need these - # to resolve the dependencies. 
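Taken together, the passes above resolve heads and bases purely from ``down_revision`` links; before the branch-label and dependency passes that follow, here is a minimal sketch of that resolution (the revision ids are invented for illustration):

```
from alembic.script.revision import Revision, RevisionMap

# invented revision ids, for illustration only
revs = [
    Revision("a1", None),   # base: down_revision is None
    Revision("b2", "a1"),
    Revision("c3", "b2"),   # head: nothing revises it
]

# the constructor argument is a zero-arg callable
# yielding Revision objects
rmap = RevisionMap(lambda: iter(revs))
print(rmap.heads)  # ('c3',)
print(rmap.bases)  # ('a1',)
```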
- for revision in has_branch_labels: - self._map_branch_labels(revision, map_) - - for revision in has_depends_on: - self._add_depends_on(revision, map_) - - for rev in map_.values(): - for downrev in rev._all_down_revisions: - if downrev not in map_: - util.warn( - "Revision %s referenced from %s is not present" - % (downrev, rev) - ) - down_revision = map_[downrev] - down_revision.add_nextrev(rev) - if downrev in rev._versioned_down_revisions: - heads.discard(downrev) - _real_heads.discard(downrev) - - map_[None] = map_[()] = None - self.heads = tuple(heads) - self._real_heads = tuple(_real_heads) - - for revision in has_branch_labels: - self._add_branches(revision, map_, map_branch_labels=False) - return map_ - - def _map_branch_labels(self, revision, map_): - if revision.branch_labels: - for branch_label in revision._orig_branch_labels: - if branch_label in map_: - raise RevisionError( - "Branch name '%s' in revision %s already " - "used by revision %s" - % ( - branch_label, - revision.revision, - map_[branch_label].revision, - ) - ) - map_[branch_label] = revision - - def _add_branches(self, revision, map_, map_branch_labels=True): - if map_branch_labels: - self._map_branch_labels(revision, map_) - - if revision.branch_labels: - revision.branch_labels.update(revision.branch_labels) - for node in self._get_descendant_nodes( - [revision], map_, include_dependencies=False - ): - node.branch_labels.update(revision.branch_labels) - - parent = node - while ( - parent - and not parent._is_real_branch_point - and not parent.is_merge_point - ): - - parent.branch_labels.update(revision.branch_labels) - if parent.down_revision: - parent = map_[parent.down_revision] - else: - break - - def _add_depends_on(self, revision, map_): - if revision.dependencies: - deps = [map_[dep] for dep in util.to_tuple(revision.dependencies)] - revision._resolved_dependencies = tuple([d.revision for d in deps]) - - def add_revision(self, revision, _replace=False): - """add a single revision to an existing map. - - This method is for single-revision use cases, it's not - appropriate for fully populating an entire revision map. - - """ - map_ = self._revision_map - if not _replace and revision.revision in map_: - util.warn( - "Revision %s is present more than once" % revision.revision - ) - elif _replace and revision.revision not in map_: - raise Exception("revision %s not in map" % revision.revision) - - map_[revision.revision] = revision - self._add_branches(revision, map_) - self._add_depends_on(revision, map_) - - if revision.is_base: - self.bases += (revision.revision,) - if revision._is_real_base: - self._real_bases += (revision.revision,) - for downrev in revision._all_down_revisions: - if downrev not in map_: - util.warn( - "Revision %s referenced from %s is not present" - % (downrev, revision) - ) - map_[downrev].add_nextrev(revision) - if revision._is_real_head: - self._real_heads = tuple( - head - for head in self._real_heads - if head - not in set(revision._all_down_revisions).union( - [revision.revision] - ) - ) + (revision.revision,) - if revision.is_head: - self.heads = tuple( - head - for head in self.heads - if head - not in set(revision._versioned_down_revisions).union( - [revision.revision] - ) - ) + (revision.revision,) - - def get_current_head(self, branch_label=None): - """Return the current head revision. - - If the script directory has multiple heads - due to branching, an error is raised; - :meth:`.ScriptDirectory.get_heads` should be - preferred. 
- - :param branch_label: optional branch name which will limit the - heads considered to those which include that branch_label. - - :return: a string revision number. - - .. seealso:: - - :meth:`.ScriptDirectory.get_heads` - - """ - current_heads = self.heads - if branch_label: - current_heads = self.filter_for_lineage( - current_heads, branch_label - ) - if len(current_heads) > 1: - raise MultipleHeads( - current_heads, - "%s@head" % branch_label if branch_label else "head", - ) - - if current_heads: - return current_heads[0] - else: - return None - - def _get_base_revisions(self, identifier): - return self.filter_for_lineage(self.bases, identifier) - - def get_revisions(self, id_): - """Return the :class:`.Revision` instances with the given rev id - or identifiers. - - May be given a single identifier, a sequence of identifiers, or the - special symbols "head" or "base". The result is a tuple of one - or more :class:`.Revision` instances, or an empty tuple in the case of "base". - - In the case where 'head' or 'heads' is requested and the - revision map is empty, an empty tuple is returned. - - Supports partial identifiers, where the given identifier - is matched against all identifiers that start with the given - characters; if there is exactly one match, that determines the - full revision. - - """ - - if isinstance(id_, (list, tuple, set, frozenset)): - return sum([self.get_revisions(id_elem) for id_elem in id_], ()) - else: - resolved_id, branch_label = self._resolve_revision_number(id_) - return tuple( - self._revision_for_ident(rev_id, branch_label) - for rev_id in resolved_id - ) - - def get_revision(self, id_): - """Return the :class:`.Revision` instance with the given rev id. - - If a symbolic name such as "head" or "base" is given, resolves - the identifier into the current head or base revision. If the symbolic - name refers to multiple revisions, :class:`.MultipleHeads` is raised. - - Supports partial identifiers, where the given identifier - is matched against all identifiers that start with the given - characters; if there is exactly one match, that determines the - full revision.
- - """ - - resolved_id, branch_label = self._resolve_revision_number(id_) - if len(resolved_id) > 1: - raise MultipleHeads(resolved_id, id_) - elif resolved_id: - resolved_id = resolved_id[0] - - return self._revision_for_ident(resolved_id, branch_label) - - def _resolve_branch(self, branch_label): - try: - branch_rev = self._revision_map[branch_label] - except KeyError: - try: - nonbranch_rev = self._revision_for_ident(branch_label) - except ResolutionError: - raise ResolutionError( - "No such branch: '%s'" % branch_label, branch_label - ) - else: - return nonbranch_rev - else: - return branch_rev - - def _revision_for_ident(self, resolved_id, check_branch=None): - if check_branch: - branch_rev = self._resolve_branch(check_branch) - else: - branch_rev = None - - try: - revision = self._revision_map[resolved_id] - except KeyError: - # break out to avoid misleading py3k stack traces - revision = False - if revision is False: - # do a partial lookup - revs = [ - x - for x in self._revision_map - if x and len(x) > 3 and x.startswith(resolved_id) - ] - - if branch_rev: - revs = self.filter_for_lineage(revs, check_branch) - if not revs: - raise ResolutionError( - "No such revision or branch '%s'%s" - % ( - resolved_id, - ( - "; please ensure at least four characters are " - "present for partial revision identifier matches" - if len(resolved_id) < 4 - else "" - ), - ), - resolved_id, - ) - elif len(revs) > 1: - raise ResolutionError( - "Multiple revisions start " - "with '%s': %s..." - % (resolved_id, ", ".join("'%s'" % r for r in revs[0:3])), - resolved_id, - ) - else: - revision = self._revision_map[revs[0]] - - if check_branch and revision is not None: - if not self._shares_lineage( - revision.revision, branch_rev.revision - ): - raise ResolutionError( - "Revision %s is not a member of branch '%s'" - % (revision.revision, check_branch), - resolved_id, - ) - return revision - - def _filter_into_branch_heads(self, targets): - targets = set(targets) - - for rev in list(targets): - if targets.intersection( - self._get_descendant_nodes([rev], include_dependencies=False) - ).difference([rev]): - targets.discard(rev) - return targets - - def filter_for_lineage( - self, targets, check_against, include_dependencies=False - ): - id_, branch_label = self._resolve_revision_number(check_against) - - shares = [] - if branch_label: - shares.append(branch_label) - if id_: - shares.extend(id_) - - return [ - tg - for tg in targets - if self._shares_lineage( - tg, shares, include_dependencies=include_dependencies - ) - ] - - def _shares_lineage( - self, target, test_against_revs, include_dependencies=False - ): - if not test_against_revs: - return True - if not isinstance(target, Revision): - target = self._revision_for_ident(target) - - test_against_revs = [ - self._revision_for_ident(test_against_rev) - if not isinstance(test_against_rev, Revision) - else test_against_rev - for test_against_rev in util.to_tuple( - test_against_revs, default=() - ) - ] - - return bool( - set( - self._get_descendant_nodes( - [target], include_dependencies=include_dependencies - ) - ) - .union( - self._get_ancestor_nodes( - [target], include_dependencies=include_dependencies - ) - ) - .intersection(test_against_revs) - ) - - def _resolve_revision_number(self, id_): - if isinstance(id_, compat.string_types) and "@" in id_: - branch_label, id_ = id_.split("@", 1) - - elif id_ is not None and ( - ( - isinstance(id_, tuple) - and id_ - and not isinstance(id_[0], compat.string_types) - ) - or not isinstance(id_, compat.string_types 
+ (tuple,)) - ): - raise RevisionError( - "revision identifier %r is not a string; ensure database " - "driver settings are correct" % (id_,) - ) - - else: - branch_label = None - - # ensure map is loaded - self._revision_map - if id_ == "heads": - if branch_label: - return ( - self.filter_for_lineage(self.heads, branch_label), - branch_label, - ) - else: - return self._real_heads, branch_label - elif id_ == "head": - current_head = self.get_current_head(branch_label) - if current_head: - return (current_head,), branch_label - else: - return (), branch_label - elif id_ == "base" or id_ is None: - return (), branch_label - else: - return util.to_tuple(id_, default=None), branch_label - - def _relative_iterate( - self, - destination, - source, - is_upwards, - implicit_base, - inclusive, - assert_relative_length, - ): - if isinstance(destination, compat.string_types): - match = _relative_destination.match(destination) - if not match: - return None - else: - return None - - relative = int(match.group(3)) - symbol = match.group(2) - branch_label = match.group(1) - - reldelta = 1 if inclusive and not symbol else 0 - - if is_upwards: - if branch_label: - from_ = "%s@head" % branch_label - elif symbol: - if symbol.startswith("head"): - from_ = symbol - else: - from_ = "%s@head" % symbol - else: - from_ = "head" - to_ = source - else: - if branch_label: - to_ = "%s@base" % branch_label - elif symbol: - to_ = "%s@base" % symbol - else: - to_ = "base" - from_ = source - - revs = list( - self._iterate_revisions( - from_, to_, inclusive=inclusive, implicit_base=implicit_base - ) - ) - - if symbol: - if branch_label: - symbol_rev = self.get_revision( - "%s@%s" % (branch_label, symbol) - ) - else: - symbol_rev = self.get_revision(symbol) - if symbol.startswith("head"): - index = 0 - elif symbol == "base": - index = len(revs) - 1 - else: - range_ = compat.range(len(revs) - 1, 0, -1) - for index in range_: - if symbol_rev.revision == revs[index].revision: - break - else: - index = 0 - else: - index = 0 - if is_upwards: - revs = revs[index - relative - reldelta :] - if ( - not index - and assert_relative_length - and len(revs) < abs(relative - reldelta) - ): - raise RevisionError( - "Relative revision %s didn't " - "produce %d migrations" % (destination, abs(relative)) - ) - else: - revs = revs[0 : index - relative + reldelta] - if ( - not index - and assert_relative_length - and len(revs) != abs(relative) + reldelta - ): - raise RevisionError( - "Relative revision %s didn't " - "produce %d migrations" % (destination, abs(relative)) - ) - - return iter(revs) - - def iterate_revisions( - self, - upper, - lower, - implicit_base=False, - inclusive=False, - assert_relative_length=True, - select_for_downgrade=False, - ): - """Iterate through script revisions, starting at the given - upper revision identifier and ending at the lower. - - The traversal uses strictly the `down_revision` - marker inside each migration script, so - it is a requirement that upper >= lower, - else you'll get nothing back. - - The iterator yields :class:`.Revision` objects. 
- - """ - - relative_upper = self._relative_iterate( - upper, - lower, - True, - implicit_base, - inclusive, - assert_relative_length, - ) - if relative_upper: - return relative_upper - - relative_lower = self._relative_iterate( - lower, - upper, - False, - implicit_base, - inclusive, - assert_relative_length, - ) - if relative_lower: - return relative_lower - - return self._iterate_revisions( - upper, - lower, - inclusive=inclusive, - implicit_base=implicit_base, - select_for_downgrade=select_for_downgrade, - ) - - def _get_descendant_nodes( - self, - targets, - map_=None, - check=False, - omit_immediate_dependencies=False, - include_dependencies=True, - ): - - if omit_immediate_dependencies: - - def fn(rev): - if rev not in targets: - return rev._all_nextrev - else: - return rev.nextrev - - elif include_dependencies: - - def fn(rev): - return rev._all_nextrev - - else: - - def fn(rev): - return rev.nextrev - - return self._iterate_related_revisions( - fn, targets, map_=map_, check=check - ) - - def _get_ancestor_nodes( - self, targets, map_=None, check=False, include_dependencies=True - ): - - if include_dependencies: - - def fn(rev): - return rev._all_down_revisions - - else: - - def fn(rev): - return rev._versioned_down_revisions - - return self._iterate_related_revisions( - fn, targets, map_=map_, check=check - ) - - def _iterate_related_revisions(self, fn, targets, map_, check=False): - if map_ is None: - map_ = self._revision_map - - seen = set() - todo = collections.deque() - for target in targets: - - todo.append(target) - if check: - per_target = set() - - while todo: - rev = todo.pop() - if check: - per_target.add(rev) - - if rev in seen: - continue - seen.add(rev) - todo.extend(map_[rev_id] for rev_id in fn(rev)) - yield rev - if check: - overlaps = per_target.intersection(targets).difference( - [target] - ) - if overlaps: - raise RevisionError( - "Requested revision %s overlaps with " - "other requested revisions %s" - % ( - target.revision, - ", ".join(r.revision for r in overlaps), - ) - ) - - def _iterate_revisions( - self, - upper, - lower, - inclusive=True, - implicit_base=False, - select_for_downgrade=False, - ): - """iterate revisions from upper to lower. - - The traversal is depth-first within branches, and breadth-first - across branches as a whole. - - """ - - requested_lowers = self.get_revisions(lower) - - # some complexity to accommodate an iteration where some - # branches are starting from nothing, and others are starting - # from a given point. Additionally, if the bottom branch - # is specified using a branch identifier, then we limit operations - # to just that branch. 
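As a concrete illustration of the branch handling described in these comments, a hedged sketch of the traversal over a small forked history (revision ids invented), before the lower-branch bookkeeping that follows:

```
from alembic.script.revision import Revision, RevisionMap

# invented ids: b2 branches into c3/d4, which m5 merges back together
revs = [
    Revision("a1", None),
    Revision("b2", "a1"),
    Revision("c3", "b2"),
    Revision("d4", "b2"),
    Revision("m5", ("c3", "d4")),  # tuple down_revision = merge point
]
rmap = RevisionMap(lambda: iter(revs))

# depth-first within a branch, breadth-first across branches;
# e.g. ['m5', 'c3', 'd4', 'b2', 'a1'] (sibling-branch order may vary)
print([r.revision for r in rmap.iterate_revisions("m5", "base")])
```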
- - limit_to_lower_branch = isinstance( - lower, compat.string_types - ) and lower.endswith("@base") - - uppers = util.dedupe_tuple(self.get_revisions(upper)) - - if not uppers and not requested_lowers: - return - - upper_ancestors = set(self._get_ancestor_nodes(uppers, check=True)) - - if limit_to_lower_branch: - lowers = self.get_revisions(self._get_base_revisions(lower)) - elif implicit_base and requested_lowers: - lower_ancestors = set(self._get_ancestor_nodes(requested_lowers)) - lower_descendants = set( - self._get_descendant_nodes(requested_lowers) - ) - base_lowers = set() - candidate_lowers = upper_ancestors.difference( - lower_ancestors - ).difference(lower_descendants) - for rev in candidate_lowers: - for downrev in rev._all_down_revisions: - if self._revision_map[downrev] in candidate_lowers: - break - else: - base_lowers.add(rev) - lowers = base_lowers.union(requested_lowers) - elif implicit_base: - base_lowers = set(self.get_revisions(self._real_bases)) - lowers = base_lowers.union(requested_lowers) - elif not requested_lowers: - lowers = set(self.get_revisions(self._real_bases)) - else: - lowers = requested_lowers - - # represents all nodes we will produce - total_space = set( - rev.revision for rev in upper_ancestors - ).intersection( - rev.revision - for rev in self._get_descendant_nodes( - lowers, - check=True, - omit_immediate_dependencies=( - select_for_downgrade and requested_lowers - ), - ) - ) - - if not total_space: - # no nodes. determine if this is an invalid range - # or not. - start_from = set(requested_lowers) - start_from.update( - self._get_ancestor_nodes( - list(start_from), include_dependencies=True - ) - ) - - # determine all the current branch points represented - # by requested_lowers - start_from = self._filter_into_branch_heads(start_from) - - # if the requested start is one of those branch points, - # then just return empty set - if start_from.intersection(upper_ancestors): - return - else: - # otherwise, they requested nodes out of - # order - raise RangeNotAncestorError(lower, upper) - - # organize branch points to be consumed separately from - # member nodes - branch_todo = set( - rev - for rev in (self._revision_map[rev] for rev in total_space) - if rev._is_real_branch_point - and len(total_space.intersection(rev._all_nextrev)) > 1 - ) - - # it's not possible for any "uppers" to be in branch_todo, - # because the ._all_nextrev of those nodes is not in total_space - # assert not branch_todo.intersection(uppers) - - todo = collections.deque( - r for r in uppers if r.revision in total_space - ) - - # iterate for total_space being emptied out - total_space_modified = True - while total_space: - - if not total_space_modified: - raise RevisionError( - "Dependency resolution failed; iteration can't proceed" - ) - total_space_modified = False - # when everything non-branch pending is consumed, - # add to the todo any branch nodes that have no - # descendants left in the queue - if not todo: - todo.extendleft( - sorted( - ( - rev - for rev in branch_todo - if not rev._all_nextrev.intersection(total_space) - ), - # favor "revisioned" branch points before - # dependent ones - key=lambda rev: 0 if rev.is_branch_point else 1, - ) - ) - branch_todo.difference_update(todo) - # iterate nodes that are in the immediate todo - while todo: - rev = todo.popleft() - total_space.remove(rev.revision) - total_space_modified = True - - # do depth first for elements within branches, - # don't consume any actual branch nodes - todo.extendleft( - [ - 
self._revision_map[downrev] - for downrev in reversed(rev._all_down_revisions) - if self._revision_map[downrev] not in branch_todo - and downrev in total_space - ] - ) - - if not inclusive and rev in requested_lowers: - continue - yield rev - - assert not branch_todo - - -class Revision(object): - """Base class for revisioned objects. - - The :class:`.Revision` class is the base of the more public-facing - :class:`.Script` object, which represents a migration script. - The mechanics of revision management and traversal are encapsulated - within :class:`.Revision`, while :class:`.Script` applies this logic - to Python files in a version directory. - - """ - - nextrev = frozenset() - """following revisions, based on down_revision only.""" - - _all_nextrev = frozenset() - - revision = None - """The string revision number.""" - - down_revision = None - """The ``down_revision`` identifier(s) within the migration script. - - Note that the total set of "down" revisions is - down_revision + dependencies. - - """ - - dependencies = None - """Additional revisions which this revision is dependent on. - - From a migration standpoint, these dependencies are added to the - down_revision to form the full iteration. However, the separation - of down_revision from "dependencies" is to assist in navigating - a history that contains many branches, typically a multi-root scenario. - - """ - - branch_labels = None - """Optional string/tuple of symbolic names to apply to this - revision's branch""" - - @classmethod - def verify_rev_id(cls, revision): - illegal_chars = set(revision).intersection(_revision_illegal_chars) - if illegal_chars: - raise RevisionError( - "Character(s) '%s' not allowed in revision identifier '%s'" - % (", ".join(sorted(illegal_chars)), revision) - ) - - def __init__( - self, revision, down_revision, dependencies=None, branch_labels=None - ): - self.verify_rev_id(revision) - self.revision = revision - self.down_revision = tuple_rev_as_scalar(down_revision) - self.dependencies = tuple_rev_as_scalar(dependencies) - self._resolved_dependencies = () - self._orig_branch_labels = util.to_tuple(branch_labels, default=()) - self.branch_labels = set(self._orig_branch_labels) - - def __repr__(self): - args = [repr(self.revision), repr(self.down_revision)] - if self.dependencies: - args.append("dependencies=%r" % (self.dependencies,)) - if self.branch_labels: - args.append("branch_labels=%r" % (self.branch_labels,)) - return "%s(%s)" % (self.__class__.__name__, ", ".join(args)) - - def add_nextrev(self, revision): - self._all_nextrev = self._all_nextrev.union([revision.revision]) - if self.revision in revision._versioned_down_revisions: - self.nextrev = self.nextrev.union([revision.revision]) - - @property - def _all_down_revisions(self): - return ( - util.to_tuple(self.down_revision, default=()) - + self._resolved_dependencies - ) - - @property - def _versioned_down_revisions(self): - return util.to_tuple(self.down_revision, default=()) - - @property - def is_head(self): - """Return True if this :class:`.Revision` is a 'head' revision. - - This is determined based on whether any other :class:`.Script` - within the :class:`.ScriptDirectory` refers to this - :class:`.Script`. Multiple heads can be present. 
- - """ - return not bool(self.nextrev) - - @property - def _is_real_head(self): - return not bool(self._all_nextrev) - - @property - def is_base(self): - """Return True if this :class:`.Revision` is a 'base' revision.""" - - return self.down_revision is None - - @property - def _is_real_base(self): - """Return True if this :class:`.Revision` is a "real" base revision, - e.g. that it has no dependencies either.""" - - # we use self.dependencies here because this is called up - # in initialization where _real_dependencies isn't set up - # yet - return self.down_revision is None and self.dependencies is None - - @property - def is_branch_point(self): - """Return True if this :class:`.Script` is a branch point. - - A branchpoint is defined as a :class:`.Script` which is referred - to by more than one succeeding :class:`.Script`, that is more - than one :class:`.Script` has a `down_revision` identifier pointing - here. - - """ - return len(self.nextrev) > 1 - - @property - def _is_real_branch_point(self): - """Return True if this :class:`.Script` is a 'real' branch point, - taking into account dependencies as well. - - """ - return len(self._all_nextrev) > 1 - - @property - def is_merge_point(self): - """Return True if this :class:`.Script` is a merge point.""" - - return len(self._versioned_down_revisions) > 1 - - -def tuple_rev_as_scalar(rev): - if not rev: - return None - elif len(rev) == 1: - return rev[0] - else: - return rev diff --git a/venv/lib/python3.7/site-packages/alembic/script/write_hooks.py b/venv/lib/python3.7/site-packages/alembic/script/write_hooks.py deleted file mode 100644 index 61a6a27..0000000 --- a/venv/lib/python3.7/site-packages/alembic/script/write_hooks.py +++ /dev/null @@ -1,113 +0,0 @@ -import subprocess -import sys - -from .. import util -from ..util import compat - - -_registry = {} - - -def register(name): - """A function decorator that will register that function as a write hook. - - See the documentation linked below for an example. - - .. versionadded:: 1.2.0 - - .. seealso:: - - :ref:`post_write_hooks_custom` - - - """ - - def decorate(fn): - _registry[name] = fn - - return decorate - - -def _invoke(name, revision, options): - """Invokes the formatter registered for the given name. - - :param name: The name of a formatter in the registry - :param revision: A :class:`.MigrationRevision` instance - :param options: A dict containing kwargs passed to the - specified formatter. - :raises: :class:`alembic.util.CommandError` - """ - try: - hook = _registry[name] - except KeyError: - compat.raise_from_cause( - util.CommandError("No formatter with name '%s' registered" % name) - ) - else: - return hook(revision, options) - - -def _run_hooks(path, hook_config): - """Invoke hooks for a generated revision. 
- - """ - - from .base import _split_on_space_comma - - names = _split_on_space_comma.split(hook_config.get("hooks", "")) - - for name in names: - if not name: - continue - opts = { - key[len(name) + 1 :]: hook_config[key] - for key in hook_config - if key.startswith(name + ".") - } - opts["_hook_name"] = name - try: - type_ = opts["type"] - except KeyError: - compat.raise_from_cause( - util.CommandError( - "Key %s.type is required for post write hook %r" - % (name, name) - ) - ) - else: - util.status( - 'Running post write hook "%s"' % name, - _invoke, - type_, - path, - opts, - newline=True, - ) - - -@register("console_scripts") -def console_scripts(path, options): - import pkg_resources - - try: - entrypoint_name = options["entrypoint"] - except KeyError: - compat.raise_from_cause( - util.CommandError( - "Key %s.entrypoint is required for post write hook %r" - % (options["_hook_name"], options["_hook_name"]) - ) - ) - iter_ = pkg_resources.iter_entry_points("console_scripts", entrypoint_name) - impl = next(iter_) - options = options.get("options", "") - subprocess.run( - [ - sys.executable, - "-c", - "import %s; %s()" - % (impl.module_name, ".".join((impl.module_name,) + impl.attrs)), - path, - ] - + options.split() - ) diff --git a/venv/lib/python3.7/site-packages/alembic/templates/generic/README b/venv/lib/python3.7/site-packages/alembic/templates/generic/README deleted file mode 100644 index 98e4f9c..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/generic/README +++ /dev/null @@ -1 +0,0 @@ -Generic single-database configuration. \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/alembic/templates/generic/__pycache__/env.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/templates/generic/__pycache__/env.cpython-37.pyc deleted file mode 100644 index 2c56202..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/templates/generic/__pycache__/env.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/templates/generic/alembic.ini.mako b/venv/lib/python3.7/site-packages/alembic/templates/generic/alembic.ini.mako deleted file mode 100644 index 281794f..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/generic/alembic.ini.mako +++ /dev/null @@ -1,85 +0,0 @@ -# A generic, single database configuration. - -[alembic] -# path to migration scripts -script_location = ${script_location} - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# timezone to use when rendering the date -# within the migration file as well as the filename. -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to ${script_location}/versions. 
When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -sqlalchemy.url = driver://user:pass@localhost/dbname - - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks=black -# black.type=console_scripts -# black.entrypoint=black -# black.options=-l 79 - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/venv/lib/python3.7/site-packages/alembic/templates/generic/env.py b/venv/lib/python3.7/site-packages/alembic/templates/generic/env.py deleted file mode 100644 index 70518a2..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/generic/env.py +++ /dev/null @@ -1,77 +0,0 @@ -from logging.config import fileConfig - -from sqlalchemy import engine_from_config -from sqlalchemy import pool - -from alembic import context - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = None - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - url = config.get_main_option("sqlalchemy.url") - context.configure( - url=url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. 
- - """ - connectable = engine_from_config( - config.get_section(config.config_ini_section), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - with connectable.connect() as connection: - context.configure( - connection=connection, target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/venv/lib/python3.7/site-packages/alembic/templates/generic/script.py.mako b/venv/lib/python3.7/site-packages/alembic/templates/generic/script.py.mako deleted file mode 100644 index 2c01563..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/generic/script.py.mako +++ /dev/null @@ -1,24 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/venv/lib/python3.7/site-packages/alembic/templates/multidb/README b/venv/lib/python3.7/site-packages/alembic/templates/multidb/README deleted file mode 100644 index 5db219f..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/multidb/README +++ /dev/null @@ -1 +0,0 @@ -Rudimentary multi-database configuration. \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/alembic/templates/multidb/__pycache__/env.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/templates/multidb/__pycache__/env.cpython-37.pyc deleted file mode 100644 index 90bb37a..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/templates/multidb/__pycache__/env.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/templates/multidb/alembic.ini.mako b/venv/lib/python3.7/site-packages/alembic/templates/multidb/alembic.ini.mako deleted file mode 100644 index 0b0919e..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/multidb/alembic.ini.mako +++ /dev/null @@ -1,90 +0,0 @@ -# a multi-database configuration. - -[alembic] -# path to migration scripts -script_location = ${script_location} - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# timezone to use when rendering the date -# within the migration file as well as the filename. -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to ${script_location}/versions. 
When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -databases = engine1, engine2 - -[engine1] -sqlalchemy.url = driver://user:pass@localhost/dbname - -[engine2] -sqlalchemy.url = driver://user:pass@localhost/dbname2 - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks=black -# black.type=console_scripts -# black.entrypoint=black -# black.options=-l 79 - -# Logging configuration -[loggers] -keys = root,sqlalchemy,alembic - -[handlers] -keys = console - -[formatters] -keys = generic - -[logger_root] -level = WARN -handlers = console -qualname = - -[logger_sqlalchemy] -level = WARN -handlers = -qualname = sqlalchemy.engine - -[logger_alembic] -level = INFO -handlers = -qualname = alembic - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatter_generic] -format = %(levelname)-5.5s [%(name)s] %(message)s -datefmt = %H:%M:%S diff --git a/venv/lib/python3.7/site-packages/alembic/templates/multidb/env.py b/venv/lib/python3.7/site-packages/alembic/templates/multidb/env.py deleted file mode 100644 index f1a9a9a..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/multidb/env.py +++ /dev/null @@ -1,139 +0,0 @@ -import logging -from logging.config import fileConfig -import re - -from sqlalchemy import engine_from_config -from sqlalchemy import pool - -from alembic import context - -USE_TWOPHASE = False - -# this is the Alembic Config object, which provides -# access to the values within the .ini file in use. -config = context.config - -# Interpret the config file for Python logging. -# This line sets up loggers basically. -fileConfig(config.config_file_name) -logger = logging.getLogger("alembic.env") - -# gather section names referring to different -# databases. These are named "engine1", "engine2" -# in the sample .ini file. -db_names = config.get_main_option("databases") - -# add your model's MetaData objects here -# for 'autogenerate' support. These must be set -# up to hold just those tables targeting a -# particular database. table.tometadata() may be -# helpful here in case a "copy" of -# a MetaData is needed. -# from myapp import mymodel -# target_metadata = { -# 'engine1':mymodel.metadata1, -# 'engine2':mymodel.metadata2 -# } -target_metadata = {} - -# other values from the config, defined by the needs of env.py, -# can be acquired: -# my_important_option = config.get_main_option("my_important_option") -# ... etc. - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - # for the --sql use case, run migrations for each URL into - # individual files. 
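As the ``target_metadata`` comments near the top of this template suggest, autogenerate support across several databases keeps one MetaData per engine, keyed by the section names from the ``databases`` option; a hedged sketch of that wiring, with invented table names (the per-URL loop continues below):

```
from sqlalchemy import Column, Integer, MetaData, Table

metadata1 = MetaData()
metadata2 = MetaData()
Table("users", metadata1, Column("id", Integer, primary_key=True))
Table("audit_log", metadata2, Column("id", Integer, primary_key=True))

# keyed by the section names configured under "databases"
target_metadata = {
    "engine1": metadata1,
    "engine2": metadata2,
}
```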
- - engines = {} - for name in re.split(r",\s*", db_names): - engines[name] = rec = {} - rec["url"] = context.config.get_section_option(name, "sqlalchemy.url") - - for name, rec in engines.items(): - logger.info("Migrating database %s" % name) - file_ = "%s.sql" % name - logger.info("Writing output to %s" % file_) - with open(file_, "w") as buffer: - context.configure( - url=rec["url"], - output_buffer=buffer, - target_metadata=target_metadata.get(name), - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - with context.begin_transaction(): - context.run_migrations(engine_name=name) - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - - # for the direct-to-DB use case, start a transaction on all - # engines, then run all migrations, then commit all transactions. - - engines = {} - for name in re.split(r",\s*", db_names): - engines[name] = rec = {} - rec["engine"] = engine_from_config( - context.config.get_section(name), - prefix="sqlalchemy.", - poolclass=pool.NullPool, - ) - - for name, rec in engines.items(): - engine = rec["engine"] - rec["connection"] = conn = engine.connect() - - if USE_TWOPHASE: - rec["transaction"] = conn.begin_twophase() - else: - rec["transaction"] = conn.begin() - - try: - for name, rec in engines.items(): - logger.info("Migrating database %s" % name) - context.configure( - connection=rec["connection"], - upgrade_token="%s_upgrades" % name, - downgrade_token="%s_downgrades" % name, - target_metadata=target_metadata.get(name), - ) - context.run_migrations(engine_name=name) - - if USE_TWOPHASE: - for rec in engines.values(): - rec["transaction"].prepare() - - for rec in engines.values(): - rec["transaction"].commit() - except: - for rec in engines.values(): - rec["transaction"].rollback() - raise - finally: - for rec in engines.values(): - rec["connection"].close() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/venv/lib/python3.7/site-packages/alembic/templates/multidb/script.py.mako b/venv/lib/python3.7/site-packages/alembic/templates/multidb/script.py.mako deleted file mode 100644 index c3970a5..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/multidb/script.py.mako +++ /dev/null @@ -1,45 +0,0 @@ -<%! -import re - -%>"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. -revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(engine_name): - globals()["upgrade_%s" % engine_name]() - - -def downgrade(engine_name): - globals()["downgrade_%s" % engine_name]() - -<% - db_names = config.get_main_option("databases") -%> - -## generate an "upgrade_() / downgrade_()" function -## for each database name in the ini file. 
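The ``% for`` loop just below renders one such pair per configured name; for the ``engine1``/``engine2`` databases used in this template's sample ini, the rendered functions come out roughly as:

```
def upgrade_engine1():
    pass


def downgrade_engine1():
    pass


def upgrade_engine2():
    pass


def downgrade_engine2():
    pass
```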
- -% for db_name in re.split(r',\s*', db_names): - -def upgrade_${db_name}(): - ${context.get("%s_upgrades" % db_name, "pass")} - - -def downgrade_${db_name}(): - ${context.get("%s_downgrades" % db_name, "pass")} - -% endfor diff --git a/venv/lib/python3.7/site-packages/alembic/templates/pylons/README b/venv/lib/python3.7/site-packages/alembic/templates/pylons/README deleted file mode 100644 index ed3c28e..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/pylons/README +++ /dev/null @@ -1 +0,0 @@ -Configuration that reads from a Pylons project environment. \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/alembic/templates/pylons/__pycache__/env.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/templates/pylons/__pycache__/env.cpython-37.pyc deleted file mode 100644 index abafec9..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/templates/pylons/__pycache__/env.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/templates/pylons/alembic.ini.mako b/venv/lib/python3.7/site-packages/alembic/templates/pylons/alembic.ini.mako deleted file mode 100644 index 70fead0..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/pylons/alembic.ini.mako +++ /dev/null @@ -1,51 +0,0 @@ -# a Pylons configuration. - -[alembic] -# path to migration scripts -script_location = ${script_location} - -# template used to generate migration files -# file_template = %%(rev)s_%%(slug)s - -# timezone to use when rendering the date -# within the migration file as well as the filename. -# string value is passed to dateutil.tz.gettz() -# leave blank for localtime -# timezone = - -# max length of characters to apply to the -# "slug" field -# truncate_slug_length = 40 - -# set to 'true' to run the environment during -# the 'revision' command, regardless of autogenerate -# revision_environment = false - -# set to 'true' to allow .pyc and .pyo files without -# a source .py file to be detected as revisions in the -# versions/ directory -# sourceless = false - -# version location specification; this defaults -# to ${script_location}/versions. When using multiple version -# directories, initial revisions must be specified with --version-path -# version_locations = %(here)s/bar %(here)s/bat ${script_location}/versions - -# the output encoding used when revision files -# are written from script.py.mako -# output_encoding = utf-8 - -[post_write_hooks] -# post_write_hooks defines scripts or Python functions that are run -# on newly generated revision scripts. See the documentation for further -# detail and examples - -# format using "black" - use the console_scripts runner, against the "black" entrypoint -# hooks=black -# black.type=console_scripts -# black.entrypoint=black -# black.options=-l 79 - -pylons_config_file = ./development.ini - -# that's it ! \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/alembic/templates/pylons/env.py b/venv/lib/python3.7/site-packages/alembic/templates/pylons/env.py deleted file mode 100644 index b2d610d..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/pylons/env.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Pylons bootstrap environment. - -Place 'pylons_config_file' into alembic.ini, and the application will -be loaded from there. 
- -""" -from logging.config import fileConfig - -from paste.deploy import loadapp - -from alembic import context - - -try: - # if pylons app already in, don't create a new app - from pylons import config as pylons_config - - pylons_config["__file__"] -except: - config = context.config - # can use config['__file__'] here, i.e. the Pylons - # ini file, instead of alembic.ini - config_file = config.get_main_option("pylons_config_file") - fileConfig(config_file) - wsgi_app = loadapp("config:%s" % config_file, relative_to=".") - - -# customize this section for non-standard engine configurations. -meta = __import__( - "%s.model.meta" % wsgi_app.config["pylons.package"] -).model.meta - -# add your model's MetaData object here -# for 'autogenerate' support -# from myapp import mymodel -# target_metadata = mymodel.Base.metadata -target_metadata = None - - -def run_migrations_offline(): - """Run migrations in 'offline' mode. - - This configures the context with just a URL - and not an Engine, though an Engine is acceptable - here as well. By skipping the Engine creation - we don't even need a DBAPI to be available. - - Calls to context.execute() here emit the given string to the - script output. - - """ - context.configure( - url=meta.engine.url, - target_metadata=target_metadata, - literal_binds=True, - dialect_opts={"paramstyle": "named"}, - ) - with context.begin_transaction(): - context.run_migrations() - - -def run_migrations_online(): - """Run migrations in 'online' mode. - - In this scenario we need to create an Engine - and associate a connection with the context. - - """ - # specify here how the engine is acquired - # engine = meta.engine - raise NotImplementedError("Please specify engine connectivity here") - - with engine.connect() as connection: # noqa - context.configure( - connection=connection, target_metadata=target_metadata - ) - - with context.begin_transaction(): - context.run_migrations() - - -if context.is_offline_mode(): - run_migrations_offline() -else: - run_migrations_online() diff --git a/venv/lib/python3.7/site-packages/alembic/templates/pylons/script.py.mako b/venv/lib/python3.7/site-packages/alembic/templates/pylons/script.py.mako deleted file mode 100644 index 2c01563..0000000 --- a/venv/lib/python3.7/site-packages/alembic/templates/pylons/script.py.mako +++ /dev/null @@ -1,24 +0,0 @@ -"""${message} - -Revision ID: ${up_revision} -Revises: ${down_revision | comma,n} -Create Date: ${create_date} - -""" -from alembic import op -import sqlalchemy as sa -${imports if imports else ""} - -# revision identifiers, used by Alembic. 
-revision = ${repr(up_revision)} -down_revision = ${repr(down_revision)} -branch_labels = ${repr(branch_labels)} -depends_on = ${repr(depends_on)} - - -def upgrade(): - ${upgrades if upgrades else "pass"} - - -def downgrade(): - ${downgrades if downgrades else "pass"} diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__init__.py b/venv/lib/python3.7/site-packages/alembic/testing/__init__.py deleted file mode 100644 index f009da9..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from sqlalchemy.testing import config # noqa -from sqlalchemy.testing import emits_warning # noqa -from sqlalchemy.testing import engines # noqa -from sqlalchemy.testing import mock # noqa -from sqlalchemy.testing import provide_metadata # noqa -from sqlalchemy.testing import uses_deprecated # noqa -from sqlalchemy.testing.config import requirements as requires # noqa - -from alembic import util # noqa -from . import exclusions # noqa -from .assertions import assert_raises # noqa -from .assertions import assert_raises_message # noqa -from .assertions import emits_python_deprecation_warning # noqa -from .assertions import eq_ # noqa -from .assertions import eq_ignore_whitespace # noqa -from .assertions import is_ # noqa -from .assertions import is_false # noqa -from .assertions import is_not_ # noqa -from .assertions import is_true # noqa -from .assertions import ne_ # noqa -from .fixture_functions import combinations # noqa -from .fixture_functions import fixture # noqa -from .fixtures import TestBase # noqa -from .util import resolve_lambda # noqa diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index e6e265f..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/assertions.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/assertions.cpython-37.pyc deleted file mode 100644 index cd6d1f9..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/assertions.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/env.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/env.cpython-37.pyc deleted file mode 100644 index faf6b70..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/env.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/exclusions.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/exclusions.cpython-37.pyc deleted file mode 100644 index 5bf793b..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/exclusions.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/fixture_functions.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/fixture_functions.cpython-37.pyc deleted file mode 100644 index 0c47af4..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/fixture_functions.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/fixtures.cpython-37.pyc 
b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/fixtures.cpython-37.pyc deleted file mode 100644 index e4f084a..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/fixtures.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/requirements.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/requirements.cpython-37.pyc deleted file mode 100644 index 63b0828..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/requirements.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/util.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/util.cpython-37.pyc deleted file mode 100644 index 8857b39..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/__pycache__/util.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/assertions.py b/venv/lib/python3.7/site-packages/alembic/testing/assertions.py deleted file mode 100644 index a78e5e8..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/assertions.py +++ /dev/null @@ -1,112 +0,0 @@ -from __future__ import absolute_import - -import re - -from sqlalchemy import util -from sqlalchemy.engine import default -from sqlalchemy.testing.assertions import _expect_warnings -from sqlalchemy.testing.assertions import eq_ # noqa -from sqlalchemy.testing.assertions import is_ # noqa -from sqlalchemy.testing.assertions import is_false # noqa -from sqlalchemy.testing.assertions import is_not_ # noqa -from sqlalchemy.testing.assertions import is_true # noqa -from sqlalchemy.testing.assertions import ne_ # noqa -from sqlalchemy.util import decorator - -from ..util.compat import py3k - - -def assert_raises(except_cls, callable_, *args, **kw): - try: - callable_(*args, **kw) - success = False - except except_cls: - success = True - - # assert outside the block so it works for AssertionError too ! - assert success, "Callable did not raise an exception" - - -def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): - try: - callable_(*args, **kwargs) - assert False, "Callable did not raise an exception" - except except_cls as e: - assert re.search(msg, util.text_type(e), re.UNICODE), "%r !~ %s" % ( - msg, - e, - ) - print(util.text_type(e).encode("utf-8")) - - -def eq_ignore_whitespace(a, b, msg=None): - # sqlalchemy.testing.assertion has this function - # but not with the special "!U" detection part - - a = re.sub(r"^\s+?|\n", "", a) - a = re.sub(r" {2,}", " ", a) - b = re.sub(r"^\s+?|\n", "", b) - b = re.sub(r" {2,}", " ", b) - - # convert for unicode string rendering, - # using special escape character "!U" - if py3k: - b = re.sub(r"!U", "", b) - else: - b = re.sub(r"!U", "u", b) - - assert a == b, msg or "%r != %r" % (a, b) - - -_dialect_mods = {} - - -def _get_dialect(name): - if name is None or name == "default": - return default.DefaultDialect() - else: - try: - dialect_mod = _dialect_mods[name] - except KeyError: - dialect_mod = getattr( - __import__("sqlalchemy.dialects.%s" % name).dialects, name - ) - _dialect_mods[name] = dialect_mod - d = dialect_mod.dialect() - if name == "postgresql": - d.implicit_returning = True - elif name == "mssql": - d.legacy_schema_aliasing = False - return d - - -def expect_warnings(*messages, **kw): - """Context manager which expects one or more warnings. 
- - With no arguments, squelches all SAWarnings emitted via - sqlalchemy.util.warn and sqlalchemy.util.warn_limited. Otherwise - pass string expressions that will match selected warnings via regex; - all non-matching warnings are sent through. - - The expect version **asserts** that the warnings were in fact seen. - - Note that the test suite sets SAWarning warnings to raise exceptions. - - """ - return _expect_warnings(Warning, messages, **kw) - - -def emits_python_deprecation_warning(*messages): - """Decorator form of expect_warnings(). - - Note that emits_warning does **not** assert that the warnings - were in fact seen. - - """ - - @decorator - def decorate(fn, *args, **kw): - with _expect_warnings(DeprecationWarning, assert_=False, *messages): - return fn(*args, **kw) - - return decorate diff --git a/venv/lib/python3.7/site-packages/alembic/testing/env.py b/venv/lib/python3.7/site-packages/alembic/testing/env.py deleted file mode 100644 index 473c73e..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/env.py +++ /dev/null @@ -1,506 +0,0 @@ -#!coding: utf-8 - -import os -import shutil -import textwrap - -from sqlalchemy.testing import engines -from sqlalchemy.testing import provision - -from .. import util -from ..script import Script -from ..script import ScriptDirectory -from ..util.compat import get_current_bytecode_suffixes -from ..util.compat import has_pep3147 -from ..util.compat import u - - -def _get_staging_directory(): - if provision.FOLLOWER_IDENT: - return "scratch_%s" % provision.FOLLOWER_IDENT - else: - return "scratch" - - -def staging_env(create=True, template="generic", sourceless=False): - from alembic import command, script - - cfg = _testing_config() - if create: - - path = os.path.join(_get_staging_directory(), "scripts") - assert not os.path.exists(path), ( - "staging directory %s already exists; poor cleanup?" % path - ) - - command.init(cfg, path, template=template) - if sourceless: - try: - # do an import so that a .pyc/.pyo is generated. - util.load_python_file(path, "env.py") - except AttributeError: - # we don't have the migration context set up yet - # so running the .env py throws this exception. - # theoretically we could be using py_compiler here to - # generate .pyc/.pyo without importing but not really - # worth it. 
- pass - assert sourceless in ( - "pep3147_envonly", - "simple", - "pep3147_everything", - ), sourceless - make_sourceless( - os.path.join(path, "env.py"), - "pep3147" if "pep3147" in sourceless else "simple", - ) - - sc = script.ScriptDirectory.from_config(cfg) - return sc - - -def clear_staging_env(): - from sqlalchemy.testing import engines - - engines.testing_reaper.close_all() - shutil.rmtree(_get_staging_directory(), True) - - -def script_file_fixture(txt): - dir_ = os.path.join(_get_staging_directory(), "scripts") - path = os.path.join(dir_, "script.py.mako") - with open(path, "w") as f: - f.write(txt) - - -def env_file_fixture(txt): - dir_ = os.path.join(_get_staging_directory(), "scripts") - txt = ( - """ -from alembic import context - -config = context.config -""" - + txt - ) - - path = os.path.join(dir_, "env.py") - pyc_path = util.pyc_file_from_path(path) - if pyc_path: - os.unlink(pyc_path) - - with open(path, "w") as f: - f.write(txt) - - -def _sqlite_file_db(tempname="foo.db"): - dir_ = os.path.join(_get_staging_directory(), "scripts") - url = "sqlite:///%s/%s" % (dir_, tempname) - return engines.testing_engine(url=url) - - -def _sqlite_testing_config(sourceless=False): - dir_ = os.path.join(_get_staging_directory(), "scripts") - url = "sqlite:///%s/foo.db" % dir_ - - return _write_config_file( - """ -[alembic] -script_location = %s -sqlalchemy.url = %s -sourceless = %s - -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - """ - % (dir_, url, "true" if sourceless else "false") - ) - - -def _multi_dir_testing_config(sourceless=False, extra_version_location=""): - dir_ = os.path.join(_get_staging_directory(), "scripts") - url = "sqlite:///%s/foo.db" % dir_ - - return _write_config_file( - """ -[alembic] -script_location = %s -sqlalchemy.url = %s -sourceless = %s -version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s - -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - """ - % ( - dir_, - url, - "true" if sourceless else "false", - extra_version_location, - ) - ) - - -def _no_sql_testing_config(dialect="postgresql", directives=""): - """use a postgresql url with no host so that - connections guaranteed to fail""" - dir_ = os.path.join(_get_staging_directory(), "scripts") - return _write_config_file( - """ -[alembic] -script_location = %s -sqlalchemy.url = %s:// -%s - -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - -""" - % (dir_, dialect, directives) - ) - - -def _write_config_file(text): - cfg = _testing_config() - with open(cfg.config_file_name, "w") as f: - f.write(text) - return cfg - - -def _testing_config(): - from alembic.config import Config - - if not 
os.access(_get_staging_directory(), os.F_OK): - os.mkdir(_get_staging_directory()) - return Config(os.path.join(_get_staging_directory(), "test_alembic.ini")) - - -def write_script( - scriptdir, rev_id, content, encoding="ascii", sourceless=False -): - old = scriptdir.revision_map.get_revision(rev_id) - path = old.path - - content = textwrap.dedent(content) - if encoding: - content = content.encode(encoding) - with open(path, "wb") as fp: - fp.write(content) - pyc_path = util.pyc_file_from_path(path) - if pyc_path: - os.unlink(pyc_path) - script = Script._from_path(scriptdir, path) - old = scriptdir.revision_map.get_revision(script.revision) - if old.down_revision != script.down_revision: - raise Exception( - "Can't change down_revision " "on a refresh operation." - ) - scriptdir.revision_map.add_revision(script, _replace=True) - - if sourceless: - make_sourceless( - path, "pep3147" if sourceless == "pep3147_everything" else "simple" - ) - - -def make_sourceless(path, style): - - import py_compile - - py_compile.compile(path) - - if style == "simple" and has_pep3147(): - pyc_path = util.pyc_file_from_path(path) - suffix = get_current_bytecode_suffixes()[0] - filepath, ext = os.path.splitext(path) - simple_pyc_path = filepath + suffix - shutil.move(pyc_path, simple_pyc_path) - pyc_path = simple_pyc_path - elif style == "pep3147" and not has_pep3147(): - raise NotImplementedError() - else: - assert style in ("pep3147", "simple") - pyc_path = util.pyc_file_from_path(path) - - assert os.access(pyc_path, os.F_OK) - - os.unlink(path) - - -def three_rev_fixture(cfg): - a = util.rev_id() - b = util.rev_id() - c = util.rev_id() - - script = ScriptDirectory.from_config(cfg) - script.generate_revision(a, "revision a", refresh=True, head="base") - write_script( - script, - a, - """\ -"Rev A" -revision = '%s' -down_revision = None - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 1") - - -def downgrade(): - op.execute("DROP STEP 1") - -""" - % a, - ) - - script.generate_revision(b, "revision b", refresh=True, head=a) - write_script( - script, - b, - u( - """# coding: utf-8 -"Rev B, méil, %3" -revision = '{}' -down_revision = '{}' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 2") - - -def downgrade(): - op.execute("DROP STEP 2") - -""" - ).format(b, a), - encoding="utf-8", - ) - - script.generate_revision(c, "revision c", refresh=True, head=b) - write_script( - script, - c, - """\ -"Rev C" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 3") - - -def downgrade(): - op.execute("DROP STEP 3") - -""" - % (c, b), - ) - return a, b, c - - -def multi_heads_fixture(cfg, a, b, c): - """Create a multiple head fixture from the three-revs fixture""" - - # a->b->c - # -> d -> e - # -> f - d = util.rev_id() - e = util.rev_id() - f = util.rev_id() - - script = ScriptDirectory.from_config(cfg) - script.generate_revision( - d, "revision d from b", head=b, splice=True, refresh=True - ) - write_script( - script, - d, - """\ -"Rev D" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 4") - - -def downgrade(): - op.execute("DROP STEP 4") - -""" - % (d, b), - ) - - script.generate_revision( - e, "revision e from d", head=d, splice=True, refresh=True - ) - write_script( - script, - e, - """\ -"Rev E" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 5") - - -def downgrade(): - op.execute("DROP STEP 
5") - -""" - % (e, d), - ) - - script.generate_revision( - f, "revision f from b", head=b, splice=True, refresh=True - ) - write_script( - script, - f, - """\ -"Rev F" -revision = '%s' -down_revision = '%s' - -from alembic import op - - -def upgrade(): - op.execute("CREATE STEP 6") - - -def downgrade(): - op.execute("DROP STEP 6") - -""" - % (f, b), - ) - - return d, e, f - - -def _multidb_testing_config(engines): - """alembic.ini fixture to work exactly with the 'multidb' template""" - - dir_ = os.path.join(_get_staging_directory(), "scripts") - - databases = ", ".join(engines.keys()) - engines = "\n\n".join( - "[%s]\n" "sqlalchemy.url = %s" % (key, value.url) - for key, value in engines.items() - ) - - return _write_config_file( - """ -[alembic] -script_location = %s -sourceless = false - -databases = %s - -%s -[loggers] -keys = root - -[handlers] -keys = console - -[logger_root] -level = WARN -handlers = console -qualname = - -[handler_console] -class = StreamHandler -args = (sys.stderr,) -level = NOTSET -formatter = generic - -[formatters] -keys = generic - -[formatter_generic] -format = %%(levelname)-5.5s [%%(name)s] %%(message)s -datefmt = %%H:%%M:%%S - """ - % (dir_, databases, engines) - ) diff --git a/venv/lib/python3.7/site-packages/alembic/testing/exclusions.py b/venv/lib/python3.7/site-packages/alembic/testing/exclusions.py deleted file mode 100644 index 91f2d5b..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/exclusions.py +++ /dev/null @@ -1,484 +0,0 @@ -# testing/exclusions.py -# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -import contextlib -import operator -import re - -from sqlalchemy import util as sqla_util -from sqlalchemy.util import decorator - -from . import config -from . import fixture_functions -from .. 
import util -from ..util.compat import inspect_getargspec - - -def skip_if(predicate, reason=None): - rule = compound() - pred = _as_predicate(predicate, reason) - rule.skips.add(pred) - return rule - - -def fails_if(predicate, reason=None): - rule = compound() - pred = _as_predicate(predicate, reason) - rule.fails.add(pred) - return rule - - -class compound(object): - def __init__(self): - self.fails = set() - self.skips = set() - self.tags = set() - self.combinations = {} - - def __add__(self, other): - return self.add(other) - - def with_combination(self, **kw): - copy = compound() - copy.fails.update(self.fails) - copy.skips.update(self.skips) - copy.tags.update(self.tags) - copy.combinations.update((f, kw) for f in copy.fails) - copy.combinations.update((s, kw) for s in copy.skips) - return copy - - def add(self, *others): - copy = compound() - copy.fails.update(self.fails) - copy.skips.update(self.skips) - copy.tags.update(self.tags) - for other in others: - copy.fails.update(other.fails) - copy.skips.update(other.skips) - copy.tags.update(other.tags) - return copy - - def not_(self): - copy = compound() - copy.fails.update(NotPredicate(fail) for fail in self.fails) - copy.skips.update(NotPredicate(skip) for skip in self.skips) - copy.tags.update(self.tags) - return copy - - @property - def enabled(self): - return self.enabled_for_config(config._current) - - def enabled_for_config(self, config): - for predicate in self.skips.union(self.fails): - if predicate(config): - return False - else: - return True - - def matching_config_reasons(self, config): - return [ - predicate._as_string(config) - for predicate in self.skips.union(self.fails) - if predicate(config) - ] - - def include_test(self, include_tags, exclude_tags): - return bool( - not self.tags.intersection(exclude_tags) - and (not include_tags or self.tags.intersection(include_tags)) - ) - - def _extend(self, other): - self.skips.update(other.skips) - self.fails.update(other.fails) - self.tags.update(other.tags) - self.combinations.update(other.combinations) - - def __call__(self, fn): - if hasattr(fn, "_sa_exclusion_extend"): - fn._sa_exclusion_extend._extend(self) - return fn - - @decorator - def decorate(fn, *args, **kw): - return self._do(config._current, fn, *args, **kw) - - decorated = decorate(fn) - decorated._sa_exclusion_extend = self - return decorated - - @contextlib.contextmanager - def fail_if(self): - all_fails = compound() - all_fails.fails.update(self.skips.union(self.fails)) - - try: - yield - except Exception as ex: - all_fails._expect_failure(config._current, ex, None) - else: - all_fails._expect_success(config._current, None) - - def _check_combinations(self, combination, predicate): - if predicate in self.combinations: - for k, v in combination: - if ( - k in self.combinations[predicate] - and self.combinations[predicate][k] != v - ): - return False - return True - - def _do(self, cfg, fn, *args, **kw): - if len(args) > 1: - insp = inspect_getargspec(fn) - combination = list(zip(insp.args[1:], args[1:])) - else: - combination = None - - for skip in self.skips: - if self._check_combinations(combination, skip) and skip(cfg): - msg = "'%s' : %s" % ( - fixture_functions.get_current_test_name(), - skip._as_string(cfg), - ) - config.skip_test(msg) - - try: - return_value = fn(*args, **kw) - except Exception as ex: - self._expect_failure(cfg, ex, combination, name=fn.__name__) - else: - self._expect_success(cfg, combination, name=fn.__name__) - return return_value - - def _expect_failure(self, config, ex, 
combination, name="block"): - for fail in self.fails: - if self._check_combinations(combination, fail) and fail(config): - if sqla_util.py2k: - str_ex = unicode(ex).encode( # noqa: F821 - "utf-8", errors="ignore" - ) - else: - str_ex = str(ex) - print( - ( - "%s failed as expected (%s): %s " - % (name, fail._as_string(config), str_ex) - ) - ) - break - else: - util.raise_from_cause(ex) - - def _expect_success(self, config, combination, name="block"): - if not self.fails: - return - - for fail in self.fails: - if self._check_combinations(combination, fail) and fail(config): - raise AssertionError( - "Unexpected success for '%s' (%s)" - % ( - name, - " and ".join( - fail._as_string(config) for fail in self.fails - ), - ) - ) - - -def requires_tag(tagname): - return tags([tagname]) - - -def tags(tagnames): - comp = compound() - comp.tags.update(tagnames) - return comp - - -def only_if(predicate, reason=None): - predicate = _as_predicate(predicate) - return skip_if(NotPredicate(predicate), reason) - - -def succeeds_if(predicate, reason=None): - predicate = _as_predicate(predicate) - return fails_if(NotPredicate(predicate), reason) - - -class Predicate(object): - @classmethod - def as_predicate(cls, predicate, description=None): - if isinstance(predicate, compound): - return cls.as_predicate(predicate.enabled_for_config, description) - elif isinstance(predicate, Predicate): - if description and predicate.description is None: - predicate.description = description - return predicate - elif isinstance(predicate, (list, set)): - return OrPredicate( - [cls.as_predicate(pred) for pred in predicate], description - ) - elif isinstance(predicate, tuple): - return SpecPredicate(*predicate) - elif isinstance(predicate, sqla_util.string_types): - tokens = re.match( - r"([\+\w]+)\s*(?:(>=|==|!=|<=|<|>)\s*([\d\.]+))?", predicate - ) - if not tokens: - raise ValueError( - "Couldn't locate DB name in predicate: %r" % predicate - ) - db = tokens.group(1) - op = tokens.group(2) - spec = ( - tuple(int(d) for d in tokens.group(3).split(".")) - if tokens.group(3) - else None - ) - - return SpecPredicate(db, op, spec, description=description) - elif callable(predicate): - return LambdaPredicate(predicate, description) - else: - assert False, "unknown predicate type: %s" % predicate - - def _format_description(self, config, negate=False): - bool_ = self(config) - if negate: - bool_ = not negate - return self.description % { - "driver": config.db.url.get_driver_name() - if config - else "", - "database": config.db.url.get_backend_name() - if config - else "", - "doesnt_support": "doesn't support" if bool_ else "does support", - "does_support": "does support" if bool_ else "doesn't support", - } - - def _as_string(self, config=None, negate=False): - raise NotImplementedError() - - -class BooleanPredicate(Predicate): - def __init__(self, value, description=None): - self.value = value - self.description = description or "boolean %s" % value - - def __call__(self, config): - return self.value - - def _as_string(self, config, negate=False): - return self._format_description(config, negate=negate) - - -class SpecPredicate(Predicate): - def __init__(self, db, op=None, spec=None, description=None): - self.db = db - self.op = op - self.spec = spec - self.description = description - - _ops = { - "<": operator.lt, - ">": operator.gt, - "==": operator.eq, - "!=": operator.ne, - "<=": operator.le, - ">=": operator.ge, - "in": operator.contains, - "between": lambda val, pair: val >= pair[0] and val <= pair[1], - } - - def 
__call__(self, config): - engine = config.db - - if "+" in self.db: - dialect, driver = self.db.split("+") - else: - dialect, driver = self.db, None - - if dialect and engine.name != dialect: - return False - if driver is not None and engine.driver != driver: - return False - - if self.op is not None: - assert driver is None, "DBAPI version specs not supported yet" - - version = _server_version(engine) - oper = ( - hasattr(self.op, "__call__") and self.op or self._ops[self.op] - ) - return oper(version, self.spec) - else: - return True - - def _as_string(self, config, negate=False): - if self.description is not None: - return self._format_description(config) - elif self.op is None: - if negate: - return "not %s" % self.db - else: - return "%s" % self.db - else: - if negate: - return "not %s %s %s" % (self.db, self.op, self.spec) - else: - return "%s %s %s" % (self.db, self.op, self.spec) - - -class LambdaPredicate(Predicate): - def __init__(self, lambda_, description=None, args=None, kw=None): - spec = inspect_getargspec(lambda_) - if not spec[0]: - self.lambda_ = lambda db: lambda_() - else: - self.lambda_ = lambda_ - self.args = args or () - self.kw = kw or {} - if description: - self.description = description - elif lambda_.__doc__: - self.description = lambda_.__doc__ - else: - self.description = "custom function" - - def __call__(self, config): - return self.lambda_(config) - - def _as_string(self, config, negate=False): - return self._format_description(config) - - -class NotPredicate(Predicate): - def __init__(self, predicate, description=None): - self.predicate = predicate - self.description = description - - def __call__(self, config): - return not self.predicate(config) - - def _as_string(self, config, negate=False): - if self.description: - return self._format_description(config, not negate) - else: - return self.predicate._as_string(config, not negate) - - -class OrPredicate(Predicate): - def __init__(self, predicates, description=None): - self.predicates = predicates - self.description = description - - def __call__(self, config): - for pred in self.predicates: - if pred(config): - return True - return False - - def _eval_str(self, config, negate=False): - if negate: - conjunction = " and " - else: - conjunction = " or " - return conjunction.join( - p._as_string(config, negate=negate) for p in self.predicates - ) - - def _negation_str(self, config): - if self.description is not None: - return "Not " + self._format_description(config) - else: - return self._eval_str(config, negate=True) - - def _as_string(self, config, negate=False): - if negate: - return self._negation_str(config) - else: - if self.description is not None: - return self._format_description(config) - else: - return self._eval_str(config) - - -_as_predicate = Predicate.as_predicate - - -def _is_excluded(db, op, spec): - return SpecPredicate(db, op, spec)(config._current) - - -def _server_version(engine): - """Return a server_version_info tuple.""" - - # force metadata to be retrieved - conn = engine.connect() - version = getattr(engine.dialect, "server_version_info", None) - if version is None: - version = () - conn.close() - return version - - -def db_spec(*dbs): - return OrPredicate([Predicate.as_predicate(db) for db in dbs]) - - -def open(): # noqa - return skip_if(BooleanPredicate(False, "mark as execute")) - - -def closed(): - return skip_if(BooleanPredicate(True, "marked as skip")) - - -def fails(reason=None): - return fails_if(BooleanPredicate(True, reason or "expected to fail")) - - -@decorator -def 
future(fn, *arg): - return fails_if(LambdaPredicate(fn), "Future feature") - - -def fails_on(db, reason=None): - return fails_if(db, reason) - - -def fails_on_everything_except(*dbs): - return succeeds_if(OrPredicate([Predicate.as_predicate(db) for db in dbs])) - - -def skip(db, reason=None): - return skip_if(db, reason) - - -def only_on(dbs, reason=None): - return only_if( - OrPredicate( - [Predicate.as_predicate(db, reason) for db in util.to_list(dbs)] - ) - ) - - -def exclude(db, op, spec, reason=None): - return skip_if(SpecPredicate(db, op, spec), reason) - - -def against(config, *queries): - assert queries, "no queries sent!" - return OrPredicate([Predicate.as_predicate(query) for query in queries])( - config - ) diff --git a/venv/lib/python3.7/site-packages/alembic/testing/fixture_functions.py b/venv/lib/python3.7/site-packages/alembic/testing/fixture_functions.py deleted file mode 100644 index 2640693..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/fixture_functions.py +++ /dev/null @@ -1,79 +0,0 @@ -_fixture_functions = None # installed by plugin_base - - -def combinations(*comb, **kw): - r"""Deliver multiple versions of a test based on positional combinations. - - This is a facade over pytest.mark.parametrize. - - - :param \*comb: argument combinations. These are tuples that will be passed - positionally to the decorated function. - - :param argnames: optional list of argument names. These are the names - of the arguments in the test function that correspond to the entries - in each argument tuple. pytest.mark.parametrize requires this, however - the combinations function will derive it automatically if not present - by using ``inspect.getfullargspec(fn).args[1:]``. Note this assumes the - first argument is "self" which is discarded. - - :param id\_: optional id template. This is a string template that - describes how the "id" for each parameter set should be defined, if any. - The number of characters in the template should match the number of - entries in each argument tuple. Each character describes how the - corresponding entry in the argument tuple should be handled, as far as - whether or not it is included in the arguments passed to the function, as - well as if it is included in the tokens used to create the id of the - parameter set. - - If omitted, the argument combinations are passed to parametrize as is. If - passed, each argument combination is turned into a pytest.param() object, - mapping the elements of the argument tuple to produce an id based on a - character value in the same position within the string template using the - following scheme:: - - i - the given argument is a string that is part of the id only, don't - pass it as an argument - - n - the given argument should be passed and it should be added to the - id by calling the .__name__ attribute - - r - the given argument should be passed and it should be added to the - id by calling repr() - - s - the given argument should be passed and it should be added to the - id by calling str() - - a - (argument) the given argument should be passed and it should not - be used to generated the id - - e.g.:: - - @testing.combinations( - (operator.eq, "eq"), - (operator.ne, "ne"), - (operator.gt, "gt"), - (operator.lt, "lt"), - id_="na" - ) - def test_operator(self, opfunc, name): - pass - - The above combination will call ``.__name__`` on the first member of - each tuple and use that as the "id" to pytest.param(). 
- - - """ - return _fixture_functions.combinations(*comb, **kw) - - -def fixture(*arg, **kw): - return _fixture_functions.fixture(*arg, **kw) - - -def get_current_test_name(): - return _fixture_functions.get_current_test_name() - - -def skip_test(msg): - raise _fixture_functions.skip_test_exception(msg) diff --git a/venv/lib/python3.7/site-packages/alembic/testing/fixtures.py b/venv/lib/python3.7/site-packages/alembic/testing/fixtures.py deleted file mode 100644 index 2951455..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/fixtures.py +++ /dev/null @@ -1,264 +0,0 @@ -# coding: utf-8 -from contextlib import contextmanager -import io -import re - -from sqlalchemy import Column -from sqlalchemy import inspect -from sqlalchemy import MetaData -from sqlalchemy import String -from sqlalchemy import Table -from sqlalchemy import text -from sqlalchemy.testing import config -from sqlalchemy.testing import mock -from sqlalchemy.testing.assertions import eq_ -from sqlalchemy.testing.fixtures import TestBase # noqa - -import alembic -from .assertions import _get_dialect -from ..environment import EnvironmentContext -from ..migration import MigrationContext -from ..operations import Operations -from ..util import compat -from ..util.compat import configparser -from ..util.compat import string_types -from ..util.compat import text_type -from ..util.sqla_compat import create_mock_engine - -testing_config = configparser.ConfigParser() -testing_config.read(["test.cfg"]) - - -def capture_db(dialect="postgresql://"): - buf = [] - - def dump(sql, *multiparams, **params): - buf.append(str(sql.compile(dialect=engine.dialect))) - - engine = create_mock_engine(dialect, dump) - return engine, buf - - -_engs = {} - - -@contextmanager -def capture_context_buffer(**kw): - if kw.pop("bytes_io", False): - buf = io.BytesIO() - else: - buf = io.StringIO() - - kw.update({"dialect_name": "sqlite", "output_buffer": buf}) - conf = EnvironmentContext.configure - - def configure(*arg, **opt): - opt.update(**kw) - return conf(*arg, **opt) - - with mock.patch.object(EnvironmentContext, "configure", configure): - yield buf - - -@contextmanager -def capture_engine_context_buffer(**kw): - from .env import _sqlite_file_db - from sqlalchemy import event - - buf = compat.StringIO() - - eng = _sqlite_file_db() - - conn = eng.connect() - - @event.listens_for(conn, "before_cursor_execute") - def bce(conn, cursor, statement, parameters, context, executemany): - buf.write(statement + "\n") - - kw.update({"connection": conn}) - conf = EnvironmentContext.configure - - def configure(*arg, **opt): - opt.update(**kw) - return conf(*arg, **opt) - - with mock.patch.object(EnvironmentContext, "configure", configure): - yield buf - - -def op_fixture( - dialect="default", - as_sql=False, - naming_convention=None, - literal_binds=False, - native_boolean=None, -): - - opts = {} - if naming_convention: - opts["target_metadata"] = MetaData(naming_convention=naming_convention) - - class buffer_(object): - def __init__(self): - self.lines = [] - - def write(self, msg): - msg = msg.strip() - msg = re.sub(r"[\n\t]", "", msg) - if as_sql: - # the impl produces soft tabs, - # so search for blocks of 4 spaces - msg = re.sub(r" ", "", msg) - msg = re.sub(r"\;\n*$", "", msg) - - self.lines.append(msg) - - def flush(self): - pass - - buf = buffer_() - - class ctx(MigrationContext): - def get_buf(self): - return buf - - def clear_assertions(self): - buf.lines[:] = [] - - def assert_(self, *sql): - # TODO: make this more flexible about - # 
whitespace and such - eq_(buf.lines, [re.sub(r"[\n\t]", "", s) for s in sql]) - - def assert_contains(self, sql): - for stmt in buf.lines: - if re.sub(r"[\n\t]", "", sql) in stmt: - return - else: - assert False, "Could not locate fragment %r in %r" % ( - sql, - buf.lines, - ) - - if as_sql: - opts["as_sql"] = as_sql - if literal_binds: - opts["literal_binds"] = literal_binds - if dialect == "mariadb": - ctx_dialect = _get_dialect("mysql") - ctx_dialect.server_version_info = (10, 0, 0, "MariaDB") - - else: - ctx_dialect = _get_dialect(dialect) - if native_boolean is not None: - ctx_dialect.supports_native_boolean = native_boolean - # this is new as of SQLAlchemy 1.2.7 and is used by SQL Server, - # which breaks assumptions in the alembic test suite - ctx_dialect.non_native_boolean_check_constraint = True - if not as_sql: - - def execute(stmt, *multiparam, **param): - if isinstance(stmt, string_types): - stmt = text(stmt) - assert stmt.supports_execution - sql = text_type(stmt.compile(dialect=ctx_dialect)) - - buf.write(sql) - - connection = mock.Mock(dialect=ctx_dialect, execute=execute) - else: - opts["output_buffer"] = buf - connection = None - context = ctx(ctx_dialect, connection, opts) - - alembic.op._proxy = Operations(context) - return context - - -class AlterColRoundTripFixture(object): - - # since these tests are about syntax, use more recent SQLAlchemy as some of - # the type / server default compare logic might not work on older - # SQLAlchemy versions as seems to be the case for SQLAlchemy 1.1 on Oracle - - __requires__ = ("alter_column", "sqlalchemy_12") - - def setUp(self): - self.conn = config.db.connect() - self.ctx = MigrationContext.configure(self.conn) - self.op = Operations(self.ctx) - self.metadata = MetaData() - - def _compare_type(self, t1, t2): - c1 = Column("q", t1) - c2 = Column("q", t2) - assert not self.ctx.impl.compare_type( - c1, c2 - ), "Type objects %r and %r didn't compare as equivalent" % (t1, t2) - - def _compare_server_default(self, t1, s1, t2, s2): - c1 = Column("q", t1, server_default=s1) - c2 = Column("q", t2, server_default=s2) - assert not self.ctx.impl.compare_server_default( - c1, c2, s2, s1 - ), "server defaults %r and %r didn't compare as equivalent" % (s1, s2) - - def tearDown(self): - self.metadata.drop_all(self.conn) - self.conn.close() - - def _run_alter_col(self, from_, to_, compare=None): - column = Column( - from_.get("name", "colname"), - from_.get("type", String(10)), - nullable=from_.get("nullable", True), - server_default=from_.get("server_default", None), - # comment=from_.get("comment", None) - ) - t = Table("x", self.metadata, column) - - t.create(self.conn) - insp = inspect(self.conn) - old_col = insp.get_columns("x")[0] - - # TODO: conditional comment support - self.op.alter_column( - "x", - column.name, - existing_type=column.type, - existing_server_default=column.server_default - if column.server_default is not None - else False, - existing_nullable=True if column.nullable else False, - # existing_comment=column.comment, - nullable=to_.get("nullable", None), - # modify_comment=False, - server_default=to_.get("server_default", False), - new_column_name=to_.get("name", None), - type_=to_.get("type", None), - ) - - insp = inspect(self.conn) - new_col = insp.get_columns("x")[0] - - if compare is None: - compare = to_ - - eq_( - new_col["name"], - compare["name"] if "name" in compare else column.name, - ) - self._compare_type( - new_col["type"], compare.get("type", old_col["type"]) - ) - eq_(new_col["nullable"], 
compare.get("nullable", column.nullable)) - self._compare_server_default( - new_col["type"], - new_col.get("default", None), - compare.get("type", old_col["type"]), - compare["server_default"].text - if "server_default" in compare - else column.server_default.arg.text - if column.server_default is not None - else None, - ) diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__init__.py b/venv/lib/python3.7/site-packages/alembic/testing/plugin/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 92be5d2..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/bootstrap.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/bootstrap.cpython-37.pyc deleted file mode 100644 index 52879a9..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/bootstrap.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/plugin_base.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/plugin_base.cpython-37.pyc deleted file mode 100644 index 67f74d8..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/plugin_base.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/pytestplugin.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/pytestplugin.cpython-37.pyc deleted file mode 100644 index c978f06..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/testing/plugin/__pycache__/pytestplugin.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/bootstrap.py b/venv/lib/python3.7/site-packages/alembic/testing/plugin/bootstrap.py deleted file mode 100644 index 8200ec1..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/plugin/bootstrap.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Bootstrapper for test framework plugins. - -This is vendored from SQLAlchemy so that we can use local overrides -for plugin_base.py and pytestplugin.py. 
- -""" - - -import os -import sys - - -bootstrap_file = locals()["bootstrap_file"] -to_bootstrap = locals()["to_bootstrap"] - - -def load_file_as_module(name): - path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) - if sys.version_info >= (3, 3): - from importlib import machinery - - mod = machinery.SourceFileLoader(name, path).load_module() - else: - import imp - - mod = imp.load_source(name, path) - return mod - - -if to_bootstrap == "pytest": - sys.modules["sqla_plugin_base"] = load_file_as_module("plugin_base") - sys.modules["sqla_pytestplugin"] = load_file_as_module("pytestplugin") -else: - raise Exception("unknown bootstrap: %s" % to_bootstrap) # noqa diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/plugin_base.py b/venv/lib/python3.7/site-packages/alembic/testing/plugin/plugin_base.py deleted file mode 100644 index 2d5e95a..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/plugin/plugin_base.py +++ /dev/null @@ -1,125 +0,0 @@ -"""vendored plugin_base functions from the most recent SQLAlchemy versions. - -Alembic tests need to run on older versions of SQLAlchemy that don't -necessarily have all the latest testing fixtures. - -""" -from __future__ import absolute_import - -import abc -import sys - -from sqlalchemy.testing.plugin.plugin_base import * # noqa -from sqlalchemy.testing.plugin.plugin_base import post -from sqlalchemy.testing.plugin.plugin_base import post_begin as sqla_post_begin -from sqlalchemy.testing.plugin.plugin_base import stop_test_class as sqla_stc - -py3k = sys.version_info >= (3, 0) - - -if py3k: - - ABC = abc.ABC -else: - - class ABC(object): - __metaclass__ = abc.ABCMeta - - -def post_begin(): - sqla_post_begin() - - import warnings - - try: - import pytest - except ImportError: - pass - else: - warnings.filterwarnings( - "once", category=pytest.PytestDeprecationWarning - ) - - from sqlalchemy import exc - - if hasattr(exc, "RemovedIn20Warning"): - warnings.filterwarnings( - "error", - category=exc.RemovedIn20Warning, - message=".*Engine.execute", - ) - warnings.filterwarnings( - "error", - category=exc.RemovedIn20Warning, - message=".*Passing a string", - ) - - -# override selected SQLAlchemy pytest hooks with vendored functionality -def stop_test_class(cls): - sqla_stc(cls) - import os - from alembic.testing.env import _get_staging_directory - - assert not os.path.exists(_get_staging_directory()), ( - "staging directory %s was not cleaned up" % _get_staging_directory() - ) - - -def want_class(name, cls): - from sqlalchemy.testing import config - from sqlalchemy.testing import fixtures - - if not issubclass(cls, fixtures.TestBase): - return False - elif name.startswith("_"): - return False - elif ( - config.options.backend_only - and not getattr(cls, "__backend__", False) - and not getattr(cls, "__sparse_backend__", False) - ): - return False - else: - return True - - -@post -def _init_symbols(options, file_config): - from sqlalchemy.testing import config - from alembic.testing import fixture_functions as alembic_config - - config._fixture_functions = ( - alembic_config._fixture_functions - ) = _fixture_fn_class() - - -class FixtureFunctions(ABC): - @abc.abstractmethod - def skip_test_exception(self, *arg, **kw): - raise NotImplementedError() - - @abc.abstractmethod - def combinations(self, *args, **kw): - raise NotImplementedError() - - @abc.abstractmethod - def param_ident(self, *args, **kw): - raise NotImplementedError() - - @abc.abstractmethod - def fixture(self, *arg, **kw): - raise NotImplementedError() - 
- def get_current_test_name(self): - raise NotImplementedError() - - -_fixture_fn_class = None - - -def set_fixture_functions(fixture_fn_class): - from sqlalchemy.testing.plugin import plugin_base - - global _fixture_fn_class - _fixture_fn_class = plugin_base._fixture_fn_class = fixture_fn_class diff --git a/venv/lib/python3.7/site-packages/alembic/testing/plugin/pytestplugin.py b/venv/lib/python3.7/site-packages/alembic/testing/plugin/pytestplugin.py deleted file mode 100644 index 1c8be05..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/plugin/pytestplugin.py +++ /dev/null @@ -1,232 +0,0 @@ -"""vendored pytestplugin functions from the most recent SQLAlchemy versions. - -Alembic tests need to run on older versions of SQLAlchemy that don't -necessarily have all the latest testing fixtures. - -""" -try: - # installed by bootstrap.py - import sqla_plugin_base as plugin_base -except ImportError: - # assume we're a package, use traditional import - from . import plugin_base - -import inspect -import itertools -import operator -import os -import re -import sys - -import pytest -from sqlalchemy.testing.plugin.pytestplugin import * # noqa -from sqlalchemy.testing.plugin.pytestplugin import pytest_configure as spc - - -# override selected SQLAlchemy pytest hooks with vendored functionality -def pytest_configure(config): - spc(config) - - plugin_base.set_fixture_functions(PytestFixtureFunctions) - - -def pytest_pycollect_makeitem(collector, name, obj): - - if inspect.isclass(obj) and plugin_base.want_class(name, obj): - - # in pytest 5.4.0 - # return [ - # pytest.Class.from_parent(collector, - # name=parametrize_cls.__name__) - # for parametrize_cls in _parametrize_cls(collector.module, obj) - # ] - - return [ - pytest.Class(parametrize_cls.__name__, parent=collector) - for parametrize_cls in _parametrize_cls(collector.module, obj) - ] - elif ( - inspect.isfunction(obj) - and isinstance(collector, pytest.Instance) - and plugin_base.want_method(collector.cls, obj) - ): - # None means, fall back to default logic, which includes - # method-level parametrize - return None - else: - # empty list means skip this item - return [] - - -_current_class = None - - -def _parametrize_cls(module, cls): - """implement a class-based version of pytest parametrize.""" - - if "_sa_parametrize" not in cls.__dict__: - return [cls] - - _sa_parametrize = cls._sa_parametrize - classes = [] - for full_param_set in itertools.product( - *[params for argname, params in _sa_parametrize] - ): - cls_variables = {} - - for argname, param in zip( - [_sa_param[0] for _sa_param in _sa_parametrize], full_param_set - ): - if not argname: - raise TypeError("need argnames for class-based combinations") - argname_split = re.split(r",\s*", argname) - for arg, val in zip(argname_split, param.values): - cls_variables[arg] = val - parametrized_name = "_".join( - # token is a string, but in py2k py.test is giving us a unicode, - # so call str() on it. 
- str(re.sub(r"\W", "", token)) - for param in full_param_set - for token in param.id.split("-") - ) - name = "%s_%s" % (cls.__name__, parametrized_name) - newcls = type.__new__(type, name, (cls,), cls_variables) - setattr(module, name, newcls) - classes.append(newcls) - return classes - - -def getargspec(fn): - if sys.version_info.major == 3: - return inspect.getfullargspec(fn) - else: - return inspect.getargspec(fn) - - -class PytestFixtureFunctions(plugin_base.FixtureFunctions): - def skip_test_exception(self, *arg, **kw): - return pytest.skip.Exception(*arg, **kw) - - _combination_id_fns = { - "i": lambda obj: obj, - "r": repr, - "s": str, - "n": operator.attrgetter("__name__"), - } - - def combinations(self, *arg_sets, **kw): - """facade for pytest.mark.paramtrize. - - Automatically derives argument names from the callable which in our - case is always a method on a class with positional arguments. - - ids for parameter sets are derived using an optional template. - - """ - from alembic.testing import exclusions - - if sys.version_info.major == 3: - if len(arg_sets) == 1 and hasattr(arg_sets[0], "__next__"): - arg_sets = list(arg_sets[0]) - else: - if len(arg_sets) == 1 and hasattr(arg_sets[0], "next"): - arg_sets = list(arg_sets[0]) - - argnames = kw.pop("argnames", None) - - exclusion_combinations = [] - - def _filter_exclusions(args): - result = [] - gathered_exclusions = [] - for a in args: - if isinstance(a, exclusions.compound): - gathered_exclusions.append(a) - else: - result.append(a) - - exclusion_combinations.extend( - [(exclusion, result) for exclusion in gathered_exclusions] - ) - return result - - id_ = kw.pop("id_", None) - - if id_: - _combination_id_fns = self._combination_id_fns - - # because itemgetter is not consistent for one argument vs. - # multiple, make it multiple in all cases and use a slice - # to omit the first argument - _arg_getter = operator.itemgetter( - 0, - *[ - idx - for idx, char in enumerate(id_) - if char in ("n", "r", "s", "a") - ] - ) - fns = [ - (operator.itemgetter(idx), _combination_id_fns[char]) - for idx, char in enumerate(id_) - if char in _combination_id_fns - ] - arg_sets = [ - pytest.param( - *_arg_getter(_filter_exclusions(arg))[1:], - id="-".join( - comb_fn(getter(arg)) for getter, comb_fn in fns - ) - ) - for arg in [ - (arg,) if not isinstance(arg, tuple) else arg - for arg in arg_sets - ] - ] - else: - # ensure using pytest.param so that even a 1-arg paramset - # still needs to be a tuple. 
otherwise paramtrize tries to - # interpret a single arg differently than tuple arg - arg_sets = [ - pytest.param(*_filter_exclusions(arg)) - for arg in [ - (arg,) if not isinstance(arg, tuple) else arg - for arg in arg_sets - ] - ] - - def decorate(fn): - if inspect.isclass(fn): - if "_sa_parametrize" not in fn.__dict__: - fn._sa_parametrize = [] - fn._sa_parametrize.append((argnames, arg_sets)) - return fn - else: - if argnames is None: - _argnames = getargspec(fn).args[1:] - else: - _argnames = argnames - - if exclusion_combinations: - for exclusion, combination in exclusion_combinations: - combination_by_kw = { - argname: val - for argname, val in zip(_argnames, combination) - } - exclusion = exclusion.with_combination( - **combination_by_kw - ) - fn = exclusion(fn) - return pytest.mark.parametrize(_argnames, arg_sets)(fn) - - return decorate - - def param_ident(self, *parameters): - ident = parameters[0] - return pytest.param(*parameters[1:], id=ident) - - def fixture(self, *arg, **kw): - return pytest.fixture(*arg, **kw) - - def get_current_test_name(self): - return os.environ.get("PYTEST_CURRENT_TEST") diff --git a/venv/lib/python3.7/site-packages/alembic/testing/requirements.py b/venv/lib/python3.7/site-packages/alembic/testing/requirements.py deleted file mode 100644 index 4804646..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/requirements.py +++ /dev/null @@ -1,159 +0,0 @@ -import sys - -from sqlalchemy.testing.requirements import Requirements - -from alembic import util -from alembic.testing import exclusions -from alembic.util import sqla_compat - - -class SuiteRequirements(Requirements): - @property - def schemas(self): - """Target database must support external schemas, and have one - named 'test_schema'.""" - - return exclusions.open() - - @property - def autocommit_isolation(self): - """target database should support 'AUTOCOMMIT' isolation level""" - - return exclusions.closed() - - @property - def unique_constraint_reflection(self): - def doesnt_have_check_uq_constraints(config): - from sqlalchemy import inspect - - # temporary - if config.db.name == "oracle": - return True - - insp = inspect(config.db) - try: - insp.get_unique_constraints("x") - except NotImplementedError: - return True - except TypeError: - return True - except Exception: - pass - return False - - return exclusions.skip_if(doesnt_have_check_uq_constraints) - - @property - def foreign_key_match(self): - return exclusions.open() - - @property - def check_constraints_w_enforcement(self): - """Target database must support check constraints - and also enforce them.""" - - return exclusions.open() - - @property - def reflects_pk_names(self): - return exclusions.closed() - - @property - def reflects_fk_options(self): - return exclusions.closed() - - @property - def sqlalchemy_issue_3740(self): - """Fixes percent sign escaping for paramstyles that don't require it""" - return exclusions.skip_if( - lambda config: not util.sqla_120, - "SQLAlchemy 1.2 or greater required", - ) - - @property - def sqlalchemy_12(self): - return exclusions.skip_if( - lambda config: not util.sqla_1216, - "SQLAlchemy 1.2.16 or greater required", - ) - - @property - def sqlalchemy_13(self): - return exclusions.skip_if( - lambda config: not util.sqla_13, - "SQLAlchemy 1.3 or greater required", - ) - - @property - def sqlalchemy_14(self): - return exclusions.skip_if( - lambda config: not util.sqla_14, - "SQLAlchemy 1.4 or greater required", - ) - - @property - def sqlalchemy_1115(self): - return exclusions.skip_if( - 
lambda config: not util.sqla_1115, - "SQLAlchemy 1.1.15 or greater required", - ) - - @property - def sqlalchemy_110(self): - return exclusions.skip_if( - lambda config: not util.sqla_110, - "SQLAlchemy 1.1.0 or greater required", - ) - - @property - def sqlalchemy_issue_4436(self): - def check(config): - vers = sqla_compat._vers - - if vers == (1, 3, 0, "b1"): - return True - elif vers >= (1, 2, 16): - return False - else: - return True - - return exclusions.skip_if( - check, "SQLAlchemy 1.2.16, 1.3.0b2 or greater required" - ) - - @property - def python3(self): - return exclusions.skip_if( - lambda: sys.version_info < (3,), "Python version 3.xx is required." - ) - - @property - def pep3147(self): - - return exclusions.only_if(lambda config: util.compat.has_pep3147()) - - @property - def comments(self): - return exclusions.only_if( - lambda config: sqla_compat._dialect_supports_comments( - config.db.dialect - ) - ) - - @property - def comments_api(self): - return exclusions.only_if(lambda config: util.sqla_120) - - @property - def alter_column(self): - return exclusions.open() - - @property - def computed_columns(self): - return exclusions.closed() - - @property - def computed_columns_api(self): - return exclusions.only_if( - exclusions.BooleanPredicate(sqla_compat.has_computed) - ) diff --git a/venv/lib/python3.7/site-packages/alembic/testing/util.py b/venv/lib/python3.7/site-packages/alembic/testing/util.py deleted file mode 100644 index 3e76645..0000000 --- a/venv/lib/python3.7/site-packages/alembic/testing/util.py +++ /dev/null @@ -1,97 +0,0 @@ -# testing/util.py -# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors -# -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import types - - -def flag_combinations(*combinations): - """A facade around @testing.combinations() oriented towards boolean - keyword-based arguments. - - Basically generates a nice looking identifier based on the keywords - and also sets up the argument names. - - E.g.:: - - @testing.flag_combinations( - dict(lazy=False, passive=False), - dict(lazy=True, passive=False), - dict(lazy=False, passive=True), - dict(lazy=False, passive=True, raiseload=True), - ) - - - would result in:: - - @testing.combinations( - ('', False, False, False), - ('lazy', True, False, False), - ('lazy_passive', True, True, False), - ('lazy_passive', True, True, True), - id_='iaaa', - argnames='lazy,passive,raiseload' - ) - - """ - from sqlalchemy.testing import config - - keys = set() - - for d in combinations: - keys.update(d) - - keys = sorted(keys) - - return config.combinations( - *[ - ("_".join(k for k in keys if d.get(k, False)),) - + tuple(d.get(k, False) for k in keys) - for d in combinations - ], - id_="i" + ("a" * len(keys)), - argnames=",".join(keys) - ) - - -def resolve_lambda(__fn, **kw): - """Given a no-arg lambda and a namespace, return a new lambda that - has all the values filled in. - - This is used so that we can have module-level fixtures that - refer to instance-level variables using lambdas. - - """ - - glb = dict(__fn.__globals__) - glb.update(kw) - new_fn = types.FunctionType(__fn.__code__, glb) - return new_fn() - - -def metadata_fixture(ddl="function"): - """Provide MetaData for a pytest fixture.""" - - from sqlalchemy.testing import config - from . 
import fixture_functions - - def decorate(fn): - def run_ddl(self): - from sqlalchemy import schema - - metadata = self.metadata = schema.MetaData() - try: - result = fn(self, metadata) - metadata.create_all(config.db) - # TODO: - # somehow get a per-function dml erase fixture here - yield result - finally: - metadata.drop_all(config.db) - - return fixture_functions.fixture(scope=ddl)(run_ddl) - - return decorate diff --git a/venv/lib/python3.7/site-packages/alembic/util/__init__.py b/venv/lib/python3.7/site-packages/alembic/util/__init__.py deleted file mode 100644 index cc86111..0000000 --- a/venv/lib/python3.7/site-packages/alembic/util/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -from .compat import raise_from_cause # noqa -from .exc import CommandError -from .langhelpers import _with_legacy_names # noqa -from .langhelpers import asbool # noqa -from .langhelpers import dedupe_tuple # noqa -from .langhelpers import Dispatcher # noqa -from .langhelpers import immutabledict # noqa -from .langhelpers import memoized_property # noqa -from .langhelpers import ModuleClsProxy # noqa -from .langhelpers import rev_id # noqa -from .langhelpers import to_list # noqa -from .langhelpers import to_tuple # noqa -from .langhelpers import unique_list # noqa -from .messaging import err # noqa -from .messaging import format_as_comma # noqa -from .messaging import msg # noqa -from .messaging import obfuscate_url_pw # noqa -from .messaging import status # noqa -from .messaging import warn # noqa -from .messaging import write_outstream # noqa -from .pyfiles import coerce_resource_to_filename # noqa -from .pyfiles import edit # noqa -from .pyfiles import load_python_file # noqa -from .pyfiles import pyc_file_from_path # noqa -from .pyfiles import template_to_file # noqa -from .sqla_compat import has_computed # noqa -from .sqla_compat import sqla_110 # noqa -from .sqla_compat import sqla_1115 # noqa -from .sqla_compat import sqla_120 # noqa -from .sqla_compat import sqla_1216 # noqa -from .sqla_compat import sqla_13 # noqa -from .sqla_compat import sqla_14 # noqa - - -if not sqla_110: - raise CommandError("SQLAlchemy 1.1.0 or greater is required. 
") diff --git a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/util/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index bdd7dd2..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/compat.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/util/__pycache__/compat.cpython-37.pyc deleted file mode 100644 index 569487d..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/compat.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/exc.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/util/__pycache__/exc.cpython-37.pyc deleted file mode 100644 index dff328f..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/exc.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/langhelpers.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/util/__pycache__/langhelpers.cpython-37.pyc deleted file mode 100644 index 392f9bb..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/langhelpers.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/messaging.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/util/__pycache__/messaging.cpython-37.pyc deleted file mode 100644 index 65c05ed..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/messaging.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/pyfiles.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/util/__pycache__/pyfiles.cpython-37.pyc deleted file mode 100644 index f10fa89..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/pyfiles.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/sqla_compat.cpython-37.pyc b/venv/lib/python3.7/site-packages/alembic/util/__pycache__/sqla_compat.cpython-37.pyc deleted file mode 100644 index f7884ac..0000000 Binary files a/venv/lib/python3.7/site-packages/alembic/util/__pycache__/sqla_compat.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/alembic/util/compat.py b/venv/lib/python3.7/site-packages/alembic/util/compat.py deleted file mode 100644 index c9c2a58..0000000 --- a/venv/lib/python3.7/site-packages/alembic/util/compat.py +++ /dev/null @@ -1,371 +0,0 @@ -import collections -import inspect -import io -import sys - -py27 = sys.version_info >= (2, 7) -py2k = sys.version_info.major < 3 -py3k = sys.version_info.major >= 3 -py35 = sys.version_info >= (3, 5) -py36 = sys.version_info >= (3, 6) - - -ArgSpec = collections.namedtuple( - "ArgSpec", ["args", "varargs", "keywords", "defaults"] -) - - -def inspect_getargspec(func): - """getargspec based on fully vendored getfullargspec from Python 3.3.""" - - if inspect.ismethod(func): - func = func.__func__ - if not inspect.isfunction(func): - raise TypeError("{!r} is not a Python function".format(func)) - - co = func.__code__ - if not inspect.iscode(co): - raise TypeError("{!r} is not a code object".format(co)) - - nargs = co.co_argcount - names = co.co_varnames - nkwargs = co.co_kwonlyargcount if py3k else 0 - args = list(names[:nargs]) - - nargs += nkwargs - varargs = 
None - if co.co_flags & inspect.CO_VARARGS: - varargs = co.co_varnames[nargs] - nargs = nargs + 1 - varkw = None - if co.co_flags & inspect.CO_VARKEYWORDS: - varkw = co.co_varnames[nargs] - - return ArgSpec(args, varargs, varkw, func.__defaults__) - - -if py3k: - from io import StringIO -else: - # accepts strings - from StringIO import StringIO # noqa - -if py3k: - import builtins as compat_builtins - - string_types = (str,) - binary_type = bytes - text_type = str - - def callable(fn): # noqa - return hasattr(fn, "__call__") - - def u(s): - return s - - def ue(s): - return s - - range = range # noqa -else: - import __builtin__ as compat_builtins - - string_types = (basestring,) # noqa - binary_type = str - text_type = unicode # noqa - callable = callable # noqa - - def u(s): - return unicode(s, "utf-8") # noqa - - def ue(s): - return unicode(s, "unicode_escape") # noqa - - range = xrange # noqa - -if py3k: - import collections.abc as collections_abc -else: - import collections as collections_abc # noqa - -if py35: - - def _formatannotation(annotation, base_module=None): - """vendored from python 3.7 - """ - - if getattr(annotation, "__module__", None) == "typing": - return repr(annotation).replace("typing.", "") - if isinstance(annotation, type): - if annotation.__module__ in ("builtins", base_module): - return annotation.__qualname__ - return annotation.__module__ + "." + annotation.__qualname__ - return repr(annotation) - - def inspect_formatargspec( - args, - varargs=None, - varkw=None, - defaults=None, - kwonlyargs=(), - kwonlydefaults={}, - annotations={}, - formatarg=str, - formatvarargs=lambda name: "*" + name, - formatvarkw=lambda name: "**" + name, - formatvalue=lambda value: "=" + repr(value), - formatreturns=lambda text: " -> " + text, - formatannotation=_formatannotation, - ): - """Copy formatargspec from python 3.7 standard library. - - Python 3 has deprecated formatargspec and requested that Signature - be used instead, however this requires a full reimplementation - of formatargspec() in terms of creating Parameter objects and such. - Instead of introducing all the object-creation overhead and having - to reinvent from scratch, just copy their compatibility routine. 
- - """ - - def formatargandannotation(arg): - result = formatarg(arg) - if arg in annotations: - result += ": " + formatannotation(annotations[arg]) - return result - - specs = [] - if defaults: - firstdefault = len(args) - len(defaults) - for i, arg in enumerate(args): - spec = formatargandannotation(arg) - if defaults and i >= firstdefault: - spec = spec + formatvalue(defaults[i - firstdefault]) - specs.append(spec) - if varargs is not None: - specs.append(formatvarargs(formatargandannotation(varargs))) - else: - if kwonlyargs: - specs.append("*") - if kwonlyargs: - for kwonlyarg in kwonlyargs: - spec = formatargandannotation(kwonlyarg) - if kwonlydefaults and kwonlyarg in kwonlydefaults: - spec += formatvalue(kwonlydefaults[kwonlyarg]) - specs.append(spec) - if varkw is not None: - specs.append(formatvarkw(formatargandannotation(varkw))) - result = "(" + ", ".join(specs) + ")" - if "return" in annotations: - result += formatreturns(formatannotation(annotations["return"])) - return result - - -else: - from inspect import formatargspec as inspect_formatargspec # noqa - - -if py3k: - from configparser import ConfigParser as SafeConfigParser - import configparser -else: - from ConfigParser import SafeConfigParser # noqa - import ConfigParser as configparser # noqa - -if py2k: - from mako.util import parse_encoding - -if py35: - import importlib.util - import importlib.machinery - - def load_module_py(module_id, path): - spec = importlib.util.spec_from_file_location(module_id, path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module - - def load_module_pyc(module_id, path): - spec = importlib.util.spec_from_file_location(module_id, path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - return module - - -elif py3k: - import importlib.machinery - - def load_module_py(module_id, path): - module = importlib.machinery.SourceFileLoader( - module_id, path - ).load_module(module_id) - del sys.modules[module_id] - return module - - def load_module_pyc(module_id, path): - module = importlib.machinery.SourcelessFileLoader( - module_id, path - ).load_module(module_id) - del sys.modules[module_id] - return module - - -if py3k: - - def get_bytecode_suffixes(): - try: - return importlib.machinery.BYTECODE_SUFFIXES - except AttributeError: - return importlib.machinery.DEBUG_BYTECODE_SUFFIXES - - def get_current_bytecode_suffixes(): - if py35: - suffixes = importlib.machinery.BYTECODE_SUFFIXES - else: - if sys.flags.optimize: - suffixes = importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES - else: - suffixes = importlib.machinery.BYTECODE_SUFFIXES - - return suffixes - - def has_pep3147(): - - if py35: - return True - else: - # TODO: not sure if we are supporting old versions of Python - # the import here emits a deprecation warning which the test - # suite only catches if imp wasn't imported alreadt - # http://www.python.org/dev/peps/pep-3147/#detecting-pep-3147-availability - import imp - - return hasattr(imp, "get_tag") - - -else: - import imp - - def load_module_py(module_id, path): # noqa - with open(path, "rb") as fp: - mod = imp.load_source(module_id, path, fp) - if py2k: - source_encoding = parse_encoding(fp) - if source_encoding: - mod._alembic_source_encoding = source_encoding - del sys.modules[module_id] - return mod - - def load_module_pyc(module_id, path): # noqa - with open(path, "rb") as fp: - mod = imp.load_compiled(module_id, path, fp) - # no source encoding here - del sys.modules[module_id] - return mod - - 
def get_current_bytecode_suffixes(): - if sys.flags.optimize: - return [".pyo"] # e.g. .pyo - else: - return [".pyc"] # e.g. .pyc - - def has_pep3147(): - return False - - -try: - exec_ = getattr(compat_builtins, "exec") -except AttributeError: - # Python 2 - def exec_(func_text, globals_, lcl): - exec("exec func_text in globals_, lcl") - - -################################################ -# cross-compatible metaclass implementation -# Copyright (c) 2010-2012 Benjamin Peterson - - -def with_metaclass(meta, base=object): - """Create a base class with a metaclass.""" - return meta("%sBase" % meta.__name__, (base,), {}) - - -################################################ - -if py3k: - - def reraise(tp, value, tb=None, cause=None): - if cause is not None: - value.__cause__ = cause - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - def raise_from_cause(exception, exc_info=None): - if exc_info is None: - exc_info = sys.exc_info() - exc_type, exc_value, exc_tb = exc_info - reraise(type(exception), exception, tb=exc_tb, cause=exc_value) - - -else: - exec( - "def reraise(tp, value, tb=None, cause=None):\n" - " raise tp, value, tb\n" - ) - - def raise_from_cause(exception, exc_info=None): - # not as nice as that of Py3K, but at least preserves - # the code line where the issue occurred - if exc_info is None: - exc_info = sys.exc_info() - exc_type, exc_value, exc_tb = exc_info - reraise(type(exception), exception, tb=exc_tb) - - -# produce a wrapper that allows encoded text to stream -# into a given buffer, but doesn't close it. -# not sure of a more idiomatic approach to this. -class EncodedIO(io.TextIOWrapper): - def close(self): - pass - - -if py2k: - # in Py2K, the io.* package is awkward because it does not - # easily wrap the file type (e.g. sys.stdout) and I can't - # figure out at all how to wrap StringIO.StringIO - # and also might be user specified too. So create a full - # adapter. - - class ActLikePy3kIO(object): - - """Produce an object capable of wrapping either - sys.stdout (e.g. file) *or* StringIO.StringIO(). 
- - """ - - def _false(self): - return False - - def _true(self): - return True - - readable = seekable = _false - writable = _true - closed = False - - def __init__(self, file_): - self.file_ = file_ - - def write(self, text): - return self.file_.write(text) - - def flush(self): - return self.file_.flush() - - class EncodedIO(EncodedIO): - def __init__(self, file_, encoding): - super(EncodedIO, self).__init__( - ActLikePy3kIO(file_), encoding=encoding - ) diff --git a/venv/lib/python3.7/site-packages/alembic/util/exc.py b/venv/lib/python3.7/site-packages/alembic/util/exc.py deleted file mode 100644 index f7ad021..0000000 --- a/venv/lib/python3.7/site-packages/alembic/util/exc.py +++ /dev/null @@ -1,2 +0,0 @@ -class CommandError(Exception): - pass diff --git a/venv/lib/python3.7/site-packages/alembic/util/langhelpers.py b/venv/lib/python3.7/site-packages/alembic/util/langhelpers.py deleted file mode 100644 index bb9c8f5..0000000 --- a/venv/lib/python3.7/site-packages/alembic/util/langhelpers.py +++ /dev/null @@ -1,320 +0,0 @@ -import collections -import textwrap -import uuid -import warnings - -from .compat import callable -from .compat import collections_abc -from .compat import exec_ -from .compat import inspect_getargspec -from .compat import string_types -from .compat import with_metaclass - - -class _ModuleClsMeta(type): - def __setattr__(cls, key, value): - super(_ModuleClsMeta, cls).__setattr__(key, value) - cls._update_module_proxies(key) - - -class ModuleClsProxy(with_metaclass(_ModuleClsMeta)): - """Create module level proxy functions for the - methods on a given class. - - The functions will have a compatible signature - as the methods. - - """ - - _setups = collections.defaultdict(lambda: (set(), [])) - - @classmethod - def _update_module_proxies(cls, name): - attr_names, modules = cls._setups[cls] - for globals_, locals_ in modules: - cls._add_proxied_attribute(name, globals_, locals_, attr_names) - - def _install_proxy(self): - attr_names, modules = self._setups[self.__class__] - for globals_, locals_ in modules: - globals_["_proxy"] = self - for attr_name in attr_names: - globals_[attr_name] = getattr(self, attr_name) - - def _remove_proxy(self): - attr_names, modules = self._setups[self.__class__] - for globals_, locals_ in modules: - globals_["_proxy"] = None - for attr_name in attr_names: - del globals_[attr_name] - - @classmethod - def create_module_class_proxy(cls, globals_, locals_): - attr_names, modules = cls._setups[cls] - modules.append((globals_, locals_)) - cls._setup_proxy(globals_, locals_, attr_names) - - @classmethod - def _setup_proxy(cls, globals_, locals_, attr_names): - for methname in dir(cls): - cls._add_proxied_attribute(methname, globals_, locals_, attr_names) - - @classmethod - def _add_proxied_attribute(cls, methname, globals_, locals_, attr_names): - if not methname.startswith("_"): - meth = getattr(cls, methname) - if callable(meth): - locals_[methname] = cls._create_method_proxy( - methname, globals_, locals_ - ) - else: - attr_names.add(methname) - - @classmethod - def _create_method_proxy(cls, name, globals_, locals_): - fn = getattr(cls, name) - - def _name_error(name): - raise NameError( - "Can't invoke function '%s', as the proxy object has " - "not yet been " - "established for the Alembic '%s' class. " - "Try placing this code inside a callable." 
- % (name, cls.__name__) - ) - - globals_["_name_error"] = _name_error - - translations = getattr(fn, "_legacy_translations", []) - if translations: - spec = inspect_getargspec(fn) - if spec[0] and spec[0][0] == "self": - spec[0].pop(0) - - outer_args = inner_args = "*args, **kw" - translate_str = "args, kw = _translate(%r, %r, %r, args, kw)" % ( - fn.__name__, - tuple(spec), - translations, - ) - - def translate(fn_name, spec, translations, args, kw): - return_kw = {} - return_args = [] - - for oldname, newname in translations: - if oldname in kw: - warnings.warn( - "Argument %r is now named %r " - "for method %s()." % (oldname, newname, fn_name) - ) - return_kw[newname] = kw.pop(oldname) - return_kw.update(kw) - - args = list(args) - if spec[3]: - pos_only = spec[0][: -len(spec[3])] - else: - pos_only = spec[0] - for arg in pos_only: - if arg not in return_kw: - try: - return_args.append(args.pop(0)) - except IndexError: - raise TypeError( - "missing required positional argument: %s" - % arg - ) - return_args.extend(args) - - return return_args, return_kw - - globals_["_translate"] = translate - else: - outer_args = "*args, **kw" - inner_args = "*args, **kw" - translate_str = "" - - func_text = textwrap.dedent( - """\ - def %(name)s(%(args)s): - %(doc)r - %(translate)s - try: - p = _proxy - except NameError: - _name_error('%(name)s') - return _proxy.%(name)s(%(apply_kw)s) - e - """ - % { - "name": name, - "translate": translate_str, - "args": outer_args, - "apply_kw": inner_args, - "doc": fn.__doc__, - } - ) - lcl = {} - exec_(func_text, globals_, lcl) - return lcl[name] - - -def _with_legacy_names(translations): - def decorate(fn): - fn._legacy_translations = translations - return fn - - return decorate - - -def asbool(value): - return value is not None and value.lower() == "true" - - -def rev_id(): - return uuid.uuid4().hex[-12:] - - -def to_list(x, default=None): - if x is None: - return default - elif isinstance(x, string_types): - return [x] - elif isinstance(x, collections_abc.Iterable): - return list(x) - else: - return [x] - - -def to_tuple(x, default=None): - if x is None: - return default - elif isinstance(x, string_types): - return (x,) - elif isinstance(x, collections_abc.Iterable): - return tuple(x) - else: - return (x,) - - -def unique_list(seq, hashfunc=None): - seen = set() - seen_add = seen.add - if not hashfunc: - return [x for x in seq if x not in seen and not seen_add(x)] - else: - return [ - x - for x in seq - if hashfunc(x) not in seen and not seen_add(hashfunc(x)) - ] - - -def dedupe_tuple(tup): - return tuple(unique_list(tup)) - - -class memoized_property(object): - - """A read-only @property that is only evaluated once.""" - - def __init__(self, fget, doc=None): - self.fget = fget - self.__doc__ = doc or fget.__doc__ - self.__name__ = fget.__name__ - - def __get__(self, obj, cls): - if obj is None: - return self - obj.__dict__[self.__name__] = result = self.fget(obj) - return result - - -class immutabledict(dict): - def _immutable(self, *arg, **kw): - raise TypeError("%s object is immutable" % self.__class__.__name__) - - __delitem__ = ( - __setitem__ - ) = __setattr__ = clear = pop = popitem = setdefault = update = _immutable - - def __new__(cls, *args): - new = dict.__new__(cls) - dict.__init__(new, *args) - return new - - def __init__(self, *args): - pass - - def __reduce__(self): - return immutabledict, (dict(self),) - - def union(self, d): - if not self: - return immutabledict(d) - else: - d2 = immutabledict(self) - dict.update(d2, d) - return d2 - - def 
__repr__(self): - return "immutabledict(%s)" % dict.__repr__(self) - - -class Dispatcher(object): - def __init__(self, uselist=False): - self._registry = {} - self.uselist = uselist - - def dispatch_for(self, target, qualifier="default"): - def decorate(fn): - if self.uselist: - self._registry.setdefault((target, qualifier), []).append(fn) - else: - assert (target, qualifier) not in self._registry - self._registry[(target, qualifier)] = fn - return fn - - return decorate - - def dispatch(self, obj, qualifier="default"): - - if isinstance(obj, string_types): - targets = [obj] - elif isinstance(obj, type): - targets = obj.__mro__ - else: - targets = type(obj).__mro__ - - for spcls in targets: - if qualifier != "default" and (spcls, qualifier) in self._registry: - return self._fn_or_list(self._registry[(spcls, qualifier)]) - elif (spcls, "default") in self._registry: - return self._fn_or_list(self._registry[(spcls, "default")]) - else: - raise ValueError("no dispatch function for object: %s" % obj) - - def _fn_or_list(self, fn_or_list): - if self.uselist: - - def go(*arg, **kw): - for fn in fn_or_list: - fn(*arg, **kw) - - return go - else: - return fn_or_list - - def branch(self): - """Return a copy of this dispatcher that is independently - writable.""" - - d = Dispatcher() - if self.uselist: - d._registry.update( - (k, [fn for fn in self._registry[k]]) for k in self._registry - ) - else: - d._registry.update(self._registry) - return d diff --git a/venv/lib/python3.7/site-packages/alembic/util/messaging.py b/venv/lib/python3.7/site-packages/alembic/util/messaging.py deleted file mode 100644 index 65b92c8..0000000 --- a/venv/lib/python3.7/site-packages/alembic/util/messaging.py +++ /dev/null @@ -1,101 +0,0 @@ -import logging -import sys -import textwrap -import warnings - -from sqlalchemy.engine import url - -from .compat import binary_type -from .compat import collections_abc -from .compat import py27 -from .compat import string_types - -log = logging.getLogger(__name__) - -if py27: - # disable "no handler found" errors - logging.getLogger("alembic").addHandler(logging.NullHandler()) - - -try: - import fcntl - import termios - import struct - - ioctl = fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)) - _h, TERMWIDTH, _hp, _wp = struct.unpack("HHHH", ioctl) - if TERMWIDTH <= 0: # can occur if running in emacs pseudo-tty - TERMWIDTH = None -except (ImportError, IOError): - TERMWIDTH = None - - -def write_outstream(stream, *text): - encoding = getattr(stream, "encoding", "ascii") or "ascii" - for t in text: - if not isinstance(t, binary_type): - t = t.encode(encoding, "replace") - t = t.decode(encoding) - try: - stream.write(t) - except IOError: - # suppress "broken pipe" errors. - # no known way to handle this on Python 3 however - # as the exception is "ignored" (noisily) in TextIOWrapper. 
- break - - -def status(_statmsg, fn, *arg, **kw): - newline = kw.pop("newline", False) - msg(_statmsg + " ...", newline, True) - try: - ret = fn(*arg, **kw) - write_outstream(sys.stdout, " done\n") - return ret - except: - write_outstream(sys.stdout, " FAILED\n") - raise - - -def err(message): - log.error(message) - msg("FAILED: %s" % message) - sys.exit(-1) - - -def obfuscate_url_pw(u): - u = url.make_url(u) - if u.password: - u.password = "XXXXX" - return str(u) - - -def warn(msg, stacklevel=2): - warnings.warn(msg, UserWarning, stacklevel=stacklevel) - - -def msg(msg, newline=True, flush=False): - if TERMWIDTH is None: - write_outstream(sys.stdout, msg) - if newline: - write_outstream(sys.stdout, "\n") - else: - # left indent output lines - lines = textwrap.wrap(msg, TERMWIDTH) - if len(lines) > 1: - for line in lines[0:-1]: - write_outstream(sys.stdout, " ", line, "\n") - write_outstream(sys.stdout, " ", lines[-1], ("\n" if newline else "")) - if flush: - sys.stdout.flush() - - -def format_as_comma(value): - if value is None: - return "" - elif isinstance(value, string_types): - return value - elif isinstance(value, collections_abc.Iterable): - return ", ".join(value) - else: - raise ValueError("Don't know how to comma-format %r" % value) diff --git a/venv/lib/python3.7/site-packages/alembic/util/pyfiles.py b/venv/lib/python3.7/site-packages/alembic/util/pyfiles.py deleted file mode 100644 index 013b147..0000000 --- a/venv/lib/python3.7/site-packages/alembic/util/pyfiles.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -import re -import tempfile - -from mako import exceptions -from mako.template import Template - -from .compat import get_current_bytecode_suffixes -from .compat import has_pep3147 -from .compat import load_module_py -from .compat import load_module_pyc -from .compat import py35 -from .exc import CommandError - - -def template_to_file(template_file, dest, output_encoding, **kw): - template = Template(filename=template_file) - try: - output = template.render_unicode(**kw).encode(output_encoding) - except: - with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as ntf: - ntf.write( - exceptions.text_error_template() - .render_unicode() - .encode(output_encoding) - ) - fname = ntf.name - raise CommandError( - "Template rendering failed; see %s for a " - "template-oriented traceback." % fname - ) - else: - with open(dest, "wb") as f: - f.write(output) - - -def coerce_resource_to_filename(fname): - """Interpret a filename as either a filesystem location or as a package - resource. - - Names that are non absolute paths and contain a colon - are interpreted as resources and coerced to a file location. - - """ - if not os.path.isabs(fname) and ":" in fname: - import pkg_resources - - fname = pkg_resources.resource_filename(*fname.split(":")) - return fname - - -def pyc_file_from_path(path): - """Given a python source path, locate the .pyc. 
- - """ - - if has_pep3147(): - if py35: - import importlib - - candidate = importlib.util.cache_from_source(path) - else: - import imp - - candidate = imp.cache_from_source(path) - if os.path.exists(candidate): - return candidate - - # even for pep3147, fall back to the old way of finding .pyc files, - # to support sourceless operation - filepath, ext = os.path.splitext(path) - for ext in get_current_bytecode_suffixes(): - if os.path.exists(filepath + ext): - return filepath + ext - else: - return None - - -def edit(path): - """Given a source path, run the EDITOR for it""" - - import editor - - try: - editor.edit(path) - except Exception as exc: - raise CommandError("Error executing editor (%s)" % (exc,)) - - -def load_python_file(dir_, filename): - """Load a file from the given path as a Python module.""" - - module_id = re.sub(r"\W", "_", filename) - path = os.path.join(dir_, filename) - _, ext = os.path.splitext(filename) - if ext == ".py": - if os.path.exists(path): - module = load_module_py(module_id, path) - else: - pyc_path = pyc_file_from_path(path) - if pyc_path is None: - raise ImportError("Can't find Python file %s" % path) - else: - module = load_module_pyc(module_id, pyc_path) - elif ext in (".pyc", ".pyo"): - module = load_module_pyc(module_id, path) - return module diff --git a/venv/lib/python3.7/site-packages/alembic/util/sqla_compat.py b/venv/lib/python3.7/site-packages/alembic/util/sqla_compat.py deleted file mode 100644 index d303033..0000000 --- a/venv/lib/python3.7/site-packages/alembic/util/sqla_compat.py +++ /dev/null @@ -1,305 +0,0 @@ -import re - -from sqlalchemy import __version__ -from sqlalchemy import inspect -from sqlalchemy import schema -from sqlalchemy import sql -from sqlalchemy import types as sqltypes -from sqlalchemy.ext.compiler import compiles -from sqlalchemy.schema import CheckConstraint -from sqlalchemy.schema import Column -from sqlalchemy.schema import ForeignKeyConstraint -from sqlalchemy.sql.elements import quoted_name -from sqlalchemy.sql.expression import _BindParamClause -from sqlalchemy.sql.expression import _TextClause as TextClause -from sqlalchemy.sql.visitors import traverse - -from . 
import compat - - -def _safe_int(value): - try: - return int(value) - except: - return value - - -_vers = tuple( - [_safe_int(x) for x in re.findall(r"(\d+|[abc]\d)", __version__)] -) -sqla_110 = _vers >= (1, 1, 0) -sqla_1115 = _vers >= (1, 1, 15) -sqla_120 = _vers >= (1, 2, 0) -sqla_1216 = _vers >= (1, 2, 16) -sqla_13 = _vers >= (1, 3) -sqla_14 = _vers >= (1, 4) -try: - from sqlalchemy import Computed # noqa - - has_computed = True - - has_computed_reflection = _vers >= (1, 3, 16) -except ImportError: - has_computed = False - has_computed_reflection = False - -AUTOINCREMENT_DEFAULT = "auto" - - -def _connectable_has_table(connectable, tablename, schemaname): - if sqla_14: - return inspect(connectable).has_table(tablename, schemaname) - else: - return connectable.dialect.has_table( - connectable, tablename, schemaname - ) - - -def _exec_on_inspector(inspector, statement, **params): - if sqla_14: - with inspector._operation_context() as conn: - return conn.execute(statement, params) - else: - return inspector.bind.execute(statement, params) - - -def _server_default_is_computed(column): - if not has_computed: - return False - else: - return isinstance(column.computed, Computed) - - -def _table_for_constraint(constraint): - if isinstance(constraint, ForeignKeyConstraint): - return constraint.parent - else: - return constraint.table - - -def _columns_for_constraint(constraint): - if isinstance(constraint, ForeignKeyConstraint): - return [fk.parent for fk in constraint.elements] - elif isinstance(constraint, CheckConstraint): - return _find_columns(constraint.sqltext) - else: - return list(constraint.columns) - - -def _fk_spec(constraint): - source_columns = [ - constraint.columns[key].name for key in constraint.column_keys - ] - - source_table = constraint.parent.name - source_schema = constraint.parent.schema - target_schema = constraint.elements[0].column.table.schema - target_table = constraint.elements[0].column.table.name - target_columns = [element.column.name for element in constraint.elements] - ondelete = constraint.ondelete - onupdate = constraint.onupdate - deferrable = constraint.deferrable - initially = constraint.initially - return ( - source_schema, - source_table, - source_columns, - target_schema, - target_table, - target_columns, - onupdate, - ondelete, - deferrable, - initially, - ) - - -def _fk_is_self_referential(constraint): - spec = constraint.elements[0]._get_colspec() - tokens = spec.split(".") - tokens.pop(-1) # colname - tablekey = ".".join(tokens) - return tablekey == constraint.parent.key - - -def _is_type_bound(constraint): - # this deals with SQLAlchemy #3260, don't copy CHECK constraints - # that will be generated by the type. 
- # new feature added for #3260 - return constraint._type_bound - - -def _find_columns(clause): - """locate Column objects within the given expression.""" - - cols = set() - traverse(clause, {}, {"column": cols.add}) - return cols - - -def _remove_column_from_collection(collection, column): - """remove a column from a ColumnCollection.""" - - # workaround for older SQLAlchemy, remove the - # same object that's present - to_remove = collection[column.key] - collection.remove(to_remove) - - -def _textual_index_column(table, text_): - """a workaround for the Index construct's severe lack of flexibility""" - if isinstance(text_, compat.string_types): - c = Column(text_, sqltypes.NULLTYPE) - table.append_column(c) - return c - elif isinstance(text_, TextClause): - return _textual_index_element(table, text_) - else: - raise ValueError("String or text() construct expected") - - -class _textual_index_element(sql.ColumnElement): - """Wrap around a sqlalchemy text() construct in such a way that - we appear like a column-oriented SQL expression to an Index - construct. - - The issue here is that currently the Postgresql dialect, the biggest - recipient of functional indexes, keys all the index expressions to - the corresponding column expressions when rendering CREATE INDEX, - so the Index we create here needs to have a .columns collection that - is the same length as the .expressions collection. Ultimately - SQLAlchemy should support text() expressions in indexes. - - See SQLAlchemy issue 3174. - - """ - - __visit_name__ = "_textual_idx_element" - - def __init__(self, table, text): - self.table = table - self.text = text - self.key = text.text - self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE) - table.append_column(self.fake_column) - - def get_children(self): - return [self.fake_column] - - -@compiles(_textual_index_element) -def _render_textual_index_column(element, compiler, **kw): - return compiler.process(element.text, **kw) - - -class _literal_bindparam(_BindParamClause): - pass - - -@compiles(_literal_bindparam) -def _render_literal_bindparam(element, compiler, **kw): - return compiler.render_literal_bindparam(element, **kw) - - -def _get_index_expressions(idx): - return list(idx.expressions) - - -def _get_index_column_names(idx): - return [getattr(exp, "name", None) for exp in _get_index_expressions(idx)] - - -def _column_kwargs(col): - if sqla_13: - return col.kwargs - else: - return {} - - -def _get_constraint_final_name(constraint, dialect): - if constraint.name is None: - return None - elif sqla_14: - # for SQLAlchemy 1.4 we would like to have the option to expand - # the use of "deferred" names for constraints as well as to have - # some flexibility with "None" name and similar; make use of new - # SQLAlchemy API to return what would be the final compiled form of - # the name for this dialect. - return dialect.identifier_preparer.format_constraint( - constraint, _alembic_quote=False - ) - else: - - # prior to SQLAlchemy 1.4, work around quoting logic to get at the - # final compiled name without quotes. - if hasattr(constraint.name, "quote"): - # might be quoted_name, might be truncated_name, keep it the - # same - quoted_name_cls = type(constraint.name) - else: - quoted_name_cls = quoted_name - - new_name = quoted_name_cls(str(constraint.name), quote=False) - constraint = constraint.__class__(name=new_name) - - if isinstance(constraint, schema.Index): - # name should not be quoted. 
- return dialect.ddl_compiler(dialect, None)._prepared_index_name( - constraint - ) - else: - # name should not be quoted. - return dialect.identifier_preparer.format_constraint(constraint) - - -def _constraint_is_named(constraint, dialect): - if sqla_14: - if constraint.name is None: - return False - name = dialect.identifier_preparer.format_constraint( - constraint, _alembic_quote=False - ) - return name is not None - else: - return constraint.name is not None - - -def _dialect_supports_comments(dialect): - if sqla_120: - return dialect.supports_comments - else: - return False - - -def _comment_attribute(obj): - """return the .comment attribute from a Table or Column""" - - if sqla_120: - return obj.comment - else: - return None - - -def _is_mariadb(mysql_dialect): - return ( - mysql_dialect.server_version_info - and "MariaDB" in mysql_dialect.server_version_info - ) - - -def _mariadb_normalized_version_info(mysql_dialect): - if len(mysql_dialect.server_version_info) > 5: - return mysql_dialect.server_version_info[3:] - else: - return mysql_dialect.server_version_info - - -if sqla_14: - from sqlalchemy import create_mock_engine -else: - from sqlalchemy import create_engine - - def create_mock_engine(url, executor): - return create_engine( - "postgresql://", strategy="mock", executor=executor - ) diff --git a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/LICENSE.rst b/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/LICENSE.rst deleted file mode 100644 index d12a849..0000000 --- a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2014 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/METADATA b/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/METADATA deleted file mode 100644 index fa4536d..0000000 --- a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/METADATA +++ /dev/null @@ -1,102 +0,0 @@ -Metadata-Version: 2.1 -Name: click -Version: 7.1.1 -Summary: Composable command line interface toolkit -Home-page: https://palletsprojects.com/p/click/ -Maintainer: Pallets -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://click.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/click -Project-URL: Issue tracker, https://github.com/pallets/click/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 3 -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* - -\$ click\_ -========== - -Click is a Python package for creating beautiful command line interfaces -in a composable way with as little code as necessary. It's the "Command -Line Interface Creation Kit". It's highly configurable but comes with -sensible defaults out of the box. - -It aims to make the process of writing command line tools quick and fun -while also preventing any frustration caused by the inability to -implement an intended CLI API. - -Click in three points: - -- Arbitrary nesting of commands -- Automatic help page generation -- Supports lazy loading of subcommands at runtime - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - $ pip install -U click - -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - -A Simple Example ----------------- - -.. code-block:: python - - import click - - @click.command() - @click.option("--count", default=1, help="Number of greetings.") - @click.option("--name", prompt="Your name", help="The person to greet.") - def hello(count, name): - """Simple program that greets NAME for a total of COUNT times.""" - for _ in range(count): - click.echo(f"Hello, {name}!") - - if __name__ == '__main__': - hello() - -.. code-block:: text - - $ python hello.py --count=3 - Your name: Click - Hello, Click! - Hello, Click! - Hello, Click! - - -Donate ------- - -The Pallets organization develops and supports Click and other popular -packages. In order to grow the community of contributors and users, and -allow the maintainers to devote more time to the projects, `please -donate today`_. - -.. 
_please donate today: https://palletsprojects.com/donate - - -Links ------ - -- Website: https://palletsprojects.com/p/click/ -- Documentation: https://click.palletsprojects.com/ -- Releases: https://pypi.org/project/click/ -- Code: https://github.com/pallets/click -- Issue tracker: https://github.com/pallets/click/issues -- Test status: https://dev.azure.com/pallets/click/_build -- Official chat: https://discord.gg/t6rrQZH - - diff --git a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/RECORD b/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/RECORD deleted file mode 100644 index c52207f..0000000 --- a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/RECORD +++ /dev/null @@ -1,40 +0,0 @@ -click-7.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -click-7.1.1.dist-info/LICENSE.rst,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475 -click-7.1.1.dist-info/METADATA,sha256=qGBq4nyx59fI9CN-NY-C_ye4USndxpKszWFLe5KMhQM,2868 -click-7.1.1.dist-info/RECORD,, -click-7.1.1.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 -click-7.1.1.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 -click/__init__.py,sha256=_Mora-ZWjo_kRK3mg_vX3ZmQV3pop8vrCLj-C209TvQ,2463 -click/__pycache__/__init__.cpython-37.pyc,, -click/__pycache__/_bashcomplete.cpython-37.pyc,, -click/__pycache__/_compat.cpython-37.pyc,, -click/__pycache__/_termui_impl.cpython-37.pyc,, -click/__pycache__/_textwrap.cpython-37.pyc,, -click/__pycache__/_unicodefun.cpython-37.pyc,, -click/__pycache__/_winconsole.cpython-37.pyc,, -click/__pycache__/core.cpython-37.pyc,, -click/__pycache__/decorators.cpython-37.pyc,, -click/__pycache__/exceptions.cpython-37.pyc,, -click/__pycache__/formatting.cpython-37.pyc,, -click/__pycache__/globals.cpython-37.pyc,, -click/__pycache__/parser.cpython-37.pyc,, -click/__pycache__/termui.cpython-37.pyc,, -click/__pycache__/testing.cpython-37.pyc,, -click/__pycache__/types.cpython-37.pyc,, -click/__pycache__/utils.cpython-37.pyc,, -click/_bashcomplete.py,sha256=9J98IHQYmCAr2Jup6TDshUr5FJEen-AoQCZR0K5nKxQ,12309 -click/_compat.py,sha256=CCA3QaccPgx3TL3biRljHNnqELqCSMr3wPIe1kXaOcQ,24257 -click/_termui_impl.py,sha256=w2Fgse5XiOSjV72IZLBKG0loK_Q1oogvh9e0spJpRAY,20793 -click/_textwrap.py,sha256=ajCzkzFly5tjm9foQ5N9_MOeaYJMBjAltuFa69n4iXY,1197 -click/_unicodefun.py,sha256=apLSNEBZgUsQNPMUv072zJ1swqnm0dYVT5TqcIWTt6w,4201 -click/_winconsole.py,sha256=6YDu6Rq1Wxx4w9uinBMK2LHvP83aerZM9GQurlk3QDo,10010 -click/core.py,sha256=V6DJzastGhrC6WTDwV9MSLwcJUdX2Uf1ypmgkjBdn_Y,77650 -click/decorators.py,sha256=3TvEO_BkaHl7k6Eh1G5eC7JK4LKPdpFqH9JP0QDyTlM,11215 -click/exceptions.py,sha256=3pQAyyMFzx5A3eV0Y27WtDTyGogZRbrC6_o5DjjKBbw,8118 -click/formatting.py,sha256=Wb4gqFEpWaKPgAbOvnkCl8p-bEZx5KpM5ZSByhlnJNk,9281 -click/globals.py,sha256=ht7u2kUGI08pAarB4e4yC8Lkkxy6gJfRZyzxEj8EbWQ,1501 -click/parser.py,sha256=mFK-k58JtPpqO0AC36WAr0t5UfzEw1mvgVSyn7WCe9M,15691 -click/termui.py,sha256=G7QBEKIepRIGLvNdGwBTYiEtSImRxvTO_AglVpyHH2s,23998 -click/testing.py,sha256=EUEsDUqNXFgCLhZ0ZFOROpaVDA5I_rijwnNPE6qICgA,12854 -click/types.py,sha256=wuubik4VqgqAw5dvbYFkDt-zSAx97y9TQXuXcVaRyQA,25045 -click/utils.py,sha256=4VEcJ7iEHwjnFuzEuRtkT99o5VG3zqSD7Q2CVzv13WU,15940 diff --git a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/WHEEL b/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/WHEEL deleted file mode 100644 index ef99c6c..0000000 --- a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 
-Generator: bdist_wheel (0.34.2) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/top_level.txt deleted file mode 100644 index dca9a90..0000000 --- a/venv/lib/python3.7/site-packages/click-7.1.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -click diff --git a/venv/lib/python3.7/site-packages/click/__init__.py b/venv/lib/python3.7/site-packages/click/__init__.py deleted file mode 100644 index 3910b80..0000000 --- a/venv/lib/python3.7/site-packages/click/__init__.py +++ /dev/null @@ -1,79 +0,0 @@ -""" -Click is a simple Python module inspired by the stdlib optparse to make -writing command line scripts fun. Unlike other modules, it's based -around a simple API that does not come with too much magic and is -composable. -""" -from .core import Argument -from .core import BaseCommand -from .core import Command -from .core import CommandCollection -from .core import Context -from .core import Group -from .core import MultiCommand -from .core import Option -from .core import Parameter -from .decorators import argument -from .decorators import command -from .decorators import confirmation_option -from .decorators import group -from .decorators import help_option -from .decorators import make_pass_decorator -from .decorators import option -from .decorators import pass_context -from .decorators import pass_obj -from .decorators import password_option -from .decorators import version_option -from .exceptions import Abort -from .exceptions import BadArgumentUsage -from .exceptions import BadOptionUsage -from .exceptions import BadParameter -from .exceptions import ClickException -from .exceptions import FileError -from .exceptions import MissingParameter -from .exceptions import NoSuchOption -from .exceptions import UsageError -from .formatting import HelpFormatter -from .formatting import wrap_text -from .globals import get_current_context -from .parser import OptionParser -from .termui import clear -from .termui import confirm -from .termui import echo_via_pager -from .termui import edit -from .termui import get_terminal_size -from .termui import getchar -from .termui import launch -from .termui import pause -from .termui import progressbar -from .termui import prompt -from .termui import secho -from .termui import style -from .termui import unstyle -from .types import BOOL -from .types import Choice -from .types import DateTime -from .types import File -from .types import FLOAT -from .types import FloatRange -from .types import INT -from .types import IntRange -from .types import ParamType -from .types import Path -from .types import STRING -from .types import Tuple -from .types import UNPROCESSED -from .types import UUID -from .utils import echo -from .utils import format_filename -from .utils import get_app_dir -from .utils import get_binary_stream -from .utils import get_os_args -from .utils import get_text_stream -from .utils import open_file - -# Controls if click should emit the warning about the use of unicode -# literals. 
-disable_unicode_literals_warning = False - -__version__ = "7.1.1" diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index e194c1a..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/_bashcomplete.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/_bashcomplete.cpython-37.pyc deleted file mode 100644 index 5d910eb..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/_bashcomplete.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/_compat.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/_compat.cpython-37.pyc deleted file mode 100644 index 05b39cb..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/_compat.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/_termui_impl.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/_termui_impl.cpython-37.pyc deleted file mode 100644 index f224a8a..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/_termui_impl.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/_textwrap.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/_textwrap.cpython-37.pyc deleted file mode 100644 index 63025a9..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/_textwrap.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/_unicodefun.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/_unicodefun.cpython-37.pyc deleted file mode 100644 index 144e139..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/_unicodefun.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/_winconsole.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/_winconsole.cpython-37.pyc deleted file mode 100644 index 0832de0..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/_winconsole.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/core.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/core.cpython-37.pyc deleted file mode 100644 index 3eda7c9..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/core.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/decorators.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/decorators.cpython-37.pyc deleted file mode 100644 index 29f17c7..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/decorators.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/exceptions.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/exceptions.cpython-37.pyc deleted file mode 100644 index 46647fa..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/exceptions.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/formatting.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/formatting.cpython-37.pyc deleted file mode 100644 index 
abc2ea9..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/formatting.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/globals.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/globals.cpython-37.pyc deleted file mode 100644 index accf277..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/globals.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/parser.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/parser.cpython-37.pyc deleted file mode 100644 index 0523dd5..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/parser.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/termui.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/termui.cpython-37.pyc deleted file mode 100644 index 58928c4..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/termui.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/testing.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/testing.cpython-37.pyc deleted file mode 100644 index 8b55fae..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/testing.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/types.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/types.cpython-37.pyc deleted file mode 100644 index df7df00..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/types.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/__pycache__/utils.cpython-37.pyc b/venv/lib/python3.7/site-packages/click/__pycache__/utils.cpython-37.pyc deleted file mode 100644 index caa1efe..0000000 Binary files a/venv/lib/python3.7/site-packages/click/__pycache__/utils.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/click/_bashcomplete.py b/venv/lib/python3.7/site-packages/click/_bashcomplete.py deleted file mode 100644 index 8bca244..0000000 --- a/venv/lib/python3.7/site-packages/click/_bashcomplete.py +++ /dev/null @@ -1,375 +0,0 @@ -import copy -import os -import re - -from .core import Argument -from .core import MultiCommand -from .core import Option -from .parser import split_arg_string -from .types import Choice -from .utils import echo - -try: - from collections import abc -except ImportError: - import collections as abc - -WORDBREAK = "=" - -# Note, only BASH version 4.4 and later have the nosort option. -COMPLETION_SCRIPT_BASH = """ -%(complete_func)s() { - local IFS=$'\n' - COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ - COMP_CWORD=$COMP_CWORD \\ - %(autocomplete_var)s=complete $1 ) ) - return 0 -} - -%(complete_func)setup() { - local COMPLETION_OPTIONS="" - local BASH_VERSION_ARR=(${BASH_VERSION//./ }) - # Only BASH version 4.4 and later have the nosort option. - if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] \ -&& [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then - COMPLETION_OPTIONS="-o nosort" - fi - - complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s -} - -%(complete_func)setup -""" - -COMPLETION_SCRIPT_ZSH = """ -#compdef %(script_names)s - -%(complete_func)s() { - local -a completions - local -a completions_with_descriptions - local -a response - (( ! 
$+commands[%(script_names)s] )) && return 1 - - response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\ - COMP_CWORD=$((CURRENT-1)) \\ - %(autocomplete_var)s=\"complete_zsh\" \\ - %(script_names)s )}") - - for key descr in ${(kv)response}; do - if [[ "$descr" == "_" ]]; then - completions+=("$key") - else - completions_with_descriptions+=("$key":"$descr") - fi - done - - if [ -n "$completions_with_descriptions" ]; then - _describe -V unsorted completions_with_descriptions -U - fi - - if [ -n "$completions" ]; then - compadd -U -V unsorted -a completions - fi - compstate[insert]="automenu" -} - -compdef %(complete_func)s %(script_names)s -""" - -COMPLETION_SCRIPT_FISH = ( - "complete --no-files --command %(script_names)s --arguments" - ' "(env %(autocomplete_var)s=complete_fish' - " COMP_WORDS=(commandline -cp) COMP_CWORD=(commandline -t)" - ' %(script_names)s)"' -) - -_completion_scripts = { - "bash": COMPLETION_SCRIPT_BASH, - "zsh": COMPLETION_SCRIPT_ZSH, - "fish": COMPLETION_SCRIPT_FISH, -} - -_invalid_ident_char_re = re.compile(r"[^a-zA-Z0-9_]") - - -def get_completion_script(prog_name, complete_var, shell): - cf_name = _invalid_ident_char_re.sub("", prog_name.replace("-", "_")) - script = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH) - return ( - script - % { - "complete_func": "_{}_completion".format(cf_name), - "script_names": prog_name, - "autocomplete_var": complete_var, - } - ).strip() + ";" - - -def resolve_ctx(cli, prog_name, args): - """Parse into a hierarchy of contexts. Contexts are connected - through the parent variable. - - :param cli: command definition - :param prog_name: the program that is running - :param args: full list of args - :return: the final context/command parsed - """ - ctx = cli.make_context(prog_name, args, resilient_parsing=True) - args = ctx.protected_args + ctx.args - while args: - if isinstance(ctx.command, MultiCommand): - if not ctx.command.chain: - cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) - if cmd is None: - return ctx - ctx = cmd.make_context( - cmd_name, args, parent=ctx, resilient_parsing=True - ) - args = ctx.protected_args + ctx.args - else: - # Walk chained subcommand contexts saving the last one. - while args: - cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) - if cmd is None: - return ctx - sub_ctx = cmd.make_context( - cmd_name, - args, - parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False, - resilient_parsing=True, - ) - args = sub_ctx.args - ctx = sub_ctx - args = sub_ctx.protected_args + sub_ctx.args - else: - break - return ctx - - -def start_of_option(param_str): - """ - :param param_str: param_str to check - :return: whether or not this is the start of an option declaration - (i.e. starts "-" or "--") - """ - return param_str and param_str[:1] == "-" - - -def is_incomplete_option(all_args, cmd_param): - """ - :param all_args: the full original list of args supplied - :param cmd_param: the current command paramter - :return: whether or not the last option declaration (i.e. starts - "-" or "--") is incomplete and corresponds to this cmd_param. 
In - other words whether this cmd_param option can still accept - values - """ - if not isinstance(cmd_param, Option): - return False - if cmd_param.is_flag: - return False - last_option = None - for index, arg_str in enumerate( - reversed([arg for arg in all_args if arg != WORDBREAK]) - ): - if index + 1 > cmd_param.nargs: - break - if start_of_option(arg_str): - last_option = arg_str - - return True if last_option and last_option in cmd_param.opts else False - - -def is_incomplete_argument(current_params, cmd_param): - """ - :param current_params: the current params and values for this - argument as already entered - :param cmd_param: the current command parameter - :return: whether or not the last argument is incomplete and - corresponds to this cmd_param. In other words whether or not the - this cmd_param argument can still accept values - """ - if not isinstance(cmd_param, Argument): - return False - current_param_values = current_params[cmd_param.name] - if current_param_values is None: - return True - if cmd_param.nargs == -1: - return True - if ( - isinstance(current_param_values, abc.Iterable) - and cmd_param.nargs > 1 - and len(current_param_values) < cmd_param.nargs - ): - return True - return False - - -def get_user_autocompletions(ctx, args, incomplete, cmd_param): - """ - :param ctx: context associated with the parsed command - :param args: full list of args - :param incomplete: the incomplete text to autocomplete - :param cmd_param: command definition - :return: all the possible user-specified completions for the param - """ - results = [] - if isinstance(cmd_param.type, Choice): - # Choices don't support descriptions. - results = [ - (c, None) for c in cmd_param.type.choices if str(c).startswith(incomplete) - ] - elif cmd_param.autocompletion is not None: - dynamic_completions = cmd_param.autocompletion( - ctx=ctx, args=args, incomplete=incomplete - ) - results = [ - c if isinstance(c, tuple) else (c, None) for c in dynamic_completions - ] - return results - - -def get_visible_commands_starting_with(ctx, starts_with): - """ - :param ctx: context associated with the parsed command - :starts_with: string that visible commands must start with. - :return: all visible (not hidden) commands that start with starts_with. - """ - for c in ctx.command.list_commands(ctx): - if c.startswith(starts_with): - command = ctx.command.get_command(ctx, c) - if not command.hidden: - yield command - - -def add_subcommand_completions(ctx, incomplete, completions_out): - # Add subcommand completions. 
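# Aside: for Choice parameters, get_user_autocompletions above reduces to
# plain prefix filtering over the declared choices. A minimal, hedged sketch
# of that reduction (standalone; `choices` and `incomplete` are illustrative
# names, not part of the click API):
choices = ["install", "init", "info", "list"]
incomplete = "in"
matches = [(c, None) for c in choices if c.startswith(incomplete)]
# matches == [("install", None), ("init", None), ("info", None)]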
- if isinstance(ctx.command, MultiCommand): - completions_out.extend( - [ - (c.name, c.get_short_help_str()) - for c in get_visible_commands_starting_with(ctx, incomplete) - ] - ) - - # Walk up the context list and add any other completion - # possibilities from chained commands - while ctx.parent is not None: - ctx = ctx.parent - if isinstance(ctx.command, MultiCommand) and ctx.command.chain: - remaining_commands = [ - c - for c in get_visible_commands_starting_with(ctx, incomplete) - if c.name not in ctx.protected_args - ] - completions_out.extend( - [(c.name, c.get_short_help_str()) for c in remaining_commands] - ) - - -def get_choices(cli, prog_name, args, incomplete): - """ - :param cli: command definition - :param prog_name: the program that is running - :param args: full list of args - :param incomplete: the incomplete text to autocomplete - :return: all the possible completions for the incomplete - """ - all_args = copy.deepcopy(args) - - ctx = resolve_ctx(cli, prog_name, args) - if ctx is None: - return [] - - has_double_dash = "--" in all_args - - # In newer versions of bash long opts with '='s are partitioned, but - # it's easier to parse without the '=' - if start_of_option(incomplete) and WORDBREAK in incomplete: - partition_incomplete = incomplete.partition(WORDBREAK) - all_args.append(partition_incomplete[0]) - incomplete = partition_incomplete[2] - elif incomplete == WORDBREAK: - incomplete = "" - - completions = [] - if not has_double_dash and start_of_option(incomplete): - # completions for partial options - for param in ctx.command.params: - if isinstance(param, Option) and not param.hidden: - param_opts = [ - param_opt - for param_opt in param.opts + param.secondary_opts - if param_opt not in all_args or param.multiple - ] - completions.extend( - [(o, param.help) for o in param_opts if o.startswith(incomplete)] - ) - return completions - # completion for option values from user supplied values - for param in ctx.command.params: - if is_incomplete_option(all_args, param): - return get_user_autocompletions(ctx, all_args, incomplete, param) - # completion for argument values from user supplied values - for param in ctx.command.params: - if is_incomplete_argument(ctx.params, param): - return get_user_autocompletions(ctx, all_args, incomplete, param) - - add_subcommand_completions(ctx, incomplete, completions) - # Sort before returning so that proper ordering can be enforced in custom types. - return sorted(completions) - - -def do_complete(cli, prog_name, include_descriptions): - cwords = split_arg_string(os.environ["COMP_WORDS"]) - cword = int(os.environ["COMP_CWORD"]) - args = cwords[1:cword] - try: - incomplete = cwords[cword] - except IndexError: - incomplete = "" - - for item in get_choices(cli, prog_name, args, incomplete): - echo(item[0]) - if include_descriptions: - # ZSH has trouble dealing with empty array parameters when - # returned from commands, use '_' to indicate no description - # is present. 
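# Aside: the zsh completion script earlier in this file parses stdout as
# alternating value/description lines via ${(kv)response}, with "_" standing
# in for "no description" as the comment above explains. A hedged sketch of
# the emitted protocol (standalone; emit_zsh_pairs is an illustrative name):
def emit_zsh_pairs(choices):
    # choices: iterable of (value, description_or_None) tuples
    for value, desc in choices:
        print(value)
        print(desc if desc else "_")  # "_" keeps zsh's key/value parsing aligned

# emit_zsh_pairs([("build", "Build it"), ("clean", None)]) prints, one per
# line: build / "Build it" / clean / "_"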
- echo(item[1] if item[1] else "_") - - return True - - -def do_complete_fish(cli, prog_name): - cwords = split_arg_string(os.environ["COMP_WORDS"]) - incomplete = os.environ["COMP_CWORD"] - args = cwords[1:] - - for item in get_choices(cli, prog_name, args, incomplete): - if item[1]: - echo("{arg}\t{desc}".format(arg=item[0], desc=item[1])) - else: - echo(item[0]) - - return True - - -def bashcomplete(cli, prog_name, complete_var, complete_instr): - if "_" in complete_instr: - command, shell = complete_instr.split("_", 1) - else: - command = complete_instr - shell = "bash" - - if command == "source": - echo(get_completion_script(prog_name, complete_var, shell)) - return True - elif command == "complete": - if shell == "fish": - return do_complete_fish(cli, prog_name) - elif shell in {"bash", "zsh"}: - return do_complete(cli, prog_name, shell == "zsh") - - return False diff --git a/venv/lib/python3.7/site-packages/click/_compat.py b/venv/lib/python3.7/site-packages/click/_compat.py deleted file mode 100644 index ed57a18..0000000 --- a/venv/lib/python3.7/site-packages/click/_compat.py +++ /dev/null @@ -1,790 +0,0 @@ -# flake8: noqa -import codecs -import io -import os -import re -import sys -from weakref import WeakKeyDictionary - -PY2 = sys.version_info[0] == 2 -CYGWIN = sys.platform.startswith("cygwin") -MSYS2 = sys.platform.startswith("win") and ("GCC" in sys.version) -# Determine local App Engine environment, per Google's own suggestion -APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get( - "SERVER_SOFTWARE", "" -) -WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2 -DEFAULT_COLUMNS = 80 - - -_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") - - -def get_filesystem_encoding(): - return sys.getfilesystemencoding() or sys.getdefaultencoding() - - -def _make_text_stream( - stream, encoding, errors, force_readable=False, force_writable=False -): - if encoding is None: - encoding = get_best_encoding(stream) - if errors is None: - errors = "replace" - return _NonClosingTextIOWrapper( - stream, - encoding, - errors, - line_buffering=True, - force_readable=force_readable, - force_writable=force_writable, - ) - - -def is_ascii_encoding(encoding): - """Checks if a given encoding is ascii.""" - try: - return codecs.lookup(encoding).name == "ascii" - except LookupError: - return False - - -def get_best_encoding(stream): - """Returns the default stream encoding if not found.""" - rv = getattr(stream, "encoding", None) or sys.getdefaultencoding() - if is_ascii_encoding(rv): - return "utf-8" - return rv - - -class _NonClosingTextIOWrapper(io.TextIOWrapper): - def __init__( - self, - stream, - encoding, - errors, - force_readable=False, - force_writable=False, - **extra - ): - self._stream = stream = _FixupStream(stream, force_readable, force_writable) - io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra) - - # The io module is a place where the Python 3 text behavior - # was forced upon Python 2, so we need to unbreak - # it to look like Python 2. 
- if PY2: - - def write(self, x): - if isinstance(x, str) or is_bytes(x): - try: - self.flush() - except Exception: - pass - return self.buffer.write(str(x)) - return io.TextIOWrapper.write(self, x) - - def writelines(self, lines): - for line in lines: - self.write(line) - - def __del__(self): - try: - self.detach() - except Exception: - pass - - def isatty(self): - # https://bitbucket.org/pypy/pypy/issue/1803 - return self._stream.isatty() - - -class _FixupStream(object): - """The new io interface needs more from streams than streams - traditionally implement. As such, this fix-up code is necessary in - some circumstances. - - The forcing of readable and writable flags are there because some tools - put badly patched objects on sys (one such offender are certain version - of jupyter notebook). - """ - - def __init__(self, stream, force_readable=False, force_writable=False): - self._stream = stream - self._force_readable = force_readable - self._force_writable = force_writable - - def __getattr__(self, name): - return getattr(self._stream, name) - - def read1(self, size): - f = getattr(self._stream, "read1", None) - if f is not None: - return f(size) - # We only dispatch to readline instead of read in Python 2 as we - # do not want cause problems with the different implementation - # of line buffering. - if PY2: - return self._stream.readline(size) - return self._stream.read(size) - - def readable(self): - if self._force_readable: - return True - x = getattr(self._stream, "readable", None) - if x is not None: - return x() - try: - self._stream.read(0) - except Exception: - return False - return True - - def writable(self): - if self._force_writable: - return True - x = getattr(self._stream, "writable", None) - if x is not None: - return x() - try: - self._stream.write("") - except Exception: - try: - self._stream.write(b"") - except Exception: - return False - return True - - def seekable(self): - x = getattr(self._stream, "seekable", None) - if x is not None: - return x() - try: - self._stream.seek(self._stream.tell()) - except Exception: - return False - return True - - -if PY2: - text_type = unicode - raw_input = raw_input - string_types = (str, unicode) - int_types = (int, long) - iteritems = lambda x: x.iteritems() - range_type = xrange - - from pipes import quote as shlex_quote - - def is_bytes(x): - return isinstance(x, (buffer, bytearray)) - - _identifier_re = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$") - - # For Windows, we need to force stdout/stdin/stderr to binary if it's - # fetched for that. This obviously is not the most correct way to do - # it as it changes global state. Unfortunately, there does not seem to - # be a clear better way to do it as just reopening the file in binary - # mode does not change anything. - # - # An option would be to do what Python 3 does and to open the file as - # binary only, patch it back to the system, and then use a wrapper - # stream that converts newlines. It's not quite clear what's the - # correct option here. - # - # This code also lives in _winconsole for the fallback to the console - # emulation stream. - # - # There are also Windows environments where the `msvcrt` module is not - # available (which is why we use try-catch instead of the WIN variable - # here), such as the Google App Engine development server on Windows. In - # those cases there is just nothing we can do. 
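# Aside: the block below uses a "define a portable fallback, then shadow it if
# a platform module imports" idiom. Reduced to its shape as a hedged,
# standalone illustration (os.link is just a stand-in for any optional
# platform capability; hardlink is an illustrative name):
def hardlink(src, dst):
    import shutil
    shutil.copyfile(src, dst)       # portable fallback

try:
    from os import link as _link    # missing on some platforms
except ImportError:
    pass
else:
    def hardlink(src, dst):
        _link(src, dst)             # preferred when the platform has it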
- def set_binary_mode(f): - return f - - try: - import msvcrt - except ImportError: - pass - else: - - def set_binary_mode(f): - try: - fileno = f.fileno() - except Exception: - pass - else: - msvcrt.setmode(fileno, os.O_BINARY) - return f - - try: - import fcntl - except ImportError: - pass - else: - - def set_binary_mode(f): - try: - fileno = f.fileno() - except Exception: - pass - else: - flags = fcntl.fcntl(fileno, fcntl.F_GETFL) - fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) - return f - - def isidentifier(x): - return _identifier_re.search(x) is not None - - def get_binary_stdin(): - return set_binary_mode(sys.stdin) - - def get_binary_stdout(): - _wrap_std_stream("stdout") - return set_binary_mode(sys.stdout) - - def get_binary_stderr(): - _wrap_std_stream("stderr") - return set_binary_mode(sys.stderr) - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdin, encoding, errors, force_readable=True) - - def get_text_stdout(encoding=None, errors=None): - _wrap_std_stream("stdout") - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdout, encoding, errors, force_writable=True) - - def get_text_stderr(encoding=None, errors=None): - _wrap_std_stream("stderr") - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stderr, encoding, errors, force_writable=True) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), "replace") - return value - - -else: - import io - - text_type = str - raw_input = input - string_types = (str,) - int_types = (int,) - range_type = range - isidentifier = lambda x: x.isidentifier() - iteritems = lambda x: iter(x.items()) - - from shlex import quote as shlex_quote - - def is_bytes(x): - return isinstance(x, (bytes, memoryview, bytearray)) - - def _is_binary_reader(stream, default=False): - try: - return isinstance(stream.read(0), bytes) - except Exception: - return default - # This happens in some cases where the stream was already - # closed. In this case, we assume the default. - - def _is_binary_writer(stream, default=False): - try: - stream.write(b"") - except Exception: - try: - stream.write("") - return False - except Exception: - pass - return default - return True - - def _find_binary_reader(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_reader(stream, False): - return stream - - buf = getattr(stream, "buffer", None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_reader(buf, True): - return buf - - def _find_binary_writer(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detatching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_writer(stream, False): - return stream - - buf = getattr(stream, "buffer", None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. 
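# Aside: why the zero-length write probe in _is_binary_writer works — a text
# stream rejects bytes and a binary stream rejects str, and an empty write has
# no visible side effect. Hedged, standalone sketch (looks_binary is an
# illustrative name):
import io

def looks_binary(stream):
    try:
        stream.write(b"")   # harmless probe
        return True
    except Exception:
        return False

# looks_binary(io.BytesIO())  -> True
# looks_binary(io.StringIO()) -> False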
- if buf is not None and _is_binary_writer(buf, True): - return buf - - def _stream_is_misconfigured(stream): - """A stream is misconfigured if its encoding is ASCII.""" - # If the stream does not have an encoding set, we assume it's set - # to ASCII. This appears to happen in certain unittest - # environments. It's not quite clear what the correct behavior is - # but this at least will force Click to recover somehow. - return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii") - - def _is_compat_stream_attr(stream, attr, value): - """A stream attribute is compatible if it is equal to the - desired value or the desired value is unset and the attribute - has a value. - """ - stream_value = getattr(stream, attr, None) - return stream_value == value or (value is None and stream_value is not None) - - def _is_compatible_text_stream(stream, encoding, errors): - """Check if a stream's encoding and errors attributes are - compatible with the desired values. - """ - return _is_compat_stream_attr( - stream, "encoding", encoding - ) and _is_compat_stream_attr(stream, "errors", errors) - - def _force_correct_text_stream( - text_stream, - encoding, - errors, - is_binary, - find_binary, - force_readable=False, - force_writable=False, - ): - if is_binary(text_stream, False): - binary_reader = text_stream - else: - # If the stream looks compatible, and won't default to a - # misconfigured ascii encoding, return it as-is. - if _is_compatible_text_stream(text_stream, encoding, errors) and not ( - encoding is None and _stream_is_misconfigured(text_stream) - ): - return text_stream - - # Otherwise, get the underlying binary reader. - binary_reader = find_binary(text_stream) - - # If that's not possible, silently use the original reader - # and get mojibake instead of exceptions. - if binary_reader is None: - return text_stream - - # Default errors to replace instead of strict in order to get - # something that works. - if errors is None: - errors = "replace" - - # Wrap the binary stream in a text stream with the correct - # encoding parameters. - return _make_text_stream( - binary_reader, - encoding, - errors, - force_readable=force_readable, - force_writable=force_writable, - ) - - def _force_correct_text_reader(text_reader, encoding, errors, force_readable=False): - return _force_correct_text_stream( - text_reader, - encoding, - errors, - _is_binary_reader, - _find_binary_reader, - force_readable=force_readable, - ) - - def _force_correct_text_writer(text_writer, encoding, errors, force_writable=False): - return _force_correct_text_stream( - text_writer, - encoding, - errors, - _is_binary_writer, - _find_binary_writer, - force_writable=force_writable, - ) - - def get_binary_stdin(): - reader = _find_binary_reader(sys.stdin) - if reader is None: - raise RuntimeError("Was not able to determine binary stream for sys.stdin.") - return reader - - def get_binary_stdout(): - writer = _find_binary_writer(sys.stdout) - if writer is None: - raise RuntimeError( - "Was not able to determine binary stream for sys.stdout." - ) - return writer - - def get_binary_stderr(): - writer = _find_binary_writer(sys.stderr) - if writer is None: - raise RuntimeError( - "Was not able to determine binary stream for sys.stderr." 
- ) - return writer - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_reader( - sys.stdin, encoding, errors, force_readable=True - ) - - def get_text_stdout(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer( - sys.stdout, encoding, errors, force_writable=True - ) - - def get_text_stderr(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer( - sys.stderr, encoding, errors, force_writable=True - ) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), "replace") - else: - value = value.encode("utf-8", "surrogateescape").decode("utf-8", "replace") - return value - - -def get_streerror(e, default=None): - if hasattr(e, "strerror"): - msg = e.strerror - else: - if default is not None: - msg = default - else: - msg = str(e) - if isinstance(msg, bytes): - msg = msg.decode("utf-8", "replace") - return msg - - -def _wrap_io_open(file, mode, encoding, errors): - """On Python 2, :func:`io.open` returns a text file wrapper that - requires passing ``unicode`` to ``write``. Need to open the file in - binary mode then wrap it in a subclass that can write ``str`` and - ``unicode``. - - Also handles not passing ``encoding`` and ``errors`` in binary mode. - """ - binary = "b" in mode - - if binary: - kwargs = {} - else: - kwargs = {"encoding": encoding, "errors": errors} - - if not PY2 or binary: - return io.open(file, mode, **kwargs) - - f = io.open(file, "{}b".format(mode.replace("t", ""))) - return _make_text_stream(f, **kwargs) - - -def open_stream(filename, mode="r", encoding=None, errors="strict", atomic=False): - binary = "b" in mode - - # Standard streams first. These are simple because they don't need - # special handling for the atomic flag. It's entirely ignored. - if filename == "-": - if any(m in mode for m in ["w", "a", "x"]): - if binary: - return get_binary_stdout(), False - return get_text_stdout(encoding=encoding, errors=errors), False - if binary: - return get_binary_stdin(), False - return get_text_stdin(encoding=encoding, errors=errors), False - - # Non-atomic writes directly go out through the regular open functions. - if not atomic: - return _wrap_io_open(filename, mode, encoding, errors), True - - # Some usability stuff for atomic writes - if "a" in mode: - raise ValueError( - "Appending to an existing file is not supported, because that" - " would involve an expensive `copy`-operation to a temporary" - " file. Open the file in normal `w`-mode and copy explicitly" - " if that's what you're after." - ) - if "x" in mode: - raise ValueError("Use the `overwrite`-parameter instead.") - if "w" not in mode: - raise ValueError("Atomic writes only make sense with `w`-mode.") - - # Atomic writes are more complicated. They work by opening a file - # as a proxy in the same folder and then using the fdopen - # functionality to wrap it in a Python file. Then we wrap it in an - # atomic file that moves the file over on close. 
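# Aside: the essence of the atomic-write dance described in the comment above,
# as a hedged minimal sketch (same-filesystem rename semantics assumed; the
# real code below additionally preserves permissions and retries on name
# collisions; atomic_write_text is an illustrative name):
import os
import tempfile

def atomic_write_text(path, data):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "w") as f:
            f.write(data)
        os.replace(tmp, path)   # the atomic step: rename over the target
    except BaseException:
        os.unlink(tmp)
        raise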
- import errno - import random - - try: - perm = os.stat(filename).st_mode - except OSError: - perm = None - - flags = os.O_RDWR | os.O_CREAT | os.O_EXCL - - if binary: - flags |= getattr(os, "O_BINARY", 0) - - while True: - tmp_filename = os.path.join( - os.path.dirname(filename), - ".__atomic-write{:08x}".format(random.randrange(1 << 32)), - ) - try: - fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm) - break - except OSError as e: - if e.errno == errno.EEXIST or ( - os.name == "nt" - and e.errno == errno.EACCES - and os.path.isdir(e.filename) - and os.access(e.filename, os.W_OK) - ): - continue - raise - - if perm is not None: - os.chmod(tmp_filename, perm) # in case perm includes bits in umask - - f = _wrap_io_open(fd, mode, encoding, errors) - return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True - - -# Used in a destructor call, needs extra protection from interpreter cleanup. -if hasattr(os, "replace"): - _replace = os.replace - _can_replace = True -else: - _replace = os.rename - _can_replace = not WIN - - -class _AtomicFile(object): - def __init__(self, f, tmp_filename, real_filename): - self._f = f - self._tmp_filename = tmp_filename - self._real_filename = real_filename - self.closed = False - - @property - def name(self): - return self._real_filename - - def close(self, delete=False): - if self.closed: - return - self._f.close() - if not _can_replace: - try: - os.remove(self._real_filename) - except OSError: - pass - _replace(self._tmp_filename, self._real_filename) - self.closed = True - - def __getattr__(self, name): - return getattr(self._f, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close(delete=exc_type is not None) - - def __repr__(self): - return repr(self._f) - - -auto_wrap_for_ansi = None -colorama = None -get_winterm_size = None - - -def strip_ansi(value): - return _ansi_re.sub("", value) - - -def _is_jupyter_kernel_output(stream): - if WIN: - # TODO: Couldn't test on Windows, should't try to support until - # someone tests the details wrt colorama. - return - - while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)): - stream = stream._stream - - return stream.__class__.__module__.startswith("ipykernel.") - - -def should_strip_ansi(stream=None, color=None): - if color is None: - if stream is None: - stream = sys.stdin - return not isatty(stream) and not _is_jupyter_kernel_output(stream) - return not color - - -# If we're on Windows, we provide transparent integration through -# colorama. This will make ANSI colors through the echo function -# work automatically. -if WIN: - # Windows has a smaller terminal - DEFAULT_COLUMNS = 79 - - from ._winconsole import _get_windows_console_stream, _wrap_std_stream - - def _get_argv_encoding(): - import locale - - return locale.getpreferredencoding() - - if PY2: - - def raw_input(prompt=""): - sys.stderr.flush() - if prompt: - stdout = _default_text_stdout() - stdout.write(prompt) - stdin = _default_text_stdin() - return stdin.readline().rstrip("\r\n") - - try: - import colorama - except ImportError: - pass - else: - _ansi_stream_wrappers = WeakKeyDictionary() - - def auto_wrap_for_ansi(stream, color=None): - """This function wraps a stream so that calls through colorama - are issued to the win32 console API to recolor on demand. It - also ensures to reset the colors if a write call is interrupted - to not destroy the console afterwards. 
- """ - try: - cached = _ansi_stream_wrappers.get(stream) - except Exception: - cached = None - if cached is not None: - return cached - strip = should_strip_ansi(stream, color) - ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) - rv = ansi_wrapper.stream - _write = rv.write - - def _safe_write(s): - try: - return _write(s) - except: - ansi_wrapper.reset_all() - raise - - rv.write = _safe_write - try: - _ansi_stream_wrappers[stream] = rv - except Exception: - pass - return rv - - def get_winterm_size(): - win = colorama.win32.GetConsoleScreenBufferInfo( - colorama.win32.STDOUT - ).srWindow - return win.Right - win.Left, win.Bottom - win.Top - - -else: - - def _get_argv_encoding(): - return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding() - - _get_windows_console_stream = lambda *x: None - _wrap_std_stream = lambda *x: None - - -def term_len(x): - return len(strip_ansi(x)) - - -def isatty(stream): - try: - return stream.isatty() - except Exception: - return False - - -def _make_cached_stream_func(src_func, wrapper_func): - cache = WeakKeyDictionary() - - def func(): - stream = src_func() - try: - rv = cache.get(stream) - except Exception: - rv = None - if rv is not None: - return rv - rv = wrapper_func() - try: - stream = src_func() # In case wrapper_func() modified the stream - cache[stream] = rv - except Exception: - pass - return rv - - return func - - -_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin) -_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout) -_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr) - - -binary_streams = { - "stdin": get_binary_stdin, - "stdout": get_binary_stdout, - "stderr": get_binary_stderr, -} - -text_streams = { - "stdin": get_text_stdin, - "stdout": get_text_stdout, - "stderr": get_text_stderr, -} diff --git a/venv/lib/python3.7/site-packages/click/_termui_impl.py b/venv/lib/python3.7/site-packages/click/_termui_impl.py deleted file mode 100644 index c6e86cc..0000000 --- a/venv/lib/python3.7/site-packages/click/_termui_impl.py +++ /dev/null @@ -1,661 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module contains implementations for the termui module. To keep the -import time of Click down, some infrequently used functionality is -placed in this module and only imported as needed. 
-""" -import contextlib -import math -import os -import sys -import time - -from ._compat import _default_text_stdout -from ._compat import CYGWIN -from ._compat import get_best_encoding -from ._compat import int_types -from ._compat import isatty -from ._compat import open_stream -from ._compat import range_type -from ._compat import shlex_quote -from ._compat import strip_ansi -from ._compat import term_len -from ._compat import WIN -from .exceptions import ClickException -from .utils import echo - -if os.name == "nt": - BEFORE_BAR = "\r" - AFTER_BAR = "\n" -else: - BEFORE_BAR = "\r\033[?25l" - AFTER_BAR = "\033[?25h\n" - - -def _length_hint(obj): - """Returns the length hint of an object.""" - try: - return len(obj) - except (AttributeError, TypeError): - try: - get_hint = type(obj).__length_hint__ - except AttributeError: - return None - try: - hint = get_hint(obj) - except TypeError: - return None - if hint is NotImplemented or not isinstance(hint, int_types) or hint < 0: - return None - return hint - - -class ProgressBar(object): - def __init__( - self, - iterable, - length=None, - fill_char="#", - empty_char=" ", - bar_template="%(bar)s", - info_sep=" ", - show_eta=True, - show_percent=None, - show_pos=False, - item_show_func=None, - label=None, - file=None, - color=None, - width=30, - ): - self.fill_char = fill_char - self.empty_char = empty_char - self.bar_template = bar_template - self.info_sep = info_sep - self.show_eta = show_eta - self.show_percent = show_percent - self.show_pos = show_pos - self.item_show_func = item_show_func - self.label = label or "" - if file is None: - file = _default_text_stdout() - self.file = file - self.color = color - self.width = width - self.autowidth = width == 0 - - if length is None: - length = _length_hint(iterable) - if iterable is None: - if length is None: - raise TypeError("iterable or length is required") - iterable = range_type(length) - self.iter = iter(iterable) - self.length = length - self.length_known = length is not None - self.pos = 0 - self.avg = [] - self.start = self.last_eta = time.time() - self.eta_known = False - self.finished = False - self.max_width = None - self.entered = False - self.current_item = None - self.is_hidden = not isatty(self.file) - self._last_line = None - self.short_limit = 0.5 - - def __enter__(self): - self.entered = True - self.render_progress() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.render_finish() - - def __iter__(self): - if not self.entered: - raise RuntimeError("You need to use progress bars in a with block.") - self.render_progress() - return self.generator() - - def __next__(self): - # Iteration is defined in terms of a generator function, - # returned by iter(self); use that to define next(). This works - # because `self.iter` is an iterable consumed by that generator, - # so it is re-entry safe. Calling `next(self.generator())` - # twice works and does "what you want". 
- return next(iter(self)) - - # Python 2 compat - next = __next__ - - def is_fast(self): - return time.time() - self.start <= self.short_limit - - def render_finish(self): - if self.is_hidden or self.is_fast(): - return - self.file.write(AFTER_BAR) - self.file.flush() - - @property - def pct(self): - if self.finished: - return 1.0 - return min(self.pos / (float(self.length) or 1), 1.0) - - @property - def time_per_iteration(self): - if not self.avg: - return 0.0 - return sum(self.avg) / float(len(self.avg)) - - @property - def eta(self): - if self.length_known and not self.finished: - return self.time_per_iteration * (self.length - self.pos) - return 0.0 - - def format_eta(self): - if self.eta_known: - t = int(self.eta) - seconds = t % 60 - t //= 60 - minutes = t % 60 - t //= 60 - hours = t % 24 - t //= 24 - if t > 0: - return "{}d {:02}:{:02}:{:02}".format(t, hours, minutes, seconds) - else: - return "{:02}:{:02}:{:02}".format(hours, minutes, seconds) - return "" - - def format_pos(self): - pos = str(self.pos) - if self.length_known: - pos += "/{}".format(self.length) - return pos - - def format_pct(self): - return "{: 4}%".format(int(self.pct * 100))[1:] - - def format_bar(self): - if self.length_known: - bar_length = int(self.pct * self.width) - bar = self.fill_char * bar_length - bar += self.empty_char * (self.width - bar_length) - elif self.finished: - bar = self.fill_char * self.width - else: - bar = list(self.empty_char * (self.width or 1)) - if self.time_per_iteration != 0: - bar[ - int( - (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5) - * self.width - ) - ] = self.fill_char - bar = "".join(bar) - return bar - - def format_progress_line(self): - show_percent = self.show_percent - - info_bits = [] - if self.length_known and show_percent is None: - show_percent = not self.show_pos - - if self.show_pos: - info_bits.append(self.format_pos()) - if show_percent: - info_bits.append(self.format_pct()) - if self.show_eta and self.eta_known and not self.finished: - info_bits.append(self.format_eta()) - if self.item_show_func is not None: - item_info = self.item_show_func(self.current_item) - if item_info is not None: - info_bits.append(item_info) - - return ( - self.bar_template - % { - "label": self.label, - "bar": self.format_bar(), - "info": self.info_sep.join(info_bits), - } - ).rstrip() - - def render_progress(self): - from .termui import get_terminal_size - - if self.is_hidden: - return - - buf = [] - # Update width in case the terminal has been resized - if self.autowidth: - old_width = self.width - self.width = 0 - clutter_length = term_len(self.format_progress_line()) - new_width = max(0, get_terminal_size()[0] - clutter_length) - if new_width < old_width: - buf.append(BEFORE_BAR) - buf.append(" " * self.max_width) - self.max_width = new_width - self.width = new_width - - clear_width = self.width - if self.max_width is not None: - clear_width = self.max_width - - buf.append(BEFORE_BAR) - line = self.format_progress_line() - line_len = term_len(line) - if self.max_width is None or self.max_width < line_len: - self.max_width = line_len - - buf.append(line) - buf.append(" " * (clear_width - line_len)) - line = "".join(buf) - # Render the line only if it changed. 
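# Aside: the carriage-return repaint trick used below, in isolation (hedged;
# the real bar also pads with spaces to clear a previously longer line, and
# demo_bar is an illustrative name):
import sys
import time

def demo_bar(width=20, delay=0.02):
    last = None
    for i in range(width + 1):
        line = "[{}{}]".format("#" * i, " " * (width - i))
        if line != last:             # only repaint when the text changed
            sys.stdout.write("\r" + line)
            sys.stdout.flush()
            last = line
        time.sleep(delay)
    sys.stdout.write("\n")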
- - if line != self._last_line and not self.is_fast(): - self._last_line = line - echo(line, file=self.file, color=self.color, nl=False) - self.file.flush() - - def make_step(self, n_steps): - self.pos += n_steps - if self.length_known and self.pos >= self.length: - self.finished = True - - if (time.time() - self.last_eta) < 1.0: - return - - self.last_eta = time.time() - - # self.avg is a rolling list of length <= 7 of steps where steps are - # defined as time elapsed divided by the total progress through - # self.length. - if self.pos: - step = (time.time() - self.start) / self.pos - else: - step = time.time() - self.start - - self.avg = self.avg[-6:] + [step] - - self.eta_known = self.length_known - - def update(self, n_steps): - self.make_step(n_steps) - self.render_progress() - - def finish(self): - self.eta_known = 0 - self.current_item = None - self.finished = True - - def generator(self): - """Return a generator which yields the items added to the bar - during construction, and updates the progress bar *after* the - yielded block returns. - """ - # WARNING: the iterator interface for `ProgressBar` relies on - # this and only works because this is a simple generator which - # doesn't create or manage additional state. If this function - # changes, the impact should be evaluated both against - # `iter(bar)` and `next(bar)`. `next()` in particular may call - # `self.generator()` repeatedly, and this must remain safe in - # order for that interface to work. - if not self.entered: - raise RuntimeError("You need to use progress bars in a with block.") - - if self.is_hidden: - for rv in self.iter: - yield rv - else: - for rv in self.iter: - self.current_item = rv - yield rv - self.update(1) - self.finish() - self.render_progress() - - -def pager(generator, color=None): - """Decide what method to use for paging through text.""" - stdout = _default_text_stdout() - if not isatty(sys.stdin) or not isatty(stdout): - return _nullpager(stdout, generator, color) - pager_cmd = (os.environ.get("PAGER", None) or "").strip() - if pager_cmd: - if WIN: - return _tempfilepager(generator, pager_cmd, color) - return _pipepager(generator, pager_cmd, color) - if os.environ.get("TERM") in ("dumb", "emacs"): - return _nullpager(stdout, generator, color) - if WIN or sys.platform.startswith("os2"): - return _tempfilepager(generator, "more <", color) - if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0: - return _pipepager(generator, "less", color) - - import tempfile - - fd, filename = tempfile.mkstemp() - os.close(fd) - try: - if ( - hasattr(os, "system") - and os.system("more {}".format(shlex_quote(filename))) == 0 - ): - return _pipepager(generator, "more", color) - return _nullpager(stdout, generator, color) - finally: - os.unlink(filename) - - -def _pipepager(generator, cmd, color): - """Page through text by feeding it to another program. Invoking a - pager through this might support colors. 
- """ - import subprocess - - env = dict(os.environ) - - # If we're piping to less we might support colors under the - # condition that - cmd_detail = cmd.rsplit("/", 1)[-1].split() - if color is None and cmd_detail[0] == "less": - less_flags = "{}{}".format(os.environ.get("LESS", ""), " ".join(cmd_detail[1:])) - if not less_flags: - env["LESS"] = "-R" - color = True - elif "r" in less_flags or "R" in less_flags: - color = True - - c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env) - encoding = get_best_encoding(c.stdin) - try: - for text in generator: - if not color: - text = strip_ansi(text) - - c.stdin.write(text.encode(encoding, "replace")) - except (IOError, KeyboardInterrupt): - pass - else: - c.stdin.close() - - # Less doesn't respect ^C, but catches it for its own UI purposes (aborting - # search or other commands inside less). - # - # That means when the user hits ^C, the parent process (click) terminates, - # but less is still alive, paging the output and messing up the terminal. - # - # If the user wants to make the pager exit on ^C, they should set - # `LESS='-K'`. It's not our decision to make. - while True: - try: - c.wait() - except KeyboardInterrupt: - pass - else: - break - - -def _tempfilepager(generator, cmd, color): - """Page through text by invoking a program on a temporary file.""" - import tempfile - - filename = tempfile.mktemp() - # TODO: This never terminates if the passed generator never terminates. - text = "".join(generator) - if not color: - text = strip_ansi(text) - encoding = get_best_encoding(sys.stdout) - with open_stream(filename, "wb")[0] as f: - f.write(text.encode(encoding)) - try: - os.system("{} {}".format(shlex_quote(cmd), shlex_quote(filename))) - finally: - os.unlink(filename) - - -def _nullpager(stream, generator, color): - """Simply print unformatted text. 
This is the ultimate fallback.""" - for text in generator: - if not color: - text = strip_ansi(text) - stream.write(text) - - -class Editor(object): - def __init__(self, editor=None, env=None, require_save=True, extension=".txt"): - self.editor = editor - self.env = env - self.require_save = require_save - self.extension = extension - - def get_editor(self): - if self.editor is not None: - return self.editor - for key in "VISUAL", "EDITOR": - rv = os.environ.get(key) - if rv: - return rv - if WIN: - return "notepad" - for editor in "sensible-editor", "vim", "nano": - if os.system("which {} >/dev/null 2>&1".format(editor)) == 0: - return editor - return "vi" - - def edit_file(self, filename): - import subprocess - - editor = self.get_editor() - if self.env: - environ = os.environ.copy() - environ.update(self.env) - else: - environ = None - try: - c = subprocess.Popen( - "{} {}".format(shlex_quote(editor), shlex_quote(filename)), - env=environ, - shell=True, - ) - exit_code = c.wait() - if exit_code != 0: - raise ClickException("{}: Editing failed!".format(editor)) - except OSError as e: - raise ClickException("{}: Editing failed: {}".format(editor, e)) - - def edit(self, text): - import tempfile - - text = text or "" - if text and not text.endswith("\n"): - text += "\n" - - fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension) - try: - if WIN: - encoding = "utf-8-sig" - text = text.replace("\n", "\r\n") - else: - encoding = "utf-8" - text = text.encode(encoding) - - f = os.fdopen(fd, "wb") - f.write(text) - f.close() - timestamp = os.path.getmtime(name) - - self.edit_file(name) - - if self.require_save and os.path.getmtime(name) == timestamp: - return None - - f = open(name, "rb") - try: - rv = f.read() - finally: - f.close() - return rv.decode("utf-8-sig").replace("\r\n", "\n") - finally: - os.unlink(name) - - -def open_url(url, wait=False, locate=False): - import subprocess - - def _unquote_file(url): - try: - import urllib - except ImportError: - import urllib - if url.startswith("file://"): - url = urllib.unquote(url[7:]) - return url - - if sys.platform == "darwin": - args = ["open"] - if wait: - args.append("-W") - if locate: - args.append("-R") - args.append(_unquote_file(url)) - null = open("/dev/null", "w") - try: - return subprocess.Popen(args, stderr=null).wait() - finally: - null.close() - elif WIN: - if locate: - url = _unquote_file(url) - args = "explorer /select,{}".format(shlex_quote(url)) - else: - args = 'start {} "" {}'.format("/WAIT" if wait else "", shlex_quote(url)) - return os.system(args) - elif CYGWIN: - if locate: - url = _unquote_file(url) - args = "cygstart {}".format(shlex_quote(os.path.dirname(url))) - else: - args = "cygstart {} {}".format("-w" if wait else "", shlex_quote(url)) - return os.system(args) - - try: - if locate: - url = os.path.dirname(_unquote_file(url)) or "." 
- else: - url = _unquote_file(url) - c = subprocess.Popen(["xdg-open", url]) - if wait: - return c.wait() - return 0 - except OSError: - if url.startswith(("http://", "https://")) and not locate and not wait: - import webbrowser - - webbrowser.open(url) - return 0 - return 1 - - -def _translate_ch_to_exc(ch): - if ch == u"\x03": - raise KeyboardInterrupt() - if ch == u"\x04" and not WIN: # Unix-like, Ctrl+D - raise EOFError() - if ch == u"\x1a" and WIN: # Windows, Ctrl+Z - raise EOFError() - - -if WIN: - import msvcrt - - @contextlib.contextmanager - def raw_terminal(): - yield - - def getchar(echo): - # The function `getch` will return a bytes object corresponding to - # the pressed character. Since Windows 10 build 1803, it will also - # return \x00 when called a second time after pressing a regular key. - # - # `getwch` does not share this probably-bugged behavior. Moreover, it - # returns a Unicode object by default, which is what we want. - # - # Either of these functions will return \x00 or \xe0 to indicate - # a special key, and you need to call the same function again to get - # the "rest" of the code. The fun part is that \u00e0 is - # "latin small letter a with grave", so if you type that on a French - # keyboard, you _also_ get a \xe0. - # E.g., consider the Up arrow. This returns \xe0 and then \x48. The - # resulting Unicode string reads as "a with grave" + "capital H". - # This is indistinguishable from when the user actually types - # "a with grave" and then "capital H". - # - # When \xe0 is returned, we assume it's part of a special-key sequence - # and call `getwch` again, but that means that when the user types - # the \u00e0 character, `getchar` doesn't return until a second - # character is typed. - # The alternative is returning immediately, but that would mess up - # cross-platform handling of arrow keys and others that start with - # \xe0. Another option is using `getch`, but then we can't reliably - # read non-ASCII characters, because return values of `getch` are - # limited to the current 8-bit codepage. - # - # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` - # is doing the right thing in more situations than with `getch`. - if echo: - func = msvcrt.getwche - else: - func = msvcrt.getwch - - rv = func() - if rv in (u"\x00", u"\xe0"): - # \x00 and \xe0 are control characters that indicate special key, - # see above. 
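# Aside: the two-call protocol for special keys described above, as a hedged
# Windows-only sketch (msvcrt is in the stdlib but exists only on Windows;
# read_key is an illustrative name):
import sys

if sys.platform == "win32":
    import msvcrt

    def read_key():
        ch = msvcrt.getwch()
        if ch in ("\x00", "\xe0"):   # prefix: a special-key code follows
            ch += msvcrt.getwch()    # e.g. the Up arrow arrives as "\xe0" + "H"
        return ch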
- rv += func() - _translate_ch_to_exc(rv) - return rv - - -else: - import tty - import termios - - @contextlib.contextmanager - def raw_terminal(): - if not isatty(sys.stdin): - f = open("/dev/tty") - fd = f.fileno() - else: - fd = sys.stdin.fileno() - f = None - try: - old_settings = termios.tcgetattr(fd) - try: - tty.setraw(fd) - yield fd - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - sys.stdout.flush() - if f is not None: - f.close() - except termios.error: - pass - - def getchar(echo): - with raw_terminal() as fd: - ch = os.read(fd, 32) - ch = ch.decode(get_best_encoding(sys.stdin), "replace") - if echo and isatty(sys.stdout): - sys.stdout.write(ch) - _translate_ch_to_exc(ch) - return ch diff --git a/venv/lib/python3.7/site-packages/click/_textwrap.py b/venv/lib/python3.7/site-packages/click/_textwrap.py deleted file mode 100644 index 6959087..0000000 --- a/venv/lib/python3.7/site-packages/click/_textwrap.py +++ /dev/null @@ -1,37 +0,0 @@ -import textwrap -from contextlib import contextmanager - - -class TextWrapper(textwrap.TextWrapper): - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): - space_left = max(width - cur_len, 1) - - if self.break_long_words: - last = reversed_chunks[-1] - cut = last[:space_left] - res = last[space_left:] - cur_line.append(cut) - reversed_chunks[-1] = res - elif not cur_line: - cur_line.append(reversed_chunks.pop()) - - @contextmanager - def extra_indent(self, indent): - old_initial_indent = self.initial_indent - old_subsequent_indent = self.subsequent_indent - self.initial_indent += indent - self.subsequent_indent += indent - try: - yield - finally: - self.initial_indent = old_initial_indent - self.subsequent_indent = old_subsequent_indent - - def indent_only(self, text): - rv = [] - for idx, line in enumerate(text.splitlines()): - indent = self.initial_indent - if idx > 0: - indent = self.subsequent_indent - rv.append(indent + line) - return "\n".join(rv) diff --git a/venv/lib/python3.7/site-packages/click/_unicodefun.py b/venv/lib/python3.7/site-packages/click/_unicodefun.py deleted file mode 100644 index 781c365..0000000 --- a/venv/lib/python3.7/site-packages/click/_unicodefun.py +++ /dev/null @@ -1,131 +0,0 @@ -import codecs -import os -import sys - -from ._compat import PY2 - - -def _find_unicode_literals_frame(): - import __future__ - - if not hasattr(sys, "_getframe"): # not all Python implementations have it - return 0 - frm = sys._getframe(1) - idx = 1 - while frm is not None: - if frm.f_globals.get("__name__", "").startswith("click."): - frm = frm.f_back - idx += 1 - elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag: - return idx - else: - break - return 0 - - -def _check_for_unicode_literals(): - if not __debug__: - return - - from . import disable_unicode_literals_warning - - if not PY2 or disable_unicode_literals_warning: - return - bad_frame = _find_unicode_literals_frame() - if bad_frame <= 0: - return - from warnings import warn - - warn( - Warning( - "Click detected the use of the unicode_literals __future__" - " import. This is heavily discouraged because it can" - " introduce subtle bugs in your code. You should instead" - ' use explicit u"" literals for your unicode strings. 
For' - " more information see" - " https://click.palletsprojects.com/python3/" - ), - stacklevel=bad_frame, - ) - - -def _verify_python3_env(): - """Ensures that the environment is good for unicode on Python 3.""" - if PY2: - return - try: - import locale - - fs_enc = codecs.lookup(locale.getpreferredencoding()).name - except Exception: - fs_enc = "ascii" - if fs_enc != "ascii": - return - - extra = "" - if os.name == "posix": - import subprocess - - try: - rv = subprocess.Popen( - ["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE - ).communicate()[0] - except OSError: - rv = b"" - good_locales = set() - has_c_utf8 = False - - # Make sure we're operating on text here. - if isinstance(rv, bytes): - rv = rv.decode("ascii", "replace") - - for line in rv.splitlines(): - locale = line.strip() - if locale.lower().endswith((".utf-8", ".utf8")): - good_locales.add(locale) - if locale.lower() in ("c.utf8", "c.utf-8"): - has_c_utf8 = True - - extra += "\n\n" - if not good_locales: - extra += ( - "Additional information: on this system no suitable" - " UTF-8 locales were discovered. This most likely" - " requires resolving by reconfiguring the locale" - " system." - ) - elif has_c_utf8: - extra += ( - "This system supports the C.UTF-8 locale which is" - " recommended. You might be able to resolve your issue" - " by exporting the following environment variables:\n\n" - " export LC_ALL=C.UTF-8\n" - " export LANG=C.UTF-8" - ) - else: - extra += ( - "This system lists a couple of UTF-8 supporting locales" - " that you can pick from. The following suitable" - " locales were discovered: {}".format(", ".join(sorted(good_locales))) - ) - - bad_locale = None - for locale in os.environ.get("LC_ALL"), os.environ.get("LANG"): - if locale and locale.lower().endswith((".utf-8", ".utf8")): - bad_locale = locale - if locale is not None: - break - if bad_locale is not None: - extra += ( - "\n\nClick discovered that you exported a UTF-8 locale" - " but the locale system could not pick up from it" - " because it does not exist. The exported locale is" - " '{}' but it is not supported".format(bad_locale) - ) - - raise RuntimeError( - "Click will abort further execution because Python 3 was" - " configured to use ASCII as encoding for the environment." - " Consult https://click.palletsprojects.com/python3/ for" - " mitigation steps.{}".format(extra) - ) diff --git a/venv/lib/python3.7/site-packages/click/_winconsole.py b/venv/lib/python3.7/site-packages/click/_winconsole.py deleted file mode 100644 index b6c4274..0000000 --- a/venv/lib/python3.7/site-packages/click/_winconsole.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -# This module is based on the excellent work by Adam Bartoš who -# provided a lot of what went into the implementation here in -# the discussion to issue1602 in the Python bug tracker. -# -# There are some general differences in regards to how this works -# compared to the original patches as we do not need to patch -# the entire interpreter but just work in our little world of -# echo and prmopt. 
-import ctypes -import io -import os -import sys -import time -import zlib -from ctypes import byref -from ctypes import c_char -from ctypes import c_char_p -from ctypes import c_int -from ctypes import c_ssize_t -from ctypes import c_ulong -from ctypes import c_void_p -from ctypes import POINTER -from ctypes import py_object -from ctypes import windll -from ctypes import WinError -from ctypes import WINFUNCTYPE -from ctypes.wintypes import DWORD -from ctypes.wintypes import HANDLE -from ctypes.wintypes import LPCWSTR -from ctypes.wintypes import LPWSTR - -import msvcrt - -from ._compat import _NonClosingTextIOWrapper -from ._compat import PY2 -from ._compat import text_type - -try: - from ctypes import pythonapi - - PyObject_GetBuffer = pythonapi.PyObject_GetBuffer - PyBuffer_Release = pythonapi.PyBuffer_Release -except ImportError: - pythonapi = None - - -c_ssize_p = POINTER(c_ssize_t) - -kernel32 = windll.kernel32 -GetStdHandle = kernel32.GetStdHandle -ReadConsoleW = kernel32.ReadConsoleW -WriteConsoleW = kernel32.WriteConsoleW -GetConsoleMode = kernel32.GetConsoleMode -GetLastError = kernel32.GetLastError -GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32)) -CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))( - ("CommandLineToArgvW", windll.shell32) -) -LocalFree = WINFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)( - ("LocalFree", windll.kernel32) -) - - -STDIN_HANDLE = GetStdHandle(-10) -STDOUT_HANDLE = GetStdHandle(-11) -STDERR_HANDLE = GetStdHandle(-12) - - -PyBUF_SIMPLE = 0 -PyBUF_WRITABLE = 1 - -ERROR_SUCCESS = 0 -ERROR_NOT_ENOUGH_MEMORY = 8 -ERROR_OPERATION_ABORTED = 995 - -STDIN_FILENO = 0 -STDOUT_FILENO = 1 -STDERR_FILENO = 2 - -EOF = b"\x1a" -MAX_BYTES_WRITTEN = 32767 - - -class Py_buffer(ctypes.Structure): - _fields_ = [ - ("buf", c_void_p), - ("obj", py_object), - ("len", c_ssize_t), - ("itemsize", c_ssize_t), - ("readonly", c_int), - ("ndim", c_int), - ("format", c_char_p), - ("shape", c_ssize_p), - ("strides", c_ssize_p), - ("suboffsets", c_ssize_p), - ("internal", c_void_p), - ] - - if PY2: - _fields_.insert(-1, ("smalltable", c_ssize_t * 2)) - - -# On PyPy we cannot get buffers so our ability to operate here is -# serverly limited. 
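# Aside: what get_buffer() below provides, in spirit — a writable raw view
# over a Python buffer object. A pure-Python analogue for intuition only
# (hedged; the real code needs ctypes because the Win32 console calls want a
# raw pointer):
buf = bytearray(8)
view = memoryview(buf)
view[0:2] = b"hi"
# buf == bytearray(b"hi\x00\x00\x00\x00\x00\x00")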
-if pythonapi is None: - get_buffer = None -else: - - def get_buffer(obj, writable=False): - buf = Py_buffer() - flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE - PyObject_GetBuffer(py_object(obj), byref(buf), flags) - try: - buffer_type = c_char * buf.len - return buffer_type.from_address(buf.buf) - finally: - PyBuffer_Release(byref(buf)) - - -class _WindowsConsoleRawIOBase(io.RawIOBase): - def __init__(self, handle): - self.handle = handle - - def isatty(self): - io.RawIOBase.isatty(self) - return True - - -class _WindowsConsoleReader(_WindowsConsoleRawIOBase): - def readable(self): - return True - - def readinto(self, b): - bytes_to_be_read = len(b) - if not bytes_to_be_read: - return 0 - elif bytes_to_be_read % 2: - raise ValueError( - "cannot read odd number of bytes from UTF-16-LE encoded console" - ) - - buffer = get_buffer(b, writable=True) - code_units_to_be_read = bytes_to_be_read // 2 - code_units_read = c_ulong() - - rv = ReadConsoleW( - HANDLE(self.handle), - buffer, - code_units_to_be_read, - byref(code_units_read), - None, - ) - if GetLastError() == ERROR_OPERATION_ABORTED: - # wait for KeyboardInterrupt - time.sleep(0.1) - if not rv: - raise OSError("Windows error: {}".format(GetLastError())) - - if buffer[0] == EOF: - return 0 - return 2 * code_units_read.value - - -class _WindowsConsoleWriter(_WindowsConsoleRawIOBase): - def writable(self): - return True - - @staticmethod - def _get_error_message(errno): - if errno == ERROR_SUCCESS: - return "ERROR_SUCCESS" - elif errno == ERROR_NOT_ENOUGH_MEMORY: - return "ERROR_NOT_ENOUGH_MEMORY" - return "Windows error {}".format(errno) - - def write(self, b): - bytes_to_be_written = len(b) - buf = get_buffer(b) - code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2 - code_units_written = c_ulong() - - WriteConsoleW( - HANDLE(self.handle), - buf, - code_units_to_be_written, - byref(code_units_written), - None, - ) - bytes_written = 2 * code_units_written.value - - if bytes_written == 0 and bytes_to_be_written > 0: - raise OSError(self._get_error_message(GetLastError())) - return bytes_written - - -class ConsoleStream(object): - def __init__(self, text_stream, byte_stream): - self._text_stream = text_stream - self.buffer = byte_stream - - @property - def name(self): - return self.buffer.name - - def write(self, x): - if isinstance(x, text_type): - return self._text_stream.write(x) - try: - self.flush() - except Exception: - pass - return self.buffer.write(x) - - def writelines(self, lines): - for line in lines: - self.write(line) - - def __getattr__(self, name): - return getattr(self._text_stream, name) - - def isatty(self): - return self.buffer.isatty() - - def __repr__(self): - return "<ConsoleStream name={!r} encoding={!r}>".format( - self.name, self.encoding - ) - - -class WindowsChunkedWriter(object): - """ - Wraps a stream (such as stdout), acting as a transparent proxy for all - attribute access apart from method 'write()' which we wrap to write in - limited chunks due to a Windows limitation on binary console streams. - """ - - def __init__(self, wrapped): - # double-underscore everything to prevent clashes with names of - # attributes on the wrapped stream object.
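# Aside: the name-mangling that comment relies on — inside class W, the
# attribute self.__inner is stored as _W__inner, so it cannot collide with
# attributes of the wrapped stream. Tiny hedged demonstration (W is an
# illustrative name):
import io

class W(object):
    def __init__(self, inner):
        self.__inner = inner            # stored as _W__inner

    def __getattr__(self, name):        # only called on lookup misses
        return getattr(self.__inner, name)

# W(io.StringIO("x")).read() -> "x"  (read() proxied to the wrapped stream)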
- self.__wrapped = wrapped - - def __getattr__(self, name): - return getattr(self.__wrapped, name) - - def write(self, text): - total_to_write = len(text) - written = 0 - - while written < total_to_write: - to_write = min(total_to_write - written, MAX_BYTES_WRITTEN) - self.__wrapped.write(text[written : written + to_write]) - written += to_write - - -_wrapped_std_streams = set() - - -def _wrap_std_stream(name): - # Python 2 & Windows 7 and below - if ( - PY2 - and sys.getwindowsversion()[:2] <= (6, 1) - and name not in _wrapped_std_streams - ): - setattr(sys, name, WindowsChunkedWriter(getattr(sys, name))) - _wrapped_std_streams.add(name) - - -def _get_text_stdin(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), - "utf-16-le", - "strict", - line_buffering=True, - ) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stdout(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), - "utf-16-le", - "strict", - line_buffering=True, - ) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stderr(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), - "utf-16-le", - "strict", - line_buffering=True, - ) - return ConsoleStream(text_stream, buffer_stream) - - -if PY2: - - def _hash_py_argv(): - return zlib.crc32("\x00".join(sys.argv[1:])) - - _initial_argv_hash = _hash_py_argv() - - def _get_windows_argv(): - argc = c_int(0) - argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) - if not argv_unicode: - raise WinError() - try: - argv = [argv_unicode[i] for i in range(0, argc.value)] - finally: - LocalFree(argv_unicode) - del argv_unicode - - if not hasattr(sys, "frozen"): - argv = argv[1:] - while len(argv) > 0: - arg = argv[0] - if not arg.startswith("-") or arg == "-": - break - argv = argv[1:] - if arg.startswith(("-c", "-m")): - break - - return argv[1:] - - -_stream_factories = { - 0: _get_text_stdin, - 1: _get_text_stdout, - 2: _get_text_stderr, -} - - -def _is_console(f): - if not hasattr(f, "fileno"): - return False - - try: - fileno = f.fileno() - except OSError: - return False - - handle = msvcrt.get_osfhandle(fileno) - return bool(GetConsoleMode(handle, byref(DWORD()))) - - -def _get_windows_console_stream(f, encoding, errors): - if ( - get_buffer is not None - and encoding in ("utf-16-le", None) - and errors in ("strict", None) - and _is_console(f) - ): - func = _stream_factories.get(f.fileno()) - if func is not None: - if not PY2: - f = getattr(f, "buffer", None) - if f is None: - return None - else: - # If we are on Python 2 we need to set the stream that we - # deal with to binary mode as otherwise the exercise if a - # bit moot. The same problems apply as for - # get_binary_stdin and friends from _compat. 
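`_is_console` above asks Windows for the console mode of the stream's underlying OS handle; the rough cross-platform analogue of the same question is `isatty()` on the file descriptor. A small sketch (hypothetical helper, not part of click):

```
import os
import sys

def is_interactive(stream):
    # A stream counts as a console/tty only if it exposes a real file
    # descriptor and that descriptor is attached to a terminal.
    try:
        fileno = stream.fileno()
    except (AttributeError, OSError):
        return False
    return os.isatty(fileno)

print(is_interactive(sys.stdout))  # False when piped, True in a terminal
```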
-                msvcrt.setmode(f.fileno(), os.O_BINARY)
-            return func(f)
diff --git a/venv/lib/python3.7/site-packages/click/core.py b/venv/lib/python3.7/site-packages/click/core.py
deleted file mode 100644
index f58bf26..0000000
--- a/venv/lib/python3.7/site-packages/click/core.py
+++ /dev/null
@@ -1,2030 +0,0 @@
-import errno
-import inspect
-import os
-import sys
-from contextlib import contextmanager
-from functools import update_wrapper
-from itertools import repeat
-
-from ._compat import isidentifier
-from ._compat import iteritems
-from ._compat import PY2
-from ._compat import string_types
-from ._unicodefun import _check_for_unicode_literals
-from ._unicodefun import _verify_python3_env
-from .exceptions import Abort
-from .exceptions import BadParameter
-from .exceptions import ClickException
-from .exceptions import Exit
-from .exceptions import MissingParameter
-from .exceptions import UsageError
-from .formatting import HelpFormatter
-from .formatting import join_options
-from .globals import pop_context
-from .globals import push_context
-from .parser import OptionParser
-from .parser import split_opt
-from .termui import confirm
-from .termui import prompt
-from .termui import style
-from .types import BOOL
-from .types import convert_type
-from .types import IntRange
-from .utils import echo
-from .utils import get_os_args
-from .utils import make_default_short_help
-from .utils import make_str
-from .utils import PacifyFlushWrapper
-
-_missing = object()
-
-SUBCOMMAND_METAVAR = "COMMAND [ARGS]..."
-SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
-
-DEPRECATED_HELP_NOTICE = " (DEPRECATED)"
-DEPRECATED_INVOKE_NOTICE = "DeprecationWarning: The command %(name)s is deprecated."
-
-
-def _maybe_show_deprecated_notice(cmd):
-    if cmd.deprecated:
-        echo(style(DEPRECATED_INVOKE_NOTICE % {"name": cmd.name}, fg="red"), err=True)
-
-
-def fast_exit(code):
-    """Exit without garbage collection, this speeds up exit by about 10ms for
-    things like bash completion.
-    """
-    sys.stdout.flush()
-    sys.stderr.flush()
-    os._exit(code)
-
-
-def _bashcomplete(cmd, prog_name, complete_var=None):
-    """Internal handler for the bash completion support."""
-    if complete_var is None:
-        complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
-    complete_instr = os.environ.get(complete_var)
-    if not complete_instr:
-        return
-
-    from ._bashcomplete import bashcomplete
-
-    if bashcomplete(cmd, prog_name, complete_var, complete_instr):
-        fast_exit(1)
-
-
-def _check_multicommand(base_command, cmd_name, cmd, register=False):
-    if not base_command.chain or not isinstance(cmd, MultiCommand):
-        return
-    if register:
-        hint = (
-            "It is not possible to add multi commands as children to"
-            " another multi command that is in chain mode."
-        )
-    else:
-        hint = (
-            "Found a multi command as subcommand to a multi command"
-            " that is in chain mode. This is not supported."
-        )
-    raise RuntimeError(
-        "{}. Command '{}' is set to chain and '{}' was added as"
-        " subcommand but it in itself is a multi command.
('{}' is a {}" - " within a chained {} named '{}').".format( - hint, - base_command.name, - cmd_name, - cmd_name, - cmd.__class__.__name__, - base_command.__class__.__name__, - base_command.name, - ) - ) - - -def batch(iterable, batch_size): - return list(zip(*repeat(iter(iterable), batch_size))) - - -def invoke_param_callback(callback, ctx, param, value): - code = getattr(callback, "__code__", None) - args = getattr(code, "co_argcount", 3) - - if args < 3: - from warnings import warn - - warn( - "Parameter callbacks take 3 args, (ctx, param, value). The" - " 2-arg style is deprecated and will be removed in 8.0.".format(callback), - DeprecationWarning, - stacklevel=3, - ) - return callback(ctx, value) - - return callback(ctx, param, value) - - -@contextmanager -def augment_usage_errors(ctx, param=None): - """Context manager that attaches extra information to exceptions.""" - try: - yield - except BadParameter as e: - if e.ctx is None: - e.ctx = ctx - if param is not None and e.param is None: - e.param = param - raise - except UsageError as e: - if e.ctx is None: - e.ctx = ctx - raise - - -def iter_params_for_processing(invocation_order, declaration_order): - """Given a sequence of parameters in the order as should be considered - for processing and an iterable of parameters that exist, this returns - a list in the correct order as they should be processed. - """ - - def sort_key(item): - try: - idx = invocation_order.index(item) - except ValueError: - idx = float("inf") - return (not item.is_eager, idx) - - return sorted(declaration_order, key=sort_key) - - -class Context(object): - """The context is a special internal object that holds state relevant - for the script execution at every single level. It's normally invisible - to commands unless they opt-in to getting access to it. - - The context is useful as it can pass internal objects around and can - control special execution features such as reading data from - environment variables. - - A context can be used as context manager in which case it will call - :meth:`close` on teardown. - - .. versionadded:: 2.0 - Added the `resilient_parsing`, `help_option_names`, - `token_normalize_func` parameters. - - .. versionadded:: 3.0 - Added the `allow_extra_args` and `allow_interspersed_args` - parameters. - - .. versionadded:: 4.0 - Added the `color`, `ignore_unknown_options`, and - `max_content_width` parameters. - - .. versionadded:: 7.1 - Added the `show_default` parameter. - - :param command: the command class for this context. - :param parent: the parent context. - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it is usually - the name of the script, for commands below it it's - the name of the script. - :param obj: an arbitrary object of user data. - :param auto_envvar_prefix: the prefix to use for automatic environment - variables. If this is `None` then reading - from environment variables is disabled. This - does not affect manually set environment - variables which are always read. - :param default_map: a dictionary (like object) with default values - for parameters. - :param terminal_width: the width of the terminal. The default is - inherit from parent context. If no context - defines the terminal width then auto - detection will be applied. - :param max_content_width: the maximum width for content rendered by - Click (this currently only affects help - pages). This defaults to 80 characters if - not overridden. 
In other words: even if the - terminal is larger than that, Click will not - format things wider than 80 characters by - default. In addition to that, formatters might - add some safety mapping on the right. - :param resilient_parsing: if this flag is enabled then Click will - parse without any interactivity or callback - invocation. Default values will also be - ignored. This is useful for implementing - things such as completion support. - :param allow_extra_args: if this is set to `True` then extra arguments - at the end will not raise an error and will be - kept on the context. The default is to inherit - from the command. - :param allow_interspersed_args: if this is set to `False` then options - and arguments cannot be mixed. The - default is to inherit from the command. - :param ignore_unknown_options: instructs click to ignore options it does - not know and keeps them for later - processing. - :param help_option_names: optionally a list of strings that define how - the default help parameter is named. The - default is ``['--help']``. - :param token_normalize_func: an optional function that is used to - normalize tokens (options, choices, - etc.). This for instance can be used to - implement case insensitive behavior. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are used in texts that Click prints which is by - default not the case. This for instance would affect - help output. - :param show_default: if True, shows defaults for all options. - Even if an option is later created with show_default=False, - this command-level setting overrides it. - """ - - def __init__( - self, - command, - parent=None, - info_name=None, - obj=None, - auto_envvar_prefix=None, - default_map=None, - terminal_width=None, - max_content_width=None, - resilient_parsing=False, - allow_extra_args=None, - allow_interspersed_args=None, - ignore_unknown_options=None, - help_option_names=None, - token_normalize_func=None, - color=None, - show_default=None, - ): - #: the parent context or `None` if none exists. - self.parent = parent - #: the :class:`Command` for this context. - self.command = command - #: the descriptive information name - self.info_name = info_name - #: the parsed parameters except if the value is hidden in which - #: case it's not remembered. - self.params = {} - #: the leftover arguments. - self.args = [] - #: protected arguments. These are arguments that are prepended - #: to `args` when certain parsing scenarios are encountered but - #: must be never propagated to another arguments. This is used - #: to implement nested parsing. - self.protected_args = [] - if obj is None and parent is not None: - obj = parent.obj - #: the user object stored. - self.obj = obj - self._meta = getattr(parent, "meta", {}) - - #: A dictionary (-like object) with defaults for parameters. - if ( - default_map is None - and parent is not None - and parent.default_map is not None - ): - default_map = parent.default_map.get(info_name) - self.default_map = default_map - - #: This flag indicates if a subcommand is going to be executed. A - #: group callback can use this information to figure out if it's - #: being executed directly or because the execution flow passes - #: onwards to a subcommand. By default it's None, but it can be - #: the name of the subcommand to execute. - #: - #: If chaining is enabled this will be set to ``'*'`` in case - #: any commands are executed. 
It is however not possible to - #: figure out which ones. If you require this knowledge you - #: should use a :func:`resultcallback`. - self.invoked_subcommand = None - - if terminal_width is None and parent is not None: - terminal_width = parent.terminal_width - #: The width of the terminal (None is autodetection). - self.terminal_width = terminal_width - - if max_content_width is None and parent is not None: - max_content_width = parent.max_content_width - #: The maximum width of formatted content (None implies a sensible - #: default which is 80 for most things). - self.max_content_width = max_content_width - - if allow_extra_args is None: - allow_extra_args = command.allow_extra_args - #: Indicates if the context allows extra args or if it should - #: fail on parsing. - #: - #: .. versionadded:: 3.0 - self.allow_extra_args = allow_extra_args - - if allow_interspersed_args is None: - allow_interspersed_args = command.allow_interspersed_args - #: Indicates if the context allows mixing of arguments and - #: options or not. - #: - #: .. versionadded:: 3.0 - self.allow_interspersed_args = allow_interspersed_args - - if ignore_unknown_options is None: - ignore_unknown_options = command.ignore_unknown_options - #: Instructs click to ignore options that a command does not - #: understand and will store it on the context for later - #: processing. This is primarily useful for situations where you - #: want to call into external programs. Generally this pattern is - #: strongly discouraged because it's not possibly to losslessly - #: forward all arguments. - #: - #: .. versionadded:: 4.0 - self.ignore_unknown_options = ignore_unknown_options - - if help_option_names is None: - if parent is not None: - help_option_names = parent.help_option_names - else: - help_option_names = ["--help"] - - #: The names for the help options. - self.help_option_names = help_option_names - - if token_normalize_func is None and parent is not None: - token_normalize_func = parent.token_normalize_func - - #: An optional normalization function for tokens. This is - #: options, choices, commands etc. - self.token_normalize_func = token_normalize_func - - #: Indicates if resilient parsing is enabled. In that case Click - #: will do its best to not cause any failures and default values - #: will be ignored. Useful for completion. - self.resilient_parsing = resilient_parsing - - # If there is no envvar prefix yet, but the parent has one and - # the command on this level has a name, we can expand the envvar - # prefix automatically. - if auto_envvar_prefix is None: - if ( - parent is not None - and parent.auto_envvar_prefix is not None - and self.info_name is not None - ): - auto_envvar_prefix = "{}_{}".format( - parent.auto_envvar_prefix, self.info_name.upper() - ) - else: - auto_envvar_prefix = auto_envvar_prefix.upper() - if auto_envvar_prefix is not None: - auto_envvar_prefix = auto_envvar_prefix.replace("-", "_") - self.auto_envvar_prefix = auto_envvar_prefix - - if color is None and parent is not None: - color = parent.color - - #: Controls if styling output is wanted or not. 
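The `auto_envvar_prefix` expansion near the end of `__init__` above is what makes automatic environment variables compose across nesting: each level appends its upper-cased `info_name` to the parent prefix. A sketch of the standard usage (names illustrative; this should resolve `--lang` from `APP_GREET_LANG`):

```
import click

@click.group(context_settings={"auto_envvar_prefix": "APP"})
def cli():
    pass

@cli.command()
@click.option("--lang", default="en")
def greet(lang):
    click.echo(lang)

# APP_GREET_LANG=fr myapp greet   -> prints "fr"
if __name__ == "__main__":
    cli()
```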
- self.color = color - - self.show_default = show_default - - self._close_callbacks = [] - self._depth = 0 - - def __enter__(self): - self._depth += 1 - push_context(self) - return self - - def __exit__(self, exc_type, exc_value, tb): - self._depth -= 1 - if self._depth == 0: - self.close() - pop_context() - - @contextmanager - def scope(self, cleanup=True): - """This helper method can be used with the context object to promote - it to the current thread local (see :func:`get_current_context`). - The default behavior of this is to invoke the cleanup functions which - can be disabled by setting `cleanup` to `False`. The cleanup - functions are typically used for things such as closing file handles. - - If the cleanup is intended the context object can also be directly - used as a context manager. - - Example usage:: - - with ctx.scope(): - assert get_current_context() is ctx - - This is equivalent:: - - with ctx: - assert get_current_context() is ctx - - .. versionadded:: 5.0 - - :param cleanup: controls if the cleanup functions should be run or - not. The default is to run these functions. In - some situations the context only wants to be - temporarily pushed in which case this can be disabled. - Nested pushes automatically defer the cleanup. - """ - if not cleanup: - self._depth += 1 - try: - with self as rv: - yield rv - finally: - if not cleanup: - self._depth -= 1 - - @property - def meta(self): - """This is a dictionary which is shared with all the contexts - that are nested. It exists so that click utilities can store some - state here if they need to. It is however the responsibility of - that code to manage this dictionary well. - - The keys are supposed to be unique dotted strings. For instance - module paths are a good choice for it. What is stored in there is - irrelevant for the operation of click. However what is important is - that code that places data here adheres to the general semantics of - the system. - - Example usage:: - - LANG_KEY = f'{__name__}.lang' - - def set_language(value): - ctx = get_current_context() - ctx.meta[LANG_KEY] = value - - def get_language(): - return get_current_context().meta.get(LANG_KEY, 'en_US') - - .. versionadded:: 5.0 - """ - return self._meta - - def make_formatter(self): - """Creates the formatter for the help and usage output.""" - return HelpFormatter( - width=self.terminal_width, max_width=self.max_content_width - ) - - def call_on_close(self, f): - """This decorator remembers a function as callback that should be - executed when the context tears down. This is most useful to bind - resource handling to the script execution. For instance, file objects - opened by the :class:`File` type will register their close callbacks - here. - - :param f: the function to execute on teardown. - """ - self._close_callbacks.append(f) - return f - - def close(self): - """Invokes all close callbacks.""" - for cb in self._close_callbacks: - cb() - self._close_callbacks = [] - - @property - def command_path(self): - """The computed command path. This is used for the ``usage`` - information on the help page. It's automatically created by - combining the info names of the chain of contexts to the root. 
- """ - rv = "" - if self.info_name is not None: - rv = self.info_name - if self.parent is not None: - rv = "{} {}".format(self.parent.command_path, rv) - return rv.lstrip() - - def find_root(self): - """Finds the outermost context.""" - node = self - while node.parent is not None: - node = node.parent - return node - - def find_object(self, object_type): - """Finds the closest object of a given type.""" - node = self - while node is not None: - if isinstance(node.obj, object_type): - return node.obj - node = node.parent - - def ensure_object(self, object_type): - """Like :meth:`find_object` but sets the innermost object to a - new instance of `object_type` if it does not exist. - """ - rv = self.find_object(object_type) - if rv is None: - self.obj = rv = object_type() - return rv - - def lookup_default(self, name): - """Looks up the default for a parameter name. This by default - looks into the :attr:`default_map` if available. - """ - if self.default_map is not None: - rv = self.default_map.get(name) - if callable(rv): - rv = rv() - return rv - - def fail(self, message): - """Aborts the execution of the program with a specific error - message. - - :param message: the error message to fail with. - """ - raise UsageError(message, self) - - def abort(self): - """Aborts the script.""" - raise Abort() - - def exit(self, code=0): - """Exits the application with a given exit code.""" - raise Exit(code) - - def get_usage(self): - """Helper method to get formatted usage string for the current - context and command. - """ - return self.command.get_usage(self) - - def get_help(self): - """Helper method to get formatted help page for the current - context and command. - """ - return self.command.get_help(self) - - def invoke(*args, **kwargs): # noqa: B902 - """Invokes a command callback in exactly the way it expects. There - are two ways to invoke this method: - - 1. the first argument can be a callback and all other arguments and - keyword arguments are forwarded directly to the function. - 2. the first argument is a click command object. In that case all - arguments are forwarded as well but proper click parameters - (options and click arguments) must be keyword arguments and Click - will fill in defaults. - - Note that before Click 3.2 keyword arguments were not properly filled - in against the intention of this code and no context was created. For - more information about this change and why it was done in a bugfix - release see :ref:`upgrade-to-3.2`. - """ - self, callback = args[:2] - ctx = self - - # It's also possible to invoke another command which might or - # might not have a callback. In that case we also fill - # in defaults and make a new context for this command. - if isinstance(callback, Command): - other_cmd = callback - callback = other_cmd.callback - ctx = Context(other_cmd, info_name=other_cmd.name, parent=self) - if callback is None: - raise TypeError( - "The given command does not have a callback that can be invoked." - ) - - for param in other_cmd.params: - if param.name not in kwargs and param.expose_value: - kwargs[param.name] = param.get_default(ctx) - - args = args[2:] - with augment_usage_errors(self): - with ctx: - return callback(*args, **kwargs) - - def forward(*args, **kwargs): # noqa: B902 - """Similar to :meth:`invoke` but fills in default keyword - arguments from the current context if the other command expects - it. This cannot invoke callbacks directly, only other commands. 
- """ - self, cmd = args[:2] - - # It's also possible to invoke another command which might or - # might not have a callback. - if not isinstance(cmd, Command): - raise TypeError("Callback is not a command.") - - for param in self.params: - if param not in kwargs: - kwargs[param] = self.params[param] - - return self.invoke(cmd, **kwargs) - - -class BaseCommand(object): - """The base command implements the minimal API contract of commands. - Most code will never use this as it does not implement a lot of useful - functionality but it can act as the direct subclass of alternative - parsing methods that do not depend on the Click parser. - - For instance, this can be used to bridge Click and other systems like - argparse or docopt. - - Because base commands do not implement a lot of the API that other - parts of Click take for granted, they are not supported for all - operations. For instance, they cannot be used with the decorators - usually and they have no built-in callback system. - - .. versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - """ - - #: the default for the :attr:`Context.allow_extra_args` flag. - allow_extra_args = False - #: the default for the :attr:`Context.allow_interspersed_args` flag. - allow_interspersed_args = True - #: the default for the :attr:`Context.ignore_unknown_options` flag. - ignore_unknown_options = False - - def __init__(self, name, context_settings=None): - #: the name the command thinks it has. Upon registering a command - #: on a :class:`Group` the group will default the command name - #: with this information. You should instead use the - #: :class:`Context`\'s :attr:`~Context.info_name` attribute. - self.name = name - if context_settings is None: - context_settings = {} - #: an optional dictionary with defaults passed to the context. - self.context_settings = context_settings - - def __repr__(self): - return "<{} {}>".format(self.__class__.__name__, self.name) - - def get_usage(self, ctx): - raise NotImplementedError("Base commands cannot get usage") - - def get_help(self, ctx): - raise NotImplementedError("Base commands cannot get help") - - def make_context(self, info_name, args, parent=None, **extra): - """This function when given an info name and arguments will kick - off the parsing and create a new :class:`Context`. It does not - invoke the actual command callback though. - - :param info_name: the info name for this invokation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it's usually - the name of the script, for commands below it it's - the name of the script. - :param args: the arguments to parse as list of strings. - :param parent: the parent context if available. - :param extra: extra keyword arguments forwarded to the context - constructor. - """ - for key, value in iteritems(self.context_settings): - if key not in extra: - extra[key] = value - ctx = Context(self, info_name=info_name, parent=parent, **extra) - with ctx.scope(cleanup=False): - self.parse_args(ctx, args) - return ctx - - def parse_args(self, ctx, args): - """Given a context and a list of arguments this creates the parser - and parses the arguments, then modifies the context as necessary. - This is automatically invoked by :meth:`make_context`. 
- """ - raise NotImplementedError("Base commands do not know how to parse arguments.") - - def invoke(self, ctx): - """Given a context, this invokes the command. The default - implementation is raising a not implemented error. - """ - raise NotImplementedError("Base commands are not invokable by default") - - def main( - self, - args=None, - prog_name=None, - complete_var=None, - standalone_mode=True, - **extra - ): - """This is the way to invoke a script with all the bells and - whistles as a command line application. This will always terminate - the application after a call. If this is not wanted, ``SystemExit`` - needs to be caught. - - This method is also available by directly calling the instance of - a :class:`Command`. - - .. versionadded:: 3.0 - Added the `standalone_mode` flag to control the standalone mode. - - :param args: the arguments that should be used for parsing. If not - provided, ``sys.argv[1:]`` is used. - :param prog_name: the program name that should be used. By default - the program name is constructed by taking the file - name from ``sys.argv[0]``. - :param complete_var: the environment variable that controls the - bash completion support. The default is - ``"__COMPLETE"`` with prog_name in - uppercase. - :param standalone_mode: the default behavior is to invoke the script - in standalone mode. Click will then - handle exceptions and convert them into - error messages and the function will never - return but shut down the interpreter. If - this is set to `False` they will be - propagated to the caller and the return - value of this function is the return value - of :meth:`invoke`. - :param extra: extra keyword arguments are forwarded to the context - constructor. See :class:`Context` for more information. - """ - # If we are in Python 3, we will verify that the environment is - # sane at this point or reject further execution to avoid a - # broken script. - if not PY2: - _verify_python3_env() - else: - _check_for_unicode_literals() - - if args is None: - args = get_os_args() - else: - args = list(args) - - if prog_name is None: - prog_name = make_str( - os.path.basename(sys.argv[0] if sys.argv else __file__) - ) - - # Hook for the Bash completion. This only activates if the Bash - # completion is actually enabled, otherwise this is quite a fast - # noop. - _bashcomplete(self, prog_name, complete_var) - - try: - try: - with self.make_context(prog_name, args, **extra) as ctx: - rv = self.invoke(ctx) - if not standalone_mode: - return rv - # it's not safe to `ctx.exit(rv)` here! 
- # note that `rv` may actually contain data like "1" which - # has obvious effects - # more subtle case: `rv=[None, None]` can come out of - # chained commands which all returned `None` -- so it's not - # even always obvious that `rv` indicates success/failure - # by its truthiness/falsiness - ctx.exit() - except (EOFError, KeyboardInterrupt): - echo(file=sys.stderr) - raise Abort() - except ClickException as e: - if not standalone_mode: - raise - e.show() - sys.exit(e.exit_code) - except IOError as e: - if e.errno == errno.EPIPE: - sys.stdout = PacifyFlushWrapper(sys.stdout) - sys.stderr = PacifyFlushWrapper(sys.stderr) - sys.exit(1) - else: - raise - except Exit as e: - if standalone_mode: - sys.exit(e.exit_code) - else: - # in non-standalone mode, return the exit code - # note that this is only reached if `self.invoke` above raises - # an Exit explicitly -- thus bypassing the check there which - # would return its result - # the results of non-standalone execution may therefore be - # somewhat ambiguous: if there are codepaths which lead to - # `ctx.exit(1)` and to `return 1`, the caller won't be able to - # tell the difference between the two - return e.exit_code - except Abort: - if not standalone_mode: - raise - echo("Aborted!", file=sys.stderr) - sys.exit(1) - - def __call__(self, *args, **kwargs): - """Alias for :meth:`main`.""" - return self.main(*args, **kwargs) - - -class Command(BaseCommand): - """Commands are the basic building block of command line interfaces in - Click. A basic command handles command line parsing and might dispatch - more parsing to commands nested below it. - - .. versionchanged:: 2.0 - Added the `context_settings` parameter. - .. versionchanged:: 7.1 - Added the `no_args_is_help` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - :param callback: the callback to invoke. This is optional. - :param params: the parameters to register with this command. This can - be either :class:`Option` or :class:`Argument` objects. - :param help: the help string to use for this command. - :param epilog: like the help string but it's printed at the end of the - help page after everything else. - :param short_help: the short help to use for this command. This is - shown on the command listing of the parent command. - :param add_help_option: by default each command registers a ``--help`` - option. This can be disabled by this parameter. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is disabled by default. - If enabled this will add ``--help`` as argument - if no arguments are passed - :param hidden: hide this command from help outputs. - - :param deprecated: issues a message indicating that - the command is deprecated. - """ - - def __init__( - self, - name, - context_settings=None, - callback=None, - params=None, - help=None, - epilog=None, - short_help=None, - options_metavar="[OPTIONS]", - add_help_option=True, - no_args_is_help=False, - hidden=False, - deprecated=False, - ): - BaseCommand.__init__(self, name, context_settings) - #: the callback to execute when the command fires. This might be - #: `None` in which case nothing happens. - self.callback = callback - #: the list of parameters for this command in the order they - #: should show up in the help page and execute. Eager parameters - #: will automatically be handled before non eager ones. 
- self.params = params or [] - # if a form feed (page break) is found in the help text, truncate help - # text to the content preceding the first form feed - if help and "\f" in help: - help = help.split("\f", 1)[0] - self.help = help - self.epilog = epilog - self.options_metavar = options_metavar - self.short_help = short_help - self.add_help_option = add_help_option - self.no_args_is_help = no_args_is_help - self.hidden = hidden - self.deprecated = deprecated - - def get_usage(self, ctx): - """Formats the usage line into a string and returns it. - - Calls :meth:`format_usage` internally. - """ - formatter = ctx.make_formatter() - self.format_usage(ctx, formatter) - return formatter.getvalue().rstrip("\n") - - def get_params(self, ctx): - rv = self.params - help_option = self.get_help_option(ctx) - if help_option is not None: - rv = rv + [help_option] - return rv - - def format_usage(self, ctx, formatter): - """Writes the usage line into the formatter. - - This is a low-level method called by :meth:`get_usage`. - """ - pieces = self.collect_usage_pieces(ctx) - formatter.write_usage(ctx.command_path, " ".join(pieces)) - - def collect_usage_pieces(self, ctx): - """Returns all the pieces that go into the usage line and returns - it as a list of strings. - """ - rv = [self.options_metavar] - for param in self.get_params(ctx): - rv.extend(param.get_usage_pieces(ctx)) - return rv - - def get_help_option_names(self, ctx): - """Returns the names for the help option.""" - all_names = set(ctx.help_option_names) - for param in self.params: - all_names.difference_update(param.opts) - all_names.difference_update(param.secondary_opts) - return all_names - - def get_help_option(self, ctx): - """Returns the help option object.""" - help_options = self.get_help_option_names(ctx) - if not help_options or not self.add_help_option: - return - - def show_help(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - return Option( - help_options, - is_flag=True, - is_eager=True, - expose_value=False, - callback=show_help, - help="Show this message and exit.", - ) - - def make_parser(self, ctx): - """Creates the underlying option parser for this command.""" - parser = OptionParser(ctx) - for param in self.get_params(ctx): - param.add_to_parser(parser, ctx) - return parser - - def get_help(self, ctx): - """Formats the help into a string and returns it. - - Calls :meth:`format_help` internally. - """ - formatter = ctx.make_formatter() - self.format_help(ctx, formatter) - return formatter.getvalue().rstrip("\n") - - def get_short_help_str(self, limit=45): - """Gets short help for the command or makes it by shortening the - long help string. - """ - return ( - self.short_help - or self.help - and make_default_short_help(self.help, limit) - or "" - ) - - def format_help(self, ctx, formatter): - """Writes the help into the formatter if it exists. - - This is a low-level method called by :meth:`get_help`. 
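The methods from `get_usage` down to `get_help_option` above make up the help pipeline, and the public entry points can be exercised directly without running a CLI. A small sketch (command is illustrative; the output shown in comments is approximate):

```
import click

@click.command(options_metavar="[flags]")
@click.option("--name", help="Who to greet.")
def hello(name):
    """Print a friendly greeting."""

ctx = click.Context(hello, info_name="hello")
print(hello.get_usage(ctx))  # e.g. "Usage: hello [flags]"
print(hello.get_help(ctx))   # full help page, including the Options section
```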
- - This calls the following methods: - - - :meth:`format_usage` - - :meth:`format_help_text` - - :meth:`format_options` - - :meth:`format_epilog` - """ - self.format_usage(ctx, formatter) - self.format_help_text(ctx, formatter) - self.format_options(ctx, formatter) - self.format_epilog(ctx, formatter) - - def format_help_text(self, ctx, formatter): - """Writes the help text to the formatter if it exists.""" - if self.help: - formatter.write_paragraph() - with formatter.indentation(): - help_text = self.help - if self.deprecated: - help_text += DEPRECATED_HELP_NOTICE - formatter.write_text(help_text) - elif self.deprecated: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(DEPRECATED_HELP_NOTICE) - - def format_options(self, ctx, formatter): - """Writes all the options into the formatter if they exist.""" - opts = [] - for param in self.get_params(ctx): - rv = param.get_help_record(ctx) - if rv is not None: - opts.append(rv) - - if opts: - with formatter.section("Options"): - formatter.write_dl(opts) - - def format_epilog(self, ctx, formatter): - """Writes the epilog into the formatter if it exists.""" - if self.epilog: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(self.epilog) - - def parse_args(self, ctx, args): - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - parser = self.make_parser(ctx) - opts, args, param_order = parser.parse_args(args=args) - - for param in iter_params_for_processing(param_order, self.get_params(ctx)): - value, args = param.handle_parse_result(ctx, opts, args) - - if args and not ctx.allow_extra_args and not ctx.resilient_parsing: - ctx.fail( - "Got unexpected extra argument{} ({})".format( - "s" if len(args) != 1 else "", " ".join(map(make_str, args)) - ) - ) - - ctx.args = args - return args - - def invoke(self, ctx): - """Given a context, this invokes the attached callback (if it exists) - in the right way. - """ - _maybe_show_deprecated_notice(self) - if self.callback is not None: - return ctx.invoke(self.callback, **ctx.params) - - -class MultiCommand(Command): - """A multi command is the basic implementation of a command that - dispatches to subcommands. The most common version is the - :class:`Group`. - - :param invoke_without_command: this controls how the multi command itself - is invoked. By default it's only invoked - if a subcommand is provided. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is enabled by default if - `invoke_without_command` is disabled or disabled - if it's enabled. If enabled this will add - ``--help`` as argument if no arguments are - passed. - :param subcommand_metavar: the string that is used in the documentation - to indicate the subcommand place. - :param chain: if this is set to `True` chaining of multiple subcommands - is enabled. This restricts the form of commands in that - they cannot have optional arguments but it allows - multiple commands to be chained together. - :param result_callback: the result callback to attach to this multi - command. 
- """ - - allow_extra_args = True - allow_interspersed_args = False - - def __init__( - self, - name=None, - invoke_without_command=False, - no_args_is_help=None, - subcommand_metavar=None, - chain=False, - result_callback=None, - **attrs - ): - Command.__init__(self, name, **attrs) - if no_args_is_help is None: - no_args_is_help = not invoke_without_command - self.no_args_is_help = no_args_is_help - self.invoke_without_command = invoke_without_command - if subcommand_metavar is None: - if chain: - subcommand_metavar = SUBCOMMANDS_METAVAR - else: - subcommand_metavar = SUBCOMMAND_METAVAR - self.subcommand_metavar = subcommand_metavar - self.chain = chain - #: The result callback that is stored. This can be set or - #: overridden with the :func:`resultcallback` decorator. - self.result_callback = result_callback - - if self.chain: - for param in self.params: - if isinstance(param, Argument) and not param.required: - raise RuntimeError( - "Multi commands in chain mode cannot have" - " optional arguments." - ) - - def collect_usage_pieces(self, ctx): - rv = Command.collect_usage_pieces(self, ctx) - rv.append(self.subcommand_metavar) - return rv - - def format_options(self, ctx, formatter): - Command.format_options(self, ctx, formatter) - self.format_commands(ctx, formatter) - - def resultcallback(self, replace=False): - """Adds a result callback to the chain command. By default if a - result callback is already registered this will chain them but - this can be disabled with the `replace` parameter. The result - callback is invoked with the return value of the subcommand - (or the list of return values from all subcommands if chaining - is enabled) as well as the parameters as they would be passed - to the main callback. - - Example:: - - @click.group() - @click.option('-i', '--input', default=23) - def cli(input): - return 42 - - @cli.resultcallback() - def process_result(result, input): - return result + input - - .. versionadded:: 3.0 - - :param replace: if set to `True` an already existing result - callback will be removed. - """ - - def decorator(f): - old_callback = self.result_callback - if old_callback is None or replace: - self.result_callback = f - return f - - def function(__value, *args, **kwargs): - return f(old_callback(__value, *args, **kwargs), *args, **kwargs) - - self.result_callback = rv = update_wrapper(function, f) - return rv - - return decorator - - def format_commands(self, ctx, formatter): - """Extra format methods for multi methods that adds all the commands - after the options. - """ - commands = [] - for subcommand in self.list_commands(ctx): - cmd = self.get_command(ctx, subcommand) - # What is this, the tool lied about a command. 
Ignore it - if cmd is None: - continue - if cmd.hidden: - continue - - commands.append((subcommand, cmd)) - - # allow for 3 times the default spacing - if len(commands): - limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) - - rows = [] - for subcommand, cmd in commands: - help = cmd.get_short_help_str(limit) - rows.append((subcommand, help)) - - if rows: - with formatter.section("Commands"): - formatter.write_dl(rows) - - def parse_args(self, ctx, args): - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - rest = Command.parse_args(self, ctx, args) - if self.chain: - ctx.protected_args = rest - ctx.args = [] - elif rest: - ctx.protected_args, ctx.args = rest[:1], rest[1:] - - return ctx.args - - def invoke(self, ctx): - def _process_result(value): - if self.result_callback is not None: - value = ctx.invoke(self.result_callback, value, **ctx.params) - return value - - if not ctx.protected_args: - # If we are invoked without command the chain flag controls - # how this happens. If we are not in chain mode, the return - # value here is the return value of the command. - # If however we are in chain mode, the return value is the - # return value of the result processor invoked with an empty - # list (which means that no subcommand actually was executed). - if self.invoke_without_command: - if not self.chain: - return Command.invoke(self, ctx) - with ctx: - Command.invoke(self, ctx) - return _process_result([]) - ctx.fail("Missing command.") - - # Fetch args back out - args = ctx.protected_args + ctx.args - ctx.args = [] - ctx.protected_args = [] - - # If we're not in chain mode, we only allow the invocation of a - # single command but we also inform the current context about the - # name of the command to invoke. - if not self.chain: - # Make sure the context is entered so we do not clean up - # resources until the result processor has worked. - with ctx: - cmd_name, cmd, args = self.resolve_command(ctx, args) - ctx.invoked_subcommand = cmd_name - Command.invoke(self, ctx) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) - with sub_ctx: - return _process_result(sub_ctx.command.invoke(sub_ctx)) - - # In chain mode we create the contexts step by step, but after the - # base command has been invoked. Because at that point we do not - # know the subcommands yet, the invoked subcommand attribute is - # set to ``*`` to inform the command that subcommands are executed - # but nothing else. - with ctx: - ctx.invoked_subcommand = "*" if args else None - Command.invoke(self, ctx) - - # Otherwise we make every single context and invoke them in a - # chain. In that case the return value to the result processor - # is the list of all invoked subcommand's results. - contexts = [] - while args: - cmd_name, cmd, args = self.resolve_command(ctx, args) - sub_ctx = cmd.make_context( - cmd_name, - args, - parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False, - ) - contexts.append(sub_ctx) - args, sub_ctx.args = sub_ctx.args, [] - - rv = [] - for sub_ctx in contexts: - with sub_ctx: - rv.append(sub_ctx.command.invoke(sub_ctx)) - return _process_result(rv) - - def resolve_command(self, ctx, args): - cmd_name = make_str(args[0]) - original_cmd_name = cmd_name - - # Get the command - cmd = self.get_command(ctx, cmd_name) - - # If we can't find the command but there is a normalization - # function available, we try with that one. 
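The `invoked_subcommand` bookkeeping in `invoke` above is the documented way for a group callback to learn whether a subcommand is about to run, with `"*"` standing in for "one or more, names unknown" in chain mode. Standard usage looks roughly like:

```
import click

@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
    if ctx.invoked_subcommand is None:
        click.echo("invoked without a subcommand")
    else:
        click.echo("about to run {}".format(ctx.invoked_subcommand))

@cli.command()
def sync():
    click.echo("syncing")
```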
- if cmd is None and ctx.token_normalize_func is not None: - cmd_name = ctx.token_normalize_func(cmd_name) - cmd = self.get_command(ctx, cmd_name) - - # If we don't find the command we want to show an error message - # to the user that it was not provided. However, there is - # something else we should do: if the first argument looks like - # an option we want to kick off parsing again for arguments to - # resolve things like --help which now should go to the main - # place. - if cmd is None and not ctx.resilient_parsing: - if split_opt(cmd_name)[0]: - self.parse_args(ctx, ctx.args) - ctx.fail("No such command '{}'.".format(original_cmd_name)) - - return cmd_name, cmd, args[1:] - - def get_command(self, ctx, cmd_name): - """Given a context and a command name, this returns a - :class:`Command` object if it exists or returns `None`. - """ - raise NotImplementedError() - - def list_commands(self, ctx): - """Returns a list of subcommand names in the order they should - appear. - """ - return [] - - -class Group(MultiCommand): - """A group allows a command to have subcommands attached. This is the - most common way to implement nesting in Click. - - :param commands: a dictionary of commands. - """ - - def __init__(self, name=None, commands=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: the registered subcommands by their exported names. - self.commands = commands or {} - - def add_command(self, cmd, name=None): - """Registers another :class:`Command` with this group. If the name - is not provided, the name of the command is used. - """ - name = name or cmd.name - if name is None: - raise TypeError("Command has no name.") - _check_multicommand(self, name, cmd, register=True) - self.commands[name] = cmd - - def command(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a command to - the group. This takes the same arguments as :func:`command` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - from .decorators import command - - def decorator(f): - cmd = command(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - - return decorator - - def group(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a group to - the group. This takes the same arguments as :func:`group` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - from .decorators import group - - def decorator(f): - cmd = group(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - - return decorator - - def get_command(self, ctx, cmd_name): - return self.commands.get(cmd_name) - - def list_commands(self, ctx): - return sorted(self.commands) - - -class CommandCollection(MultiCommand): - """A command collection is a multi command that merges multiple multi - commands together into one. This is a straightforward implementation - that accepts a list of different multi commands as sources and - provides all the commands for each of them. - """ - - def __init__(self, name=None, sources=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: The list of registered multi commands. 
- self.sources = sources or [] - - def add_source(self, multi_cmd): - """Adds a new multi command to the chain dispatcher.""" - self.sources.append(multi_cmd) - - def get_command(self, ctx, cmd_name): - for source in self.sources: - rv = source.get_command(ctx, cmd_name) - if rv is not None: - if self.chain: - _check_multicommand(self, cmd_name, rv) - return rv - - def list_commands(self, ctx): - rv = set() - for source in self.sources: - rv.update(source.list_commands(ctx)) - return sorted(rv) - - -class Parameter(object): - r"""A parameter to a command comes in two versions: they are either - :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently - not supported by design as some of the internals for parsing are - intentionally not finalized. - - Some settings are supported by both options and arguments. - - :param param_decls: the parameter declarations for this option or - argument. This is a list of flags or argument - names. - :param type: the type that should be used. Either a :class:`ParamType` - or a Python type. The later is converted into the former - automatically if supported. - :param required: controls if this is optional or not. - :param default: the default value if omitted. This can also be a callable, - in which case it's invoked when the default is needed - without any arguments. - :param callback: a callback that should be executed after the parameter - was matched. This is called as ``fn(ctx, param, - value)`` and needs to return the value. - :param nargs: the number of arguments to match. If not ``1`` the return - value is a tuple instead of single value. The default for - nargs is ``1`` (except if the type is a tuple, then it's - the arity of the tuple). - :param metavar: how the value is represented in the help page. - :param expose_value: if this is `True` then the value is passed onwards - to the command callback and stored on the context, - otherwise it's skipped. - :param is_eager: eager values are processed before non eager ones. This - should not be set for arguments or it will inverse the - order of processing. - :param envvar: a string or list of strings that are environment variables - that should be checked. - - .. versionchanged:: 7.1 - Empty environment variables are ignored rather than taking the - empty string value. This makes it possible for scripts to clear - variables if they can't unset them. - - .. versionchanged:: 2.0 - Changed signature for parameter callback to also be passed the - parameter. The old callback format will still work, but it will - raise a warning to give you a chance to migrate the code easier. - """ - param_type_name = "parameter" - - def __init__( - self, - param_decls=None, - type=None, - required=False, - default=None, - callback=None, - nargs=None, - metavar=None, - expose_value=True, - is_eager=False, - envvar=None, - autocompletion=None, - ): - self.name, self.opts, self.secondary_opts = self._parse_decls( - param_decls or (), expose_value - ) - - self.type = convert_type(type, default) - - # Default nargs to what the type tells us if we have that - # information available. 
- if nargs is None: - if self.type.is_composite: - nargs = self.type.arity - else: - nargs = 1 - - self.required = required - self.callback = callback - self.nargs = nargs - self.multiple = False - self.expose_value = expose_value - self.default = default - self.is_eager = is_eager - self.metavar = metavar - self.envvar = envvar - self.autocompletion = autocompletion - - def __repr__(self): - return "<{} {}>".format(self.__class__.__name__, self.name) - - @property - def human_readable_name(self): - """Returns the human readable name of this parameter. This is the - same as the name for options, but the metavar for arguments. - """ - return self.name - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - metavar = self.type.get_metavar(self) - if metavar is None: - metavar = self.type.name.upper() - if self.nargs != 1: - metavar += "..." - return metavar - - def get_default(self, ctx): - """Given a context variable this calculates the default value.""" - # Otherwise go with the regular default. - if callable(self.default): - rv = self.default() - else: - rv = self.default - return self.type_cast_value(ctx, rv) - - def add_to_parser(self, parser, ctx): - pass - - def consume_value(self, ctx, opts): - value = opts.get(self.name) - if value is None: - value = self.value_from_envvar(ctx) - if value is None: - value = ctx.lookup_default(self.name) - return value - - def type_cast_value(self, ctx, value): - """Given a value this runs it properly through the type system. - This automatically handles things like `nargs` and `multiple` as - well as composite types. - """ - if self.type.is_composite: - if self.nargs <= 1: - raise TypeError( - "Attempted to invoke composite type but nargs has" - " been set to {}. This is not supported; nargs" - " needs to be set to a fixed value > 1.".format(self.nargs) - ) - if self.multiple: - return tuple(self.type(x or (), self, ctx) for x in value or ()) - return self.type(value or (), self, ctx) - - def _convert(value, level): - if level == 0: - return self.type(value, self, ctx) - return tuple(_convert(x, level - 1) for x in value or ()) - - return _convert(value, (self.nargs != 1) + bool(self.multiple)) - - def process_value(self, ctx, value): - """Given a value and context this runs the logic to convert the - value as necessary. - """ - # If the value we were given is None we do nothing. This way - # code that calls this can easily figure out if something was - # not provided. Otherwise it would be converted into an empty - # tuple for multiple invocations which is inconvenient. 
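The `_convert` recursion in `type_cast_value` above is what turns `nargs` and `multiple` into nested tuples: `nargs=2` yields pairs, and `multiple=True` wraps one more tuple layer around whatever `nargs` produced. In user-facing terms (option names illustrative):

```
import click

@click.command()
@click.option("--point", nargs=2, type=int)                # a single pair
@click.option("--tag", multiple=True)                      # repeatable
@click.option("--edge", nargs=2, type=int, multiple=True)  # repeatable pairs
def draw(point, tag, edge):
    click.echo("{} {} {}".format(point, tag, edge))

# draw --point 1 2 --tag a --tag b --edge 0 1 --edge 1 2
#   point == (1, 2); tag == ('a', 'b'); edge == ((0, 1), (1, 2))
```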
- if value is not None: - return self.type_cast_value(ctx, value) - - def value_is_missing(self, value): - if value is None: - return True - if (self.nargs != 1 or self.multiple) and value == (): - return True - return False - - def full_process_value(self, ctx, value): - value = self.process_value(ctx, value) - - if value is None and not ctx.resilient_parsing: - value = self.get_default(ctx) - - if self.required and self.value_is_missing(value): - raise MissingParameter(ctx=ctx, param=self) - - return value - - def resolve_envvar_value(self, ctx): - if self.envvar is None: - return - if isinstance(self.envvar, (tuple, list)): - for envvar in self.envvar: - rv = os.environ.get(envvar) - if rv is not None: - return rv - else: - rv = os.environ.get(self.envvar) - - if rv != "": - return rv - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is not None and self.nargs != 1: - rv = self.type.split_envvar_value(rv) - return rv - - def handle_parse_result(self, ctx, opts, args): - with augment_usage_errors(ctx, param=self): - value = self.consume_value(ctx, opts) - try: - value = self.full_process_value(ctx, value) - except Exception: - if not ctx.resilient_parsing: - raise - value = None - if self.callback is not None: - try: - value = invoke_param_callback(self.callback, ctx, self, value) - except Exception: - if not ctx.resilient_parsing: - raise - - if self.expose_value: - ctx.params[self.name] = value - return value, args - - def get_help_record(self, ctx): - pass - - def get_usage_pieces(self, ctx): - return [] - - def get_error_hint(self, ctx): - """Get a stringified version of the param for use in error messages to - indicate which param caused the error. - """ - hint_list = self.opts or [self.human_readable_name] - return " / ".join(repr(x) for x in hint_list) - - -class Option(Parameter): - """Options are usually optional values on the command line and - have some extra features that arguments don't have. - - All other parameters are passed onwards to the parameter constructor. - - :param show_default: controls if the default value should be shown on the - help page. Normally, defaults are not shown. If this - value is a string, it shows the string instead of the - value. This is particularly useful for dynamic options. - :param show_envvar: controls if an environment variable should be shown on - the help page. Normally, environment variables - are not shown. - :param prompt: if set to `True` or a non empty string then the user will be - prompted for input. If set to `True` the prompt will be the - option name capitalized. - :param confirmation_prompt: if set then the value will need to be confirmed - if it was prompted for. - :param hide_input: if this is `True` then the input on the prompt will be - hidden from the user. This is useful for password - input. - :param is_flag: forces this option to act as a flag. The default is - auto detection. - :param flag_value: which value should be used for this flag if it's - enabled. This is set to a boolean automatically if - the option string contains a slash to mark two options. - :param multiple: if this is set to `True` then the argument is accepted - multiple times and recorded. This is similar to ``nargs`` - in how it works but supports arbitrary number of - arguments. - :param count: this flag makes an option increment an integer. - :param allow_from_autoenv: if this is enabled then the value of this - parameter will be pulled from an environment - variable in case a prefix is defined on the - context. 
- :param help: the help string. - :param hidden: hide this option from help outputs. - """ - - param_type_name = "option" - - def __init__( - self, - param_decls=None, - show_default=False, - prompt=False, - confirmation_prompt=False, - hide_input=False, - is_flag=None, - flag_value=None, - multiple=False, - count=False, - allow_from_autoenv=True, - type=None, - help=None, - hidden=False, - show_choices=True, - show_envvar=False, - **attrs - ): - default_is_missing = attrs.get("default", _missing) is _missing - Parameter.__init__(self, param_decls, type=type, **attrs) - - if prompt is True: - prompt_text = self.name.replace("_", " ").capitalize() - elif prompt is False: - prompt_text = None - else: - prompt_text = prompt - self.prompt = prompt_text - self.confirmation_prompt = confirmation_prompt - self.hide_input = hide_input - self.hidden = hidden - - # Flags - if is_flag is None: - if flag_value is not None: - is_flag = True - else: - is_flag = bool(self.secondary_opts) - if is_flag and default_is_missing: - self.default = False - if flag_value is None: - flag_value = not self.default - self.is_flag = is_flag - self.flag_value = flag_value - if self.is_flag and isinstance(self.flag_value, bool) and type in [None, bool]: - self.type = BOOL - self.is_bool_flag = True - else: - self.is_bool_flag = False - - # Counting - self.count = count - if count: - if type is None: - self.type = IntRange(min=0) - if default_is_missing: - self.default = 0 - - self.multiple = multiple - self.allow_from_autoenv = allow_from_autoenv - self.help = help - self.show_default = show_default - self.show_choices = show_choices - self.show_envvar = show_envvar - - # Sanity check for stuff we don't support - if __debug__: - if self.nargs < 0: - raise TypeError("Options cannot have nargs < 0") - if self.prompt and self.is_flag and not self.is_bool_flag: - raise TypeError("Cannot prompt for flags that are not bools.") - if not self.is_bool_flag and self.secondary_opts: - raise TypeError("Got secondary option for non boolean flag.") - if self.is_bool_flag and self.hide_input and self.prompt is not None: - raise TypeError("Hidden input does not work with boolean flag prompts.") - if self.count: - if self.multiple: - raise TypeError( - "Options cannot be multiple and count at the same time." - ) - elif self.is_flag: - raise TypeError( - "Options cannot be count and flags at the same time." - ) - - def _parse_decls(self, decls, expose_value): - opts = [] - secondary_opts = [] - name = None - possible_names = [] - - for decl in decls: - if isidentifier(decl): - if name is not None: - raise TypeError("Name defined twice") - name = decl - else: - split_char = ";" if decl[:1] == "/" else "/" - if split_char in decl: - first, second = decl.split(split_char, 1) - first = first.rstrip() - if first: - possible_names.append(split_opt(first)) - opts.append(first) - second = second.lstrip() - if second: - secondary_opts.append(second.lstrip()) - else: - possible_names.append(split_opt(decl)) - opts.append(decl) - - if name is None and possible_names: - possible_names.sort(key=lambda x: -len(x[0])) # group long options first - name = possible_names[0][1].replace("-", "_").lower() - if not isidentifier(name): - name = None - - if name is None: - if not expose_value: - return None, opts, secondary_opts - raise TypeError("Could not determine name for option") - - if not opts and not secondary_opts: - raise TypeError( - "No options defined but a name was passed ({}). 
Did you" - " mean to declare an argument instead of an option?".format(name) - ) - - return name, opts, secondary_opts - - def add_to_parser(self, parser, ctx): - kwargs = { - "dest": self.name, - "nargs": self.nargs, - "obj": self, - } - - if self.multiple: - action = "append" - elif self.count: - action = "count" - else: - action = "store" - - if self.is_flag: - kwargs.pop("nargs", None) - action_const = "{}_const".format(action) - if self.is_bool_flag and self.secondary_opts: - parser.add_option(self.opts, action=action_const, const=True, **kwargs) - parser.add_option( - self.secondary_opts, action=action_const, const=False, **kwargs - ) - else: - parser.add_option( - self.opts, action=action_const, const=self.flag_value, **kwargs - ) - else: - kwargs["action"] = action - parser.add_option(self.opts, **kwargs) - - def get_help_record(self, ctx): - if self.hidden: - return - any_prefix_is_slash = [] - - def _write_opts(opts): - rv, any_slashes = join_options(opts) - if any_slashes: - any_prefix_is_slash[:] = [True] - if not self.is_flag and not self.count: - rv += " {}".format(self.make_metavar()) - return rv - - rv = [_write_opts(self.opts)] - if self.secondary_opts: - rv.append(_write_opts(self.secondary_opts)) - - help = self.help or "" - extra = [] - if self.show_envvar: - envvar = self.envvar - if envvar is None: - if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None: - envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper()) - if envvar is not None: - extra.append( - "env var: {}".format( - ", ".join(str(d) for d in envvar) - if isinstance(envvar, (list, tuple)) - else envvar - ) - ) - if self.default is not None and (self.show_default or ctx.show_default): - if isinstance(self.show_default, string_types): - default_string = "({})".format(self.show_default) - elif isinstance(self.default, (list, tuple)): - default_string = ", ".join(str(d) for d in self.default) - elif inspect.isfunction(self.default): - default_string = "(dynamic)" - else: - default_string = self.default - extra.append("default: {}".format(default_string)) - - if self.required: - extra.append("required") - if extra: - help = "{}[{}]".format( - "{} ".format(help) if help else "", "; ".join(extra) - ) - - return ("; " if any_prefix_is_slash else " / ").join(rv), help - - def get_default(self, ctx): - # If we're a non boolean flag our default is more complex because - # we need to look at all flags in the same group to figure out - # if we're the the default one in which case we return the flag - # value as default. - if self.is_flag and not self.is_bool_flag: - for param in ctx.command.params: - if param.name == self.name and param.default: - return param.flag_value - return None - return Parameter.get_default(self, ctx) - - def prompt_for_value(self, ctx): - """This is an alternative flow that can be activated in the full - value processing if a value does not exist. It will prompt the - user until a valid value exists and then returns the processed - value as result. - """ - # Calculate the default before prompting anything to be stable. - default = self.get_default(ctx) - - # If this is a prompt for a flag we need to handle this - # differently. 
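`get_help_record` above is what assembles the `[env var: ...]` and `[default: ...]` annotations on the help page. A small sketch of the user-facing side (option names invented):

```
import click

@click.command()
@click.option("--retries", default=3, show_default=True, show_envvar=True,
              envvar="APP_RETRIES", help="How often to retry.")
def cli(retries):
    click.echo(retries)

# `cli --help` should render something like:
#   --retries INTEGER  How often to retry.  [env var: APP_RETRIES; default: 3]
```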
- if self.is_bool_flag: - return confirm(self.prompt, default) - - return prompt( - self.prompt, - default=default, - type=self.type, - hide_input=self.hide_input, - show_choices=self.show_choices, - confirmation_prompt=self.confirmation_prompt, - value_proc=lambda x: self.process_value(ctx, x), - ) - - def resolve_envvar_value(self, ctx): - rv = Parameter.resolve_envvar_value(self, ctx) - if rv is not None: - return rv - if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None: - envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper()) - return os.environ.get(envvar) - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is None: - return None - value_depth = (self.nargs != 1) + bool(self.multiple) - if value_depth > 0 and rv is not None: - rv = self.type.split_envvar_value(rv) - if self.multiple and self.nargs != 1: - rv = batch(rv, self.nargs) - return rv - - def full_process_value(self, ctx, value): - if value is None and self.prompt is not None and not ctx.resilient_parsing: - return self.prompt_for_value(ctx) - return Parameter.full_process_value(self, ctx, value) - - -class Argument(Parameter): - """Arguments are positional parameters to a command. They generally - provide fewer features than options but can have infinite ``nargs`` - and are required by default. - - All parameters are passed onwards to the parameter constructor. - """ - - param_type_name = "argument" - - def __init__(self, param_decls, required=None, **attrs): - if required is None: - if attrs.get("default") is not None: - required = False - else: - required = attrs.get("nargs", 1) > 0 - Parameter.__init__(self, param_decls, required=required, **attrs) - if self.default is not None and self.nargs < 0: - raise TypeError( - "nargs=-1 in combination with a default value is not supported." - ) - - @property - def human_readable_name(self): - if self.metavar is not None: - return self.metavar - return self.name.upper() - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - var = self.type.get_metavar(self) - if not var: - var = self.name.upper() - if not self.required: - var = "[{}]".format(var) - if self.nargs != 1: - var += "..." 
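The bool-flag branch above routes prompting through `confirm` rather than `prompt`. Illustratively (flag name invented):

```
import click

@click.command()
@click.option("--enable", is_flag=True, prompt="Enable the feature?")
def cli(enable):
    # Run without --enable, this asks "Enable the feature? [y/N]"
    # via confirm(); a non-flag option with prompt=True uses prompt().
    click.echo("enabled" if enable else "disabled")
```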
- return var - - def _parse_decls(self, decls, expose_value): - if not decls: - if not expose_value: - return None, [], [] - raise TypeError("Could not determine name for argument") - if len(decls) == 1: - name = arg = decls[0] - name = name.replace("-", "_").lower() - else: - raise TypeError( - "Arguments take exactly one parameter declaration, got" - " {}".format(len(decls)) - ) - return name, [arg], [] - - def get_usage_pieces(self, ctx): - return [self.make_metavar()] - - def get_error_hint(self, ctx): - return repr(self.make_metavar()) - - def add_to_parser(self, parser, ctx): - parser.add_argument(dest=self.name, nargs=self.nargs, obj=self) diff --git a/venv/lib/python3.7/site-packages/click/decorators.py b/venv/lib/python3.7/site-packages/click/decorators.py deleted file mode 100644 index c7b5af6..0000000 --- a/venv/lib/python3.7/site-packages/click/decorators.py +++ /dev/null @@ -1,333 +0,0 @@ -import inspect -import sys -from functools import update_wrapper - -from ._compat import iteritems -from ._unicodefun import _check_for_unicode_literals -from .core import Argument -from .core import Command -from .core import Group -from .core import Option -from .globals import get_current_context -from .utils import echo - - -def pass_context(f): - """Marks a callback as wanting to receive the current context - object as first argument. - """ - - def new_func(*args, **kwargs): - return f(get_current_context(), *args, **kwargs) - - return update_wrapper(new_func, f) - - -def pass_obj(f): - """Similar to :func:`pass_context`, but only pass the object on the - context onwards (:attr:`Context.obj`). This is useful if that object - represents the state of a nested system. - """ - - def new_func(*args, **kwargs): - return f(get_current_context().obj, *args, **kwargs) - - return update_wrapper(new_func, f) - - -def make_pass_decorator(object_type, ensure=False): - """Given an object type this creates a decorator that will work - similar to :func:`pass_obj` but instead of passing the object of the - current context, it will find the innermost context of type - :func:`object_type`. - - This generates a decorator that works roughly like this:: - - from functools import update_wrapper - - def decorator(f): - @pass_context - def new_func(ctx, *args, **kwargs): - obj = ctx.find_object(object_type) - return ctx.invoke(f, obj, *args, **kwargs) - return update_wrapper(new_func, f) - return decorator - - :param object_type: the type of the object to pass. - :param ensure: if set to `True`, a new object will be created and - remembered on the context if it's not there yet. 
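For reference, the `Argument` behavior being removed above (required by default, greedy `nargs=-1`) is what enables the classic variadic pattern; a sketch:

```
import click

@click.command()
@click.argument("src", nargs=-1)
@click.argument("dst", nargs=1)
def copy(src, dst):
    # `copy a.txt b.txt dest/` -> src == ('a.txt', 'b.txt'), dst == 'dest/'
    click.echo("{} -> {}".format(" ".join(src), dst))
```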
- """ - - def decorator(f): - def new_func(*args, **kwargs): - ctx = get_current_context() - if ensure: - obj = ctx.ensure_object(object_type) - else: - obj = ctx.find_object(object_type) - if obj is None: - raise RuntimeError( - "Managed to invoke callback without a context" - " object of type '{}' existing".format(object_type.__name__) - ) - return ctx.invoke(f, obj, *args, **kwargs) - - return update_wrapper(new_func, f) - - return decorator - - -def _make_command(f, name, attrs, cls): - if isinstance(f, Command): - raise TypeError("Attempted to convert a callback into a command twice.") - try: - params = f.__click_params__ - params.reverse() - del f.__click_params__ - except AttributeError: - params = [] - help = attrs.get("help") - if help is None: - help = inspect.getdoc(f) - if isinstance(help, bytes): - help = help.decode("utf-8") - else: - help = inspect.cleandoc(help) - attrs["help"] = help - _check_for_unicode_literals() - return cls( - name=name or f.__name__.lower().replace("_", "-"), - callback=f, - params=params, - **attrs - ) - - -def command(name=None, cls=None, **attrs): - r"""Creates a new :class:`Command` and uses the decorated function as - callback. This will also automatically attach all decorated - :func:`option`\s and :func:`argument`\s as parameters to the command. - - The name of the command defaults to the name of the function with - underscores replaced by dashes. If you want to change that, you can - pass the intended name as the first argument. - - All keyword arguments are forwarded to the underlying command class. - - Once decorated the function turns into a :class:`Command` instance - that can be invoked as a command line utility or be attached to a - command :class:`Group`. - - :param name: the name of the command. This defaults to the function - name with underscores replaced by dashes. - :param cls: the command class to instantiate. This defaults to - :class:`Command`. - """ - if cls is None: - cls = Command - - def decorator(f): - cmd = _make_command(f, name, attrs, cls) - cmd.__doc__ = f.__doc__ - return cmd - - return decorator - - -def group(name=None, **attrs): - """Creates a new :class:`Group` with a function as callback. This - works otherwise the same as :func:`command` just that the `cls` - parameter is set to :class:`Group`. - """ - attrs.setdefault("cls", Group) - return command(name, **attrs) - - -def _param_memo(f, param): - if isinstance(f, Command): - f.params.append(param) - else: - if not hasattr(f, "__click_params__"): - f.__click_params__ = [] - f.__click_params__.append(param) - - -def argument(*param_decls, **attrs): - """Attaches an argument to the command. All positional arguments are - passed as parameter declarations to :class:`Argument`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Argument` instance manually - and attaching it to the :attr:`Command.params` list. - - :param cls: the argument class to instantiate. This defaults to - :class:`Argument`. - """ - - def decorator(f): - ArgumentClass = attrs.pop("cls", Argument) - _param_memo(f, ArgumentClass(param_decls, **attrs)) - return f - - return decorator - - -def option(*param_decls, **attrs): - """Attaches an option to the command. All positional arguments are - passed as parameter declarations to :class:`Option`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Option` instance manually - and attaching it to the :attr:`Command.params` list. 
- - :param cls: the option class to instantiate. This defaults to - :class:`Option`. - """ - - def decorator(f): - # Issue 926, copy attrs, so pre-defined options can re-use the same cls= - option_attrs = attrs.copy() - - if "help" in option_attrs: - option_attrs["help"] = inspect.cleandoc(option_attrs["help"]) - OptionClass = option_attrs.pop("cls", Option) - _param_memo(f, OptionClass(param_decls, **option_attrs)) - return f - - return decorator - - -def confirmation_option(*param_decls, **attrs): - """Shortcut for confirmation prompts that can be ignored by passing - ``--yes`` as parameter. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - def callback(ctx, param, value): - if not value: - ctx.abort() - - @click.command() - @click.option('--yes', is_flag=True, callback=callback, - expose_value=False, prompt='Do you want to continue?') - def dropdb(): - pass - """ - - def decorator(f): - def callback(ctx, param, value): - if not value: - ctx.abort() - - attrs.setdefault("is_flag", True) - attrs.setdefault("callback", callback) - attrs.setdefault("expose_value", False) - attrs.setdefault("prompt", "Do you want to continue?") - attrs.setdefault("help", "Confirm the action without prompting.") - return option(*(param_decls or ("--yes",)), **attrs)(f) - - return decorator - - -def password_option(*param_decls, **attrs): - """Shortcut for password prompts. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - @click.command() - @click.option('--password', prompt=True, confirmation_prompt=True, - hide_input=True) - def changeadmin(password): - pass - """ - - def decorator(f): - attrs.setdefault("prompt", True) - attrs.setdefault("confirmation_prompt", True) - attrs.setdefault("hide_input", True) - return option(*(param_decls or ("--password",)), **attrs)(f) - - return decorator - - -def version_option(version=None, *param_decls, **attrs): - """Adds a ``--version`` option which immediately ends the program - printing out the version number. This is implemented as an eager - option that prints the version and exits the program in the callback. - - :param version: the version number to show. If not provided Click - attempts an auto discovery via setuptools. - :param prog_name: the name of the program (defaults to autodetection) - :param message: custom message to show instead of the default - (``'%(prog)s, version %(version)s'``) - :param others: everything else is forwarded to :func:`option`. 
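The shortcut decorators above are used like so (command name invented; this mirrors the `--yes` pattern from the docstring):

```
import click

@click.command()
@click.confirmation_option(prompt="Are you sure you want to drop the db?")
def dropdb():
    click.echo("Dropped all tables!")
```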
- """ - if version is None: - if hasattr(sys, "_getframe"): - module = sys._getframe(1).f_globals.get("__name__") - else: - module = "" - - def decorator(f): - prog_name = attrs.pop("prog_name", None) - message = attrs.pop("message", "%(prog)s, version %(version)s") - - def callback(ctx, param, value): - if not value or ctx.resilient_parsing: - return - prog = prog_name - if prog is None: - prog = ctx.find_root().info_name - ver = version - if ver is None: - try: - import pkg_resources - except ImportError: - pass - else: - for dist in pkg_resources.working_set: - scripts = dist.get_entry_map().get("console_scripts") or {} - for _, entry_point in iteritems(scripts): - if entry_point.module_name == module: - ver = dist.version - break - if ver is None: - raise RuntimeError("Could not determine version") - echo(message % {"prog": prog, "version": ver}, color=ctx.color) - ctx.exit() - - attrs.setdefault("is_flag", True) - attrs.setdefault("expose_value", False) - attrs.setdefault("is_eager", True) - attrs.setdefault("help", "Show the version and exit.") - attrs["callback"] = callback - return option(*(param_decls or ("--version",)), **attrs)(f) - - return decorator - - -def help_option(*param_decls, **attrs): - """Adds a ``--help`` option which immediately ends the program - printing out the help page. This is usually unnecessary to add as - this is added by default to all commands unless suppressed. - - Like :func:`version_option`, this is implemented as eager option that - prints in the callback and exits. - - All arguments are forwarded to :func:`option`. - """ - - def decorator(f): - def callback(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - attrs.setdefault("is_flag", True) - attrs.setdefault("expose_value", False) - attrs.setdefault("help", "Show this message and exit.") - attrs.setdefault("is_eager", True) - attrs["callback"] = callback - return option(*(param_decls or ("--help",)), **attrs)(f) - - return decorator diff --git a/venv/lib/python3.7/site-packages/click/exceptions.py b/venv/lib/python3.7/site-packages/click/exceptions.py deleted file mode 100644 index 592ee38..0000000 --- a/venv/lib/python3.7/site-packages/click/exceptions.py +++ /dev/null @@ -1,253 +0,0 @@ -from ._compat import filename_to_ui -from ._compat import get_text_stderr -from ._compat import PY2 -from .utils import echo - - -def _join_param_hints(param_hint): - if isinstance(param_hint, (tuple, list)): - return " / ".join(repr(x) for x in param_hint) - return param_hint - - -class ClickException(Exception): - """An exception that Click can handle and show to the user.""" - - #: The exit code for this exception - exit_code = 1 - - def __init__(self, message): - ctor_msg = message - if PY2: - if ctor_msg is not None: - ctor_msg = ctor_msg.encode("utf-8") - Exception.__init__(self, ctor_msg) - self.message = message - - def format_message(self): - return self.message - - def __str__(self): - return self.message - - if PY2: - __unicode__ = __str__ - - def __str__(self): - return self.message.encode("utf-8") - - def show(self, file=None): - if file is None: - file = get_text_stderr() - echo("Error: {}".format(self.format_message()), file=file) - - -class UsageError(ClickException): - """An internal exception that signals a usage error. This typically - aborts any further handling. - - :param message: the error message to display. - :param ctx: optionally the context that caused this error. 
Click will - fill in the context automatically in some situations. - """ - - exit_code = 2 - - def __init__(self, message, ctx=None): - ClickException.__init__(self, message) - self.ctx = ctx - self.cmd = self.ctx.command if self.ctx else None - - def show(self, file=None): - if file is None: - file = get_text_stderr() - color = None - hint = "" - if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None: - hint = "Try '{} {}' for help.\n".format( - self.ctx.command_path, self.ctx.help_option_names[0] - ) - if self.ctx is not None: - color = self.ctx.color - echo("{}\n{}".format(self.ctx.get_usage(), hint), file=file, color=color) - echo("Error: {}".format(self.format_message()), file=file, color=color) - - -class BadParameter(UsageError): - """An exception that formats out a standardized error message for a - bad parameter. This is useful when thrown from a callback or type as - Click will attach contextual information to it (for instance, which - parameter it is). - - .. versionadded:: 2.0 - - :param param: the parameter object that caused this error. This can - be left out, and Click will attach this info itself - if possible. - :param param_hint: a string that shows up as parameter name. This - can be used as alternative to `param` in cases - where custom validation should happen. If it is - a string it's used as such, if it's a list then - each item is quoted and separated. - """ - - def __init__(self, message, ctx=None, param=None, param_hint=None): - UsageError.__init__(self, message, ctx) - self.param = param - self.param_hint = param_hint - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.get_error_hint(self.ctx) - else: - return "Invalid value: {}".format(self.message) - param_hint = _join_param_hints(param_hint) - - return "Invalid value for {}: {}".format(param_hint, self.message) - - -class MissingParameter(BadParameter): - """Raised if click required an option or argument but it was not - provided when invoking the script. - - .. versionadded:: 4.0 - - :param param_type: a string that indicates the type of the parameter. - The default is to inherit the parameter type from - the given `param`. Valid values are ``'parameter'``, - ``'option'`` or ``'argument'``. - """ - - def __init__( - self, message=None, ctx=None, param=None, param_hint=None, param_type=None - ): - BadParameter.__init__(self, message, ctx, param, param_hint) - self.param_type = param_type - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.get_error_hint(self.ctx) - else: - param_hint = None - param_hint = _join_param_hints(param_hint) - - param_type = self.param_type - if param_type is None and self.param is not None: - param_type = self.param.param_type_name - - msg = self.message - if self.param is not None: - msg_extra = self.param.type.get_missing_message(self.param) - if msg_extra: - if msg: - msg += ". {}".format(msg_extra) - else: - msg = msg_extra - - return "Missing {}{}{}{}".format( - param_type, - " {}".format(param_hint) if param_hint else "", - ". 
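`BadParameter` above is most useful from validation callbacks, where Click fills in the parameter hint automatically. A sketch (option name invented):

```
import click

def validate_workers(ctx, param, value):
    if value < 1:
        # Rendered roughly as:
        # Error: Invalid value for '--workers': must be at least 1
        raise click.BadParameter("must be at least 1")
    return value

@click.command()
@click.option("--workers", type=int, default=1, callback=validate_workers)
def cli(workers):
    click.echo(workers)
```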
" if msg else ".", - msg or "", - ) - - def __str__(self): - if self.message is None: - param_name = self.param.name if self.param else None - return "missing parameter: {}".format(param_name) - else: - return self.message - - if PY2: - __unicode__ = __str__ - - def __str__(self): - return self.__unicode__().encode("utf-8") - - -class NoSuchOption(UsageError): - """Raised if click attempted to handle an option that does not - exist. - - .. versionadded:: 4.0 - """ - - def __init__(self, option_name, message=None, possibilities=None, ctx=None): - if message is None: - message = "no such option: {}".format(option_name) - UsageError.__init__(self, message, ctx) - self.option_name = option_name - self.possibilities = possibilities - - def format_message(self): - bits = [self.message] - if self.possibilities: - if len(self.possibilities) == 1: - bits.append("Did you mean {}?".format(self.possibilities[0])) - else: - possibilities = sorted(self.possibilities) - bits.append("(Possible options: {})".format(", ".join(possibilities))) - return " ".join(bits) - - -class BadOptionUsage(UsageError): - """Raised if an option is generally supplied but the use of the option - was incorrect. This is for instance raised if the number of arguments - for an option is not correct. - - .. versionadded:: 4.0 - - :param option_name: the name of the option being used incorrectly. - """ - - def __init__(self, option_name, message, ctx=None): - UsageError.__init__(self, message, ctx) - self.option_name = option_name - - -class BadArgumentUsage(UsageError): - """Raised if an argument is generally supplied but the use of the argument - was incorrect. This is for instance raised if the number of values - for an argument is not correct. - - .. versionadded:: 6.0 - """ - - def __init__(self, message, ctx=None): - UsageError.__init__(self, message, ctx) - - -class FileError(ClickException): - """Raised if a file cannot be opened.""" - - def __init__(self, filename, hint=None): - ui_filename = filename_to_ui(filename) - if hint is None: - hint = "unknown error" - ClickException.__init__(self, hint) - self.ui_filename = ui_filename - self.filename = filename - - def format_message(self): - return "Could not open file {}: {}".format(self.ui_filename, self.message) - - -class Abort(RuntimeError): - """An internal signalling exception that signals Click to abort.""" - - -class Exit(RuntimeError): - """An exception that indicates that the application should exit with some - status code. - - :param code: the status code to exit with. - """ - - __slots__ = ("exit_code",) - - def __init__(self, code=0): - self.exit_code = code diff --git a/venv/lib/python3.7/site-packages/click/formatting.py b/venv/lib/python3.7/site-packages/click/formatting.py deleted file mode 100644 index 319c7f6..0000000 --- a/venv/lib/python3.7/site-packages/click/formatting.py +++ /dev/null @@ -1,283 +0,0 @@ -from contextlib import contextmanager - -from ._compat import term_len -from .parser import split_opt -from .termui import get_terminal_size - -# Can force a width. 
This is used by the test system -FORCED_WIDTH = None - - -def measure_table(rows): - widths = {} - for row in rows: - for idx, col in enumerate(row): - widths[idx] = max(widths.get(idx, 0), term_len(col)) - return tuple(y for x, y in sorted(widths.items())) - - -def iter_rows(rows, col_count): - for row in rows: - row = tuple(row) - yield row + ("",) * (col_count - len(row)) - - -def wrap_text( - text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False -): - """A helper function that intelligently wraps text. By default, it - assumes that it operates on a single paragraph of text but if the - `preserve_paragraphs` parameter is provided it will intelligently - handle paragraphs (defined by two empty lines). - - If paragraphs are handled, a paragraph can be prefixed with an empty - line containing the ``\\b`` character (``\\x08``) to indicate that - no rewrapping should happen in that block. - - :param text: the text that should be rewrapped. - :param width: the maximum width for the text. - :param initial_indent: the initial indent that should be placed on the - first line as a string. - :param subsequent_indent: the indent string that should be placed on - each consecutive line. - :param preserve_paragraphs: if this flag is set then the wrapping will - intelligently handle paragraphs. - """ - from ._textwrap import TextWrapper - - text = text.expandtabs() - wrapper = TextWrapper( - width, - initial_indent=initial_indent, - subsequent_indent=subsequent_indent, - replace_whitespace=False, - ) - if not preserve_paragraphs: - return wrapper.fill(text) - - p = [] - buf = [] - indent = None - - def _flush_par(): - if not buf: - return - if buf[0].strip() == "\b": - p.append((indent or 0, True, "\n".join(buf[1:]))) - else: - p.append((indent or 0, False, " ".join(buf))) - del buf[:] - - for line in text.splitlines(): - if not line: - _flush_par() - indent = None - else: - if indent is None: - orig_len = term_len(line) - line = line.lstrip() - indent = orig_len - term_len(line) - buf.append(line) - _flush_par() - - rv = [] - for indent, raw, text in p: - with wrapper.extra_indent(" " * indent): - if raw: - rv.append(wrapper.indent_only(text)) - else: - rv.append(wrapper.fill(text)) - - return "\n\n".join(rv) - - -class HelpFormatter(object): - """This class helps with formatting text-based help pages. It's - usually just needed for very special internal cases, but it's also - exposed so that developers can write their own fancy outputs. - - At present, it always writes into memory. - - :param indent_increment: the additional increment for each level. - :param width: the width for the text. This defaults to the terminal - width clamped to a maximum of 78. - """ - - def __init__(self, indent_increment=2, width=None, max_width=None): - self.indent_increment = indent_increment - if max_width is None: - max_width = 80 - if width is None: - width = FORCED_WIDTH - if width is None: - width = max(min(get_terminal_size()[0], max_width) - 2, 50) - self.width = width - self.current_indent = 0 - self.buffer = [] - - def write(self, string): - """Writes a unicode string into the internal buffer.""" - self.buffer.append(string) - - def indent(self): - """Increases the indentation.""" - self.current_indent += self.indent_increment - - def dedent(self): - """Decreases the indentation.""" - self.current_indent -= self.indent_increment - - def write_usage(self, prog, args="", prefix="Usage: "): - """Writes a usage line into the buffer. - - :param prog: the program name. 
- :param args: whitespace separated list of arguments. - :param prefix: the prefix for the first line. - """ - usage_prefix = "{:>{w}}{} ".format(prefix, prog, w=self.current_indent) - text_width = self.width - self.current_indent - - if text_width >= (term_len(usage_prefix) + 20): - # The arguments will fit to the right of the prefix. - indent = " " * term_len(usage_prefix) - self.write( - wrap_text( - args, - text_width, - initial_indent=usage_prefix, - subsequent_indent=indent, - ) - ) - else: - # The prefix is too long, put the arguments on the next line. - self.write(usage_prefix) - self.write("\n") - indent = " " * (max(self.current_indent, term_len(prefix)) + 4) - self.write( - wrap_text( - args, text_width, initial_indent=indent, subsequent_indent=indent - ) - ) - - self.write("\n") - - def write_heading(self, heading): - """Writes a heading into the buffer.""" - self.write("{:>{w}}{}:\n".format("", heading, w=self.current_indent)) - - def write_paragraph(self): - """Writes a paragraph into the buffer.""" - if self.buffer: - self.write("\n") - - def write_text(self, text): - """Writes re-indented text into the buffer. This rewraps and - preserves paragraphs. - """ - text_width = max(self.width - self.current_indent, 11) - indent = " " * self.current_indent - self.write( - wrap_text( - text, - text_width, - initial_indent=indent, - subsequent_indent=indent, - preserve_paragraphs=True, - ) - ) - self.write("\n") - - def write_dl(self, rows, col_max=30, col_spacing=2): - """Writes a definition list into the buffer. This is how options - and commands are usually formatted. - - :param rows: a list of two item tuples for the terms and values. - :param col_max: the maximum width of the first column. - :param col_spacing: the number of spaces between the first and - second column. - """ - rows = list(rows) - widths = measure_table(rows) - if len(widths) != 2: - raise TypeError("Expected two columns for definition list") - - first_col = min(widths[0], col_max) + col_spacing - - for first, second in iter_rows(rows, len(widths)): - self.write("{:>{w}}{}".format("", first, w=self.current_indent)) - if not second: - self.write("\n") - continue - if term_len(first) <= first_col - col_spacing: - self.write(" " * (first_col - term_len(first))) - else: - self.write("\n") - self.write(" " * (first_col + self.current_indent)) - - text_width = max(self.width - first_col - 2, 10) - wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True) - lines = wrapped_text.splitlines() - - if lines: - self.write("{}\n".format(lines[0])) - - for line in lines[1:]: - self.write( - "{:>{w}}{}\n".format( - "", line, w=first_col + self.current_indent - ) - ) - - if len(lines) > 1: - # separate long help from next option - self.write("\n") - else: - self.write("\n") - - @contextmanager - def section(self, name): - """Helpful context manager that writes a paragraph, a heading, - and the indents. - - :param name: the section name that is written as heading. 
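Driving the formatter above directly looks roughly like this (all strings invented):

```
import click

formatter = click.HelpFormatter(width=60)
formatter.write_usage("example-tool", "[OPTIONS] APP")
with formatter.section("Options"):
    formatter.write_dl([
        ("-w, --workers INTEGER", "Number of worker processes."),
        ("-b, --bind TEXT", "Address or unix socket to bind to."),
    ])
print(formatter.getvalue())
```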
- """ - self.write_paragraph() - self.write_heading(name) - self.indent() - try: - yield - finally: - self.dedent() - - @contextmanager - def indentation(self): - """A context manager that increases the indentation.""" - self.indent() - try: - yield - finally: - self.dedent() - - def getvalue(self): - """Returns the buffer contents.""" - return "".join(self.buffer) - - -def join_options(options): - """Given a list of option strings this joins them in the most appropriate - way and returns them in the form ``(formatted_string, - any_prefix_is_slash)`` where the second item in the tuple is a flag that - indicates if any of the option prefixes was a slash. - """ - rv = [] - any_prefix_is_slash = False - for opt in options: - prefix = split_opt(opt)[0] - if prefix == "/": - any_prefix_is_slash = True - rv.append((len(prefix), opt)) - - rv.sort(key=lambda x: x[0]) - - rv = ", ".join(x[1] for x in rv) - return rv, any_prefix_is_slash diff --git a/venv/lib/python3.7/site-packages/click/globals.py b/venv/lib/python3.7/site-packages/click/globals.py deleted file mode 100644 index 1649f9a..0000000 --- a/venv/lib/python3.7/site-packages/click/globals.py +++ /dev/null @@ -1,47 +0,0 @@ -from threading import local - -_local = local() - - -def get_current_context(silent=False): - """Returns the current click context. This can be used as a way to - access the current context object from anywhere. This is a more implicit - alternative to the :func:`pass_context` decorator. This function is - primarily useful for helpers such as :func:`echo` which might be - interested in changing its behavior based on the current context. - - To push the current context, :meth:`Context.scope` can be used. - - .. versionadded:: 5.0 - - :param silent: if set to `True` the return value is `None` if no context - is available. The default behavior is to raise a - :exc:`RuntimeError`. - """ - try: - return _local.stack[-1] - except (AttributeError, IndexError): - if not silent: - raise RuntimeError("There is no active click context.") - - -def push_context(ctx): - """Pushes a new context to the current stack.""" - _local.__dict__.setdefault("stack", []).append(ctx) - - -def pop_context(): - """Removes the top level from the stack.""" - _local.stack.pop() - - -def resolve_color_default(color=None): - """"Internal helper to get the default value of the color flag. If a - value is passed it's returned unchanged, otherwise it's looked up from - the current context. - """ - if color is not None: - return color - ctx = get_current_context(silent=True) - if ctx is not None: - return ctx.color diff --git a/venv/lib/python3.7/site-packages/click/parser.py b/venv/lib/python3.7/site-packages/click/parser.py deleted file mode 100644 index f43ebfe..0000000 --- a/venv/lib/python3.7/site-packages/click/parser.py +++ /dev/null @@ -1,428 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module started out as largely a copy paste from the stdlib's -optparse module with the features removed that we do not need from -optparse because we implement them in Click on a higher level (for -instance type handling, help formatting and a lot more). - -The plan is to remove more and more from here over time. - -The reason this is a different module and not optparse from the stdlib -is that there are differences in 2.x and 3.x about the error messages -generated and optparse in the stdlib uses gettext for no good reason -and might cause us issues. - -Click uses parts of optparse written by Gregory P. Ward and maintained -by the Python Software Foundation. 
This is limited to code in parser.py. - -Copyright 2001-2006 Gregory P. Ward. All rights reserved. -Copyright 2002-2006 Python Software Foundation. All rights reserved. -""" -import re -from collections import deque - -from .exceptions import BadArgumentUsage -from .exceptions import BadOptionUsage -from .exceptions import NoSuchOption -from .exceptions import UsageError - - -def _unpack_args(args, nargs_spec): - """Given an iterable of arguments and an iterable of nargs specifications, - it returns a tuple with all the unpacked arguments at the first index - and all remaining arguments as the second. - - The nargs specification is the number of arguments that should be consumed - or `-1` to indicate that this position should eat up all the remainders. - - Missing items are filled with `None`. - """ - args = deque(args) - nargs_spec = deque(nargs_spec) - rv = [] - spos = None - - def _fetch(c): - try: - if spos is None: - return c.popleft() - else: - return c.pop() - except IndexError: - return None - - while nargs_spec: - nargs = _fetch(nargs_spec) - if nargs == 1: - rv.append(_fetch(args)) - elif nargs > 1: - x = [_fetch(args) for _ in range(nargs)] - # If we're reversed, we're pulling in the arguments in reverse, - # so we need to turn them around. - if spos is not None: - x.reverse() - rv.append(tuple(x)) - elif nargs < 0: - if spos is not None: - raise TypeError("Cannot have two nargs < 0") - spos = len(rv) - rv.append(None) - - # spos is the position of the wildcard (star). If it's not `None`, - # we fill it with the remainder. - if spos is not None: - rv[spos] = tuple(args) - args = [] - rv[spos + 1 :] = reversed(rv[spos + 1 :]) - - return tuple(rv), list(args) - - -def _error_opt_args(nargs, opt): - if nargs == 1: - raise BadOptionUsage(opt, "{} option requires an argument".format(opt)) - raise BadOptionUsage(opt, "{} option requires {} arguments".format(opt, nargs)) - - -def split_opt(opt): - first = opt[:1] - if first.isalnum(): - return "", opt - if opt[1:2] == first: - return opt[:2], opt[2:] - return first, opt[1:] - - -def normalize_opt(opt, ctx): - if ctx is None or ctx.token_normalize_func is None: - return opt - prefix, opt = split_opt(opt) - return prefix + ctx.token_normalize_func(opt) - - -def split_arg_string(string): - """Given an argument string this attempts to split it into small parts.""" - rv = [] - for match in re.finditer( - r"('([^'\\]*(?:\\.[^'\\]*)*)'|\"([^\"\\]*(?:\\.[^\"\\]*)*)\"|\S+)\s*", - string, - re.S, - ): - arg = match.group().strip() - if arg[:1] == arg[-1:] and arg[:1] in "\"'": - arg = arg[1:-1].encode("ascii", "backslashreplace").decode("unicode-escape") - try: - arg = type(string)(arg) - except UnicodeError: - pass - rv.append(arg) - return rv - - -class Option(object): - def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): - self._short_opts = [] - self._long_opts = [] - self.prefixes = set() - - for opt in opts: - prefix, value = split_opt(opt) - if not prefix: - raise ValueError("Invalid start character for option ({})".format(opt)) - self.prefixes.add(prefix[0]) - if len(prefix) == 1 and len(value) == 1: - self._short_opts.append(opt) - else: - self._long_opts.append(opt) - self.prefixes.add(prefix) - - if action is None: - action = "store" - - self.dest = dest - self.action = action - self.nargs = nargs - self.const = const - self.obj = obj - - @property - def takes_value(self): - return self.action in ("store", "append") - - def process(self, value, state): - if self.action == "store": - state.opts[self.dest] = 
value - elif self.action == "store_const": - state.opts[self.dest] = self.const - elif self.action == "append": - state.opts.setdefault(self.dest, []).append(value) - elif self.action == "append_const": - state.opts.setdefault(self.dest, []).append(self.const) - elif self.action == "count": - state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 - else: - raise ValueError("unknown action '{}'".format(self.action)) - state.order.append(self.obj) - - -class Argument(object): - def __init__(self, dest, nargs=1, obj=None): - self.dest = dest - self.nargs = nargs - self.obj = obj - - def process(self, value, state): - if self.nargs > 1: - holes = sum(1 for x in value if x is None) - if holes == len(value): - value = None - elif holes != 0: - raise BadArgumentUsage( - "argument {} takes {} values".format(self.dest, self.nargs) - ) - state.opts[self.dest] = value - state.order.append(self.obj) - - -class ParsingState(object): - def __init__(self, rargs): - self.opts = {} - self.largs = [] - self.rargs = rargs - self.order = [] - - -class OptionParser(object): - """The option parser is an internal class that is ultimately used to - parse options and arguments. It's modelled after optparse and brings - a similar but vastly simplified API. It should generally not be used - directly as the high level Click classes wrap it for you. - - It's not nearly as extensible as optparse or argparse as it does not - implement features that are implemented on a higher level (such as - types or defaults). - - :param ctx: optionally the :class:`~click.Context` where this parser - should go with. - """ - - def __init__(self, ctx=None): - #: The :class:`~click.Context` for this parser. This might be - #: `None` for some advanced use cases. - self.ctx = ctx - #: This controls how the parser deals with interspersed arguments. - #: If this is set to `False`, the parser will stop on the first - #: non-option. Click uses this to implement nested subcommands - #: safely. - self.allow_interspersed_args = True - #: This tells the parser how to deal with unknown options. By - #: default it will error out (which is sensible), but there is a - #: second mode where it will ignore it and continue processing - #: after shifting all the unknown options into the resulting args. - self.ignore_unknown_options = False - if ctx is not None: - self.allow_interspersed_args = ctx.allow_interspersed_args - self.ignore_unknown_options = ctx.ignore_unknown_options - self._short_opt = {} - self._long_opt = {} - self._opt_prefixes = {"-", "--"} - self._args = [] - - def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None): - """Adds a new option named `dest` to the parser. The destination - is not inferred (unlike with optparse) and needs to be explicitly - provided. Action can be any of ``store``, ``store_const``, - ``append``, ``appnd_const`` or ``count``. - - The `obj` can be used to identify the option in the order list - that is returned from the parser. - """ - if obj is None: - obj = dest - opts = [normalize_opt(opt, self.ctx) for opt in opts] - option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj) - self._opt_prefixes.update(option.prefixes) - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - def add_argument(self, dest, nargs=1, obj=None): - """Adds a positional argument named `dest` to the parser. - - The `obj` can be used to identify the option in the order list - that is returned from the parser. 
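Although this parser is an internal layer that the high-level classes normally drive, its API can be exercised directly. A sketch against this vintage of click (all names invented):

```
from click.parser import OptionParser

parser = OptionParser()
parser.add_option(["-w", "--workers"], dest="workers")
parser.add_option(["--debug"], dest="debug", action="store_const", const=True)
parser.add_argument(dest="app")

opts, leftovers, order = parser.parse_args(["-w", "4", "--debug", "app:app"])
# opts == {'workers': '4', 'debug': True, 'app': 'app:app'}
# leftovers == [] and order records the param objects in parse order.
```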
- """ - if obj is None: - obj = dest - self._args.append(Argument(dest=dest, nargs=nargs, obj=obj)) - - def parse_args(self, args): - """Parses positional arguments and returns ``(values, args, order)`` - for the parsed options and arguments as well as the leftover - arguments if there are any. The order is a list of objects as they - appear on the command line. If arguments appear multiple times they - will be memorized multiple times as well. - """ - state = ParsingState(args) - try: - self._process_args_for_options(state) - self._process_args_for_args(state) - except UsageError: - if self.ctx is None or not self.ctx.resilient_parsing: - raise - return state.opts, state.largs, state.order - - def _process_args_for_args(self, state): - pargs, args = _unpack_args( - state.largs + state.rargs, [x.nargs for x in self._args] - ) - - for idx, arg in enumerate(self._args): - arg.process(pargs[idx], state) - - state.largs = args - state.rargs = [] - - def _process_args_for_options(self, state): - while state.rargs: - arg = state.rargs.pop(0) - arglen = len(arg) - # Double dashes always handled explicitly regardless of what - # prefixes are valid. - if arg == "--": - return - elif arg[:1] in self._opt_prefixes and arglen > 1: - self._process_opts(arg, state) - elif self.allow_interspersed_args: - state.largs.append(arg) - else: - state.rargs.insert(0, arg) - return - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt(self, opt, explicit_value, state): - if opt not in self._long_opt: - possibilities = [word for word in self._long_opt if word.startswith(opt)] - raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) - - option = self._long_opt[opt] - if option.takes_value: - # At this point it's safe to modify rargs by injecting the - # explicit value, because no exception is raised in this - # branch. This means that the inserted value will be fully - # consumed. - if explicit_value is not None: - state.rargs.insert(0, explicit_value) - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - elif explicit_value is not None: - raise BadOptionUsage(opt, "{} option does not take a value".format(opt)) - - else: - value = None - - option.process(value, state) - - def _match_short_opt(self, arg, state): - stop = False - i = 1 - prefix = arg[0] - unknown_options = [] - - for ch in arg[1:]: - opt = normalize_opt(prefix + ch, self.ctx) - option = self._short_opt.get(opt) - i += 1 - - if not option: - if self.ignore_unknown_options: - unknown_options.append(ch) - continue - raise NoSuchOption(opt, ctx=self.ctx) - if option.takes_value: - # Any characters left in arg? 
Pretend they're the - # next arg, and stop consuming characters of arg. - if i < len(arg): - state.rargs.insert(0, arg[i:]) - stop = True - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - else: - value = None - - option.process(value, state) - - if stop: - break - - # If we got any unknown options we re-combinate the string of the - # remaining options and re-attach the prefix, then report that - # to the state as new larg. This way there is basic combinatorics - # that can be achieved while still ignoring unknown arguments. - if self.ignore_unknown_options and unknown_options: - state.largs.append("{}{}".format(prefix, "".join(unknown_options))) - - def _process_opts(self, arg, state): - explicit_value = None - # Long option handling happens in two parts. The first part is - # supporting explicitly attached values. In any case, we will try - # to long match the option first. - if "=" in arg: - long_opt, explicit_value = arg.split("=", 1) - else: - long_opt = arg - norm_long_opt = normalize_opt(long_opt, self.ctx) - - # At this point we will match the (assumed) long option through - # the long option matching code. Note that this allows options - # like "-foo" to be matched as long options. - try: - self._match_long_opt(norm_long_opt, explicit_value, state) - except NoSuchOption: - # At this point the long option matching failed, and we need - # to try with short options. However there is a special rule - # which says, that if we have a two character options prefix - # (applies to "--foo" for instance), we do not dispatch to the - # short option code and will instead raise the no option - # error. - if arg[:2] not in self._opt_prefixes: - return self._match_short_opt(arg, state) - if not self.ignore_unknown_options: - raise - state.largs.append(arg) diff --git a/venv/lib/python3.7/site-packages/click/termui.py b/venv/lib/python3.7/site-packages/click/termui.py deleted file mode 100644 index 02ef9e9..0000000 --- a/venv/lib/python3.7/site-packages/click/termui.py +++ /dev/null @@ -1,681 +0,0 @@ -import inspect -import io -import itertools -import os -import struct -import sys - -from ._compat import DEFAULT_COLUMNS -from ._compat import get_winterm_size -from ._compat import isatty -from ._compat import raw_input -from ._compat import string_types -from ._compat import strip_ansi -from ._compat import text_type -from ._compat import WIN -from .exceptions import Abort -from .exceptions import UsageError -from .globals import resolve_color_default -from .types import Choice -from .types import convert_type -from .types import Path -from .utils import echo -from .utils import LazyFile - -# The prompt functions to use. The doc tools currently override these -# functions to customize how they work. 
-visible_prompt_func = raw_input - -_ansi_colors = { - "black": 30, - "red": 31, - "green": 32, - "yellow": 33, - "blue": 34, - "magenta": 35, - "cyan": 36, - "white": 37, - "reset": 39, - "bright_black": 90, - "bright_red": 91, - "bright_green": 92, - "bright_yellow": 93, - "bright_blue": 94, - "bright_magenta": 95, - "bright_cyan": 96, - "bright_white": 97, -} -_ansi_reset_all = "\033[0m" - - -def hidden_prompt_func(prompt): - import getpass - - return getpass.getpass(prompt) - - -def _build_prompt( - text, suffix, show_default=False, default=None, show_choices=True, type=None -): - prompt = text - if type is not None and show_choices and isinstance(type, Choice): - prompt += " ({})".format(", ".join(map(str, type.choices))) - if default is not None and show_default: - prompt = "{} [{}]".format(prompt, _format_default(default)) - return prompt + suffix - - -def _format_default(default): - if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"): - return default.name - - return default - - -def prompt( - text, - default=None, - hide_input=False, - confirmation_prompt=False, - type=None, - value_proc=None, - prompt_suffix=": ", - show_default=True, - err=False, - show_choices=True, -): - """Prompts a user for input. This is a convenience function that can - be used to prompt a user for input later. - - If the user aborts the input by sending a interrupt signal, this - function will catch it and raise a :exc:`Abort` exception. - - .. versionadded:: 7.0 - Added the show_choices parameter. - - .. versionadded:: 6.0 - Added unicode support for cmd.exe on Windows. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the text to show for the prompt. - :param default: the default value to use if no input happens. If this - is not given it will prompt until it's aborted. - :param hide_input: if this is set to true then the input value will - be hidden. - :param confirmation_prompt: asks for confirmation for the value. - :param type: the type to use to check the value against. - :param value_proc: if this parameter is provided it's a function that - is invoked instead of the type conversion to - convert a value. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - :param show_choices: Show or hide choices if the passed type is a Choice. - For example if type is a Choice of either day or week, - show_choices is true and text is "Group by" then the - prompt will be "Group by (day, week): ". - """ - result = None - - def prompt_func(text): - f = hidden_prompt_func if hide_input else visible_prompt_func - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(text, nl=False, err=err) - return f("") - except (KeyboardInterrupt, EOFError): - # getpass doesn't print a newline if the user aborts input with ^C. - # Allegedly this behavior is inherited from getpass(3). 
- # A doc bug has been filed at https://bugs.python.org/issue24711 - if hide_input: - echo(None, err=err) - raise Abort() - - if value_proc is None: - value_proc = convert_type(type, default) - - prompt = _build_prompt( - text, prompt_suffix, show_default, default, show_choices, type - ) - - while 1: - while 1: - value = prompt_func(prompt) - if value: - break - elif default is not None: - if isinstance(value_proc, Path): - # validate Path default value(exists, dir_okay etc.) - value = default - break - return default - try: - result = value_proc(value) - except UsageError as e: - echo("Error: {}".format(e.message), err=err) # noqa: B306 - continue - if not confirmation_prompt: - return result - while 1: - value2 = prompt_func("Repeat for confirmation: ") - if value2: - break - if value == value2: - return result - echo("Error: the two entered values do not match", err=err) - - -def confirm( - text, default=False, abort=False, prompt_suffix=": ", show_default=True, err=False -): - """Prompts for confirmation (yes/no question). - - If the user aborts the input by sending a interrupt signal this - function will catch it and raise a :exc:`Abort` exception. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the question to ask. - :param default: the default for the prompt. - :param abort: if this is set to `True` a negative answer aborts the - exception by raising :exc:`Abort`. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - prompt = _build_prompt( - text, prompt_suffix, show_default, "Y/n" if default else "y/N" - ) - while 1: - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(prompt, nl=False, err=err) - value = visible_prompt_func("").lower().strip() - except (KeyboardInterrupt, EOFError): - raise Abort() - if value in ("y", "yes"): - rv = True - elif value in ("n", "no"): - rv = False - elif value == "": - rv = default - else: - echo("Error: invalid input", err=err) - continue - break - if abort and not rv: - raise Abort() - return rv - - -def get_terminal_size(): - """Returns the current size of the terminal as tuple in the form - ``(width, height)`` in columns and rows. - """ - # If shutil has get_terminal_size() (Python 3.3 and later) use that - if sys.version_info >= (3, 3): - import shutil - - shutil_get_terminal_size = getattr(shutil, "get_terminal_size", None) - if shutil_get_terminal_size: - sz = shutil_get_terminal_size() - return sz.columns, sz.lines - - # We provide a sensible default for get_winterm_size() when being invoked - # inside a subprocess. Without this, it would not provide a useful input. 
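Outside of option processing, `prompt` and `confirm` above are also usable standalone; for instance:

```
import click

name = click.prompt("Deploy target", default="staging")
workers = click.prompt("Workers", type=click.IntRange(min=1), default=4)
if click.confirm("Restart the service now?"):
    click.echo("Restarting {} with {} workers".format(name, workers))
```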
- if get_winterm_size is not None: - size = get_winterm_size() - if size == (0, 0): - return (79, 24) - else: - return size - - def ioctl_gwinsz(fd): - try: - import fcntl - import termios - - cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234")) - except Exception: - return - return cr - - cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - try: - cr = ioctl_gwinsz(fd) - finally: - os.close(fd) - except Exception: - pass - if not cr or not cr[0] or not cr[1]: - cr = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", DEFAULT_COLUMNS)) - return int(cr[1]), int(cr[0]) - - -def echo_via_pager(text_or_generator, color=None): - """This function takes a text and shows it via an environment specific - pager on stdout. - - .. versionchanged:: 3.0 - Added the `color` flag. - - :param text_or_generator: the text to page, or alternatively, a - generator emitting the text to page. - :param color: controls if the pager supports ANSI colors or not. The - default is autodetection. - """ - color = resolve_color_default(color) - - if inspect.isgeneratorfunction(text_or_generator): - i = text_or_generator() - elif isinstance(text_or_generator, string_types): - i = [text_or_generator] - else: - i = iter(text_or_generator) - - # convert every element of i to a text type if necessary - text_generator = (el if isinstance(el, string_types) else text_type(el) for el in i) - - from ._termui_impl import pager - - return pager(itertools.chain(text_generator, "\n"), color) - - -def progressbar( - iterable=None, - length=None, - label=None, - show_eta=True, - show_percent=None, - show_pos=False, - item_show_func=None, - fill_char="#", - empty_char="-", - bar_template="%(label)s [%(bar)s] %(info)s", - info_sep=" ", - width=36, - file=None, - color=None, -): - """This function creates an iterable context manager that can be used - to iterate over something while showing a progress bar. It will - either iterate over the `iterable` or `length` items (that are counted - up). While iteration happens, this function will print a rendered - progress bar to the given `file` (defaults to stdout) and will attempt - to calculate remaining time and more. By default, this progress bar - will not be rendered if the file is not a terminal. - - The context manager creates the progress bar. When the context - manager is entered the progress bar is already created. With every - iteration over the progress bar, the iterable passed to the bar is - advanced and the bar is updated. When the context manager exits, - a newline is printed and the progress bar is finalized on screen. - - Note: The progress bar is currently designed for use cases where the - total progress can be expected to take at least several seconds. - Because of this, the ProgressBar class object won't display - progress that is considered too fast, and progress where the time - between steps is less than a second. - - No printing must happen or the progress bar will be unintentionally - destroyed. - - Example usage:: - - with progressbar(items) as bar: - for item in bar: - do_something_with(item) - - Alternatively, if no iterable is specified, one can manually update the - progress bar through the `update()` method instead of directly - iterating over the progress bar. The update method accepts the number - of steps to increment the bar with:: - - with progressbar(length=chunks.total_bytes) as bar: - for chunk in chunks: - process_chunk(chunk) - bar.update(chunks.bytes) - - .. 
versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `color` parameter. Added a `update` method to the - progressbar object. - - :param iterable: an iterable to iterate over. If not provided the length - is required. - :param length: the number of items to iterate over. By default the - progressbar will attempt to ask the iterator about its - length, which might or might not work. If an iterable is - also provided this parameter can be used to override the - length. If an iterable is not provided the progress bar - will iterate over a range of that length. - :param label: the label to show next to the progress bar. - :param show_eta: enables or disables the estimated time display. This is - automatically disabled if the length cannot be - determined. - :param show_percent: enables or disables the percentage display. The - default is `True` if the iterable has a length or - `False` if not. - :param show_pos: enables or disables the absolute position display. The - default is `False`. - :param item_show_func: a function called with the current item which - can return a string to show the current item - next to the progress bar. Note that the current - item can be `None`! - :param fill_char: the character to use to show the filled part of the - progress bar. - :param empty_char: the character to use to show the non-filled part of - the progress bar. - :param bar_template: the format string to use as template for the bar. - The parameters in it are ``label`` for the label, - ``bar`` for the progress bar and ``info`` for the - info section. - :param info_sep: the separator between multiple info items (eta etc.) - :param width: the width of the progress bar in characters, 0 means full - terminal width - :param file: the file to write to. If this is not a terminal then - only the label is printed. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are included anywhere in the progress bar output - which is not the case by default. - """ - from ._termui_impl import ProgressBar - - color = resolve_color_default(color) - return ProgressBar( - iterable=iterable, - length=length, - show_eta=show_eta, - show_percent=show_percent, - show_pos=show_pos, - item_show_func=item_show_func, - fill_char=fill_char, - empty_char=empty_char, - bar_template=bar_template, - info_sep=info_sep, - file=file, - label=label, - width=width, - color=color, - ) - - -def clear(): - """Clears the terminal screen. This will have the effect of clearing - the whole visible space of the terminal and moving the cursor to the - top left. This does not do anything if not connected to a terminal. - - .. versionadded:: 2.0 - """ - if not isatty(sys.stdout): - return - # If we're on Windows and we don't have colorama available, then we - # clear the screen by shelling out. Otherwise we can use an escape - # sequence. - if WIN: - os.system("cls") - else: - sys.stdout.write("\033[2J\033[1;1H") - - -def style( - text, - fg=None, - bg=None, - bold=None, - dim=None, - underline=None, - blink=None, - reverse=None, - reset=True, -): - """Styles a text with ANSI styles and returns the new string. By - default the styling is self contained which means that at the end - of the string a reset code is issued. This can be prevented by - passing ``reset=False``. 
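
Aside (editor's note): the two `progressbar` modes documented above — iterating over the bar versus manual `update()` — in a short sketch; the workload is simulated with `time.sleep`:

```
import time
import click

# Iterable mode: the bar advances once per loop iteration.
with click.progressbar(range(100), label="Processing") as bar:
    for item in bar:
        time.sleep(0.01)

# Manual mode: give a length instead of an iterable and call update(n).
with click.progressbar(length=1000, label="Copying") as bar:
    for _ in range(10):
        time.sleep(0.05)
        bar.update(100)
```
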
- - Examples:: - - click.echo(click.style('Hello World!', fg='green')) - click.echo(click.style('ATTENTION!', blink=True)) - click.echo(click.style('Some things', reverse=True, fg='cyan')) - - Supported color names: - - * ``black`` (might be a gray) - * ``red`` - * ``green`` - * ``yellow`` (might be an orange) - * ``blue`` - * ``magenta`` - * ``cyan`` - * ``white`` (might be light gray) - * ``bright_black`` - * ``bright_red`` - * ``bright_green`` - * ``bright_yellow`` - * ``bright_blue`` - * ``bright_magenta`` - * ``bright_cyan`` - * ``bright_white`` - * ``reset`` (reset the color code only) - - .. versionadded:: 2.0 - - .. versionadded:: 7.0 - Added support for bright colors. - - :param text: the string to style with ansi codes. - :param fg: if provided this will become the foreground color. - :param bg: if provided this will become the background color. - :param bold: if provided this will enable or disable bold mode. - :param dim: if provided this will enable or disable dim mode. This is - badly supported. - :param underline: if provided this will enable or disable underline. - :param blink: if provided this will enable or disable blinking. - :param reverse: if provided this will enable or disable inverse - rendering (foreground becomes background and the - other way round). - :param reset: by default a reset-all code is added at the end of the - string which means that styles do not carry over. This - can be disabled to compose styles. - """ - bits = [] - if fg: - try: - bits.append("\033[{}m".format(_ansi_colors[fg])) - except KeyError: - raise TypeError("Unknown color '{}'".format(fg)) - if bg: - try: - bits.append("\033[{}m".format(_ansi_colors[bg] + 10)) - except KeyError: - raise TypeError("Unknown color '{}'".format(bg)) - if bold is not None: - bits.append("\033[{}m".format(1 if bold else 22)) - if dim is not None: - bits.append("\033[{}m".format(2 if dim else 22)) - if underline is not None: - bits.append("\033[{}m".format(4 if underline else 24)) - if blink is not None: - bits.append("\033[{}m".format(5 if blink else 25)) - if reverse is not None: - bits.append("\033[{}m".format(7 if reverse else 27)) - bits.append(text) - if reset: - bits.append(_ansi_reset_all) - return "".join(bits) - - -def unstyle(text): - """Removes ANSI styling information from a string. Usually it's not - necessary to use this function as Click's echo function will - automatically remove styling if necessary. - - .. versionadded:: 2.0 - - :param text: the text to remove style information from. - """ - return strip_ansi(text) - - -def secho(message=None, file=None, nl=True, err=False, color=None, **styles): - """This function combines :func:`echo` and :func:`style` into one - call. As such the following two calls are the same:: - - click.secho('Hello World!', fg='green') - click.echo(click.style('Hello World!', fg='green')) - - All keyword arguments are forwarded to the underlying functions - depending on which one they go with. - - .. versionadded:: 2.0 - """ - if message is not None: - message = style(message, **styles) - return echo(message, file=file, nl=nl, err=err, color=color) - - -def edit( - text=None, editor=None, env=None, require_save=True, extension=".txt", filename=None -): - r"""Edits the given text in the defined editor. If an editor is given - (should be the full path to the executable but the regular operating - system search path is used for finding the executable) it overrides - the detected editor. Optionally, some environment variables can be - used. 
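
Aside (editor's note): `style`, its inverse `unstyle`, and the `secho` shorthand described above, in a few lines:

```
import click

styled = click.style("WARNING", fg="yellow", bold=True)
click.echo(styled)                 # prints with ANSI codes on a terminal
click.echo(click.unstyle(styled))  # plain "WARNING", codes stripped

# secho("...", fg="green") is echo(style("...", fg="green")) in one call.
click.secho("All good", fg="green")
```
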
If the editor is closed without changes, `None` is returned. In - case a file is edited directly the return value is always `None` and - `require_save` and `extension` are ignored. - - If the editor cannot be opened a :exc:`UsageError` is raised. - - Note for Windows: to simplify cross-platform usage, the newlines are - automatically converted from POSIX to Windows and vice versa. As such, - the message here will have ``\n`` as newline markers. - - :param text: the text to edit. - :param editor: optionally the editor to use. Defaults to automatic - detection. - :param env: environment variables to forward to the editor. - :param require_save: if this is true, then not saving in the editor - will make the return value become `None`. - :param extension: the extension to tell the editor about. This defaults - to `.txt` but changing this might change syntax - highlighting. - :param filename: if provided it will edit this file instead of the - provided text contents. It will not use a temporary - file as an indirection in that case. - """ - from ._termui_impl import Editor - - editor = Editor( - editor=editor, env=env, require_save=require_save, extension=extension - ) - if filename is None: - return editor.edit(text) - editor.edit_file(filename) - - -def launch(url, wait=False, locate=False): - """This function launches the given URL (or filename) in the default - viewer application for this file type. If this is an executable, it - might launch the executable in a new session. The return value is - the exit code of the launched application. Usually, ``0`` indicates - success. - - Examples:: - - click.launch('https://click.palletsprojects.com/') - click.launch('/my/downloaded/file', locate=True) - - .. versionadded:: 2.0 - - :param url: URL or filename of the thing to launch. - :param wait: waits for the program to stop. - :param locate: if this is set to `True` then instead of launching the - application associated with the URL it will attempt to - launch a file manager with the file located. This - might have weird effects if the URL does not point to - the filesystem. - """ - from ._termui_impl import open_url - - return open_url(url, wait=wait, locate=locate) - - -# If this is provided, getchar() calls into this instead. This is used -# for unittesting purposes. -_getchar = None - - -def getchar(echo=False): - """Fetches a single character from the terminal and returns it. This - will always return a unicode character and under certain rare - circumstances this might return more than one character. The - situations which more than one character is returned is when for - whatever reason multiple characters end up in the terminal buffer or - standard input was not actually a terminal. - - Note that this will always read from the terminal, even if something - is piped into the standard input. - - Note for Windows: in rare cases when typing non-ASCII characters, this - function might wait for a second character and then return both at once. - This is because certain Unicode characters look like special-key markers. - - .. versionadded:: 2.0 - - :param echo: if set to `True`, the character read will also show up on - the terminal. The default is to not show it. - """ - f = _getchar - if f is None: - from ._termui_impl import getchar as f - return f(echo) - - -def raw_terminal(): - from ._termui_impl import raw_terminal as f - - return f() - - -def pause(info="Press any key to continue ...", err=False): - """This command stops execution and waits for the user to press any - key to continue. 
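
Aside (editor's note): a sketch of `edit` and `launch` as documented above; the seed text is invented, and the URL is the one from the docstring:

```
import click

# Returns the edited text, or None when the user quits without saving
# (require_save=True is the default).
note = click.edit("# Write your note above this line\n")
if note is not None:
    click.echo(note)

# Opens the default browser; the return value is the viewer's exit code.
click.launch("https://click.palletsprojects.com/")
```
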
This is similar to the Windows batch "pause" - command. If the program is not run through a terminal, this command - will instead do nothing. - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param info: the info string to print before pausing. - :param err: if set to message goes to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - if not isatty(sys.stdin) or not isatty(sys.stdout): - return - try: - if info: - echo(info, nl=False, err=err) - try: - getchar() - except (KeyboardInterrupt, EOFError): - pass - finally: - if info: - echo(err=err) diff --git a/venv/lib/python3.7/site-packages/click/testing.py b/venv/lib/python3.7/site-packages/click/testing.py deleted file mode 100644 index a3dba3b..0000000 --- a/venv/lib/python3.7/site-packages/click/testing.py +++ /dev/null @@ -1,382 +0,0 @@ -import contextlib -import os -import shlex -import shutil -import sys -import tempfile - -from . import formatting -from . import termui -from . import utils -from ._compat import iteritems -from ._compat import PY2 -from ._compat import string_types - - -if PY2: - from cStringIO import StringIO -else: - import io - from ._compat import _find_binary_reader - - -class EchoingStdin(object): - def __init__(self, input, output): - self._input = input - self._output = output - - def __getattr__(self, x): - return getattr(self._input, x) - - def _echo(self, rv): - self._output.write(rv) - return rv - - def read(self, n=-1): - return self._echo(self._input.read(n)) - - def readline(self, n=-1): - return self._echo(self._input.readline(n)) - - def readlines(self): - return [self._echo(x) for x in self._input.readlines()] - - def __iter__(self): - return iter(self._echo(x) for x in self._input) - - def __repr__(self): - return repr(self._input) - - -def make_input_stream(input, charset): - # Is already an input stream. - if hasattr(input, "read"): - if PY2: - return input - rv = _find_binary_reader(input) - if rv is not None: - return rv - raise TypeError("Could not find binary reader for input stream.") - - if input is None: - input = b"" - elif not isinstance(input, bytes): - input = input.encode(charset) - if PY2: - return StringIO(input) - return io.BytesIO(input) - - -class Result(object): - """Holds the captured result of an invoked CLI script.""" - - def __init__( - self, runner, stdout_bytes, stderr_bytes, exit_code, exception, exc_info=None - ): - #: The runner that created the result - self.runner = runner - #: The standard output as bytes. - self.stdout_bytes = stdout_bytes - #: The standard error as bytes, or None if not available - self.stderr_bytes = stderr_bytes - #: The exit code as integer. - self.exit_code = exit_code - #: The exception that happened if one did. 
- self.exception = exception - #: The traceback - self.exc_info = exc_info - - @property - def output(self): - """The (standard) output as unicode string.""" - return self.stdout - - @property - def stdout(self): - """The standard output as unicode string.""" - return self.stdout_bytes.decode(self.runner.charset, "replace").replace( - "\r\n", "\n" - ) - - @property - def stderr(self): - """The standard error as unicode string.""" - if self.stderr_bytes is None: - raise ValueError("stderr not separately captured") - return self.stderr_bytes.decode(self.runner.charset, "replace").replace( - "\r\n", "\n" - ) - - def __repr__(self): - return "<{} {}>".format( - type(self).__name__, repr(self.exception) if self.exception else "okay" - ) - - -class CliRunner(object): - """The CLI runner provides functionality to invoke a Click command line - script for unittesting purposes in a isolated environment. This only - works in single-threaded systems without any concurrency as it changes the - global interpreter state. - - :param charset: the character set for the input and output data. This is - UTF-8 by default and should not be changed currently as - the reporting to Click only works in Python 2 properly. - :param env: a dictionary with environment variables for overriding. - :param echo_stdin: if this is set to `True`, then reading from stdin writes - to stdout. This is useful for showing examples in - some circumstances. Note that regular prompts - will automatically echo the input. - :param mix_stderr: if this is set to `False`, then stdout and stderr are - preserved as independent streams. This is useful for - Unix-philosophy apps that have predictable stdout and - noisy stderr, such that each may be measured - independently - """ - - def __init__(self, charset=None, env=None, echo_stdin=False, mix_stderr=True): - if charset is None: - charset = "utf-8" - self.charset = charset - self.env = env or {} - self.echo_stdin = echo_stdin - self.mix_stderr = mix_stderr - - def get_default_prog_name(self, cli): - """Given a command object it will return the default program name - for it. The default is the `name` attribute or ``"root"`` if not - set. - """ - return cli.name or "root" - - def make_env(self, overrides=None): - """Returns the environment overrides for invoking a script.""" - rv = dict(self.env) - if overrides: - rv.update(overrides) - return rv - - @contextlib.contextmanager - def isolation(self, input=None, env=None, color=False): - """A context manager that sets up the isolation for invoking of a - command line tool. This sets up stdin with the given input data - and `os.environ` with the overrides from the given dictionary. - This also rebinds some internals in Click to be mocked (like the - prompt functionality). - - This is automatically done in the :meth:`invoke` method. - - .. versionadded:: 4.0 - The ``color`` parameter was added. - - :param input: the input stream to put into sys.stdin. - :param env: the environment overrides as dictionary. - :param color: whether the output should contain color codes. The - application can still override this explicitly. 
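
Aside (editor's note): `Result` and `CliRunner`, whose docstrings appear above, are normally used together in a test. A minimal sketch with a made-up `hello` command:

```
import click
from click.testing import CliRunner

@click.command()
@click.argument("name")
def hello(name):
    click.echo("Hello {}!".format(name))

runner = CliRunner()
result = runner.invoke(hello, ["Peter"])   # runs in an isolated environment
assert result.exit_code == 0
assert result.output == "Hello Peter!\n"
```
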
- """ - input = make_input_stream(input, self.charset) - - old_stdin = sys.stdin - old_stdout = sys.stdout - old_stderr = sys.stderr - old_forced_width = formatting.FORCED_WIDTH - formatting.FORCED_WIDTH = 80 - - env = self.make_env(env) - - if PY2: - bytes_output = StringIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - sys.stdout = bytes_output - if not self.mix_stderr: - bytes_error = StringIO() - sys.stderr = bytes_error - else: - bytes_output = io.BytesIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - input = io.TextIOWrapper(input, encoding=self.charset) - sys.stdout = io.TextIOWrapper(bytes_output, encoding=self.charset) - if not self.mix_stderr: - bytes_error = io.BytesIO() - sys.stderr = io.TextIOWrapper(bytes_error, encoding=self.charset) - - if self.mix_stderr: - sys.stderr = sys.stdout - - sys.stdin = input - - def visible_input(prompt=None): - sys.stdout.write(prompt or "") - val = input.readline().rstrip("\r\n") - sys.stdout.write("{}\n".format(val)) - sys.stdout.flush() - return val - - def hidden_input(prompt=None): - sys.stdout.write("{}\n".format(prompt or "")) - sys.stdout.flush() - return input.readline().rstrip("\r\n") - - def _getchar(echo): - char = sys.stdin.read(1) - if echo: - sys.stdout.write(char) - sys.stdout.flush() - return char - - default_color = color - - def should_strip_ansi(stream=None, color=None): - if color is None: - return not default_color - return not color - - old_visible_prompt_func = termui.visible_prompt_func - old_hidden_prompt_func = termui.hidden_prompt_func - old__getchar_func = termui._getchar - old_should_strip_ansi = utils.should_strip_ansi - termui.visible_prompt_func = visible_input - termui.hidden_prompt_func = hidden_input - termui._getchar = _getchar - utils.should_strip_ansi = should_strip_ansi - - old_env = {} - try: - for key, value in iteritems(env): - old_env[key] = os.environ.get(key) - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - yield (bytes_output, not self.mix_stderr and bytes_error) - finally: - for key, value in iteritems(old_env): - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - sys.stdout = old_stdout - sys.stderr = old_stderr - sys.stdin = old_stdin - termui.visible_prompt_func = old_visible_prompt_func - termui.hidden_prompt_func = old_hidden_prompt_func - termui._getchar = old__getchar_func - utils.should_strip_ansi = old_should_strip_ansi - formatting.FORCED_WIDTH = old_forced_width - - def invoke( - self, - cli, - args=None, - input=None, - env=None, - catch_exceptions=True, - color=False, - **extra - ): - """Invokes a command in an isolated environment. The arguments are - forwarded directly to the command line script, the `extra` keyword - arguments are passed to the :meth:`~clickpkg.Command.main` function of - the command. - - This returns a :class:`Result` object. - - .. versionadded:: 3.0 - The ``catch_exceptions`` parameter was added. - - .. versionchanged:: 3.0 - The result object now has an `exc_info` attribute with the - traceback if available. - - .. versionadded:: 4.0 - The ``color`` parameter was added. - - :param cli: the command to invoke - :param args: the arguments to invoke. It may be given as an iterable - or a string. When given as string it will be interpreted - as a Unix shell command. More details at - :func:`shlex.split`. - :param input: the input data for `sys.stdin`. - :param env: the environment overrides. 
- :param catch_exceptions: Whether to catch any other exceptions than - ``SystemExit``. - :param extra: the keyword arguments to pass to :meth:`main`. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - """ - exc_info = None - with self.isolation(input=input, env=env, color=color) as outstreams: - exception = None - exit_code = 0 - - if isinstance(args, string_types): - args = shlex.split(args) - - try: - prog_name = extra.pop("prog_name") - except KeyError: - prog_name = self.get_default_prog_name(cli) - - try: - cli.main(args=args or (), prog_name=prog_name, **extra) - except SystemExit as e: - exc_info = sys.exc_info() - exit_code = e.code - if exit_code is None: - exit_code = 0 - - if exit_code != 0: - exception = e - - if not isinstance(exit_code, int): - sys.stdout.write(str(exit_code)) - sys.stdout.write("\n") - exit_code = 1 - - except Exception as e: - if not catch_exceptions: - raise - exception = e - exit_code = 1 - exc_info = sys.exc_info() - finally: - sys.stdout.flush() - stdout = outstreams[0].getvalue() - if self.mix_stderr: - stderr = None - else: - stderr = outstreams[1].getvalue() - - return Result( - runner=self, - stdout_bytes=stdout, - stderr_bytes=stderr, - exit_code=exit_code, - exception=exception, - exc_info=exc_info, - ) - - @contextlib.contextmanager - def isolated_filesystem(self): - """A context manager that creates a temporary folder and changes - the current working directory to it for isolated filesystem tests. - """ - cwd = os.getcwd() - t = tempfile.mkdtemp() - os.chdir(t) - try: - yield t - finally: - os.chdir(cwd) - try: - shutil.rmtree(t) - except (OSError, IOError): # noqa: B014 - pass diff --git a/venv/lib/python3.7/site-packages/click/types.py b/venv/lib/python3.7/site-packages/click/types.py deleted file mode 100644 index 505c39f..0000000 --- a/venv/lib/python3.7/site-packages/click/types.py +++ /dev/null @@ -1,762 +0,0 @@ -import os -import stat -from datetime import datetime - -from ._compat import _get_argv_encoding -from ._compat import filename_to_ui -from ._compat import get_filesystem_encoding -from ._compat import get_streerror -from ._compat import open_stream -from ._compat import PY2 -from ._compat import text_type -from .exceptions import BadParameter -from .utils import LazyFile -from .utils import safecall - - -class ParamType(object): - """Helper for converting values through types. The following is - necessary for a valid type: - - * it needs a name - * it needs to pass through None unchanged - * it needs to convert from a string - * it needs to convert its result type through unchanged - (eg: needs to be idempotent) - * it needs to be able to deal with param and context being `None`. - This can be the case when the object is used with prompt - inputs. - """ - - is_composite = False - - #: the descriptive name of this type - name = None - - #: if a list of this type is expected and the value is pulled from a - #: string environment variable, this is what splits it up. `None` - #: means any whitespace. For all parameters the general rule is that - #: whitespace splits them up. The exception are paths and files which - #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on - #: Windows). 
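
Aside (editor's note): the `ParamType` contract listed above (a name, `None` passed through, conversion from strings, idempotent results) in a small custom type. The `CommaList` type is hypothetical:

```
import click

class CommaList(click.ParamType):
    name = "comma_list"  # the required descriptive name

    def convert(self, value, param, ctx):
        if isinstance(value, list):  # idempotent: already converted
            return value
        return [part.strip() for part in value.split(",")]

@click.command()
@click.option("--tags", type=CommaList(), default="web,db")
def cli(tags):
    click.echo(tags)  # ['web', 'db']
```
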
- envvar_list_splitter = None - - def __call__(self, value, param=None, ctx=None): - if value is not None: - return self.convert(value, param, ctx) - - def get_metavar(self, param): - """Returns the metavar default for this param if it provides one.""" - - def get_missing_message(self, param): - """Optionally might return extra information about a missing - parameter. - - .. versionadded:: 2.0 - """ - - def convert(self, value, param, ctx): - """Converts the value. This is not invoked for values that are - `None` (the missing value). - """ - return value - - def split_envvar_value(self, rv): - """Given a value from an environment variable this splits it up - into small chunks depending on the defined envvar list splitter. - - If the splitter is set to `None`, which means that whitespace splits, - then leading and trailing whitespace is ignored. Otherwise, leading - and trailing splitters usually lead to empty items being included. - """ - return (rv or "").split(self.envvar_list_splitter) - - def fail(self, message, param=None, ctx=None): - """Helper method to fail with an invalid value message.""" - raise BadParameter(message, ctx=ctx, param=param) - - -class CompositeParamType(ParamType): - is_composite = True - - @property - def arity(self): - raise NotImplementedError() - - -class FuncParamType(ParamType): - def __init__(self, func): - self.name = func.__name__ - self.func = func - - def convert(self, value, param, ctx): - try: - return self.func(value) - except ValueError: - try: - value = text_type(value) - except UnicodeError: - value = str(value).decode("utf-8", "replace") - self.fail(value, param, ctx) - - -class UnprocessedParamType(ParamType): - name = "text" - - def convert(self, value, param, ctx): - return value - - def __repr__(self): - return "UNPROCESSED" - - -class StringParamType(ParamType): - name = "text" - - def convert(self, value, param, ctx): - if isinstance(value, bytes): - enc = _get_argv_encoding() - try: - value = value.decode(enc) - except UnicodeError: - fs_enc = get_filesystem_encoding() - if fs_enc != enc: - try: - value = value.decode(fs_enc) - except UnicodeError: - value = value.decode("utf-8", "replace") - else: - value = value.decode("utf-8", "replace") - return value - return value - - def __repr__(self): - return "STRING" - - -class Choice(ParamType): - """The choice type allows a value to be checked against a fixed set - of supported values. All of these values have to be strings. - - You should only pass a list or tuple of choices. Other iterables - (like generators) may lead to surprising results. - - The resulting value will always be one of the originally passed choices - regardless of ``case_sensitive`` or any ``ctx.token_normalize_func`` - being specified. - - See :ref:`choice-opts` for an example. - - :param case_sensitive: Set to false to make choices case - insensitive. Defaults to true. 
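
Aside (editor's note): `Choice` as documented above; whatever casing the user types, the converted value is always one of the originally passed strings:

```
import click

@click.command()
@click.option("--env",
              type=click.Choice(["dev", "staging", "prod"],
                                case_sensitive=False))
def deploy(env):
    # "--env PROD" is accepted and normalized back to "prod".
    click.echo("Deploying to {}".format(env))
```
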
- """ - - name = "choice" - - def __init__(self, choices, case_sensitive=True): - self.choices = choices - self.case_sensitive = case_sensitive - - def get_metavar(self, param): - return "[{}]".format("|".join(self.choices)) - - def get_missing_message(self, param): - return "Choose from:\n\t{}.".format(",\n\t".join(self.choices)) - - def convert(self, value, param, ctx): - # Match through normalization and case sensitivity - # first do token_normalize_func, then lowercase - # preserve original `value` to produce an accurate message in - # `self.fail` - normed_value = value - normed_choices = {choice: choice for choice in self.choices} - - if ctx is not None and ctx.token_normalize_func is not None: - normed_value = ctx.token_normalize_func(value) - normed_choices = { - ctx.token_normalize_func(normed_choice): original - for normed_choice, original in normed_choices.items() - } - - if not self.case_sensitive: - if PY2: - lower = str.lower - else: - lower = str.casefold - - normed_value = lower(normed_value) - normed_choices = { - lower(normed_choice): original - for normed_choice, original in normed_choices.items() - } - - if normed_value in normed_choices: - return normed_choices[normed_value] - - self.fail( - "invalid choice: {}. (choose from {})".format( - value, ", ".join(self.choices) - ), - param, - ctx, - ) - - def __repr__(self): - return "Choice('{}')".format(list(self.choices)) - - -class DateTime(ParamType): - """The DateTime type converts date strings into `datetime` objects. - - The format strings which are checked are configurable, but default to some - common (non-timezone aware) ISO 8601 formats. - - When specifying *DateTime* formats, you should only pass a list or a tuple. - Other iterables, like generators, may lead to surprising results. - - The format strings are processed using ``datetime.strptime``, and this - consequently defines the format strings which are allowed. - - Parsing is tried using each format, in order, and the first format which - parses successfully is used. - - :param formats: A list or tuple of date format strings, in the order in - which they should be tried. Defaults to - ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, - ``'%Y-%m-%d %H:%M:%S'``. - """ - - name = "datetime" - - def __init__(self, formats=None): - self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"] - - def get_metavar(self, param): - return "[{}]".format("|".join(self.formats)) - - def _try_to_convert_date(self, value, format): - try: - return datetime.strptime(value, format) - except ValueError: - return None - - def convert(self, value, param, ctx): - # Exact match - for format in self.formats: - dtime = self._try_to_convert_date(value, format) - if dtime: - return dtime - - self.fail( - "invalid datetime format: {}. (choose from {})".format( - value, ", ".join(self.formats) - ) - ) - - def __repr__(self): - return "DateTime" - - -class IntParamType(ParamType): - name = "integer" - - def convert(self, value, param, ctx): - try: - return int(value) - except ValueError: - self.fail("{} is not a valid integer".format(value), param, ctx) - - def __repr__(self): - return "INT" - - -class IntRange(IntParamType): - """A parameter that works similar to :data:`click.INT` but restricts - the value to fit into a range. The default behavior is to fail if the - value falls outside the range, but it can also be silently clamped - between the two edges. - - See :ref:`ranges` for an example. 
- """ - - name = "integer range" - - def __init__(self, min=None, max=None, clamp=False): - self.min = min - self.max = max - self.clamp = clamp - - def convert(self, value, param, ctx): - rv = IntParamType.convert(self, value, param, ctx) - if self.clamp: - if self.min is not None and rv < self.min: - return self.min - if self.max is not None and rv > self.max: - return self.max - if ( - self.min is not None - and rv < self.min - or self.max is not None - and rv > self.max - ): - if self.min is None: - self.fail( - "{} is bigger than the maximum valid value {}.".format( - rv, self.max - ), - param, - ctx, - ) - elif self.max is None: - self.fail( - "{} is smaller than the minimum valid value {}.".format( - rv, self.min - ), - param, - ctx, - ) - else: - self.fail( - "{} is not in the valid range of {} to {}.".format( - rv, self.min, self.max - ), - param, - ctx, - ) - return rv - - def __repr__(self): - return "IntRange({}, {})".format(self.min, self.max) - - -class FloatParamType(ParamType): - name = "float" - - def convert(self, value, param, ctx): - try: - return float(value) - except ValueError: - self.fail( - "{} is not a valid floating point value".format(value), param, ctx - ) - - def __repr__(self): - return "FLOAT" - - -class FloatRange(FloatParamType): - """A parameter that works similar to :data:`click.FLOAT` but restricts - the value to fit into a range. The default behavior is to fail if the - value falls outside the range, but it can also be silently clamped - between the two edges. - - See :ref:`ranges` for an example. - """ - - name = "float range" - - def __init__(self, min=None, max=None, clamp=False): - self.min = min - self.max = max - self.clamp = clamp - - def convert(self, value, param, ctx): - rv = FloatParamType.convert(self, value, param, ctx) - if self.clamp: - if self.min is not None and rv < self.min: - return self.min - if self.max is not None and rv > self.max: - return self.max - if ( - self.min is not None - and rv < self.min - or self.max is not None - and rv > self.max - ): - if self.min is None: - self.fail( - "{} is bigger than the maximum valid value {}.".format( - rv, self.max - ), - param, - ctx, - ) - elif self.max is None: - self.fail( - "{} is smaller than the minimum valid value {}.".format( - rv, self.min - ), - param, - ctx, - ) - else: - self.fail( - "{} is not in the valid range of {} to {}.".format( - rv, self.min, self.max - ), - param, - ctx, - ) - return rv - - def __repr__(self): - return "FloatRange({}, {})".format(self.min, self.max) - - -class BoolParamType(ParamType): - name = "boolean" - - def convert(self, value, param, ctx): - if isinstance(value, bool): - return bool(value) - value = value.lower() - if value in ("true", "t", "1", "yes", "y"): - return True - elif value in ("false", "f", "0", "no", "n"): - return False - self.fail("{} is not a valid boolean".format(value), param, ctx) - - def __repr__(self): - return "BOOL" - - -class UUIDParameterType(ParamType): - name = "uuid" - - def convert(self, value, param, ctx): - import uuid - - try: - if PY2 and isinstance(value, text_type): - value = value.encode("ascii") - return uuid.UUID(value) - except ValueError: - self.fail("{} is not a valid UUID value".format(value), param, ctx) - - def __repr__(self): - return "UUID" - - -class File(ParamType): - """Declares a parameter to be a file for reading or writing. The file - is automatically closed once the context tears down (after the command - finished working). - - Files can be opened for reading or writing. 
The special value ``-`` - indicates stdin or stdout depending on the mode. - - By default, the file is opened for reading text data, but it can also be - opened in binary mode or for writing. The encoding parameter can be used - to force a specific encoding. - - The `lazy` flag controls if the file should be opened immediately or upon - first IO. The default is to be non-lazy for standard input and output - streams as well as files opened for reading, `lazy` otherwise. When opening a - file lazily for reading, it is still opened temporarily for validation, but - will not be held open until first IO. lazy is mainly useful when opening - for writing to avoid creating the file until it is needed. - - Starting with Click 2.0, files can also be opened atomically in which - case all writes go into a separate file in the same folder and upon - completion the file will be moved over to the original location. This - is useful if a file regularly read by other users is modified. - - See :ref:`file-args` for more information. - """ - - name = "filename" - envvar_list_splitter = os.path.pathsep - - def __init__( - self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False - ): - self.mode = mode - self.encoding = encoding - self.errors = errors - self.lazy = lazy - self.atomic = atomic - - def resolve_lazy_flag(self, value): - if self.lazy is not None: - return self.lazy - if value == "-": - return False - elif "w" in self.mode: - return True - return False - - def convert(self, value, param, ctx): - try: - if hasattr(value, "read") or hasattr(value, "write"): - return value - - lazy = self.resolve_lazy_flag(value) - - if lazy: - f = LazyFile( - value, self.mode, self.encoding, self.errors, atomic=self.atomic - ) - if ctx is not None: - ctx.call_on_close(f.close_intelligently) - return f - - f, should_close = open_stream( - value, self.mode, self.encoding, self.errors, atomic=self.atomic - ) - # If a context is provided, we automatically close the file - # at the end of the context execution (or flush out). If a - # context does not exist, it's the caller's responsibility to - # properly close the file. This for instance happens when the - # type is used with prompts. - if ctx is not None: - if should_close: - ctx.call_on_close(safecall(f.close)) - else: - ctx.call_on_close(safecall(f.flush)) - return f - except (IOError, OSError) as e: # noqa: B014 - self.fail( - "Could not open file: {}: {}".format( - filename_to_ui(value), get_streerror(e) - ), - param, - ctx, - ) - - -class Path(ParamType): - """The path type is similar to the :class:`File` type but it performs - different checks. First of all, instead of returning an open file - handle it returns just the filename. Secondly, it can perform various - basic checks about what the file or directory should be. - - .. versionchanged:: 6.0 - `allow_dash` was added. - - :param exists: if set to true, the file or directory needs to exist for - this value to be valid. If this is not required and a - file does indeed not exist, then all further checks are - silently skipped. - :param file_okay: controls if a file is a possible value. - :param dir_okay: controls if a directory is a possible value. - :param writable: if true, a writable check is performed. - :param readable: if true, a readable check is performed. - :param resolve_path: if this is true, then the path is fully resolved - before the value is passed onwards. This means - that it's absolute and symlinks are resolved. 
It - will not expand a tilde-prefix, as this is - supposed to be done by the shell only. - :param allow_dash: If this is set to `True`, a single dash to indicate - standard streams is permitted. - :param path_type: optionally a string type that should be used to - represent the path. The default is `None` which - means the return value will be either bytes or - unicode depending on what makes most sense given the - input data Click deals with. - """ - - envvar_list_splitter = os.path.pathsep - - def __init__( - self, - exists=False, - file_okay=True, - dir_okay=True, - writable=False, - readable=True, - resolve_path=False, - allow_dash=False, - path_type=None, - ): - self.exists = exists - self.file_okay = file_okay - self.dir_okay = dir_okay - self.writable = writable - self.readable = readable - self.resolve_path = resolve_path - self.allow_dash = allow_dash - self.type = path_type - - if self.file_okay and not self.dir_okay: - self.name = "file" - self.path_type = "File" - elif self.dir_okay and not self.file_okay: - self.name = "directory" - self.path_type = "Directory" - else: - self.name = "path" - self.path_type = "Path" - - def coerce_path_result(self, rv): - if self.type is not None and not isinstance(rv, self.type): - if self.type is text_type: - rv = rv.decode(get_filesystem_encoding()) - else: - rv = rv.encode(get_filesystem_encoding()) - return rv - - def convert(self, value, param, ctx): - rv = value - - is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-") - - if not is_dash: - if self.resolve_path: - rv = os.path.realpath(rv) - - try: - st = os.stat(rv) - except OSError: - if not self.exists: - return self.coerce_path_result(rv) - self.fail( - "{} '{}' does not exist.".format( - self.path_type, filename_to_ui(value) - ), - param, - ctx, - ) - - if not self.file_okay and stat.S_ISREG(st.st_mode): - self.fail( - "{} '{}' is a file.".format(self.path_type, filename_to_ui(value)), - param, - ctx, - ) - if not self.dir_okay and stat.S_ISDIR(st.st_mode): - self.fail( - "{} '{}' is a directory.".format( - self.path_type, filename_to_ui(value) - ), - param, - ctx, - ) - if self.writable and not os.access(value, os.W_OK): - self.fail( - "{} '{}' is not writable.".format( - self.path_type, filename_to_ui(value) - ), - param, - ctx, - ) - if self.readable and not os.access(value, os.R_OK): - self.fail( - "{} '{}' is not readable.".format( - self.path_type, filename_to_ui(value) - ), - param, - ctx, - ) - - return self.coerce_path_result(rv) - - -class Tuple(CompositeParamType): - """The default behavior of Click is to apply a type on a value directly. - This works well in most cases, except for when `nargs` is set to a fixed - count and different types should be used for different items. In this - case the :class:`Tuple` type can be used. This type can only be used - if `nargs` is set to a fixed number. - - For more information see :ref:`tuple-type`. - - This can be selected by using a Python tuple literal as a type. - - :param types: a list of types that should be used for the tuple items. - """ - - def __init__(self, types): - self.types = [convert_type(ty) for ty in types] - - @property - def name(self): - return "<{}>".format(" ".join(ty.name for ty in self.types)) - - @property - def arity(self): - return len(self.types) - - def convert(self, value, param, ctx): - if len(value) != len(self.types): - raise TypeError( - "It would appear that nargs is set to conflict with the" - " composite type arity." 
- ) - return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) - - -def convert_type(ty, default=None): - """Converts a callable or python type into the most appropriate - param type. - """ - guessed_type = False - if ty is None and default is not None: - if isinstance(default, tuple): - ty = tuple(map(type, default)) - else: - ty = type(default) - guessed_type = True - - if isinstance(ty, tuple): - return Tuple(ty) - if isinstance(ty, ParamType): - return ty - if ty is text_type or ty is str or ty is None: - return STRING - if ty is int: - return INT - # Booleans are only okay if not guessed. This is done because for - # flags the default value is actually a bit of a lie in that it - # indicates which of the flags is the one we want. See get_default() - # for more information. - if ty is bool and not guessed_type: - return BOOL - if ty is float: - return FLOAT - if guessed_type: - return STRING - - # Catch a common mistake - if __debug__: - try: - if issubclass(ty, ParamType): - raise AssertionError( - "Attempted to use an uninstantiated parameter type ({}).".format(ty) - ) - except TypeError: - pass - return FuncParamType(ty) - - -#: A dummy parameter type that just does nothing. From a user's -#: perspective this appears to just be the same as `STRING` but internally -#: no string conversion takes place. This is necessary to achieve the -#: same bytes/unicode behavior on Python 2/3 in situations where you want -#: to not convert argument types. This is usually useful when working -#: with file paths as they can appear in bytes and unicode. -#: -#: For path related uses the :class:`Path` type is a better choice but -#: there are situations where an unprocessed type is useful which is why -#: it is is provided. -#: -#: .. versionadded:: 4.0 -UNPROCESSED = UnprocessedParamType() - -#: A unicode string parameter type which is the implicit default. This -#: can also be selected by using ``str`` as type. -STRING = StringParamType() - -#: An integer parameter. This can also be selected by using ``int`` as -#: type. -INT = IntParamType() - -#: A floating point value parameter. This can also be selected by using -#: ``float`` as type. -FLOAT = FloatParamType() - -#: A boolean parameter. This is the default for boolean flags. This can -#: also be selected by using ``bool`` as a type. -BOOL = BoolParamType() - -#: A UUID parameter. 
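
Aside (editor's note): `Path` checks and the fixed-arity `Tuple` — selected with a Python tuple literal, as the docstring above says — in one sketch:

```
import click

@click.command()
@click.argument("src", type=click.Path(exists=True, dir_okay=False))
@click.option("--point", type=(int, float))  # becomes Tuple([INT, FLOAT]); nargs follows the arity
def cli(src, point):
    click.echo("{} -> {}".format(src, point))
```
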
-UUID = UUIDParameterType() diff --git a/venv/lib/python3.7/site-packages/click/utils.py b/venv/lib/python3.7/site-packages/click/utils.py deleted file mode 100644 index 79265e7..0000000 --- a/venv/lib/python3.7/site-packages/click/utils.py +++ /dev/null @@ -1,455 +0,0 @@ -import os -import sys - -from ._compat import _default_text_stderr -from ._compat import _default_text_stdout -from ._compat import auto_wrap_for_ansi -from ._compat import binary_streams -from ._compat import filename_to_ui -from ._compat import get_filesystem_encoding -from ._compat import get_streerror -from ._compat import is_bytes -from ._compat import open_stream -from ._compat import PY2 -from ._compat import should_strip_ansi -from ._compat import string_types -from ._compat import strip_ansi -from ._compat import text_streams -from ._compat import text_type -from ._compat import WIN -from .globals import resolve_color_default - -if not PY2: - from ._compat import _find_binary_writer -elif WIN: - from ._winconsole import _get_windows_argv - from ._winconsole import _hash_py_argv - from ._winconsole import _initial_argv_hash - -echo_native_types = string_types + (bytes, bytearray) - - -def _posixify(name): - return "-".join(name.split()).lower() - - -def safecall(func): - """Wraps a function so that it swallows exceptions.""" - - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except Exception: - pass - - return wrapper - - -def make_str(value): - """Converts a value into a valid string.""" - if isinstance(value, bytes): - try: - return value.decode(get_filesystem_encoding()) - except UnicodeError: - return value.decode("utf-8", "replace") - return text_type(value) - - -def make_default_short_help(help, max_length=45): - """Return a condensed version of help string.""" - words = help.split() - total_length = 0 - result = [] - done = False - - for word in words: - if word[-1:] == ".": - done = True - new_length = 1 + len(word) if result else len(word) - if total_length + new_length > max_length: - result.append("...") - done = True - else: - if result: - result.append(" ") - result.append(word) - if done: - break - total_length += new_length - - return "".join(result) - - -class LazyFile(object): - """A lazy file works like a regular file but it does not fully open - the file but it does perform some basic checks early to see if the - filename parameter does make sense. This is useful for safely opening - files for writing. - """ - - def __init__( - self, filename, mode="r", encoding=None, errors="strict", atomic=False - ): - self.name = filename - self.mode = mode - self.encoding = encoding - self.errors = errors - self.atomic = atomic - - if filename == "-": - self._f, self.should_close = open_stream(filename, mode, encoding, errors) - else: - if "r" in mode: - # Open and close the file in case we're opening it for - # reading so that we can catch at least some errors in - # some cases early. - open(filename, mode).close() - self._f = None - self.should_close = True - - def __getattr__(self, name): - return getattr(self.open(), name) - - def __repr__(self): - if self._f is not None: - return repr(self._f) - return "".format(self.name, self.mode) - - def open(self): - """Opens the file if it's not yet open. This call might fail with - a :exc:`FileError`. Not handling this error will produce an error - that Click shows. 
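
Aside (editor's note): a sketch of the `LazyFile` wrapper whose `open` docstring appears above (its remaining methods follow below); the filename is made up:

```
from click.utils import LazyFile

# In mode "w" nothing is opened yet; the file is created on first I/O.
log = LazyFile("build.log", mode="w")
log.write("started\n")       # attribute access triggers open() here
log.close_intelligently()    # closes only files the wrapper itself opened
```
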
- """ - if self._f is not None: - return self._f - try: - rv, self.should_close = open_stream( - self.name, self.mode, self.encoding, self.errors, atomic=self.atomic - ) - except (IOError, OSError) as e: # noqa: E402 - from .exceptions import FileError - - raise FileError(self.name, hint=get_streerror(e)) - self._f = rv - return rv - - def close(self): - """Closes the underlying file, no matter what.""" - if self._f is not None: - self._f.close() - - def close_intelligently(self): - """This function only closes the file if it was opened by the lazy - file wrapper. For instance this will never close stdin. - """ - if self.should_close: - self.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close_intelligently() - - def __iter__(self): - self.open() - return iter(self._f) - - -class KeepOpenFile(object): - def __init__(self, file): - self._file = file - - def __getattr__(self, name): - return getattr(self._file, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - pass - - def __repr__(self): - return repr(self._file) - - def __iter__(self): - return iter(self._file) - - -def echo(message=None, file=None, nl=True, err=False, color=None): - """Prints a message plus a newline to the given file or stdout. On - first sight, this looks like the print function, but it has improved - support for handling Unicode and binary data that does not fail no - matter how badly configured the system is. - - Primarily it means that you can print binary data as well as Unicode - data on both 2.x and 3.x to the given file in the most appropriate way - possible. This is a very carefree function in that it will try its - best to not fail. As of Click 6.0 this includes support for unicode - output on the Windows console. - - In addition to that, if `colorama`_ is installed, the echo function will - also support clever handling of ANSI codes. Essentially it will then - do the following: - - - add transparent handling of ANSI color codes on Windows. - - hide ANSI codes automatically if the destination file is not a - terminal. - - .. _colorama: https://pypi.org/project/colorama/ - - .. versionchanged:: 6.0 - As of Click 6.0 the echo function will properly support unicode - output on the windows console. Not that click does not modify - the interpreter in any way which means that `sys.stdout` or the - print statement or function will still not provide unicode support. - - .. versionchanged:: 2.0 - Starting with version 2.0 of Click, the echo function will work - with colorama if it's installed. - - .. versionadded:: 3.0 - The `err` parameter was added. - - .. versionchanged:: 4.0 - Added the `color` flag. - - :param message: the message to print - :param file: the file to write to (defaults to ``stdout``) - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``. This is faster and easier than calling - :func:`get_text_stderr` yourself. - :param nl: if set to `True` (the default) a newline is printed afterwards. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. - """ - if file is None: - if err: - file = _default_text_stderr() - else: - file = _default_text_stdout() - - # Convert non bytes/text into the native string type. 
- if message is not None and not isinstance(message, echo_native_types): - message = text_type(message) - - if nl: - message = message or u"" - if isinstance(message, text_type): - message += u"\n" - else: - message += b"\n" - - # If there is a message, and we're in Python 3, and the value looks - # like bytes, we manually need to find the binary stream and write the - # message in there. This is done separately so that most stream - # types will work as you would expect. Eg: you can write to StringIO - # for other cases. - if message and not PY2 and is_bytes(message): - binary_file = _find_binary_writer(file) - if binary_file is not None: - file.flush() - binary_file.write(message) - binary_file.flush() - return - - # ANSI-style support. If there is no message or we are dealing with - # bytes nothing is happening. If we are connected to a file we want - # to strip colors. If we are on windows we either wrap the stream - # to strip the color or we use the colorama support to translate the - # ansi codes to API calls. - if message and not is_bytes(message): - color = resolve_color_default(color) - if should_strip_ansi(file, color): - message = strip_ansi(message) - elif WIN: - if auto_wrap_for_ansi is not None: - file = auto_wrap_for_ansi(file) - elif not color: - message = strip_ansi(message) - - if message: - file.write(message) - file.flush() - - -def get_binary_stream(name): - """Returns a system stream for byte processing. This essentially - returns the stream from the sys module with the given name but it - solves some compatibility issues between different Python versions. - Primarily this function is necessary for getting binary streams on - Python 3. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - """ - opener = binary_streams.get(name) - if opener is None: - raise TypeError("Unknown standard stream '{}'".format(name)) - return opener() - - -def get_text_stream(name, encoding=None, errors="strict"): - """Returns a system stream for text processing. This usually returns - a wrapped stream around a binary stream returned from - :func:`get_binary_stream` but it also can take shortcuts on Python 3 - for already correctly configured streams. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - :param encoding: overrides the detected default encoding. - :param errors: overrides the default error mode. - """ - opener = text_streams.get(name) - if opener is None: - raise TypeError("Unknown standard stream '{}'".format(name)) - return opener(encoding, errors) - - -def open_file( - filename, mode="r", encoding=None, errors="strict", lazy=False, atomic=False -): - """This is similar to how the :class:`File` works but for manual - usage. Files are opened non lazy by default. This can open regular - files as well as stdin/stdout if ``'-'`` is passed. - - If stdin/stdout is returned the stream is wrapped so that the context - manager will not close the stream accidentally. This makes it possible - to always use the function like this without having to worry to - accidentally close a standard stream:: - - with open_file(filename) as f: - ... - - .. versionadded:: 3.0 - - :param filename: the name of the file to open (or ``'-'`` for stdin/stdout). - :param mode: the mode in which to open the file. - :param encoding: the encoding to use. - :param errors: the error handling for this file. - :param lazy: can be flipped to true to open the file lazily. 
- :param atomic: in atomic mode writes go into a temporary file and it's - moved on close. - """ - if lazy: - return LazyFile(filename, mode, encoding, errors, atomic=atomic) - f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic) - if not should_close: - f = KeepOpenFile(f) - return f - - -def get_os_args(): - """This returns the argument part of sys.argv in the most appropriate - form for processing. What this means is that this return value is in - a format that works for Click to process but does not necessarily - correspond well to what's actually standard for the interpreter. - - On most environments the return value is ``sys.argv[:1]`` unchanged. - However if you are on Windows and running Python 2 the return value - will actually be a list of unicode strings instead because the - default behavior on that platform otherwise will not be able to - carry all possible values that sys.argv can have. - - .. versionadded:: 6.0 - """ - # We can only extract the unicode argv if sys.argv has not been - # changed since the startup of the application. - if PY2 and WIN and _initial_argv_hash == _hash_py_argv(): - return _get_windows_argv() - return sys.argv[1:] - - -def format_filename(filename, shorten=False): - """Formats a filename for user display. The main purpose of this - function is to ensure that the filename can be displayed at all. This - will decode the filename to unicode if necessary in a way that it will - not fail. Optionally, it can shorten the filename to not include the - full path to the filename. - - :param filename: formats a filename for UI display. This will also convert - the filename into unicode without failing. - :param shorten: this optionally shortens the filename to strip of the - path that leads up to it. - """ - if shorten: - filename = os.path.basename(filename) - return filename_to_ui(filename) - - -def get_app_dir(app_name, roaming=True, force_posix=False): - r"""Returns the config folder for the application. The default behavior - is to return whatever is most appropriate for the operating system. - - To give you an idea, for an app called ``"Foo Bar"``, something like - the following folders could be returned: - - Mac OS X: - ``~/Library/Application Support/Foo Bar`` - Mac OS X (POSIX): - ``~/.foo-bar`` - Unix: - ``~/.config/foo-bar`` - Unix (POSIX): - ``~/.foo-bar`` - Win XP (roaming): - ``C:\Documents and Settings\\Local Settings\Application Data\Foo Bar`` - Win XP (not roaming): - ``C:\Documents and Settings\\Application Data\Foo Bar`` - Win 7 (roaming): - ``C:\Users\\AppData\Roaming\Foo Bar`` - Win 7 (not roaming): - ``C:\Users\\AppData\Local\Foo Bar`` - - .. versionadded:: 2.0 - - :param app_name: the application name. This should be properly capitalized - and can contain whitespace. - :param roaming: controls if the folder should be roaming or not on Windows. - Has no affect otherwise. - :param force_posix: if this is set to `True` then on any POSIX system the - folder will be stored in the home folder with a leading - dot instead of the XDG config home or darwin's - application support folder. 
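
Aside (editor's note): `open_file` with the ``'-'`` convention and `get_app_dir`, both documented above; "Foo Bar" is the docstring's own example app name:

```
import click

# '-' means stdin here; the wrapper keeps the standard stream open
# when the with-block exits.
with click.open_file("-", mode="r") as f:
    first = f.readline()

click.echo(click.get_app_dir("Foo Bar"))  # e.g. ~/.config/foo-bar on Unix
```
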
- """ - if WIN: - key = "APPDATA" if roaming else "LOCALAPPDATA" - folder = os.environ.get(key) - if folder is None: - folder = os.path.expanduser("~") - return os.path.join(folder, app_name) - if force_posix: - return os.path.join(os.path.expanduser("~/.{}".format(_posixify(app_name)))) - if sys.platform == "darwin": - return os.path.join( - os.path.expanduser("~/Library/Application Support"), app_name - ) - return os.path.join( - os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")), - _posixify(app_name), - ) - - -class PacifyFlushWrapper(object): - """This wrapper is used to catch and suppress BrokenPipeErrors resulting - from ``.flush()`` being called on broken pipe during the shutdown/final-GC - of the Python interpreter. Notably ``.flush()`` is always called on - ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any - other cleanup code, and the case where the underlying file is not a broken - pipe, all calls and attributes are proxied. - """ - - def __init__(self, wrapped): - self.wrapped = wrapped - - def flush(self): - try: - self.wrapped.flush() - except IOError as e: - import errno - - if e.errno != errno.EPIPE: - raise - - def __getattr__(self, attr): - return getattr(self.wrapped, attr) diff --git a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/DESCRIPTION.rst b/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/DESCRIPTION.rst deleted file mode 100644 index 2f7b1f5..0000000 --- a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,76 +0,0 @@ -Copyright (c) 2018 Beto Dealmeida - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -Description: console_log - =========== - - This module provides a WSGI middleware that allows you to log to the - browser console from Python: - - .. code:: - - import logging - - from flask import Flask - - from console_log import ConsoleLog - - console = logging.getLogger('console') - console.setLevel(logging.DEBUG) - - app = Flask(__name__) - - @app.route('/') - def hello(): - logger.error('Error logged from Python') - logger.warning('Warning logged from Python') - logger.info('Info logged from Python') - logger.debug('Debug logged from Python') - logger.debug({'foo': ['bar', 'baz']}) - return "Hello World!" - - app.wsgi_app = ConsoleLog(app.wsgi_app, console) - - The logged messages will then show up in the browser console. - - - How it works - ============ - - The new WSGI app does two things: - - 1. Creates a websocket backchannel. - 2. 
Injects Javascript code into HTML responses, fetching data from the - websocket channel and logging them to console. - -Platform: UNKNOWN -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Provides-Extra: dev -Provides-Extra: examples diff --git a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/METADATA b/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/METADATA deleted file mode 100644 index c6fc29b..0000000 --- a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/METADATA +++ /dev/null @@ -1,95 +0,0 @@ -Metadata-Version: 2.0 -Name: console-log -Version: 0.2.10 -Summary: Log to browser console -Home-page: https://github.com/betodealmeida/consolelog -Author: Beto Dealmeida -Author-email: beto@lyft.com -License: MIT License -Requires-Dist: gevent -Requires-Dist: gevent-websocket -Requires-Dist: werkzeug -Requires-Dist: wsgigzip -Provides-Extra: dev -Requires-Dist: nose; extra == 'dev' -Requires-Dist: pipreqs; extra == 'dev' -Requires-Dist: twine; extra == 'dev' -Provides-Extra: examples -Requires-Dist: flask; extra == 'examples' - -Copyright (c) 2018 Beto Dealmeida - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -Description: console_log - =========== - - This module provides a WSGI middleware that allows you to log to the - browser console from Python: - - .. 
code:: - - import logging - - from flask import Flask - - from console_log import ConsoleLog - - console = logging.getLogger('console') - console.setLevel(logging.DEBUG) - - app = Flask(__name__) - - @app.route('/') - def hello(): - logger.error('Error logged from Python') - logger.warning('Warning logged from Python') - logger.info('Info logged from Python') - logger.debug('Debug logged from Python') - logger.debug({'foo': ['bar', 'baz']}) - return "Hello World!" - - app.wsgi_app = ConsoleLog(app.wsgi_app, console) - - The logged messages will then show up in the browser console. - - - How it works - ============ - - The new WSGI app does two things: - - 1. Creates a websocket backchannel. - 2. Injects Javascript code into HTML responses, fetching data from the - websocket channel and logging them to console. - -Platform: UNKNOWN -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Provides-Extra: dev -Provides-Extra: examples diff --git a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/RECORD b/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/RECORD deleted file mode 100644 index e5020f8..0000000 --- a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -__pycache__/console_log.cpython-37.pyc,, -console_log-0.2.10.dist-info/DESCRIPTION.rst,sha256=cf_tJhRidEY9tiuXXN0sOyTOJvbl3kGcT9TUP_fECeA,3027 -console_log-0.2.10.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -console_log-0.2.10.dist-info/METADATA,sha256=g55Ez3pJ-AT-bZJ4mfs-acg9iaKa6d_tggagV7vbsRI,3544 -console_log-0.2.10.dist-info/RECORD,, -console_log-0.2.10.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 -console_log-0.2.10.dist-info/metadata.json,sha256=msMuqEW2XvUVWEgGk7vLBezqSKQGNiMIQYtYat-MyVI,645 -console_log-0.2.10.dist-info/top_level.txt,sha256=L8ibfMkDJYTl4bJqXkTqei_S-9CHXytVFnmFE79aUzU,12 -console_log.py,sha256=Vexc_X2xAB3_qmg5XggxV8h8-MB2p9JeOW9P6CvwFUg,3090 diff --git a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/WHEEL b/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/WHEEL deleted file mode 100644 index 7332a41..0000000 --- a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.30.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/metadata.json b/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/metadata.json deleted file mode 100644 index 77faf9d..0000000 --- a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"extensions": {"python.details": {"contacts": [{"email": "beto@lyft.com", "name": "Beto Dealmeida", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/betodealmeida/consolelog"}}}, "extras": ["dev", "examples"], 
"generator": "bdist_wheel (0.30.0)", "license": "MIT License", "metadata_version": "2.0", "name": "console-log", "run_requires": [{"extra": "examples", "requires": ["flask"]}, {"requires": ["gevent", "gevent-websocket", "werkzeug", "wsgigzip"]}, {"extra": "dev", "requires": ["nose", "pipreqs", "twine"]}], "summary": "Log to browser console", "version": "0.2.10"} \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/top_level.txt deleted file mode 100644 index d3a814f..0000000 --- a/venv/lib/python3.7/site-packages/console_log-0.2.10.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -console_log diff --git a/venv/lib/python3.7/site-packages/console_log.py b/venv/lib/python3.7/site-packages/console_log.py deleted file mode 100644 index c986c9c..0000000 --- a/venv/lib/python3.7/site-packages/console_log.py +++ /dev/null @@ -1,105 +0,0 @@ -import json -import logging -import os.path - -from gevent.queue import Queue -from geventwebsocket import WebSocketError -from werkzeug.exceptions import RequestTimeout -from werkzeug.wrappers import Response -from wsgigzip import gzip - - -# map between Python levels and the console method in Javascript -levels = { - logging.CRITICAL: 'error', - logging.ERROR: 'error', - logging.WARNING: 'warn', - logging.INFO: 'info', - logging.DEBUG: 'debug', - logging.NOTSET: 'log', -} - - -class DictHandler(logging.Handler): - def __init__(self, queue): - super().__init__() - self.queue = queue - - def emit(self, record): - record.pathname = os.path.abspath(record.pathname) - message = { - 'level': levels[record.levelno], - 'content': record.msg, - } - try: - payload = json.dumps(message) - except TypeError: - message['content'] = repr(record.msg) - payload = json.dumps(message) - - self.queue.put_nowait(payload) - - -JAVASCRIPT = """ -console.log('Starting...'); - -const ws = new WebSocket("ws://{base}/__ws__"); -ws.onmessage = function (event) {{ - const msg = JSON.parse(event.data); - console[msg.level](msg.content); -}}; -""" - - -class ConsoleLog: - def __init__(self, app, logger, js_path='/__console__.js'): - self.app = app - self.queue = Queue() - self.logger = logger - self.js_path = js_path - - handler = DictHandler(self.queue) - self.logger.addHandler(handler) - - def __call__(self, environ, start_response): - if 'wsgi.websocket' in environ: - ws = environ["wsgi.websocket"] - while not ws.closed: - message = self.queue.get() - try: - ws.send(message) - except WebSocketError: - break - raise RequestTimeout() - elif environ["PATH_INFO"] == self.js_path: - if environ.get('HTTP_HOST'): - base = environ['HTTP_HOST'] - else: - host = environ['SERVER_NAME'] - port = environ['SERVER_PORT'] - base = ':'.join((host, port)) - response = Response(JAVASCRIPT.format(base=base)) - return response(environ, start_response) - - # request non-compressed response - http_accept_encoding = environ.pop('HTTP_ACCEPT_ENCODING', '') - response = Response.from_app(self.app, environ) - - # inject JS - if response.mimetype == 'text/html': - response = self.inject(response) - - # compress response, if necessary - if http_accept_encoding: - environ['HTTP_ACCEPT_ENCODING'] = http_accept_encoding - response = gzip()(response) - - return response(environ, start_response) - - def inject(self, response): - code = ''.format(self.js_path) - data = response.get_data() - payload = data.decode(response.charset) - response.data = '{code}\n{payload}'.format(code=code, 
payload=payload) - - return response diff --git a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/LICENSE.txt b/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/LICENSE.txt deleted file mode 100644 index 00aa6b9..0000000 --- a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (c) 2013, Open Knowledge Foundation, Friedrich Lindenberg, - Gregor Aisch - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/METADATA b/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/METADATA deleted file mode 100644 index d16bf0b..0000000 --- a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/METADATA +++ /dev/null @@ -1,50 +0,0 @@ -Metadata-Version: 2.1 -Name: dataset -Version: 1.3.1 -Summary: Toolkit for Python-based database access. -Home-page: http://github.com/pudo/dataset -Author: Friedrich Lindenberg, Gregor Aisch, Stefan Wehrmeyer -Author-email: friedrich.lindenberg@gmail.com -License: MIT -Keywords: sql sqlalchemy etl loading utility -Platform: UNKNOWN -Classifier: Development Status :: 3 - Alpha -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Description-Content-Type: text/markdown -Requires-Dist: sqlalchemy (>=1.3.1) -Requires-Dist: alembic (>=0.6.2) -Provides-Extra: dev -Requires-Dist: pip ; extra == 'dev' -Requires-Dist: nose ; extra == 'dev' -Requires-Dist: wheel ; extra == 'dev' -Requires-Dist: flake8 ; extra == 'dev' -Requires-Dist: coverage ; extra == 'dev' -Requires-Dist: psycopg2-binary ; extra == 'dev' -Requires-Dist: PyMySQL ; extra == 'dev' - -dataset: databases for lazy people -================================== - -![build](https://github.com/pudo/dataset/workflows/build/badge.svg) - -In short, **dataset** makes reading and writing data in databases as simple as reading and writing JSON files. 
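To make that claim concrete, a minimal sketch (assuming a throwaway SQLite file; `people` and its columns are invented names, created on the fly):

```python
import dataset

# connect() falls back to the DATABASE_URL environment variable
# (or an in-memory SQLite database) when no URL is passed
db = dataset.connect("sqlite:///example.db")

table = db["people"]                    # table is created on first write
table.insert(dict(name="Ada", age=36))  # missing columns are added automatically

for row in table.find(name="Ada"):      # simple keyword filters
    print(row["name"], row["age"])
```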
- -[Read the docs](https://dataset.readthedocs.io/) - -To install dataset, fetch it with ``pip``: - -```bash -$ pip install dataset -``` - -**Note:** as of version 1.0, **dataset** is split into two packages, with the -data export features now extracted into a stand-alone package, **datafreeze**. -See the relevant repository [here](https://github.com/pudo/datafreeze). - - diff --git a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/RECORD b/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/RECORD deleted file mode 100644 index b236ac1..0000000 --- a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/RECORD +++ /dev/null @@ -1,19 +0,0 @@ -dataset-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -dataset-1.3.1.dist-info/LICENSE.txt,sha256=Vgm6HhOV7-JiCwHQp6ZhJstX-bYr7vndLOKWlzRqaZI,1108 -dataset-1.3.1.dist-info/METADATA,sha256=i145t3x92yoosw0q02eoagRqlThaxnLlMEVDhBHU9A0,1733 -dataset-1.3.1.dist-info/RECORD,, -dataset-1.3.1.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 -dataset-1.3.1.dist-info/namespace_packages.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 -dataset-1.3.1.dist-info/top_level.txt,sha256=azD5Wy9MBv9bfMbTscYXdFdDxcIUlm2bl46qT0i1ra4,8 -dataset/__init__.py,sha256=FBxK1e-pmxtrnJjweIz4_d7ZOuDXnSrAIIlSVk-Bg1w,2253 -dataset/__pycache__/__init__.cpython-37.pyc,, -dataset/__pycache__/chunked.cpython-37.pyc,, -dataset/__pycache__/database.cpython-37.pyc,, -dataset/__pycache__/table.cpython-37.pyc,, -dataset/__pycache__/types.cpython-37.pyc,, -dataset/__pycache__/util.cpython-37.pyc,, -dataset/chunked.py,sha256=KcT689u-QTbUgL1pW_EALC27eE07pKYgw14iTCMzOgE,2485 -dataset/database.py,sha256=3-hThm-fQcPv5myUcxvq8U4191viNPGLBK98QD9DqfI,9809 -dataset/table.py,sha256=BS57A30_gTyh6lLeXKLUv9rkWLdhZOC9isPrARpm-aw,27036 -dataset/types.py,sha256=rTXlXNpYIaVjkmUgjlzQeCgWwjEEIlLcKzWb1IC2eLY,1461 -dataset/util.py,sha256=qrOSYPZq2c3bMtoUGWtANUxgbz6xxYz9BXJJoxTPP6k,3368 diff --git a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/WHEEL b/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/WHEEL deleted file mode 100644 index ef99c6c..0000000 --- a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.34.2) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/namespace_packages.txt b/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/namespace_packages.txt deleted file mode 100644 index 8b13789..0000000 --- a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/namespace_packages.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/top_level.txt deleted file mode 100644 index caba867..0000000 --- a/venv/lib/python3.7/site-packages/dataset-1.3.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -dataset diff --git a/venv/lib/python3.7/site-packages/dataset/__init__.py b/venv/lib/python3.7/site-packages/dataset/__init__.py deleted file mode 100644 index a0fc789..0000000 --- a/venv/lib/python3.7/site-packages/dataset/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -import os -import warnings -from dataset.database import Database -from dataset.table import Table -from dataset.util import row_type - -# shut up useless SA warning: -warnings.filterwarnings( - 'ignore', 'Unicode type received non-unicode bind param 
value.') -warnings.filterwarnings( - 'ignore', 'Skipping unsupported ALTER for creation of implicit constraint') - -__all__ = ['Database', 'Table', 'freeze', 'connect'] -__version__ = '1.3.1' - - -def connect(url=None, schema=None, reflect_metadata=True, engine_kwargs=None, - reflect_views=True, ensure_schema=True, row_type=row_type): - """ Opens a new connection to a database. - - *url* can be any valid `SQLAlchemy engine URL`_. If *url* is not defined - it will try to use *DATABASE_URL* from environment variable. Returns an - instance of :py:class:`Database `. Set *reflect_metadata* - to False if you don't want the entire database schema to be pre-loaded. - This significantly speeds up connecting to large databases with lots of - tables. *reflect_views* can be set to False if you don't want views to be - loaded. Additionally, *engine_kwargs* will be directly passed to - SQLAlchemy, e.g. set *engine_kwargs={'pool_recycle': 3600}* will avoid `DB - connection timeout`_. Set *row_type* to an alternate dict-like class to - change the type of container rows are stored in.:: - - db = dataset.connect('sqlite:///factbook.db') - - One of the main features of `dataset` is to automatically create tables and - columns as data is inserted. This behaviour can optionally be disabled via - the `ensure_schema` argument. It can also be overridden in a lot of the - data manipulation methods using the `ensure` flag. - - .. _SQLAlchemy Engine URL: http://docs.sqlalchemy.org/en/latest/core/engines.html#sqlalchemy.create_engine - .. _DB connection timeout: http://docs.sqlalchemy.org/en/latest/core/pooling.html#setting-pool-recycle - """ - if url is None: - url = os.environ.get('DATABASE_URL', 'sqlite://') - - return Database(url, schema=schema, reflect_metadata=reflect_metadata, - engine_kwargs=engine_kwargs, reflect_views=reflect_views, - ensure_schema=ensure_schema, row_type=row_type) diff --git a/venv/lib/python3.7/site-packages/dataset/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/dataset/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 6057bd8..0000000 Binary files a/venv/lib/python3.7/site-packages/dataset/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dataset/__pycache__/chunked.cpython-37.pyc b/venv/lib/python3.7/site-packages/dataset/__pycache__/chunked.cpython-37.pyc deleted file mode 100644 index c83663e..0000000 Binary files a/venv/lib/python3.7/site-packages/dataset/__pycache__/chunked.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dataset/__pycache__/database.cpython-37.pyc b/venv/lib/python3.7/site-packages/dataset/__pycache__/database.cpython-37.pyc deleted file mode 100644 index 7ede241..0000000 Binary files a/venv/lib/python3.7/site-packages/dataset/__pycache__/database.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dataset/__pycache__/table.cpython-37.pyc b/venv/lib/python3.7/site-packages/dataset/__pycache__/table.cpython-37.pyc deleted file mode 100644 index e9dbed2..0000000 Binary files a/venv/lib/python3.7/site-packages/dataset/__pycache__/table.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dataset/__pycache__/types.cpython-37.pyc b/venv/lib/python3.7/site-packages/dataset/__pycache__/types.cpython-37.pyc deleted file mode 100644 index 3d501aa..0000000 Binary files a/venv/lib/python3.7/site-packages/dataset/__pycache__/types.cpython-37.pyc and /dev/null differ diff --git 
a/venv/lib/python3.7/site-packages/dataset/__pycache__/util.cpython-37.pyc b/venv/lib/python3.7/site-packages/dataset/__pycache__/util.cpython-37.pyc deleted file mode 100644 index 1ae2a26..0000000 Binary files a/venv/lib/python3.7/site-packages/dataset/__pycache__/util.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dataset/chunked.py b/venv/lib/python3.7/site-packages/dataset/chunked.py deleted file mode 100644 index a5ca158..0000000 --- a/venv/lib/python3.7/site-packages/dataset/chunked.py +++ /dev/null @@ -1,85 +0,0 @@ -import itertools - - -class InvalidCallback(ValueError): - pass - - -class _Chunker(object): - def __init__(self, table, chunksize, callback): - self.queue = [] - self.table = table - self.chunksize = chunksize - if callback and not callable(callback): - raise InvalidCallback - self.callback = callback - - def flush(self): - self.queue.clear() - - def _queue_add(self, item): - self.queue.append(item) - if len(self.queue) >= self.chunksize: - self.flush() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - self.flush() - - -class ChunkedInsert(_Chunker): - """Batch up insert operations - with ChunkedInsert(my_table) as inserter: - inserter(row) - - Rows will be inserted in groups of `chunksize` (defaulting to 1000). An - optional callback can be provided that will be called before the insert. - This callback takes one parameter which is the queue which is about to be - inserted into the database - """ - - def __init__(self, table, chunksize=1000, callback=None): - self.fields = set() - super().__init__(table, chunksize, callback) - - def insert(self, item): - self.fields.update(item.keys()) - super()._queue_add(item) - - def flush(self): - for item in self.queue: - for field in self.fields: - item[field] = item.get(field) - if self.callback is not None: - self.callback(self.queue) - self.table.insert_many(self.queue) - super().flush() - - -class ChunkedUpdate(_Chunker): - """Batch up update operations - with ChunkedUpdate(my_table) as updater: - updater(row) - - Rows will be updated in groups of `chunksize` (defaulting to 1000). An - optional callback can be provided that will be called before the update. 
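A usage sketch for the chunked helpers above and below. Note that the `inserter(row)` and `updater(row)` calls in these docstrings look like typos: `_Chunker` defines no `__call__`, so the actual entry points are the `insert()` and `update()` methods. Under that reading, and assuming `db` is a `dataset` connection as in the earlier sketch:

```python
from dataset.chunked import ChunkedInsert

# rows are queued and written to the table 1000 at a time
with ChunkedInsert(db["events"], chunksize=1000) as inserter:
    for i in range(5000):
        inserter.insert(dict(kind="click", seq=i))
# leaving the block flushes whatever is still queued
```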
- This callback takes one parameter which is the queue which is about to be - updated into the database - """ - - def __init__(self, table, keys, chunksize=1000, callback=None): - self.keys = keys - super().__init__(table, chunksize, callback) - - def update(self, item): - super()._queue_add(item) - - def flush(self): - if self.callback is not None: - self.callback(self.queue) - self.queue.sort(key=dict.keys) - for fields, items in itertools.groupby(self.queue, key=dict.keys): - self.table.update_many(list(items), self.keys) - super().flush() diff --git a/venv/lib/python3.7/site-packages/dataset/database.py b/venv/lib/python3.7/site-packages/dataset/database.py deleted file mode 100644 index 87d85be..0000000 --- a/venv/lib/python3.7/site-packages/dataset/database.py +++ /dev/null @@ -1,273 +0,0 @@ -import logging -import threading -from urllib.parse import parse_qs, urlparse - -from sqlalchemy import create_engine -from sqlalchemy.sql import text -from sqlalchemy.schema import MetaData -from sqlalchemy.util import safe_reraise -from sqlalchemy.engine.reflection import Inspector - -from alembic.migration import MigrationContext -from alembic.operations import Operations - -from dataset.table import Table -from dataset.util import ResultIter, row_type, safe_url, QUERY_STEP -from dataset.util import normalize_table_name -from dataset.types import Types - -log = logging.getLogger(__name__) - - -class Database(object): - """A database object represents a SQL database with multiple tables.""" - - def __init__(self, url, schema=None, reflect_metadata=True, - engine_kwargs=None, reflect_views=True, - ensure_schema=True, row_type=row_type): - """Configure and connect to the database.""" - if engine_kwargs is None: - engine_kwargs = {} - - parsed_url = urlparse(url) - # if parsed_url.scheme.lower() in 'sqlite': - # # ref: https://github.com/pudo/dataset/issues/163 - # if 'poolclass' not in engine_kwargs: - # engine_kwargs['poolclass'] = StaticPool - - self.lock = threading.RLock() - self.local = threading.local() - - if len(parsed_url.query): - query = parse_qs(parsed_url.query) - if schema is None: - schema_qs = query.get('schema', query.get('searchpath', [])) - if len(schema_qs): - schema = schema_qs.pop() - - self.schema = schema - self.engine = create_engine(url, **engine_kwargs) - self.types = Types(self.engine.dialect.name) - self.url = url - self.row_type = row_type - self.ensure_schema = ensure_schema - self._tables = {} - - @property - def executable(self): - """Connection against which statements will be executed.""" - if not hasattr(self.local, 'conn'): - self.local.conn = self.engine.connect() - return self.local.conn - - @property - def op(self): - """Get an alembic operations context.""" - ctx = MigrationContext.configure(self.executable) - return Operations(ctx) - - @property - def inspect(self): - """Get a SQLAlchemy inspector.""" - return Inspector.from_engine(self.executable) - - @property - def metadata(self): - """Return a SQLAlchemy schema cache object.""" - return MetaData(schema=self.schema, bind=self.executable) - - @property - def in_transaction(self): - """Check if this database is in a transactional context.""" - if not hasattr(self.local, 'tx'): - return False - return len(self.local.tx) > 0 - - def _flush_tables(self): - """Clear the table metadata after transaction rollbacks.""" - for table in self._tables.values(): - table._table = None - - def begin(self): - """Enter a transaction explicitly. - - No data will be written until the transaction has been committed. 
- """ - if not hasattr(self.local, 'tx'): - self.local.tx = [] - self.local.tx.append(self.executable.begin()) - - def commit(self): - """Commit the current transaction. - - Make all statements executed since the transaction was begun permanent. - """ - if hasattr(self.local, 'tx') and self.local.tx: - tx = self.local.tx.pop() - tx.commit() - self._flush_tables() - - def rollback(self): - """Roll back the current transaction. - - Discard all statements executed since the transaction was begun. - """ - if hasattr(self.local, 'tx') and self.local.tx: - tx = self.local.tx.pop() - tx.rollback() - self._flush_tables() - - def __enter__(self): - """Start a transaction.""" - self.begin() - return self - - def __exit__(self, error_type, error_value, traceback): - """End a transaction by committing or rolling back.""" - if error_type is None: - try: - self.commit() - except Exception: - with safe_reraise(): - self.rollback() - else: - self.rollback() - - def close(self): - """Close database connections. Makes this object unusable.""" - self.engine.dispose() - self._tables = {} - self.engine = None - - @property - def tables(self): - """Get a listing of all tables that exist in the database.""" - return self.inspect.get_table_names(schema=self.schema) - - @property - def views(self): - """Get a listing of all views that exist in the database.""" - return self.inspect.get_view_names(schema=self.schema) - - def __contains__(self, table_name): - """Check if the given table name exists in the database.""" - try: - table_name = normalize_table_name(table_name) - if table_name in self.tables: - return True - if table_name in self.views: - return True - return False - except ValueError: - return False - - def create_table(self, table_name, primary_id=None, primary_type=None): - """Create a new table. - - Either loads a table or creates it if it doesn't exist yet. You can - define the name and type of the primary key field, if a new table is to - be created. The default is to create an auto-incrementing integer, - ``id``. You can also set the primary key to be a string or big integer. - The caller will be responsible for the uniqueness of ``primary_id`` if - it is defined as a text type. - - Returns a :py:class:`Table ` instance. - :: - - table = db.create_table('population') - - # custom id and type - table2 = db.create_table('population2', 'age') - table3 = db.create_table('population3', - primary_id='city', - primary_type=db.types.text) - # custom length of String - table4 = db.create_table('population4', - primary_id='city', - primary_type=db.types.string(25)) - # no primary key - table5 = db.create_table('population5', - primary_id=False) - """ - assert not isinstance(primary_type, str), \ - 'Text-based primary_type support is dropped, use db.types.' - table_name = normalize_table_name(table_name) - with self.lock: - if table_name not in self._tables: - self._tables[table_name] = Table(self, table_name, - primary_id=primary_id, - primary_type=primary_type, - auto_create=True) - return self._tables.get(table_name) - - def load_table(self, table_name): - """Load a table. - - This will fail if the tables does not already exist in the database. If - the table exists, its columns will be reflected and are available on - the :py:class:`Table ` object. - - Returns a :py:class:`Table ` instance. 
- :: - - table = db.load_table('population') - """ - table_name = normalize_table_name(table_name) - with self.lock: - if table_name not in self._tables: - self._tables[table_name] = Table(self, table_name) - return self._tables.get(table_name) - - def get_table(self, table_name, primary_id=None, primary_type=None): - """Load or create a table. - - This is now the same as ``create_table``. - :: - - table = db.get_table('population') - # you can also use the short-hand syntax: - table = db['population'] - """ - return self.create_table(table_name, primary_id, primary_type) - - def __getitem__(self, table_name): - """Get a given table.""" - return self.get_table(table_name) - - def _ipython_key_completions_(self): - """Completion for table names with IPython.""" - return self.tables - - def query(self, query, *args, **kwargs): - """Run a statement on the database directly. - - Allows for the execution of arbitrary read/write queries. A query can - either be a plain text string, or a `SQLAlchemy expression - `_. - If a plain string is passed in, it will be converted to an expression - automatically. - - Further positional and keyword arguments will be used for parameter - binding. To include a positional argument in your query, use question - marks in the query (i.e. ``SELECT * FROM tbl WHERE a = ?``). For - keyword arguments, use a bind parameter (i.e. ``SELECT * FROM tbl - WHERE a = :foo``). - :: - - statement = 'SELECT user, COUNT(*) c FROM photos GROUP BY user' - for row in db.query(statement): - print(row['user'], row['c']) - - The returned iterator will yield each result sequentially. - """ - if isinstance(query, str): - query = text(query) - _step = kwargs.pop('_step', QUERY_STEP) - if _step is False or _step == 0: - _step = None - rp = self.executable.execute(query, *args, **kwargs) - return ResultIter(rp, row_type=self.row_type, step=_step) - - def __repr__(self): - """Text representation contains the URL.""" - return '<Database(%s)>' % safe_url(self.url) diff --git a/venv/lib/python3.7/site-packages/dataset/table.py b/venv/lib/python3.7/site-packages/dataset/table.py deleted file mode 100644 index 9fc3332..0000000 --- a/venv/lib/python3.7/site-packages/dataset/table.py +++ /dev/null @@ -1,719 +0,0 @@ -import logging -import warnings -import threading - -from sqlalchemy.sql import and_, expression -from sqlalchemy.sql.expression import bindparam, ClauseElement -from sqlalchemy.schema import Column, Index -from sqlalchemy import func, select, false -from sqlalchemy.schema import Table as SQLATable -from sqlalchemy.exc import NoSuchTableError - -from dataset.types import Types -from dataset.util import index_name, ensure_tuple -from dataset.util import DatasetException, ResultIter, QUERY_STEP -from dataset.util import normalize_table_name, pad_chunk_columns -from dataset.util import normalize_column_name, normalize_column_key - - -log = logging.getLogger(__name__) - - -class Table(object): - """Represents a table in a database and exposes common operations.""" - PRIMARY_DEFAULT = 'id' - - def __init__(self, database, table_name, primary_id=None, - primary_type=None, auto_create=False): - """Initialise the table from database schema.""" - self.db = database - self.name = normalize_table_name(table_name) - self._table = None - self._columns = None - self._indexes = [] - self._primary_id = primary_id if primary_id is not None \ - else self.PRIMARY_DEFAULT - self._primary_type = primary_type if primary_type is not None \ - else Types.integer - self._auto_create = auto_create - - @property - def
exists(self): - """Check to see if the table currently exists in the database.""" - if self._table is not None: - return True - return self.name in self.db - - @property - def table(self): - """Get a reference to the table, which may be reflected or created.""" - if self._table is None: - self._sync_table(()) - return self._table - - @property - def _column_keys(self): - """Get a dictionary of all columns and their case mapping.""" - if not self.exists: - return {} - with self.db.lock: - if self._columns is None: - # Initialise the table if it doesn't exist - table = self.table - self._columns = {} - for column in table.columns: - name = normalize_column_name(column.name) - key = normalize_column_key(name) - if key in self._columns: - log.warning("Duplicate column: %s", name) - self._columns[key] = name - return self._columns - - def _flush_metadata(self): - with self.db.lock: - self._columns = None - - @property - def columns(self): - """Get a listing of all columns that exist in the table.""" - return list(self._column_keys.values()) - - def has_column(self, column): - """Check if a column with the given name exists on this table.""" - key = normalize_column_key(normalize_column_name(column)) - return key in self._column_keys - - def _get_column_name(self, name): - """Find the best column name with case-insensitive matching.""" - name = normalize_column_name(name) - key = normalize_column_key(name) - return self._column_keys.get(key, name) - - def insert(self, row, ensure=None, types=None): - """Add a ``row`` dict by inserting it into the table. - - If ``ensure`` is set, any of the keys of the row are not - table columns, they will be created automatically. - - During column creation, ``types`` will be checked for a key - matching the name of a column to be created, and the given - SQLAlchemy column type will be used. Otherwise, the type is - guessed from the row value, defaulting to a simple unicode - field. - :: - - data = dict(title='I am a banana!') - table.insert(data) - - Returns the inserted row's primary key. - """ - row = self._sync_columns(row, ensure, types=types) - res = self.db.executable.execute(self.table.insert(row)) - if len(res.inserted_primary_key) > 0: - return res.inserted_primary_key[0] - return True - - def insert_ignore(self, row, keys, ensure=None, types=None): - """Add a ``row`` dict into the table if the row does not exist. - - If rows with matching ``keys`` exist no change is made. - - Setting ``ensure`` results in automatically creating missing columns, - i.e., keys of the row are not table columns. - - During column creation, ``types`` will be checked for a key - matching the name of a column to be created, and the given - SQLAlchemy column type will be used. Otherwise, the type is - guessed from the row value, defaulting to a simple unicode - field. - :: - - data = dict(id=10, title='I am a banana!') - table.insert_ignore(data, ['id']) - """ - row = self._sync_columns(row, ensure, types=types) - if self._check_ensure(ensure): - self.create_index(keys) - args, _ = self._keys_to_args(row, keys) - if self.count(**args) == 0: - return self.insert(row, ensure=False) - return False - - def insert_many(self, rows, chunk_size=1000, ensure=None, types=None): - """Add many rows at a time. - - This is significantly faster than adding them one by one. Per default - the rows are processed in chunks of 1000 per commit, unless you specify - a different ``chunk_size``. - - See :py:meth:`insert() ` for details on - the other parameters. 
- :: - - rows = [dict(name='Dolly')] * 10000 - table.insert_many(rows) - """ - # Sync table before inputting rows. - sync_row = {} - for row in rows: - # Only get non-existing columns. - sync_keys = list(sync_row.keys()) - for key in [k for k in row.keys() if k not in sync_keys]: - # Get a sample of the new column(s) from the row. - sync_row[key] = row[key] - self._sync_columns(sync_row, ensure, types=types) - - # Get columns name list to be used for padding later. - columns = sync_row.keys() - - chunk = [] - for index, row in enumerate(rows): - chunk.append(row) - - # Insert when chunk_size is fulfilled or this is the last row - if len(chunk) == chunk_size or index == len(rows) - 1: - chunk = pad_chunk_columns(chunk, columns) - self.table.insert().execute(chunk) - chunk = [] - - def update(self, row, keys, ensure=None, types=None, return_count=False): - """Update a row in the table. - - The update is managed via the set of column names stated in ``keys``: - they will be used as filters for the data to be updated, using the - values in ``row``. - :: - - # update all entries with id matching 10, setting their title - # columns - data = dict(id=10, title='I am a banana!') - table.update(data, ['id']) - - If keys in ``row`` update columns not present in the table, they will - be created based on the settings of ``ensure`` and ``types``, matching - the behavior of :py:meth:`insert() `. - """ - row = self._sync_columns(row, ensure, types=types) - args, row = self._keys_to_args(row, keys) - clause = self._args_to_clause(args) - if not len(row): - return self.count(clause) - stmt = self.table.update(whereclause=clause, values=row) - rp = self.db.executable.execute(stmt) - if rp.supports_sane_rowcount(): - return rp.rowcount - if return_count: - return self.count(clause) - - def update_many(self, rows, keys, chunk_size=1000, ensure=None, - types=None): - """Update many rows in the table at a time. - - This is significantly faster than updating them one by one. Per default - the rows are processed in chunks of 1000 per commit, unless you specify - a different ``chunk_size``. - - See :py:meth:`update() ` for details on - the other parameters. - """ - # Convert keys to a list if not a list or tuple. - keys = keys if type(keys) in (list, tuple) else [keys] - - chunk = [] - columns = [] - for index, row in enumerate(rows): - chunk.append(row) - for col in row.keys(): - if col not in columns: - columns.append(col) - - # bindparam requires names to not conflict (cannot be "id" for id) - for key in keys: - row['_%s' % key] = row[key] - - # Update when chunk_size is fulfilled or this is the last row - if len(chunk) == chunk_size or index == len(rows) - 1: - cl = [self.table.c[k] == bindparam('_%s' % k) for k in keys] - stmt = self.table.update( - whereclause=and_(*cl), - values={ - col: bindparam(col, required=False) for col in columns - } - ) - self.db.executable.execute(stmt, chunk) - chunk = [] - - def upsert(self, row, keys, ensure=None, types=None): - """An UPSERT is a smart combination of insert and update. - - If rows with matching ``keys`` exist they will be updated, otherwise a - new row is inserted in the table. 
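Before the upsert example below, a quick sketch for `update_many()` above, which carries no docstring example of its own. It matches rows on the key columns via `bindparam`, renaming each key to `_<key>` internally to avoid bind-name collisions; `table` and the `id`/`status` columns here are assumed names:

```python
rows = [dict(id=i, status="done") for i in range(10000)]
# every row is matched on its `id` value and written in chunks of 1000
table.update_many(rows, "id")
```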
- :: - - data = dict(id=10, title='I am a banana!') - table.upsert(data, ['id']) - """ - row = self._sync_columns(row, ensure, types=types) - if self._check_ensure(ensure): - self.create_index(keys) - row_count = self.update(row, keys, ensure=False, return_count=True) - if row_count == 0: - return self.insert(row, ensure=False) - return True - - def upsert_many(self, rows, keys, chunk_size=1000, ensure=None, - types=None): - """ - Sorts multiple input rows into upserts and inserts. Inserts are passed - to insert_many and upserts are updated. - - See :py:meth:`upsert() ` and - :py:meth:`insert_many() `. - """ - # Convert keys to a list if not a list or tuple. - keys = keys if type(keys) in (list, tuple) else [keys] - - to_insert = [] - to_update = [] - for row in rows: - if self.find_one(**{key: row.get(key) for key in keys}): - # Row exists - update it. - to_update.append(row) - else: - # Row doesn't exist - insert it. - to_insert.append(row) - - # Insert non-existing rows. - self.insert_many(to_insert, chunk_size, ensure, types) - - # Update existing rows. - self.update_many(to_update, keys, chunk_size, ensure, types) - - def delete(self, *clauses, **filters): - """Delete rows from the table. - - Keyword arguments can be used to add column-based filters. The filter - criterion will always be equality: - :: - - table.delete(place='Berlin') - - If no arguments are given, all records are deleted. - """ - if not self.exists: - return False - clause = self._args_to_clause(filters, clauses=clauses) - stmt = self.table.delete(whereclause=clause) - rp = self.db.executable.execute(stmt) - return rp.rowcount > 0 - - def _reflect_table(self): - """Load the tables definition from the database.""" - with self.db.lock: - self._flush_metadata() - try: - self._table = SQLATable(self.name, - self.db.metadata, - schema=self.db.schema, - autoload=True) - except NoSuchTableError: - self._table = None - - def _threading_warn(self): - if self.db.in_transaction and threading.active_count() > 1: - warnings.warn("Changing the database schema inside a transaction " - "in a multi-threaded environment is likely to lead " - "to race conditions and synchronization issues.", - RuntimeWarning) - - def _sync_table(self, columns): - """Lazy load, create or adapt the table structure in the database.""" - self._flush_metadata() - if self._table is None: - # Load an existing table from the database. - self._reflect_table() - if self._table is None: - # Create the table with an initial set of columns. - if not self._auto_create: - raise DatasetException("Table does not exist: %s" % self.name) - # Keep the lock scope small because this is run very often. - with self.db.lock: - self._threading_warn() - self._table = SQLATable(self.name, - self.db.metadata, - schema=self.db.schema) - if self._primary_id is not False: - # This can go wrong on DBMS like MySQL and SQLite where - # tables cannot have no columns. 
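An aside on `upsert_many()` above, which also lacks a docstring example: each row is probed with `find_one()` to decide between insert and update, one query per row, so it suits modest batch sizes. A sketch with assumed names:

```python
rows = [dict(id=i, name="user-%d" % i) for i in range(100)]
table.upsert_many(rows, "id")   # existing ids are updated, new ids inserted
```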
- primary_id = self._primary_id - primary_type = self._primary_type - increment = primary_type in [Types.integer, Types.bigint] - column = Column(primary_id, primary_type, - primary_key=True, - autoincrement=increment) - self._table.append_column(column) - for column in columns: - if not column.name == self._primary_id: - self._table.append_column(column) - self._table.create(self.db.executable, checkfirst=True) - elif len(columns): - with self.db.lock: - self._reflect_table() - self._threading_warn() - for column in columns: - if not self.has_column(column.name): - self.db.op.add_column(self.name, - column, - self.db.schema) - self._reflect_table() - - def _sync_columns(self, row, ensure, types=None): - """Create missing columns (or the table) prior to writes. - - If automatic schema generation is disabled (``ensure`` is ``False``), - this will remove any keys from the ``row`` for which there is no - matching column. - """ - ensure = self._check_ensure(ensure) - types = types or {} - types = {self._get_column_name(k): v for (k, v) in types.items()} - out = {} - sync_columns = {} - for name, value in row.items(): - name = self._get_column_name(name) - if self.has_column(name): - out[name] = value - elif ensure: - _type = types.get(name) - if _type is None: - _type = self.db.types.guess(value) - sync_columns[name] = Column(name, _type) - out[name] = value - self._sync_table(sync_columns.values()) - return out - - def _check_ensure(self, ensure): - if ensure is None: - return self.db.ensure_schema - return ensure - - def _generate_clause(self, column, op, value): - if op in ('like',): - return self.table.c[column].like(value) - if op in ('ilike',): - return self.table.c[column].ilike(value) - if op in ('>', 'gt'): - return self.table.c[column] > value - if op in ('<', 'lt'): - return self.table.c[column] < value - if op in ('>=', 'gte'): - return self.table.c[column] >= value - if op in ('<=', 'lte'): - return self.table.c[column] <= value - if op in ('=', '==', 'is'): - return self.table.c[column] == value - if op in ('!=', '<>', 'not'): - return self.table.c[column] != value - if op in ('in',): - return self.table.c[column].in_(value) - if op in ('between', '..'): - start, end = value - return self.table.c[column].between(start, end) - if op in ('startswith',): - return self.table.c[column].like(value + '%') - if op in ('endswith',): - return self.table.c[column].like('%' + value) - return false() - - def _args_to_clause(self, args, clauses=()): - clauses = list(clauses) - for column, value in args.items(): - column = self._get_column_name(column) - if not self.has_column(column): - clauses.append(false()) - elif isinstance(value, (list, tuple, set)): - clauses.append(self._generate_clause(column, 'in', value)) - elif isinstance(value, dict): - for op, op_value in value.items(): - clauses.append(self._generate_clause(column, op, op_value)) - else: - clauses.append(self._generate_clause(column, '=', value)) - return and_(*clauses) - - def _args_to_order_by(self, order_by): - orderings = [] - for ordering in ensure_tuple(order_by): - if ordering is None: - continue - column = ordering.lstrip('-') - column = self._get_column_name(column) - if not self.has_column(column): - continue - if ordering.startswith('-'): - orderings.append(self.table.c[column].desc()) - else: - orderings.append(self.table.c[column].asc()) - return orderings - - def _keys_to_args(self, row, keys): - keys = ensure_tuple(keys) - keys = [self._get_column_name(k) for k in keys] - row = row.copy() - args = {k: row.pop(k,
None) for k in keys} - return args, row - - def create_column(self, name, type, **kwargs): - """Create a new column ``name`` of a specified type. - :: - - table.create_column('created_at', db.types.datetime) - - `type` corresponds to an SQLAlchemy type as described by - `dataset.db.Types`. Additional keyword arguments are passed - to the constructor of `Column`, so that default values, and - options like `nullable` and `unique` can be set. - :: - - table.create_column('key', unique=True, nullable=False) - table.create_column('food', default='banana') - """ - name = self._get_column_name(name) - if self.has_column(name): - log.debug("Column exists: %s" % name) - return - self._sync_table((Column(name, type, **kwargs),)) - - def create_column_by_example(self, name, value): - """ - Explicitly create a new column ``name`` with a type that is appropriate - to store the given example ``value``. The type is guessed in the same - way as for the insert method with ``ensure=True``. - :: - - table.create_column_by_example('length', 4.2) - - If a column of the same name already exists, no action is taken, even - if it is not of the type we would have created. - """ - type_ = self.db.types.guess(value) - self.create_column(name, type_) - - def drop_column(self, name): - """Drop the column ``name``. - :: - table.drop_column('created_at') - """ - if self.db.engine.dialect.name == 'sqlite': - raise RuntimeError("SQLite does not support dropping columns.") - name = self._get_column_name(name) - with self.db.lock: - if not self.exists or not self.has_column(name): - log.debug("Column does not exist: %s", name) - return - - self._threading_warn() - self.db.op.drop_column( - self.table.name, - name, - self.table.schema - ) - self._reflect_table() - - def drop(self): - """Drop the table from the database. - - Deletes both the schema and all the contents within it. - """ - with self.db.lock: - if self.exists: - self._threading_warn() - self.table.drop(self.db.executable, checkfirst=True) - self._table = None - self._flush_metadata() - - def has_index(self, columns): - """Check if an index exists to cover the given ``columns``.""" - if not self.exists: - return False - columns = set([self._get_column_name(c) for c in columns]) - if columns in self._indexes: - return True - for column in columns: - if not self.has_column(column): - return False - indexes = self.db.inspect.get_indexes(self.name, schema=self.db.schema) - for index in indexes: - if columns == set(index.get('column_names', [])): - self._indexes.append(columns) - return True - return False - - def create_index(self, columns, name=None, **kw): - """Create an index to speed up queries on a table. - - If no ``name`` is given a random name is created. - :: - - table.create_index(['name', 'country']) - """ - columns = [self._get_column_name(c) for c in ensure_tuple(columns)] - with self.db.lock: - if not self.exists: - raise DatasetException("Table has not been created yet.") - - for column in columns: - if not self.has_column(column): - return - - if not self.has_index(columns): - self._threading_warn() - name = name or index_name(self.name, columns) - columns = [self.table.c[c] for c in columns] - idx = Index(name, *columns, **kw) - idx.create(self.db.executable) - - def find(self, *_clauses, **kwargs): - """Perform a simple search on the table. - - Simply pass keyword arguments as ``filter``. 
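Besides the plain keyword filters shown in the docstring examples that follow, `_args_to_clause()` above also accepts a list, tuple or set value as an `IN` filter, and a dict value keyed by one of the `_generate_clause()` operators. A sketch of those forms, with `table` and the columns as assumed names:

```python
# operator dict: rows with 1980 <= year <= 1990
results = table.find(year={"between": [1980, 1990]})

# comparison operators work as dict keys too
results = table.find(year={">=": 2000})

# list value is shorthand for an IN clause
results = table.find(country=["France", "Spain"])
```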
- :: - - results = table.find(country='France') - results = table.find(country='France', year=1980) - - Using ``_limit``:: - - # just return the first 10 rows - results = table.find(country='France', _limit=10) - - You can sort the results by single or multiple columns. Append a minus - sign to the column name for descending order:: - - # sort results by a column 'year' - results = table.find(country='France', order_by='year') - # return all rows sorted by multiple columns (descending by year) - results = table.find(order_by=['country', '-year']) - - To perform complex queries with advanced filters or to perform - aggregation, use :py:meth:`db.query() ` - instead. - """ - if not self.exists: - return iter([]) - - _limit = kwargs.pop('_limit', None) - _offset = kwargs.pop('_offset', 0) - order_by = kwargs.pop('order_by', None) - _streamed = kwargs.pop('_streamed', False) - _step = kwargs.pop('_step', QUERY_STEP) - if _step is False or _step == 0: - _step = None - - order_by = self._args_to_order_by(order_by) - args = self._args_to_clause(kwargs, clauses=_clauses) - query = self.table.select(whereclause=args, - limit=_limit, - offset=_offset) - if len(order_by): - query = query.order_by(*order_by) - - conn = self.db.executable - if _streamed: - conn = self.db.engine.connect() - conn = conn.execution_options(stream_results=True) - - return ResultIter(conn.execute(query), - row_type=self.db.row_type, - step=_step) - - def find_one(self, *args, **kwargs): - """Get a single result from the table. - - Works just like :py:meth:`find() ` but returns one - result, or ``None``. - :: - - row = table.find_one(country='United States') - """ - if not self.exists: - return None - - kwargs['_limit'] = 1 - kwargs['_step'] = None - resiter = self.find(*args, **kwargs) - try: - for row in resiter: - return row - finally: - resiter.close() - - def count(self, *_clauses, **kwargs): - """Return the count of results for the given filter set.""" - # NOTE: this does not have support for limit and offset since I can't - # see how this is useful. Still, there might be compatibility issues - # with people using these flags. Let's see how it goes. - if not self.exists: - return 0 - - args = self._args_to_clause(kwargs, clauses=_clauses) - query = select([func.count()], whereclause=args) - query = query.select_from(self.table) - rp = self.db.executable.execute(query) - return rp.fetchone()[0] - - def __len__(self): - """Return the number of rows in the table.""" - return self.count() - - def distinct(self, *args, **_filter): - """Return all the unique (distinct) values for the given ``columns``. - :: - - # returns only one row per year, ignoring the rest - table.distinct('year') - # works with multiple columns, too - table.distinct('year', 'country') - # you can also combine this with a filter - table.distinct('year', country='China') - """ - if not self.exists: - return iter([]) - - columns = [] - clauses = [] - for column in args: - if isinstance(column, ClauseElement): - clauses.append(column) - else: - if not self.has_column(column): - raise DatasetException("No such column: %s" % column) - columns.append(self.table.c[column]) - - clause = self._args_to_clause(_filter, clauses=clauses) - if not len(columns): - return iter([]) - - q = expression.select(columns, - distinct=True, - whereclause=clause, - order_by=[c.asc() for c in columns]) - return self.db.query(q) - - # Legacy methods for running find queries. - all = find - - def __iter__(self): - """Return all rows of the table as simple dictionaries. 
- - Allows for iterating over all rows in the table without explicitly - calling :py:meth:`find() `. - :: - - for row in table: - print(row) - """ - return self.find() - - def __repr__(self): - """Get table representation.""" - return '<Table(%s)>' % self.table.name diff --git a/venv/lib/python3.7/site-packages/dataset/types.py b/venv/lib/python3.7/site-packages/dataset/types.py deleted file mode 100644 index d30d3c1..0000000 --- a/venv/lib/python3.7/site-packages/dataset/types.py +++ /dev/null @@ -1,49 +0,0 @@ -from datetime import datetime, date - -from sqlalchemy import Integer, UnicodeText, Float, BigInteger -from sqlalchemy import Boolean, Date, DateTime, Unicode, JSON -from sqlalchemy.dialects.postgresql import JSONB -from sqlalchemy.types import TypeEngine - - -class Types(object): - """A holder class for easy access to SQLAlchemy type names.""" - integer = Integer - string = Unicode - text = UnicodeText - float = Float - bigint = BigInteger - boolean = Boolean - date = Date - datetime = DateTime - - def __init__(self, dialect=None): - self._dialect = dialect - - @property - def json(self): - if self._dialect is not None and self._dialect == 'postgresql': - return JSONB - return JSON - - def guess(self, sample): - """Given a single sample, guess the column type for the field. - - If the sample is an instance of an SQLAlchemy type, the type will be - used instead. - """ - if isinstance(sample, TypeEngine): - return sample - if isinstance(sample, bool): - return self.boolean - elif isinstance(sample, int): - return self.bigint - elif isinstance(sample, float): - return self.float - elif isinstance(sample, datetime): - return self.datetime - elif isinstance(sample, date): - return self.date - elif isinstance(sample, dict): - return self.json - return self.text diff --git a/venv/lib/python3.7/site-packages/dataset/util.py b/venv/lib/python3.7/site-packages/dataset/util.py deleted file mode 100644 index 2b0f8c3..0000000 --- a/venv/lib/python3.7/site-packages/dataset/util.py +++ /dev/null @@ -1,124 +0,0 @@ -from hashlib import sha1 -from urllib.parse import urlparse -from collections import OrderedDict -from collections.abc import Iterable - -QUERY_STEP = 1000 -row_type = OrderedDict - - -class DatasetException(Exception): - pass - - -def convert_row(row_type, row): - if row is None: - return None - return row_type(row.items()) - - -def iter_result_proxy(rp, step=None): - """Iterate over the ResultProxy.""" - while True: - if step is None: - chunk = rp.fetchall() - else: - chunk = rp.fetchmany(size=step) - if not chunk: - break - for row in chunk: - yield row - - -class ResultIter(object): - """ SQLAlchemy ResultProxies are not iterable to get a - list of dictionaries. This is to wrap them. """ - - def __init__(self, result_proxy, row_type=row_type, step=None): - self.row_type = row_type - self.result_proxy = result_proxy - self.keys = list(result_proxy.keys()) - self._iter = iter_result_proxy(result_proxy, step=step) - - def __next__(self): - try: - return convert_row(self.row_type, next(self._iter)) - except StopIteration: - self.close() - raise - - next = __next__ - - def __iter__(self): - return self - - def close(self): - self.result_proxy.close() - - -def normalize_column_name(name): - """Check if a string is a reasonable thing to use as a column name.""" - if not isinstance(name, str): - raise ValueError('%r is not a valid column name.'
% name) - - # limit to 63 characters - name = name.strip()[:63] - # column names can be 63 *bytes* max in postgresql - if isinstance(name, str): - while len(name.encode('utf-8')) >= 64: - name = name[:len(name) - 1] - - if not len(name) or '.' in name or '-' in name: - raise ValueError('%r is not a valid column name.' % name) - return name - - -def normalize_column_key(name): - """Return a comparable column name.""" - if name is None or not isinstance(name, str): - return None - return name.upper().strip().replace(' ', '') - - -def normalize_table_name(name): - """Check if the table name is obviously invalid.""" - if not isinstance(name, str): - raise ValueError("Invalid table name: %r" % name) - name = name.strip()[:63] - if not len(name): - raise ValueError("Invalid table name: %r" % name) - return name - - -def safe_url(url): - """Remove password from printed connection URLs.""" - parsed = urlparse(url) - if parsed.password is not None: - pwd = ':%s@' % parsed.password - url = url.replace(pwd, ':*****@') - return url - - -def index_name(table, columns): - """Generate an artificial index name.""" - sig = '||'.join(columns) - key = sha1(sig.encode('utf-8')).hexdigest()[:16] - return 'ix_%s_%s' % (table, key) - - -def ensure_tuple(obj): - """Try and make the given argument into a tuple.""" - if obj is None: - return tuple() - if isinstance(obj, Iterable) and not isinstance(obj, (str, bytes)): - return tuple(obj) - return obj, - - -def pad_chunk_columns(chunk, columns): - """Given a set of items to be inserted, make sure they all have the - same columns by padding columns with None if they are missing.""" - for record in chunk: - for column in columns: - record.setdefault(column, None) - return chunk diff --git a/venv/lib/python3.7/site-packages/dateutil/__init__.py b/venv/lib/python3.7/site-packages/dateutil/__init__.py deleted file mode 100644 index 0defb82..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# -*- coding: utf-8 -*- -try: - from ._version import version as __version__ -except ImportError: - __version__ = 'unknown' - -__all__ = ['easter', 'parser', 'relativedelta', 'rrule', 'tz', - 'utils', 'zoneinfo'] diff --git a/venv/lib/python3.7/site-packages/dateutil/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 7c6dbc3..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/__pycache__/_common.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/_common.cpython-37.pyc deleted file mode 100644 index 29c2f41..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/_common.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/__pycache__/_version.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/_version.cpython-37.pyc deleted file mode 100644 index 372d549..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/_version.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/__pycache__/easter.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/easter.cpython-37.pyc deleted file mode 100644 index 5d84c76..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/easter.cpython-37.pyc and /dev/null differ diff --git 
a/venv/lib/python3.7/site-packages/dateutil/__pycache__/relativedelta.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/relativedelta.cpython-37.pyc deleted file mode 100644 index 942d66b..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/relativedelta.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/__pycache__/rrule.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/rrule.cpython-37.pyc deleted file mode 100644 index af5e0a2..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/rrule.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/__pycache__/tzwin.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/tzwin.cpython-37.pyc deleted file mode 100644 index d3631f3..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/tzwin.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/__pycache__/utils.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/__pycache__/utils.cpython-37.pyc deleted file mode 100644 index 1fd7b17..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/__pycache__/utils.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/_common.py b/venv/lib/python3.7/site-packages/dateutil/_common.py deleted file mode 100644 index 4eb2659..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/_common.py +++ /dev/null @@ -1,43 +0,0 @@ -""" -Common code used in multiple modules. -""" - - -class weekday(object): - __slots__ = ["weekday", "n"] - - def __init__(self, weekday, n=None): - self.weekday = weekday - self.n = n - - def __call__(self, n): - if n == self.n: - return self - else: - return self.__class__(self.weekday, n) - - def __eq__(self, other): - try: - if self.weekday != other.weekday or self.n != other.n: - return False - except AttributeError: - return False - return True - - def __hash__(self): - return hash(( - self.weekday, - self.n, - )) - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday] - if not self.n: - return s - else: - return "%s(%+d)" % (s, self.n) - -# vim:ts=4:sw=4:et diff --git a/venv/lib/python3.7/site-packages/dateutil/_version.py b/venv/lib/python3.7/site-packages/dateutil/_version.py deleted file mode 100644 index eac1209..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/_version.py +++ /dev/null @@ -1,4 +0,0 @@ -# coding: utf-8 -# file generated by setuptools_scm -# don't change, don't track in version control -version = '2.8.1' diff --git a/venv/lib/python3.7/site-packages/dateutil/easter.py b/venv/lib/python3.7/site-packages/dateutil/easter.py deleted file mode 100644 index 53b7c78..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/easter.py +++ /dev/null @@ -1,89 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a generic easter computing method for any given year, using -Western, Orthodox or Julian algorithms. 
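 - -A short usage sketch (an illustrative doctest, not part of the original -module docstring; dates shown are for 2020):: - - >>> from dateutil.easter import easter, EASTER_ORTHODOX - >>> easter(2020) - datetime.date(2020, 4, 12) - >>> easter(2020, method=EASTER_ORTHODOX) - datetime.date(2020, 4, 19)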
-""" - -import datetime - -__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"] - -EASTER_JULIAN = 1 -EASTER_ORTHODOX = 2 -EASTER_WESTERN = 3 - - -def easter(year, method=EASTER_WESTERN): - """ - This method was ported from the work done by GM Arts, - on top of the algorithm by Claus Tondering, which was - based in part on the algorithm of Ouding (1940), as - quoted in "Explanatory Supplement to the Astronomical - Almanac", P. Kenneth Seidelmann, editor. - - This algorithm implements three different easter - calculation methods: - - 1 - Original calculation in Julian calendar, valid in - dates after 326 AD - 2 - Original method, with date converted to Gregorian - calendar, valid in years 1583 to 4099 - 3 - Revised method, in Gregorian calendar, valid in - years 1583 to 4099 as well - - These methods are represented by the constants: - - * ``EASTER_JULIAN = 1`` - * ``EASTER_ORTHODOX = 2`` - * ``EASTER_WESTERN = 3`` - - The default method is method 3. - - More about the algorithm may be found at: - - `GM Arts: Easter Algorithms `_ - - and - - `The Calendar FAQ: Easter `_ - - """ - - if not (1 <= method <= 3): - raise ValueError("invalid method") - - # g - Golden year - 1 - # c - Century - # h - (23 - Epact) mod 30 - # i - Number of days from March 21 to Paschal Full Moon - # j - Weekday for PFM (0=Sunday, etc) - # p - Number of days from March 21 to Sunday on or before PFM - # (-6 to 28 methods 1 & 3, to 56 for method 2) - # e - Extra days to add for method 2 (converting Julian - # date to Gregorian date) - - y = year - g = y % 19 - e = 0 - if method < 3: - # Old method - i = (19*g + 15) % 30 - j = (y + y//4 + i) % 7 - if method == 2: - # Extra dates to convert Julian to Gregorian date - e = 10 - if y > 1600: - e = e + y//100 - 16 - (y//100 - 16)//4 - else: - # New method - c = y//100 - h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30 - i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11)) - j = (y + y//4 + i + 2 - c + c//4) % 7 - - # p can be from -6 to 56 corresponding to dates 22 March to 23 May - # (later dates apply to method 2, although 23 May never actually occurs) - p = i - j + e - d = 1 + (p + 27 + (p + 6)//40) % 31 - m = 3 + (p + 26)//30 - return datetime.date(int(y), int(m), int(d)) diff --git a/venv/lib/python3.7/site-packages/dateutil/parser/__init__.py b/venv/lib/python3.7/site-packages/dateutil/parser/__init__.py deleted file mode 100644 index d174b0e..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/parser/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -# -*- coding: utf-8 -*- -from ._parser import parse, parser, parserinfo, ParserError -from ._parser import DEFAULTPARSER, DEFAULTTZPARSER -from ._parser import UnknownTimezoneWarning - -from ._parser import __doc__ - -from .isoparser import isoparser, isoparse - -__all__ = ['parse', 'parser', 'parserinfo', - 'isoparse', 'isoparser', - 'ParserError', - 'UnknownTimezoneWarning'] - - -### -# Deprecate portions of the private interface so that downstream code that -# is improperly relying on it is given *some* notice. 
- - -def __deprecated_private_func(f): - from functools import wraps - import warnings - - msg = ('{name} is a private function and may break without warning, ' - 'it will be moved and/or renamed in future versions.') - msg = msg.format(name=f.__name__) - - @wraps(f) - def deprecated_func(*args, **kwargs): - warnings.warn(msg, DeprecationWarning) - return f(*args, **kwargs) - - return deprecated_func - -def __deprecate_private_class(c): - import warnings - - msg = ('{name} is a private class and may break without warning, ' - 'it will be moved and/or renamed in future versions.') - msg = msg.format(name=c.__name__) - - class private_class(c): - __doc__ = c.__doc__ - - def __init__(self, *args, **kwargs): - warnings.warn(msg, DeprecationWarning) - super(private_class, self).__init__(*args, **kwargs) - - private_class.__name__ = c.__name__ - - return private_class - - -from ._parser import _timelex, _resultbase -from ._parser import _tzparser, _parsetz - -_timelex = __deprecate_private_class(_timelex) -_tzparser = __deprecate_private_class(_tzparser) -_resultbase = __deprecate_private_class(_resultbase) -_parsetz = __deprecated_private_func(_parsetz) diff --git a/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 3b82d79..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/_parser.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/_parser.cpython-37.pyc deleted file mode 100644 index 789d8f8..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/_parser.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/isoparser.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/isoparser.cpython-37.pyc deleted file mode 100644 index 010a1bb..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/parser/__pycache__/isoparser.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/parser/_parser.py b/venv/lib/python3.7/site-packages/dateutil/parser/_parser.py deleted file mode 100644 index 458aa6a..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/parser/_parser.py +++ /dev/null @@ -1,1609 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a generic date/time string parser which is able to parse -most known formats to represent a date and/or time. - -This module attempts to be forgiving with regards to unlikely input formats, -returning a datetime object even for dates which are ambiguous. If an element -of a date/time stamp is omitted, the following rules are applied: - -- If AM or PM is left unspecified, a 24-hour clock is assumed; however, an hour - on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is - specified. -- If a time zone is omitted, a timezone-naive datetime is returned. - -If any other elements are missing, they are taken from the -:class:`datetime.datetime` object passed to the parameter ``default``. If this -results in a day number exceeding the valid number of days per month, the -value falls back to the end of the month.
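 - -For example, the month-end fallback can be seen in this illustrative -doctest (not in the original docstring; the result depends on the -``default`` passed):: - - >>> from datetime import datetime - >>> from dateutil.parser import parse - >>> parse("Feb", default=datetime(2003, 10, 31)) - datetime.datetime(2003, 2, 28, 0, 0)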
- -Additional resources about date/time string formats can be found below: - -- `A summary of the international standard date and time notation - <https://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_ -- `W3C Date and Time Formats <https://www.w3.org/TR/NOTE-datetime>`_ -- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org/tools/time_formats.html>`_ -- `CPAN ParseDate module - <https://metacpan.org/pod/release/MUIR/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_ -- `Java SimpleDateFormat Class - <https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_ -""" -from __future__ import unicode_literals - -import datetime -import re -import string -import time -import warnings - -from calendar import monthrange -from io import StringIO - -import six -from six import integer_types, text_type - -from decimal import Decimal - -from warnings import warn - -from .. import relativedelta -from .. import tz - -__all__ = ["parse", "parserinfo", "ParserError"] - - -# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth -# making public and/or figuring out if there is something we can -# take off their plate. -class _timelex(object): - # Fractional seconds are sometimes split by a comma - _split_decimal = re.compile("([.,])") - - def __init__(self, instream): - if six.PY2: - # In Python 2, we can't duck type properly because unicode has - # a 'decode' function, and we'd be double-decoding - if isinstance(instream, (bytes, bytearray)): - instream = instream.decode() - else: - if getattr(instream, 'decode', None) is not None: - instream = instream.decode() - - if isinstance(instream, text_type): - instream = StringIO(instream) - elif getattr(instream, 'read', None) is None: - raise TypeError('Parser must be a string or character stream, not ' - '{itype}'.format(itype=instream.__class__.__name__)) - - self.instream = instream - self.charstack = [] - self.tokenstack = [] - self.eof = False - - def get_token(self): - """ - This function breaks the time string into lexical units (tokens), which - can be parsed by the parser. Lexical units are demarcated by changes in - the character set, so any continuous string of letters is considered - one unit, as is any continuous string of numbers. - - The main complication arises from the fact that dots ('.') can be used - both as separators (e.g. "Sep.20.2009") or decimal points (e.g. - "4:30:21.447"). As such, it is necessary to read the full context of - any dot-separated strings before breaking it into tokens; to that end, - this function maintains a "token stack", for when the ambiguous context - demands that multiple tokens be parsed at once. - """ - if self.tokenstack: - return self.tokenstack.pop(0) - - seenletters = False - token = None - state = None - - while not self.eof: - # We only realize that we've reached the end of a token when we - # find a character that's not part of the current token - since - # that character may be part of the next token, it's stored in the - # charstack. - if self.charstack: - nextchar = self.charstack.pop(0) - else: - nextchar = self.instream.read(1) - while nextchar == '\x00': - nextchar = self.instream.read(1) - - if not nextchar: - self.eof = True - break - elif not state: - # First character of the token - determines if we're starting - # to parse a word, a number or something else. - token = nextchar - if self.isword(nextchar): - state = 'a' - elif self.isnum(nextchar): - state = '0' - elif self.isspace(nextchar): - token = ' ' - break # emit token - else: - break # emit token - elif state == 'a': - # If we've already started reading a word, we keep reading - # letters until we find something that's not part of a word. - seenletters = True - if self.isword(nextchar): - token += nextchar - elif nextchar == '.': - token += nextchar - state = 'a.'
- else: - self.charstack.append(nextchar) - break # emit token - elif state == '0': - # If we've already started reading a number, we keep reading - # numbers until we find something that doesn't fit. - if self.isnum(nextchar): - token += nextchar - elif nextchar == '.' or (nextchar == ',' and len(token) >= 2): - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == 'a.': - # If we've seen some letters and a dot separator, continue - # parsing, and the tokens will be broken up later. - seenletters = True - if nextchar == '.' or self.isword(nextchar): - token += nextchar - elif self.isnum(nextchar) and token[-1] == '.': - token += nextchar - state = '0.' - else: - self.charstack.append(nextchar) - break # emit token - elif state == '0.': - # If we've seen at least one dot separator, keep going, we'll - # break up the tokens later. - if nextchar == '.' or self.isnum(nextchar): - token += nextchar - elif self.isword(nextchar) and token[-1] == '.': - token += nextchar - state = 'a.' - else: - self.charstack.append(nextchar) - break # emit token - - if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or - token[-1] in '.,')): - l = self._split_decimal.split(token) - token = l[0] - for tok in l[1:]: - if tok: - self.tokenstack.append(tok) - - if state == '0.' and token.count('.') == 0: - token = token.replace(',', '.') - - return token - - def __iter__(self): - return self - - def __next__(self): - token = self.get_token() - if token is None: - raise StopIteration - - return token - - def next(self): - return self.__next__() # Python 2.x support - - @classmethod - def split(cls, s): - return list(cls(s)) - - @classmethod - def isword(cls, nextchar): - """ Whether or not the next character is part of a word """ - return nextchar.isalpha() - - @classmethod - def isnum(cls, nextchar): - """ Whether the next character is part of a number """ - return nextchar.isdigit() - - @classmethod - def isspace(cls, nextchar): - """ Whether the next character is whitespace """ - return nextchar.isspace() - - -class _resultbase(object): - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def _repr(self, classname): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (classname, ", ".join(l)) - - def __len__(self): - return (sum(getattr(self, attr) is not None - for attr in self.__slots__)) - - def __repr__(self): - return self._repr(self.__class__.__name__) - - -class parserinfo(object): - """ - Class which handles what inputs are accepted. Subclass this to customize - the language and acceptable values for each parameter. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM - and YMD. Default is ``False``. - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken - to be the year, otherwise the last number is taken to be the year. - Default is ``False``. 
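 - -    An illustrative doctest (not in the original docstring; two-digit -    years resolve relative to the current date):: - -        >>> from dateutil.parser import parse, parserinfo -        >>> parse("01/05/09", parserinfo=parserinfo(dayfirst=True)) -        datetime.datetime(2009, 5, 1, 0, 0) -        >>> parse("01/05/09", parserinfo=parserinfo(yearfirst=True)) -        datetime.datetime(2001, 5, 9, 0, 0)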
- """ - - # m from a.m/p.m, t from ISO T separator - JUMP = [" ", ".", ",", ";", "-", "/", "'", - "at", "on", "and", "ad", "m", "t", "of", - "st", "nd", "rd", "th"] - - WEEKDAYS = [("Mon", "Monday"), - ("Tue", "Tuesday"), # TODO: "Tues" - ("Wed", "Wednesday"), - ("Thu", "Thursday"), # TODO: "Thurs" - ("Fri", "Friday"), - ("Sat", "Saturday"), - ("Sun", "Sunday")] - MONTHS = [("Jan", "January"), - ("Feb", "February"), # TODO: "Febr" - ("Mar", "March"), - ("Apr", "April"), - ("May", "May"), - ("Jun", "June"), - ("Jul", "July"), - ("Aug", "August"), - ("Sep", "Sept", "September"), - ("Oct", "October"), - ("Nov", "November"), - ("Dec", "December")] - HMS = [("h", "hour", "hours"), - ("m", "minute", "minutes"), - ("s", "second", "seconds")] - AMPM = [("am", "a"), - ("pm", "p")] - UTCZONE = ["UTC", "GMT", "Z", "z"] - PERTAIN = ["of"] - TZOFFSET = {} - # TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate", - # "Anno Domini", "Year of Our Lord"] - - def __init__(self, dayfirst=False, yearfirst=False): - self._jump = self._convert(self.JUMP) - self._weekdays = self._convert(self.WEEKDAYS) - self._months = self._convert(self.MONTHS) - self._hms = self._convert(self.HMS) - self._ampm = self._convert(self.AMPM) - self._utczone = self._convert(self.UTCZONE) - self._pertain = self._convert(self.PERTAIN) - - self.dayfirst = dayfirst - self.yearfirst = yearfirst - - self._year = time.localtime().tm_year - self._century = self._year // 100 * 100 - - def _convert(self, lst): - dct = {} - for i, v in enumerate(lst): - if isinstance(v, tuple): - for v in v: - dct[v.lower()] = i - else: - dct[v.lower()] = i - return dct - - def jump(self, name): - return name.lower() in self._jump - - def weekday(self, name): - try: - return self._weekdays[name.lower()] - except KeyError: - pass - return None - - def month(self, name): - try: - return self._months[name.lower()] + 1 - except KeyError: - pass - return None - - def hms(self, name): - try: - return self._hms[name.lower()] - except KeyError: - return None - - def ampm(self, name): - try: - return self._ampm[name.lower()] - except KeyError: - return None - - def pertain(self, name): - return name.lower() in self._pertain - - def utczone(self, name): - return name.lower() in self._utczone - - def tzoffset(self, name): - if name in self._utczone: - return 0 - - return self.TZOFFSET.get(name) - - def convertyear(self, year, century_specified=False): - """ - Converts two-digit years to year within [-50, 49] - range of self._year (current local time) - """ - - # Function contract is that the year is always positive - assert year >= 0 - - if year < 100 and not century_specified: - # assume current century to start - year += self._century - - if year >= self._year + 50: # if too far in future - year -= 100 - elif year < self._year - 50: # if too far in past - year += 100 - - return year - - def validate(self, res): - # move to info - if res.year is not None: - res.year = self.convertyear(res.year, res.century_specified) - - if ((res.tzoffset == 0 and not res.tzname) or - (res.tzname == 'Z' or res.tzname == 'z')): - res.tzname = "UTC" - res.tzoffset = 0 - elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname): - res.tzoffset = 0 - return True - - -class _ymd(list): - def __init__(self, *args, **kwargs): - super(self.__class__, self).__init__(*args, **kwargs) - self.century_specified = False - self.dstridx = None - self.mstridx = None - self.ystridx = None - - @property - def has_year(self): - return self.ystridx is not None - - @property - def has_month(self): - 
return self.mstridx is not None - - @property - def has_day(self): - return self.dstridx is not None - - def could_be_day(self, value): - if self.has_day: - return False - elif not self.has_month: - return 1 <= value <= 31 - elif not self.has_year: - # Be permissive, assume leap year - month = self[self.mstridx] - return 1 <= value <= monthrange(2000, month)[1] - else: - month = self[self.mstridx] - year = self[self.ystridx] - return 1 <= value <= monthrange(year, month)[1] - - def append(self, val, label=None): - if hasattr(val, '__len__'): - if val.isdigit() and len(val) > 2: - self.century_specified = True - if label not in [None, 'Y']: # pragma: no cover - raise ValueError(label) - label = 'Y' - elif val > 100: - self.century_specified = True - if label not in [None, 'Y']: # pragma: no cover - raise ValueError(label) - label = 'Y' - - super(self.__class__, self).append(int(val)) - - if label == 'M': - if self.has_month: - raise ValueError('Month is already set') - self.mstridx = len(self) - 1 - elif label == 'D': - if self.has_day: - raise ValueError('Day is already set') - self.dstridx = len(self) - 1 - elif label == 'Y': - if self.has_year: - raise ValueError('Year is already set') - self.ystridx = len(self) - 1 - - def _resolve_from_stridxs(self, strids): - """ - Try to resolve the identities of year/month/day elements using - ystridx, mstridx, and dstridx, if enough of these are specified. - """ - if len(self) == 3 and len(strids) == 2: - # we can back out the remaining stridx value - missing = [x for x in range(3) if x not in strids.values()] - key = [x for x in ['y', 'm', 'd'] if x not in strids] - assert len(missing) == len(key) == 1 - key = key[0] - val = missing[0] - strids[key] = val - - assert len(self) == len(strids) # otherwise this should not be called - out = {key: self[strids[key]] for key in strids} - return (out.get('y'), out.get('m'), out.get('d')) - - def resolve_ymd(self, yearfirst, dayfirst): - len_ymd = len(self) - year, month, day = (None, None, None) - - strids = (('y', self.ystridx), - ('m', self.mstridx), - ('d', self.dstridx)) - - strids = {key: val for key, val in strids if val is not None} - if (len(self) == len(strids) > 0 or - (len(self) == 3 and len(strids) == 2)): - return self._resolve_from_stridxs(strids) - - mstridx = self.mstridx - - if len_ymd > 3: - raise ValueError("More than three YMD values") - elif len_ymd == 1 or (mstridx is not None and len_ymd == 2): - # One member, or two members with a month string - if mstridx is not None: - month = self[mstridx] - # since mstridx is 0 or 1, self[mstridx-1] always - # looks up the other element - other = self[mstridx - 1] - else: - other = self[0] - - if len_ymd > 1 or mstridx is None: - if other > 31: - year = other - else: - day = other - - elif len_ymd == 2: - # Two members with numbers - if self[0] > 31: - # 99-01 - year, month = self - elif self[1] > 31: - # 01-99 - month, year = self - elif dayfirst and self[1] <= 12: - # 13-01 - day, month = self - else: - # 01-13 - month, day = self - - elif len_ymd == 3: - # Three members - if mstridx == 0: - if self[1] > 31: - # Apr-2003-25 - month, year, day = self - else: - month, day, year = self - elif mstridx == 1: - if self[0] > 31 or (yearfirst and self[2] <= 31): - # 99-Jan-01 - year, month, day = self - else: - # 01-Jan-01 - # Give precedence to day-first, since - # two-digit years are usually hand-written. - day, month, year = self - - elif mstridx == 2: - # Month string comes last (an unusual ordering)
- if self[1] > 31: - # 01-99-Jan - day, year, month = self - else: - # 99-01-Jan - year, day, month = self - - else: - if (self[0] > 31 or - self.ystridx == 0 or - (yearfirst and self[1] <= 12 and self[2] <= 31)): - # 99-01-01 - if dayfirst and self[2] <= 12: - year, day, month = self - else: - year, month, day = self - elif self[0] > 12 or (dayfirst and self[1] <= 12): - # 13-01-01 - day, month, year = self - else: - # 01-13-01 - month, day, year = self - - return year, month, day - - -class parser(object): - def __init__(self, info=None): - self.info = info or parserinfo() - - def parse(self, timestr, default=None, - ignoretz=False, tzinfos=None, **kwargs): - """ - Parse the date/time string into a :class:`datetime.datetime` object. - - :param timestr: - Any date/time string using the supported formats. - - :param default: - The default datetime object, if this is a datetime object and not - ``None``, elements specified in ``timestr`` replace elements in the - default object. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a - naive :class:`datetime.datetime` object is returned. - - :param tzinfos: - Additional time zone names / aliases which may be present in the - string. This argument maps time zone names (and optionally offsets - from those time zones) to time zones. This parameter can be a - dictionary with timezone aliases mapping time zone names to time - zones or a function taking two parameters (``tzname`` and - ``tzoffset``) and returning a time zone. - - The timezones to which the names are mapped can be an integer - offset from UTC in seconds or a :class:`tzinfo` object. - - .. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from dateutil.parser import parse - >>> from dateutil.tz import gettz - >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} - >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) - >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, - tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) - - This parameter is ignored if ``ignoretz`` is set. - - :param \\*\\*kwargs: - Keyword arguments as passed to ``_parse()``. - - :return: - Returns a :class:`datetime.datetime` object or, if the - ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the - first element being a :class:`datetime.datetime` object, the second - a tuple containing the fuzzy tokens. - - :raises ParserError: - Raised for invalid or unknown string format, if the provided - :class:`tzinfo` is not in a valid format, or if an invalid date - would be created. - - :raises TypeError: - Raised for non-string or character stream input. - - :raises OverflowError: - Raised if the parsed date exceeds the largest valid C integer on - your system. 
- """ - - if default is None: - default = datetime.datetime.now().replace(hour=0, minute=0, - second=0, microsecond=0) - - res, skipped_tokens = self._parse(timestr, **kwargs) - - if res is None: - raise ParserError("Unknown string format: %s", timestr) - - if len(res) == 0: - raise ParserError("String does not contain a date: %s", timestr) - - try: - ret = self._build_naive(res, default) - except ValueError as e: - six.raise_from(ParserError(e.args[0] + ": %s", timestr), e) - - if not ignoretz: - ret = self._build_tzaware(ret, res, tzinfos) - - if kwargs.get('fuzzy_with_tokens', False): - return ret, skipped_tokens - else: - return ret - - class _result(_resultbase): - __slots__ = ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond", - "tzname", "tzoffset", "ampm","any_unused_tokens"] - - def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False, - fuzzy_with_tokens=False): - """ - Private method which performs the heavy lifting of parsing, called from - ``parse()``, which passes on its ``kwargs`` to this function. - - :param timestr: - The string to parse. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM - and YMD. If set to ``None``, this value is retrieved from the - current :class:`parserinfo` object (which itself defaults to - ``False``). - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken - to be the year, otherwise the last number is taken to be the year. - If this is set to ``None``, the value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param fuzzy: - Whether to allow fuzzy parsing, allowing for string like "Today is - January 1, 2047 at 8:21:00AM". - - :param fuzzy_with_tokens: - If ``True``, ``fuzzy`` is automatically set to True, and the parser - will return a tuple where the first element is the parsed - :class:`datetime.datetime` datetimestamp and the second element is - a tuple containing the portions of the string which were ignored: - - .. 
doctest:: - - >>> from dateutil.parser import parse - >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) - (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) - - """ - if fuzzy_with_tokens: - fuzzy = True - - info = self.info - - if dayfirst is None: - dayfirst = info.dayfirst - - if yearfirst is None: - yearfirst = info.yearfirst - - res = self._result() - l = _timelex.split(timestr) # Splits the timestr into tokens - - skipped_idxs = [] - - # year/month/day list - ymd = _ymd() - - len_l = len(l) - i = 0 - try: - while i < len_l: - - # Check if it's a number - value_repr = l[i] - try: - value = float(value_repr) - except ValueError: - value = None - - if value is not None: - # Numeric token - i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy) - - # Check weekday - elif info.weekday(l[i]) is not None: - value = info.weekday(l[i]) - res.weekday = value - - # Check month name - elif info.month(l[i]) is not None: - value = info.month(l[i]) - ymd.append(value, 'M') - - if i + 1 < len_l: - if l[i + 1] in ('-', '/'): - # Jan-01[-99] - sep = l[i + 1] - ymd.append(l[i + 2]) - - if i + 3 < len_l and l[i + 3] == sep: - # Jan-01-99 - ymd.append(l[i + 4]) - i += 2 - - i += 2 - - elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and - info.pertain(l[i + 2])): - # Jan of 01 - # In this case, 01 is clearly year - if l[i + 4].isdigit(): - # Convert it here to become unambiguous - value = int(l[i + 4]) - year = str(info.convertyear(value)) - ymd.append(year, 'Y') - else: - # Wrong guess - pass - # TODO: not hit in tests - i += 4 - - # Check am/pm - elif info.ampm(l[i]) is not None: - value = info.ampm(l[i]) - val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy) - - if val_is_ampm: - res.hour = self._adjust_ampm(res.hour, value) - res.ampm = value - - elif fuzzy: - skipped_idxs.append(i) - - # Check for a timezone name - elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]): - res.tzname = l[i] - res.tzoffset = info.tzoffset(res.tzname) - - # Check for something like GMT+3, or BRST+3. Notice - # that it doesn't mean "I am 3 hours after GMT", but - # "my time +3 is GMT". If found, we reverse the - # logic so that timezone parsing code will get it - # right. - if i + 1 < len_l and l[i + 1] in ('+', '-'): - l[i + 1] = ('+', '-')[l[i + 1] == '+'] - res.tzoffset = None - if info.utczone(res.tzname): - # With something like GMT+3, the timezone - # is *not* GMT. - res.tzname = None - - # Check for a numbered timezone - elif res.hour is not None and l[i] in ('+', '-'): - signal = (-1, 1)[l[i] == '+'] - len_li = len(l[i + 1]) - - # TODO: check that l[i + 1] is integer? - if len_li == 4: - # -0300 - hour_offset = int(l[i + 1][:2]) - min_offset = int(l[i + 1][2:]) - elif i + 2 < len_l and l[i + 2] == ':': - # -03:00 - hour_offset = int(l[i + 1]) - min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like? 
- i += 2 - elif len_li <= 2: - # -[0]3 - hour_offset = int(l[i + 1][:2]) - min_offset = 0 - else: - raise ValueError(timestr) - - res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60) - - # Look for a timezone name between parenthesis - if (i + 5 < len_l and - info.jump(l[i + 2]) and l[i + 3] == '(' and - l[i + 5] == ')' and - 3 <= len(l[i + 4]) and - self._could_be_tzname(res.hour, res.tzname, - None, l[i + 4])): - # -0300 (BRST) - res.tzname = l[i + 4] - i += 4 - - i += 1 - - # Check jumps - elif not (info.jump(l[i]) or fuzzy): - raise ValueError(timestr) - - else: - skipped_idxs.append(i) - i += 1 - - # Process year/month/day - year, month, day = ymd.resolve_ymd(yearfirst, dayfirst) - - res.century_specified = ymd.century_specified - res.year = year - res.month = month - res.day = day - - except (IndexError, ValueError): - return None, None - - if not info.validate(res): - return None, None - - if fuzzy_with_tokens: - skipped_tokens = self._recombine_skipped(l, skipped_idxs) - return res, tuple(skipped_tokens) - else: - return res, None - - def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy): - # Token is a number - value_repr = tokens[idx] - try: - value = self._to_decimal(value_repr) - except Exception as e: - six.raise_from(ValueError('Unknown numeric token'), e) - - len_li = len(value_repr) - - len_l = len(tokens) - - if (len(ymd) == 3 and len_li in (2, 4) and - res.hour is None and - (idx + 1 >= len_l or - (tokens[idx + 1] != ':' and - info.hms(tokens[idx + 1]) is None))): - # 19990101T23[59] - s = tokens[idx] - res.hour = int(s[:2]) - - if len_li == 4: - res.minute = int(s[2:]) - - elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6): - # YYMMDD or HHMMSS[.ss] - s = tokens[idx] - - if not ymd and '.' not in tokens[idx]: - ymd.append(s[:2]) - ymd.append(s[2:4]) - ymd.append(s[4:]) - else: - # 19990101T235959[.59] - - # TODO: Check if res attributes already set. - res.hour = int(s[:2]) - res.minute = int(s[2:4]) - res.second, res.microsecond = self._parsems(s[4:]) - - elif len_li in (8, 12, 14): - # YYYYMMDD - s = tokens[idx] - ymd.append(s[:4], 'Y') - ymd.append(s[4:6]) - ymd.append(s[6:8]) - - if len_li > 8: - res.hour = int(s[8:10]) - res.minute = int(s[10:12]) - - if len_li > 12: - res.second = int(s[12:]) - - elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None: - # HH[ ]h or MM[ ]m or SS[.ss][ ]s - hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True) - (idx, hms) = self._parse_hms(idx, tokens, info, hms_idx) - if hms is not None: - # TODO: checking that hour/minute/second are not - # already set? - self._assign_hms(res, value_repr, hms) - - elif idx + 2 < len_l and tokens[idx + 1] == ':': - # HH:MM[:SS[.ss]] - res.hour = int(value) - value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this? 
- (res.minute, res.second) = self._parse_min_sec(value) - - if idx + 4 < len_l and tokens[idx + 3] == ':': - res.second, res.microsecond = self._parsems(tokens[idx + 4]) - - idx += 2 - - idx += 2 - - elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'): - sep = tokens[idx + 1] - ymd.append(value_repr) - - if idx + 2 < len_l and not info.jump(tokens[idx + 2]): - if tokens[idx + 2].isdigit(): - # 01-01[-01] - ymd.append(tokens[idx + 2]) - else: - # 01-Jan[-01] - value = info.month(tokens[idx + 2]) - - if value is not None: - ymd.append(value, 'M') - else: - raise ValueError() - - if idx + 3 < len_l and tokens[idx + 3] == sep: - # We have three members - value = info.month(tokens[idx + 4]) - - if value is not None: - ymd.append(value, 'M') - else: - ymd.append(tokens[idx + 4]) - idx += 2 - - idx += 1 - idx += 1 - - elif idx + 1 >= len_l or info.jump(tokens[idx + 1]): - if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None: - # 12 am - hour = int(value) - res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2])) - idx += 1 - else: - # Year, month or day - ymd.append(value) - idx += 1 - - elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24): - # 12am - hour = int(value) - res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1])) - idx += 1 - - elif ymd.could_be_day(value): - ymd.append(value) - - elif not fuzzy: - raise ValueError() - - return idx - - def _find_hms_idx(self, idx, tokens, info, allow_jump): - len_l = len(tokens) - - if idx+1 < len_l and info.hms(tokens[idx+1]) is not None: - # There is an "h", "m", or "s" label following this token. We - # assign the upcoming label to the current token, - # e.g. the "12" in "12h". - hms_idx = idx + 1 - - elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and - info.hms(tokens[idx+2]) is not None): - # There is a space and then an "h", "m", or "s" label. - # e.g. the "12" in "12 h" - hms_idx = idx + 2 - - elif idx > 0 and info.hms(tokens[idx-1]) is not None: - # There is an "h", "m", or "s" preceding this token. Since neither - # of the previous cases was hit, there is no label following this - # token, so we use the previous label. - # e.g. the "04" in "12h04" - hms_idx = idx-1 - - elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and - info.hms(tokens[idx-2]) is not None): - # If we are looking at the final token, we allow for a - # backward-looking check to skip over a space. - # TODO: Are we sure this is the right condition here? - hms_idx = idx - 2 - - else: - hms_idx = None - - return hms_idx - - def _assign_hms(self, res, value_repr, hms): - # See GH issue #427, fixing float rounding - value = self._to_decimal(value_repr) - - if hms == 0: - # Hour - res.hour = int(value) - if value % 1: - res.minute = int(60*(value % 1)) - - elif hms == 1: - (res.minute, res.second) = self._parse_min_sec(value) - - elif hms == 2: - (res.second, res.microsecond) = self._parsems(value_repr) - - def _could_be_tzname(self, hour, tzname, tzoffset, token): - return (hour is not None and - tzname is None and - tzoffset is None and - len(token) <= 5 and - (all(x in string.ascii_uppercase for x in token) - or token in self.info.UTCZONE)) - - def _ampm_valid(self, hour, ampm, fuzzy): - """ - For fuzzy parsing, 'a' or 'am' (both valid English words) - may erroneously trigger the AM/PM flag. Deal with that - here. - """ - val_is_ampm = True - - # If there's already an AM/PM flag, this one isn't one.
- if fuzzy and ampm is not None: - val_is_ampm = False - - # If AM/PM is found and hour is not, raise a ValueError - if hour is None: - if fuzzy: - val_is_ampm = False - else: - raise ValueError('No hour specified with AM or PM flag.') - elif not 0 <= hour <= 12: - # If AM/PM is found, it's a 12 hour clock, so raise - # an error for invalid range - if fuzzy: - val_is_ampm = False - else: - raise ValueError('Invalid hour specified for 12-hour clock.') - - return val_is_ampm - - def _adjust_ampm(self, hour, ampm): - if hour < 12 and ampm == 1: - hour += 12 - elif hour == 12 and ampm == 0: - hour = 0 - return hour - - def _parse_min_sec(self, value): - # TODO: Every usage of this function sets res.second to the return - # value. Are there any cases where second will be returned as None and - # we *don't* want to set res.second = None? - minute = int(value) - second = None - - sec_remainder = value % 1 - if sec_remainder: - second = int(60 * sec_remainder) - return (minute, second) - - def _parse_hms(self, idx, tokens, info, hms_idx): - # TODO: Is this going to admit a lot of false-positives for when we - # just happen to have digits and "h", "m" or "s" characters in non-date - # text? I guess hex hashes won't have that problem, but there's plenty - # of random junk out there. - if hms_idx is None: - hms = None - new_idx = idx - elif hms_idx > idx: - hms = info.hms(tokens[hms_idx]) - new_idx = hms_idx - else: - # Looking backwards, increment one. - hms = info.hms(tokens[hms_idx]) + 1 - new_idx = idx - - return (new_idx, hms) - - # ------------------------------------------------------------------ - # Handling for individual tokens. These are kept as methods instead - # of functions for the sake of customizability via subclassing. - - def _parsems(self, value): - """Parse a I[.F] seconds value into (seconds, microseconds).""" - if "." not in value: - return int(value), 0 - else: - i, f = value.split(".") - return int(i), int(f.ljust(6, "0")[:6]) - - def _to_decimal(self, val): - try: - decimal_value = Decimal(val) - # See GH 662, edge case, infinite value should not be converted - # via `_to_decimal` - if not decimal_value.is_finite(): - raise ValueError("Converted decimal value is infinite or NaN") - except Exception as e: - msg = "Could not convert %s to decimal" % val - six.raise_from(ValueError(msg), e) - else: - return decimal_value - - # ------------------------------------------------------------------ - # Post-Parsing construction of datetime output. These are kept as - # methods instead of functions for the sake of customizability via - # subclassing. 
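 -    # As a rough illustration of that hook (not part of the original -    # source), a subclass can override one of these methods without -    # touching the rest of the pipeline, e.g. dropping fractional -    # seconds entirely: -    # -    #     class WholeSecondParser(parser): -    #         def _parsems(self, value): -    #             return int(float(value)), 0 -    # -    #     WholeSecondParser().parse("12:30:45.750")  # second=45, microsecond=0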
- - def _build_tzinfo(self, tzinfos, tzname, tzoffset): - if callable(tzinfos): - tzdata = tzinfos(tzname, tzoffset) - else: - tzdata = tzinfos.get(tzname) - # handle the case where tzinfos is passed a value that maps to None, - # e.g. tzinfos = {'BRST': None} - if isinstance(tzdata, datetime.tzinfo) or tzdata is None: - tzinfo = tzdata - elif isinstance(tzdata, text_type): - tzinfo = tz.tzstr(tzdata) - elif isinstance(tzdata, integer_types): - tzinfo = tz.tzoffset(tzname, tzdata) - else: - raise TypeError("Offset must be tzinfo subclass, tz string, " - "or int offset.") - return tzinfo - - def _build_tzaware(self, naive, res, tzinfos): - if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)): - tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset) - aware = naive.replace(tzinfo=tzinfo) - aware = self._assign_tzname(aware, res.tzname) - - elif res.tzname and res.tzname in time.tzname: - aware = naive.replace(tzinfo=tz.tzlocal()) - - # Handle ambiguous local datetime - aware = self._assign_tzname(aware, res.tzname) - - # This is mostly relevant for winter GMT zones parsed in the UK - if (aware.tzname() != res.tzname and - res.tzname in self.info.UTCZONE): - aware = aware.replace(tzinfo=tz.UTC) - - elif res.tzoffset == 0: - aware = naive.replace(tzinfo=tz.UTC) - - elif res.tzoffset: - aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset)) - - elif not res.tzname and not res.tzoffset: - # i.e. no timezone information was found. - aware = naive - - elif res.tzname: - # tz-like string was parsed but we don't know what to do - # with it - warnings.warn("tzname {tzname} identified but not understood. " - "Pass `tzinfos` argument in order to correctly " - "return a timezone-aware datetime. In a future " - "version, this will raise an " - "exception.".format(tzname=res.tzname), - category=UnknownTimezoneWarning) - aware = naive - - return aware - - def _build_naive(self, res, default): - repl = {} - for attr in ("year", "month", "day", "hour", - "minute", "second", "microsecond"): - value = getattr(res, attr) - if value is not None: - repl[attr] = value - - if 'day' not in repl: - # If the default day exceeds the last day of the month, fall back - # to the end of the month. - cyear = default.year if res.year is None else res.year - cmonth = default.month if res.month is None else res.month - cday = default.day if res.day is None else res.day - - if cday > monthrange(cyear, cmonth)[1]: - repl['day'] = monthrange(cyear, cmonth)[1] - - naive = default.replace(**repl) - - if res.weekday is not None and not res.day: - naive = naive + relativedelta.relativedelta(weekday=res.weekday) - - return naive - - def _assign_tzname(self, dt, tzname): - if dt.tzname() != tzname: - new_dt = tz.enfold(dt, fold=1) - if new_dt.tzname() == tzname: - return new_dt - - return dt - - def _recombine_skipped(self, tokens, skipped_idxs): - """ - >>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"] - >>> skipped_idxs = [0, 1, 2, 5] - >>> _recombine_skipped(tokens, skipped_idxs) - ["foo bar", "baz"] - """ - skipped_tokens = [] - for i, idx in enumerate(sorted(skipped_idxs)): - if i > 0 and idx - 1 == skipped_idxs[i - 1]: - skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx] - else: - skipped_tokens.append(tokens[idx]) - - return skipped_tokens - - -DEFAULTPARSER = parser() - - -def parse(timestr, parserinfo=None, **kwargs): - """ - - Parse a string in one of the supported formats, using the - ``parserinfo`` parameters. - - :param timestr: - A string containing a date/time stamp.
- - :param parserinfo: - A :class:`parserinfo` object containing parameters for the parser. - If ``None``, the default arguments to the :class:`parserinfo` - constructor are used. - - The ``**kwargs`` parameter takes the following keyword arguments: - - :param default: - The default datetime object, if this is a datetime object and not - ``None``, elements specified in ``timestr`` replace elements in the - default object. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a naive - :class:`datetime` object is returned. - - :param tzinfos: - Additional time zone names / aliases which may be present in the - string. This argument maps time zone names (and optionally offsets - from those time zones) to time zones. This parameter can be a - dictionary with timezone aliases mapping time zone names to time - zones or a function taking two parameters (``tzname`` and - ``tzoffset``) and returning a time zone. - - The timezones to which the names are mapped can be an integer - offset from UTC in seconds or a :class:`tzinfo` object. - - .. doctest:: - :options: +NORMALIZE_WHITESPACE - - >>> from dateutil.parser import parse - >>> from dateutil.tz import gettz - >>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")} - >>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200)) - >>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos) - datetime.datetime(2012, 1, 19, 17, 21, - tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago')) - - This parameter is ignored if ``ignoretz`` is set. - - :param dayfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the day (``True``) or month (``False``). If - ``yearfirst`` is set to ``True``, this distinguishes between YDM and - YMD. If set to ``None``, this value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param yearfirst: - Whether to interpret the first value in an ambiguous 3-integer date - (e.g. 01/05/09) as the year. If ``True``, the first number is taken to - be the year, otherwise the last number is taken to be the year. If - this is set to ``None``, the value is retrieved from the current - :class:`parserinfo` object (which itself defaults to ``False``). - - :param fuzzy: - Whether to allow fuzzy parsing, allowing for strings like "Today is - January 1, 2047 at 8:21:00AM". - - :param fuzzy_with_tokens: - If ``True``, ``fuzzy`` is automatically set to True, and the parser - will return a tuple where the first element is the parsed - :class:`datetime.datetime` and the second element is - a tuple containing the portions of the string which were ignored: - - .. doctest:: - - >>> from dateutil.parser import parse - >>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True) - (datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at ')) - - :return: - Returns a :class:`datetime.datetime` object or, if the - ``fuzzy_with_tokens`` option is ``True``, returns a tuple, the - first element being a :class:`datetime.datetime` object, the second - a tuple containing the fuzzy tokens. - - :raises ValueError: - Raised for invalid or unknown string format, if the provided - :class:`tzinfo` is not in a valid format, or if an invalid date - would be created. - - :raises OverflowError: - Raised if the parsed date exceeds the largest valid C integer on - your system.
- """ - if parserinfo: - return parser(parserinfo).parse(timestr, **kwargs) - else: - return DEFAULTPARSER.parse(timestr, **kwargs) - - -class _tzparser(object): - - class _result(_resultbase): - - __slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset", - "start", "end"] - - class _attr(_resultbase): - __slots__ = ["month", "week", "weekday", - "yday", "jyday", "day", "time"] - - def __repr__(self): - return self._repr("") - - def __init__(self): - _resultbase.__init__(self) - self.start = self._attr() - self.end = self._attr() - - def parse(self, tzstr): - res = self._result() - l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x] - used_idxs = list() - try: - - len_l = len(l) - - i = 0 - while i < len_l: - # BRST+3[BRDT[+2]] - j = i - while j < len_l and not [x for x in l[j] - if x in "0123456789:,-+"]: - j += 1 - if j != i: - if not res.stdabbr: - offattr = "stdoffset" - res.stdabbr = "".join(l[i:j]) - else: - offattr = "dstoffset" - res.dstabbr = "".join(l[i:j]) - - for ii in range(j): - used_idxs.append(ii) - i = j - if (i < len_l and (l[i] in ('+', '-') or l[i][0] in - "0123456789")): - if l[i] in ('+', '-'): - # Yes, that's right. See the TZ variable - # documentation. - signal = (1, -1)[l[i] == '+'] - used_idxs.append(i) - i += 1 - else: - signal = -1 - len_li = len(l[i]) - if len_li == 4: - # -0300 - setattr(res, offattr, (int(l[i][:2]) * 3600 + - int(l[i][2:]) * 60) * signal) - elif i + 1 < len_l and l[i + 1] == ':': - # -03:00 - setattr(res, offattr, - (int(l[i]) * 3600 + - int(l[i + 2]) * 60) * signal) - used_idxs.append(i) - i += 2 - elif len_li <= 2: - # -[0]3 - setattr(res, offattr, - int(l[i][:2]) * 3600 * signal) - else: - return None - used_idxs.append(i) - i += 1 - if res.dstabbr: - break - else: - break - - - if i < len_l: - for j in range(i, len_l): - if l[j] == ';': - l[j] = ',' - - assert l[i] == ',' - - i += 1 - - if i >= len_l: - pass - elif (8 <= l.count(',') <= 9 and - not [y for x in l[i:] if x != ',' - for y in x if y not in "0123456789+-"]): - # GMT0BST,3,0,30,3600,10,0,26,7200[,3600] - for x in (res.start, res.end): - x.month = int(l[i]) - used_idxs.append(i) - i += 2 - if l[i] == '-': - value = int(l[i + 1]) * -1 - used_idxs.append(i) - i += 1 - else: - value = int(l[i]) - used_idxs.append(i) - i += 2 - if value: - x.week = value - x.weekday = (int(l[i]) - 1) % 7 - else: - x.day = int(l[i]) - used_idxs.append(i) - i += 2 - x.time = int(l[i]) - used_idxs.append(i) - i += 2 - if i < len_l: - if l[i] in ('-', '+'): - signal = (-1, 1)[l[i] == "+"] - used_idxs.append(i) - i += 1 - else: - signal = 1 - used_idxs.append(i) - res.dstoffset = (res.stdoffset + int(l[i]) * signal) - - # This was a made-up format that is not in normal use - warn(('Parsed time zone "%s"' % tzstr) + - 'is in a non-standard dateutil-specific format, which ' + - 'is now deprecated; support for parsing this format ' + - 'will be removed in future versions. 
It is recommended ' + - 'that you switch to a standard format like the GNU ' + - 'TZ variable format.', tz.DeprecatedTzFormatWarning) - elif (l.count(',') == 2 and l[i:].count('/') <= 2 and - not [y for x in l[i:] if x not in (',', '/', 'J', 'M', - '.', '-', ':') - for y in x if y not in "0123456789"]): - for x in (res.start, res.end): - if l[i] == 'J': - # non-leap year day (1 based) - used_idxs.append(i) - i += 1 - x.jyday = int(l[i]) - elif l[i] == 'M': - # month[-.]week[-.]weekday - used_idxs.append(i) - i += 1 - x.month = int(l[i]) - used_idxs.append(i) - i += 1 - assert l[i] in ('-', '.') - used_idxs.append(i) - i += 1 - x.week = int(l[i]) - if x.week == 5: - x.week = -1 - used_idxs.append(i) - i += 1 - assert l[i] in ('-', '.') - used_idxs.append(i) - i += 1 - x.weekday = (int(l[i]) - 1) % 7 - else: - # year day (zero based) - x.yday = int(l[i]) + 1 - - used_idxs.append(i) - i += 1 - - if i < len_l and l[i] == '/': - used_idxs.append(i) - i += 1 - # start time - len_li = len(l[i]) - if len_li == 4: - # -0300 - x.time = (int(l[i][:2]) * 3600 + - int(l[i][2:]) * 60) - elif i + 1 < len_l and l[i + 1] == ':': - # -03:00 - x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60 - used_idxs.append(i) - i += 2 - if i + 1 < len_l and l[i + 1] == ':': - used_idxs.append(i) - i += 2 - x.time += int(l[i]) - elif len_li <= 2: - # -[0]3 - x.time = (int(l[i][:2]) * 3600) - else: - return None - used_idxs.append(i) - i += 1 - - assert i == len_l or l[i] == ',' - - i += 1 - - assert i >= len_l - - except (IndexError, ValueError, AssertionError): - return None - - unused_idxs = set(range(len_l)).difference(used_idxs) - res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"}) - return res - - -DEFAULTTZPARSER = _tzparser() - - -def _parsetz(tzstr): - return DEFAULTTZPARSER.parse(tzstr) - - -class ParserError(ValueError): - """Error class for representing failure to parse a datetime string.""" - def __str__(self): - try: - return self.args[0] % self.args[1:] - except (TypeError, IndexError): - return super(ParserError, self).__str__() - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, str(self)) - - -class UnknownTimezoneWarning(RuntimeWarning): - """Raised when the parser finds a timezone it cannot parse into a tzinfo""" -# vim:ts=4:sw=4:et diff --git a/venv/lib/python3.7/site-packages/dateutil/parser/isoparser.py b/venv/lib/python3.7/site-packages/dateutil/parser/isoparser.py deleted file mode 100644 index 48f86a3..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/parser/isoparser.py +++ /dev/null @@ -1,411 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers a parser for ISO-8601 strings - -It is intended to support all valid date, time and datetime formats per the -ISO-8601 specification. 
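 - -A brief usage sketch (not part of the original docstring):: - -    >>> from dateutil.parser import isoparse -    >>> isoparse("2014-01-01T06:00:00Z") -    datetime.datetime(2014, 1, 1, 6, 0, tzinfo=tzutc())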
- -.. versionadded:: 2.7.0 -""" -from datetime import datetime, timedelta, time, date -import calendar -from dateutil import tz - -from functools import wraps - -import re -import six - -__all__ = ["isoparse", "isoparser"] - - -def _takes_ascii(f): - @wraps(f) - def func(self, str_in, *args, **kwargs): - # If it's a stream, read the whole thing - str_in = getattr(str_in, 'read', lambda: str_in)() - - # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII - if isinstance(str_in, six.text_type): - # ASCII is the same in UTF-8 - try: - str_in = str_in.encode('ascii') - except UnicodeEncodeError as e: - msg = 'ISO-8601 strings should contain only ASCII characters' - six.raise_from(ValueError(msg), e) - - return f(self, str_in, *args, **kwargs) - - return func - - -class isoparser(object): - def __init__(self, sep=None): - """ - :param sep: - A single character that separates date and time portions. If - ``None``, the parser will accept any single character. - For strict ISO-8601 adherence, pass ``'T'``. - """ - if sep is not None: - if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'): - raise ValueError('Separator must be a single, non-numeric ' + - 'ASCII character') - - sep = sep.encode('ascii') - - self._sep = sep - - @_takes_ascii - def isoparse(self, dt_str): - """ - Parse an ISO-8601 datetime string into a :class:`datetime.datetime`. - - An ISO-8601 datetime string consists of a date portion, followed - optionally by a time portion - the date and time portions are separated - by a single character separator, which is ``T`` in the official - standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be - combined with a time portion. - - Supported date formats are: - - Common: - - - ``YYYY`` - - ``YYYY-MM`` or ``YYYYMM`` - - ``YYYY-MM-DD`` or ``YYYYMMDD`` - - Uncommon: - - - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0) - - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day - - The ISO week and day numbering follows the same logic as - :func:`datetime.date.isocalendar`. - - Supported time formats are: - - - ``hh`` - - ``hh:mm`` or ``hhmm`` - - ``hh:mm:ss`` or ``hhmmss`` - - ``hh:mm:ss.ssssss`` (Up to 6 sub-second digits) - - Midnight is a special case for `hh`, as the standard supports both - 00:00 and 24:00 as a representation. The decimal separator can be - either a dot or a comma. - - - .. caution:: - - Support for fractional components other than seconds is part of the - ISO-8601 standard, but is not currently implemented in this parser. - - Supported time zone offset formats are: - - - `Z` (UTC) - - `±HH:MM` - - `±HHMM` - - `±HH` - - Offsets will be represented as :class:`dateutil.tz.tzoffset` objects, - with the exception of UTC, which will be represented as - :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such - as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`. - - :param dt_str: - A string or stream containing only an ISO-8601 datetime string - - :return: - Returns a :class:`datetime.datetime` representing the string. - Unspecified components default to their lowest value. - - .. warning:: - - As of version 2.7.0, the strictness of the parser should not be - considered a stable part of the contract. Any valid ISO-8601 string - that parses correctly with the default settings will continue to - parse correctly in future versions, but invalid strings that - currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not - guaranteed to continue failing in future versions if they encode - a valid date. - - ..
versionadded:: 2.7.0 - """ - components, pos = self._parse_isodate(dt_str) - - if len(dt_str) > pos: - if self._sep is None or dt_str[pos:pos + 1] == self._sep: - components += self._parse_isotime(dt_str[pos + 1:]) - else: - raise ValueError('String contains unknown ISO components') - - if len(components) > 3 and components[3] == 24: - components[3] = 0 - return datetime(*components) + timedelta(days=1) - - return datetime(*components) - - @_takes_ascii - def parse_isodate(self, datestr): - """ - Parse the date portion of an ISO string. - - :param datestr: - The string portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.date` object - """ - components, pos = self._parse_isodate(datestr) - if pos < len(datestr): - raise ValueError('String contains unknown ISO ' + - 'components: {}'.format(datestr)) - return date(*components) - - @_takes_ascii - def parse_isotime(self, timestr): - """ - Parse the time portion of an ISO string. - - :param timestr: - The time portion of an ISO string, without a separator - - :return: - Returns a :class:`datetime.time` object - """ - components = self._parse_isotime(timestr) - if components[0] == 24: - components[0] = 0 - return time(*components) - - @_takes_ascii - def parse_tzstr(self, tzstr, zero_as_utc=True): - """ - Parse a valid ISO time zone string. - - See :func:`isoparser.isoparse` for details on supported formats. - - :param tzstr: - A string representing an ISO time zone offset - - :param zero_as_utc: - Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones - - :return: - Returns :class:`dateutil.tz.tzoffset` for offsets and - :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is - specified) offsets equivalent to UTC. - """ - return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc) - - # Constants - _DATE_SEP = b'-' - _TIME_SEP = b':' - _FRACTION_REGEX = re.compile(b'[\\.,]([0-9]+)') - - def _parse_isodate(self, dt_str): - try: - return self._parse_isodate_common(dt_str) - except ValueError: - return self._parse_isodate_uncommon(dt_str) - - def _parse_isodate_common(self, dt_str): - len_str = len(dt_str) - components = [1, 1, 1] - - if len_str < 4: - raise ValueError('ISO string too short') - - # Year - components[0] = int(dt_str[0:4]) - pos = 4 - if pos >= len_str: - return components, pos - - has_sep = dt_str[pos:pos + 1] == self._DATE_SEP - if has_sep: - pos += 1 - - # Month - if len_str - pos < 2: - raise ValueError('Invalid common month') - - components[1] = int(dt_str[pos:pos + 2]) - pos += 2 - - if pos >= len_str: - if has_sep: - return components, pos - else: - raise ValueError('Invalid ISO format') - - if has_sep: - if dt_str[pos:pos + 1] != self._DATE_SEP: - raise ValueError('Invalid separator in ISO string') - pos += 1 - - # Day - if len_str - pos < 2: - raise ValueError('Invalid common day') - components[2] = int(dt_str[pos:pos + 2]) - return components, pos + 2 - - def _parse_isodate_uncommon(self, dt_str): - if len(dt_str) < 4: - raise ValueError('ISO string too short') - - # All ISO formats start with the year - year = int(dt_str[0:4]) - - has_sep = dt_str[4:5] == self._DATE_SEP - - pos = 4 + has_sep # Skip '-' if it's there - if dt_str[pos:pos + 1] == b'W': - # YYYY-?Www-?D? 
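
The isoparser module removed here exposes `isoparse` plus an `isoparser` class with `parse_isodate`, `parse_isotime`, and `parse_tzstr`. A minimal sketch of that public API (inputs are illustrative; the commented results follow from the docstrings and parsing code above):

```
from dateutil.parser import isoparse, isoparser

isoparse('2018-04-09T13:37:00')   # -> datetime.datetime(2018, 4, 9, 13, 37)
isoparse('2018-W15-1')            # ISO week date -> datetime.datetime(2018, 4, 9, 0, 0)
isoparse('2018-04-09T24:00')      # 24:00 is midnight -> datetime.datetime(2018, 4, 10, 0, 0)

p = isoparser(sep='T')            # strict: only 'T' may separate date and time
p.parse_isodate('2018-04-09')     # -> datetime.date(2018, 4, 9)
p.parse_tzstr('+00:00')           # -> tzutc(), since zero_as_utc defaults to True
```
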
- pos += 1 - weekno = int(dt_str[pos:pos + 2]) - pos += 2 - - dayno = 1 - if len(dt_str) > pos: - if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep: - raise ValueError('Inconsistent use of dash separator') - - pos += has_sep - - dayno = int(dt_str[pos:pos + 1]) - pos += 1 - - base_date = self._calculate_weekdate(year, weekno, dayno) - else: - # YYYYDDD or YYYY-DDD - if len(dt_str) - pos < 3: - raise ValueError('Invalid ordinal day') - - ordinal_day = int(dt_str[pos:pos + 3]) - pos += 3 - - if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)): - raise ValueError('Invalid ordinal day' + - ' {} for year {}'.format(ordinal_day, year)) - - base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1) - - components = [base_date.year, base_date.month, base_date.day] - return components, pos - - def _calculate_weekdate(self, year, week, day): - """ - Calculate the day of corresponding to the ISO year-week-day calendar. - - This function is effectively the inverse of - :func:`datetime.date.isocalendar`. - - :param year: - The year in the ISO calendar - - :param week: - The week in the ISO calendar - range is [1, 53] - - :param day: - The day in the ISO calendar - range is [1 (MON), 7 (SUN)] - - :return: - Returns a :class:`datetime.date` - """ - if not 0 < week < 54: - raise ValueError('Invalid week: {}'.format(week)) - - if not 0 < day < 8: # Range is 1-7 - raise ValueError('Invalid weekday: {}'.format(day)) - - # Get week 1 for the specific year: - jan_4 = date(year, 1, 4) # Week 1 always has January 4th in it - week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1) - - # Now add the specific number of weeks and days to get what we want - week_offset = (week - 1) * 7 + (day - 1) - return week_1 + timedelta(days=week_offset) - - def _parse_isotime(self, timestr): - len_str = len(timestr) - components = [0, 0, 0, 0, None] - pos = 0 - comp = -1 - - if len(timestr) < 2: - raise ValueError('ISO time too short') - - has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP - - while pos < len_str and comp < 5: - comp += 1 - - if timestr[pos:pos + 1] in b'-+Zz': - # Detect time zone boundary - components[-1] = self._parse_tzstr(timestr[pos:]) - pos = len_str - break - - if comp < 3: - # Hour, minute, second - components[comp] = int(timestr[pos:pos + 2]) - pos += 2 - if (has_sep and pos < len_str and - timestr[pos:pos + 1] == self._TIME_SEP): - pos += 1 - - if comp == 3: - # Fraction of a second - frac = self._FRACTION_REGEX.match(timestr[pos:]) - if not frac: - continue - - us_str = frac.group(1)[:6] # Truncate to microseconds - components[comp] = int(us_str) * 10**(6 - len(us_str)) - pos += len(frac.group()) - - if pos < len_str: - raise ValueError('Unused components in ISO string') - - if components[0] == 24: - # Standard supports 00:00 and 24:00 as representations of midnight - if any(component != 0 for component in components[1:4]): - raise ValueError('Hour may only be 24 at 24:00:00.000') - - return components - - def _parse_tzstr(self, tzstr, zero_as_utc=True): - if tzstr == b'Z' or tzstr == b'z': - return tz.UTC - - if len(tzstr) not in {3, 5, 6}: - raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters') - - if tzstr[0:1] == b'-': - mult = -1 - elif tzstr[0:1] == b'+': - mult = 1 - else: - raise ValueError('Time zone offset requires sign') - - hours = int(tzstr[1:3]) - if len(tzstr) == 3: - minutes = 0 - else: - minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):]) - - if zero_as_utc and hours == 0 and minutes == 0: - return tz.UTC - else: - if 
minutes > 59: - raise ValueError('Invalid minutes in time zone offset') - - if hours > 23: - raise ValueError('Invalid hours in time zone offset') - - return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60) - - -DEFAULT_ISOPARSER = isoparser() -isoparse = DEFAULT_ISOPARSER.isoparse diff --git a/venv/lib/python3.7/site-packages/dateutil/relativedelta.py b/venv/lib/python3.7/site-packages/dateutil/relativedelta.py deleted file mode 100644 index a9e85f7..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/relativedelta.py +++ /dev/null @@ -1,599 +0,0 @@ -# -*- coding: utf-8 -*- -import datetime -import calendar - -import operator -from math import copysign - -from six import integer_types -from warnings import warn - -from ._common import weekday - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) - -__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - - -class relativedelta(object): - """ - The relativedelta type is designed to be applied to an existing datetime and - can replace specific components of that datetime, or represents an interval - of time. - - It is based on the specification of the excellent work done by M.-A. Lemburg - in his - `mx.DateTime `_ extension. - However, notice that this type does *NOT* implement the same algorithm as - his work. Do *NOT* expect it to behave like mx.DateTime's counterpart. - - There are two different ways to build a relativedelta instance. The - first one is passing it two date/datetime classes:: - - relativedelta(datetime1, datetime2) - - The second one is passing it any number of the following keyword arguments:: - - relativedelta(arg1=x,arg2=y,arg3=z...) - - year, month, day, hour, minute, second, microsecond: - Absolute information (argument is singular); adding or subtracting a - relativedelta with absolute information does not perform an arithmetic - operation, but rather REPLACES the corresponding value in the - original datetime with the value(s) in relativedelta. - - years, months, weeks, days, hours, minutes, seconds, microseconds: - Relative information, may be negative (argument is plural); adding - or subtracting a relativedelta with relative information performs - the corresponding arithmetic operation on the original datetime value - with the information in the relativedelta. - - weekday: - One of the weekday instances (MO, TU, etc) available in the - relativedelta module. These instances may receive a parameter N, - specifying the Nth weekday, which could be positive or negative - (like MO(+1) or MO(-2)). Not specifying it is the same as specifying - +1. You can also use an integer, where 0=MO. This argument is always - relative e.g. if the calculated date is already Monday, using MO(1) - or MO(-1) won't change the day. To effectively make it absolute, use - it in combination with the day argument (e.g. day=1, MO(1) for first - Monday of the month). - - leapdays: - Will add given days to the date found, if year is a leap - year, and the date found is post 28 of february. - - yearday, nlyearday: - Set the yearday or the non-leap year day (jump leap days). - These are converted to day/month/leapdays information. - - There are relative and absolute forms of the keyword - arguments. The plural is relative, and the singular is - absolute. For each argument in the order below, the absolute form - is applied first (by setting each attribute to that value) and - then the relative form (by adding the value to the attribute). 
- - The order of attributes considered when this relativedelta is - added to a datetime is: - - 1. Year - 2. Month - 3. Day - 4. Hours - 5. Minutes - 6. Seconds - 7. Microseconds - - Finally, weekday is applied, using the rule described above. - - For example - - >>> from datetime import datetime - >>> from dateutil.relativedelta import relativedelta, MO - >>> dt = datetime(2018, 4, 9, 13, 37, 0) - >>> delta = relativedelta(hours=25, day=1, weekday=MO(1)) - >>> dt + delta - datetime.datetime(2018, 4, 2, 14, 37) - - First, the day is set to 1 (the first of the month), then 25 hours - are added, to get to the 2nd day and 14th hour, finally the - weekday is applied, but since the 2nd is already a Monday there is - no effect. - - """ - - def __init__(self, dt1=None, dt2=None, - years=0, months=0, days=0, leapdays=0, weeks=0, - hours=0, minutes=0, seconds=0, microseconds=0, - year=None, month=None, day=None, weekday=None, - yearday=None, nlyearday=None, - hour=None, minute=None, second=None, microsecond=None): - - if dt1 and dt2: - # datetime is a subclass of date. So both must be date - if not (isinstance(dt1, datetime.date) and - isinstance(dt2, datetime.date)): - raise TypeError("relativedelta only diffs datetime/date") - - # We allow two dates, or two datetimes, so we coerce them to be - # of the same type - if (isinstance(dt1, datetime.datetime) != - isinstance(dt2, datetime.datetime)): - if not isinstance(dt1, datetime.datetime): - dt1 = datetime.datetime.fromordinal(dt1.toordinal()) - elif not isinstance(dt2, datetime.datetime): - dt2 = datetime.datetime.fromordinal(dt2.toordinal()) - - self.years = 0 - self.months = 0 - self.days = 0 - self.leapdays = 0 - self.hours = 0 - self.minutes = 0 - self.seconds = 0 - self.microseconds = 0 - self.year = None - self.month = None - self.day = None - self.weekday = None - self.hour = None - self.minute = None - self.second = None - self.microsecond = None - self._has_time = 0 - - # Get year / month delta between the two - months = (dt1.year - dt2.year) * 12 + (dt1.month - dt2.month) - self._set_months(months) - - # Remove the year/month delta so the timedelta is just well-defined - # time units (seconds, days and microseconds) - dtm = self.__radd__(dt2) - - # If we've overshot our target, make an adjustment - if dt1 < dt2: - compare = operator.gt - increment = 1 - else: - compare = operator.lt - increment = -1 - - while compare(dt1, dtm): - months += increment - self._set_months(months) - dtm = self.__radd__(dt2) - - # Get the timedelta between the "months-adjusted" date and dt1 - delta = dt1 - dtm - self.seconds = delta.seconds + delta.days * 86400 - self.microseconds = delta.microseconds - else: - # Check for non-integer values in integer-only quantities - if any(x is not None and x != int(x) for x in (years, months)): - raise ValueError("Non-integer years and months are " - "ambiguous and not currently supported.") - - # Relative information - self.years = int(years) - self.months = int(months) - self.days = days + weeks * 7 - self.leapdays = leapdays - self.hours = hours - self.minutes = minutes - self.seconds = seconds - self.microseconds = microseconds - - # Absolute information - self.year = year - self.month = month - self.day = day - self.hour = hour - self.minute = minute - self.second = second - self.microsecond = microsecond - - if any(x is not None and int(x) != x - for x in (year, month, day, hour, - minute, second, microsecond)): - # For now we'll deprecate floats - later it'll be an error. 
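
The docstring above only demonstrates the keyword form; the two-datetime form computes a calendar-aware difference. A small sketch with illustrative dates:

```
import datetime
from dateutil.relativedelta import relativedelta

d1, d2 = datetime.date(2020, 3, 1), datetime.date(2019, 1, 15)
delta = relativedelta(d1, d2)   # -> relativedelta(years=+1, months=+1, days=+15)
d2 + delta == d1                # True: adding the difference back recovers d1
```
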
- warn("Non-integer value passed as absolute information. " + - "This is not a well-defined condition and will raise " + - "errors in future versions.", DeprecationWarning) - - if isinstance(weekday, integer_types): - self.weekday = weekdays[weekday] - else: - self.weekday = weekday - - yday = 0 - if nlyearday: - yday = nlyearday - elif yearday: - yday = yearday - if yearday > 59: - self.leapdays = -1 - if yday: - ydayidx = [31, 59, 90, 120, 151, 181, 212, - 243, 273, 304, 334, 366] - for idx, ydays in enumerate(ydayidx): - if yday <= ydays: - self.month = idx+1 - if idx == 0: - self.day = yday - else: - self.day = yday-ydayidx[idx-1] - break - else: - raise ValueError("invalid year day (%d)" % yday) - - self._fix() - - def _fix(self): - if abs(self.microseconds) > 999999: - s = _sign(self.microseconds) - div, mod = divmod(self.microseconds * s, 1000000) - self.microseconds = mod * s - self.seconds += div * s - if abs(self.seconds) > 59: - s = _sign(self.seconds) - div, mod = divmod(self.seconds * s, 60) - self.seconds = mod * s - self.minutes += div * s - if abs(self.minutes) > 59: - s = _sign(self.minutes) - div, mod = divmod(self.minutes * s, 60) - self.minutes = mod * s - self.hours += div * s - if abs(self.hours) > 23: - s = _sign(self.hours) - div, mod = divmod(self.hours * s, 24) - self.hours = mod * s - self.days += div * s - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years += div * s - if (self.hours or self.minutes or self.seconds or self.microseconds - or self.hour is not None or self.minute is not None or - self.second is not None or self.microsecond is not None): - self._has_time = 1 - else: - self._has_time = 0 - - @property - def weeks(self): - return int(self.days / 7.0) - - @weeks.setter - def weeks(self, value): - self.days = self.days - (self.weeks * 7) + value * 7 - - def _set_months(self, months): - self.months = months - if abs(self.months) > 11: - s = _sign(self.months) - div, mod = divmod(self.months * s, 12) - self.months = mod * s - self.years = div * s - else: - self.years = 0 - - def normalized(self): - """ - Return a version of this object represented entirely using integer - values for the relative attributes. - - >>> relativedelta(days=1.5, hours=2).normalized() - relativedelta(days=+1, hours=+14) - - :return: - Returns a :class:`dateutil.relativedelta.relativedelta` object. 
- """ - # Cascade remainders down (rounding each to roughly nearest microsecond) - days = int(self.days) - - hours_f = round(self.hours + 24 * (self.days - days), 11) - hours = int(hours_f) - - minutes_f = round(self.minutes + 60 * (hours_f - hours), 10) - minutes = int(minutes_f) - - seconds_f = round(self.seconds + 60 * (minutes_f - minutes), 8) - seconds = int(seconds_f) - - microseconds = round(self.microseconds + 1e6 * (seconds_f - seconds)) - - # Constructor carries overflow back up with call to _fix() - return self.__class__(years=self.years, months=self.months, - days=days, hours=hours, minutes=minutes, - seconds=seconds, microseconds=microseconds, - leapdays=self.leapdays, year=self.year, - month=self.month, day=self.day, - weekday=self.weekday, hour=self.hour, - minute=self.minute, second=self.second, - microsecond=self.microsecond) - - def __add__(self, other): - if isinstance(other, relativedelta): - return self.__class__(years=other.years + self.years, - months=other.months + self.months, - days=other.days + self.days, - hours=other.hours + self.hours, - minutes=other.minutes + self.minutes, - seconds=other.seconds + self.seconds, - microseconds=(other.microseconds + - self.microseconds), - leapdays=other.leapdays or self.leapdays, - year=(other.year if other.year is not None - else self.year), - month=(other.month if other.month is not None - else self.month), - day=(other.day if other.day is not None - else self.day), - weekday=(other.weekday if other.weekday is not None - else self.weekday), - hour=(other.hour if other.hour is not None - else self.hour), - minute=(other.minute if other.minute is not None - else self.minute), - second=(other.second if other.second is not None - else self.second), - microsecond=(other.microsecond if other.microsecond - is not None else - self.microsecond)) - if isinstance(other, datetime.timedelta): - return self.__class__(years=self.years, - months=self.months, - days=self.days + other.days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds + other.seconds, - microseconds=self.microseconds + other.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - if not isinstance(other, datetime.date): - return NotImplemented - elif self._has_time and not isinstance(other, datetime.datetime): - other = datetime.datetime.fromordinal(other.toordinal()) - year = (self.year or other.year)+self.years - month = self.month or other.month - if self.months: - assert 1 <= abs(self.months) <= 12 - month += self.months - if month > 12: - year += 1 - month -= 12 - elif month < 1: - year -= 1 - month += 12 - day = min(calendar.monthrange(year, month)[1], - self.day or other.day) - repl = {"year": year, "month": month, "day": day} - for attr in ["hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - repl[attr] = value - days = self.days - if self.leapdays and month > 2 and calendar.isleap(year): - days += self.leapdays - ret = (other.replace(**repl) - + datetime.timedelta(days=days, - hours=self.hours, - minutes=self.minutes, - seconds=self.seconds, - microseconds=self.microseconds)) - if self.weekday: - weekday, nth = self.weekday.weekday, self.weekday.n or 1 - jumpdays = (abs(nth) - 1) * 7 - if nth > 0: - jumpdays += (7 - ret.weekday() + weekday) % 7 - else: - jumpdays += (ret.weekday() - weekday) % 7 - jumpdays *= -1 - ret += 
datetime.timedelta(days=jumpdays) - return ret - - def __radd__(self, other): - return self.__add__(other) - - def __rsub__(self, other): - return self.__neg__().__radd__(other) - - def __sub__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented # In case the other object defines __rsub__ - return self.__class__(years=self.years - other.years, - months=self.months - other.months, - days=self.days - other.days, - hours=self.hours - other.hours, - minutes=self.minutes - other.minutes, - seconds=self.seconds - other.seconds, - microseconds=self.microseconds - other.microseconds, - leapdays=self.leapdays or other.leapdays, - year=(self.year if self.year is not None - else other.year), - month=(self.month if self.month is not None else - other.month), - day=(self.day if self.day is not None else - other.day), - weekday=(self.weekday if self.weekday is not None else - other.weekday), - hour=(self.hour if self.hour is not None else - other.hour), - minute=(self.minute if self.minute is not None else - other.minute), - second=(self.second if self.second is not None else - other.second), - microsecond=(self.microsecond if self.microsecond - is not None else - other.microsecond)) - - def __abs__(self): - return self.__class__(years=abs(self.years), - months=abs(self.months), - days=abs(self.days), - hours=abs(self.hours), - minutes=abs(self.minutes), - seconds=abs(self.seconds), - microseconds=abs(self.microseconds), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __neg__(self): - return self.__class__(years=-self.years, - months=-self.months, - days=-self.days, - hours=-self.hours, - minutes=-self.minutes, - seconds=-self.seconds, - microseconds=-self.microseconds, - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - def __bool__(self): - return not (not self.years and - not self.months and - not self.days and - not self.hours and - not self.minutes and - not self.seconds and - not self.microseconds and - not self.leapdays and - self.year is None and - self.month is None and - self.day is None and - self.weekday is None and - self.hour is None and - self.minute is None and - self.second is None and - self.microsecond is None) - # Compatibility with Python 2.x - __nonzero__ = __bool__ - - def __mul__(self, other): - try: - f = float(other) - except TypeError: - return NotImplemented - - return self.__class__(years=int(self.years * f), - months=int(self.months * f), - days=int(self.days * f), - hours=int(self.hours * f), - minutes=int(self.minutes * f), - seconds=int(self.seconds * f), - microseconds=int(self.microseconds * f), - leapdays=self.leapdays, - year=self.year, - month=self.month, - day=self.day, - weekday=self.weekday, - hour=self.hour, - minute=self.minute, - second=self.second, - microsecond=self.microsecond) - - __rmul__ = __mul__ - - def __eq__(self, other): - if not isinstance(other, relativedelta): - return NotImplemented - if self.weekday or other.weekday: - if not self.weekday or not other.weekday: - return False - if self.weekday.weekday != other.weekday.weekday: - return False - n1, n2 = self.weekday.n, other.weekday.n - if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)): - return False - return (self.years == other.years and - 
self.months == other.months and - self.days == other.days and - self.hours == other.hours and - self.minutes == other.minutes and - self.seconds == other.seconds and - self.microseconds == other.microseconds and - self.leapdays == other.leapdays and - self.year == other.year and - self.month == other.month and - self.day == other.day and - self.hour == other.hour and - self.minute == other.minute and - self.second == other.second and - self.microsecond == other.microsecond) - - def __hash__(self): - return hash(( - self.weekday, - self.years, - self.months, - self.days, - self.hours, - self.minutes, - self.seconds, - self.microseconds, - self.leapdays, - self.year, - self.month, - self.day, - self.hour, - self.minute, - self.second, - self.microsecond, - )) - - def __ne__(self, other): - return not self.__eq__(other) - - def __div__(self, other): - try: - reciprocal = 1 / float(other) - except TypeError: - return NotImplemented - - return self.__mul__(reciprocal) - - __truediv__ = __div__ - - def __repr__(self): - l = [] - for attr in ["years", "months", "days", "leapdays", - "hours", "minutes", "seconds", "microseconds"]: - value = getattr(self, attr) - if value: - l.append("{attr}={value:+g}".format(attr=attr, value=value)) - for attr in ["year", "month", "day", "weekday", - "hour", "minute", "second", "microsecond"]: - value = getattr(self, attr) - if value is not None: - l.append("{attr}={value}".format(attr=attr, value=repr(value))) - return "{classname}({attrs})".format(classname=self.__class__.__name__, - attrs=", ".join(l)) - - -def _sign(x): - return int(copysign(1, x)) - -# vim:ts=4:sw=4:et diff --git a/venv/lib/python3.7/site-packages/dateutil/rrule.py b/venv/lib/python3.7/site-packages/dateutil/rrule.py deleted file mode 100644 index 6bf0ea9..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/rrule.py +++ /dev/null @@ -1,1735 +0,0 @@ -# -*- coding: utf-8 -*- -""" -The rrule module offers a small, complete, and very fast, implementation of -the recurrence rules documented in the -`iCalendar RFC `_, -including support for caching of results. -""" -import itertools -import datetime -import calendar -import re -import sys - -try: - from math import gcd -except ImportError: - from fractions import gcd - -from six import advance_iterator, integer_types -from six.moves import _thread, range -import heapq - -from ._common import weekday as weekdaybase - -# For warning about deprecation of until and count -from warnings import warn - -__all__ = ["rrule", "rruleset", "rrulestr", - "YEARLY", "MONTHLY", "WEEKLY", "DAILY", - "HOURLY", "MINUTELY", "SECONDLY", - "MO", "TU", "WE", "TH", "FR", "SA", "SU"] - -# Every mask is 7 days longer to handle cross-year weekly periods. 
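
Before the mask tables that follow, a quick sketch of the module's main entry points, `rrule` and `rrulestr` (dates are illustrative; `rrulestr` itself is defined near the end of this file):

```
import datetime
from dateutil.rrule import rrule, rrulestr, WEEKLY, MO, WE

list(rrule(WEEKLY, count=3, byweekday=(MO, WE),
           dtstart=datetime.datetime(2020, 1, 1)))
# [datetime.datetime(2020, 1, 1, 0, 0),    a Wednesday
#  datetime.datetime(2020, 1, 6, 0, 0),    the following Monday
#  datetime.datetime(2020, 1, 8, 0, 0)]    the following Wednesday

# The same rule in its RFC 5545 string form:
rule = rrulestr('DTSTART:20200101T000000\nRRULE:FREQ=WEEKLY;COUNT=3;BYDAY=MO,WE')
```
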
-M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30 + - [7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7) -M365MASK = list(M366MASK) -M29, M30, M31 = list(range(1, 30)), list(range(1, 31)), list(range(1, 32)) -MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -MDAY365MASK = list(MDAY366MASK) -M29, M30, M31 = list(range(-29, 0)), list(range(-30, 0)), list(range(-31, 0)) -NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7]) -NMDAY365MASK = list(NMDAY366MASK) -M366RANGE = (0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366) -M365RANGE = (0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365) -WDAYMASK = [0, 1, 2, 3, 4, 5, 6]*55 -del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31] -MDAY365MASK = tuple(MDAY365MASK) -M365MASK = tuple(M365MASK) - -FREQNAMES = ['YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY', 'HOURLY', 'MINUTELY', 'SECONDLY'] - -(YEARLY, - MONTHLY, - WEEKLY, - DAILY, - HOURLY, - MINUTELY, - SECONDLY) = list(range(7)) - -# Imported on demand. -easter = None -parser = None - - -class weekday(weekdaybase): - """ - This version of weekday does not allow n = 0. - """ - def __init__(self, wkday, n=None): - if n == 0: - raise ValueError("Can't create weekday with n==0") - - super(weekday, self).__init__(wkday, n) - - -MO, TU, WE, TH, FR, SA, SU = weekdays = tuple(weekday(x) for x in range(7)) - - -def _invalidates_cache(f): - """ - Decorator for rruleset methods which may invalidate the - cached length. - """ - def inner_func(self, *args, **kwargs): - rv = f(self, *args, **kwargs) - self._invalidate_cache() - return rv - - return inner_func - - -class rrulebase(object): - def __init__(self, cache=False): - if cache: - self._cache = [] - self._cache_lock = _thread.allocate_lock() - self._invalidate_cache() - else: - self._cache = None - self._cache_complete = False - self._len = None - - def __iter__(self): - if self._cache_complete: - return iter(self._cache) - elif self._cache is None: - return self._iter() - else: - return self._iter_cached() - - def _invalidate_cache(self): - if self._cache is not None: - self._cache = [] - self._cache_complete = False - self._cache_gen = self._iter() - - if self._cache_lock.locked(): - self._cache_lock.release() - - self._len = None - - def _iter_cached(self): - i = 0 - gen = self._cache_gen - cache = self._cache - acquire = self._cache_lock.acquire - release = self._cache_lock.release - while gen: - if i == len(cache): - acquire() - if self._cache_complete: - break - try: - for j in range(10): - cache.append(advance_iterator(gen)) - except StopIteration: - self._cache_gen = gen = None - self._cache_complete = True - break - release() - yield cache[i] - i += 1 - while i < self._len: - yield cache[i] - i += 1 - - def __getitem__(self, item): - if self._cache_complete: - return self._cache[item] - elif isinstance(item, slice): - if item.step and item.step < 0: - return list(iter(self))[item] - else: - return list(itertools.islice(self, - item.start or 0, - item.stop or sys.maxsize, - item.step or 1)) - elif item >= 0: - gen = iter(self) - try: - for i in range(item+1): - res = advance_iterator(gen) - except StopIteration: - raise IndexError - return res - else: - return list(iter(self))[item] - - def __contains__(self, item): - if self._cache_complete: - return item in self._cache - else: - for i in self: - if i == item: - return True - elif i > item: - return False - return False - - # __len__() introduces a large performance penalty. 
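
A short sketch of the caching machinery above together with the query helpers defined just below (`count`, `before`, `after`, indexing); dates are illustrative:

```
import datetime
from dateutil.rrule import rrule, DAILY

r = rrule(DAILY, count=1000, dtstart=datetime.datetime(2020, 1, 1), cache=True)
r.count()                                  # iterates once, filling the cache
r.before(datetime.datetime(2020, 2, 1))    # -> datetime.datetime(2020, 1, 31, 0, 0)
r.after(datetime.datetime(2020, 1, 15))    # -> datetime.datetime(2020, 1, 16, 0, 0)
r[10]                                      # -> datetime.datetime(2020, 1, 11, 0, 0), served from the cache
```
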
- def count(self): - """ Returns the number of recurrences in this set. It will have go - trough the whole recurrence, if this hasn't been done before. """ - if self._len is None: - for x in self: - pass - return self._len - - def before(self, dt, inc=False): - """ Returns the last recurrence before the given datetime instance. The - inc keyword defines what happens if dt is an occurrence. With - inc=True, if dt itself is an occurrence, it will be returned. """ - if self._cache_complete: - gen = self._cache - else: - gen = self - last = None - if inc: - for i in gen: - if i > dt: - break - last = i - else: - for i in gen: - if i >= dt: - break - last = i - return last - - def after(self, dt, inc=False): - """ Returns the first recurrence after the given datetime instance. The - inc keyword defines what happens if dt is an occurrence. With - inc=True, if dt itself is an occurrence, it will be returned. """ - if self._cache_complete: - gen = self._cache - else: - gen = self - if inc: - for i in gen: - if i >= dt: - return i - else: - for i in gen: - if i > dt: - return i - return None - - def xafter(self, dt, count=None, inc=False): - """ - Generator which yields up to `count` recurrences after the given - datetime instance, equivalent to `after`. - - :param dt: - The datetime at which to start generating recurrences. - - :param count: - The maximum number of recurrences to generate. If `None` (default), - dates are generated until the recurrence rule is exhausted. - - :param inc: - If `dt` is an instance of the rule and `inc` is `True`, it is - included in the output. - - :yields: Yields a sequence of `datetime` objects. - """ - - if self._cache_complete: - gen = self._cache - else: - gen = self - - # Select the comparison function - if inc: - comp = lambda dc, dtc: dc >= dtc - else: - comp = lambda dc, dtc: dc > dtc - - # Generate dates - n = 0 - for d in gen: - if comp(d, dt): - if count is not None: - n += 1 - if n > count: - break - - yield d - - def between(self, after, before, inc=False, count=1): - """ Returns all the occurrences of the rrule between after and before. - The inc keyword defines what happens if after and/or before are - themselves occurrences. With inc=True, they will be included in the - list, if they are found in the recurrence set. """ - if self._cache_complete: - gen = self._cache - else: - gen = self - started = False - l = [] - if inc: - for i in gen: - if i > before: - break - elif not started: - if i >= after: - started = True - l.append(i) - else: - l.append(i) - else: - for i in gen: - if i >= before: - break - elif not started: - if i > after: - started = True - l.append(i) - else: - l.append(i) - return l - - -class rrule(rrulebase): - """ - That's the base of the rrule operation. It accepts all the keywords - defined in the RFC as its constructor parameters (except byday, - which was renamed to byweekday) and more. The constructor prototype is:: - - rrule(freq) - - Where freq must be one of YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY, - or SECONDLY. - - .. note:: - Per RFC section 3.3.10, recurrence instances falling on invalid dates - and times are ignored rather than coerced: - - Recurrence rules may generate recurrence instances with an invalid - date (e.g., February 30) or nonexistent local time (e.g., 1:30 AM - on a day where the local time is moved forward by an hour at 1:00 - AM). Such recurrence instances MUST be ignored and MUST NOT be - counted as part of the recurrence set. 
- - This can lead to possibly surprising behavior when, for example, the - start date occurs at the end of the month: - - >>> from dateutil.rrule import rrule, MONTHLY - >>> from datetime import datetime - >>> start_date = datetime(2014, 12, 31) - >>> list(rrule(freq=MONTHLY, count=4, dtstart=start_date)) - ... # doctest: +NORMALIZE_WHITESPACE - [datetime.datetime(2014, 12, 31, 0, 0), - datetime.datetime(2015, 1, 31, 0, 0), - datetime.datetime(2015, 3, 31, 0, 0), - datetime.datetime(2015, 5, 31, 0, 0)] - - Additionally, it supports the following keyword arguments: - - :param dtstart: - The recurrence start. Besides being the base for the recurrence, - missing parameters in the final recurrence instances will also be - extracted from this date. If not given, datetime.now() will be used - instead. - :param interval: - The interval between each freq iteration. For example, when using - YEARLY, an interval of 2 means once every two years, but with HOURLY, - it means once every two hours. The default interval is 1. - :param wkst: - The week start day. Must be one of the MO, TU, WE constants, or an - integer, specifying the first day of the week. This will affect - recurrences based on weekly periods. The default week start is got - from calendar.firstweekday(), and may be modified by - calendar.setfirstweekday(). - :param count: - If given, this determines how many occurrences will be generated. - - .. note:: - As of version 2.5.0, the use of the keyword ``until`` in conjunction - with ``count`` is deprecated, to make sure ``dateutil`` is fully - compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` - **must not** occur in the same call to ``rrule``. - :param until: - If given, this must be a datetime instance specifying the upper-bound - limit of the recurrence. The last recurrence in the rule is the greatest - datetime that is less than or equal to the value specified in the - ``until`` parameter. - - .. note:: - As of version 2.5.0, the use of the keyword ``until`` in conjunction - with ``count`` is deprecated, to make sure ``dateutil`` is fully - compliant with `RFC-5545 Sec. 3.3.10 `_. Therefore, ``until`` and ``count`` - **must not** occur in the same call to ``rrule``. - :param bysetpos: - If given, it must be either an integer, or a sequence of integers, - positive or negative. Each given integer will specify an occurrence - number, corresponding to the nth occurrence of the rule inside the - frequency period. For example, a bysetpos of -1 if combined with a - MONTHLY frequency, and a byweekday of (MO, TU, WE, TH, FR), will - result in the last work day of every month. - :param bymonth: - If given, it must be either an integer, or a sequence of integers, - meaning the months to apply the recurrence to. - :param bymonthday: - If given, it must be either an integer, or a sequence of integers, - meaning the month days to apply the recurrence to. - :param byyearday: - If given, it must be either an integer, or a sequence of integers, - meaning the year days to apply the recurrence to. - :param byeaster: - If given, it must be either an integer, or a sequence of integers, - positive or negative. Each integer will define an offset from the - Easter Sunday. Passing the offset 0 to byeaster will yield the Easter - Sunday itself. This is an extension to the RFC specification. - :param byweekno: - If given, it must be either an integer, or a sequence of integers, - meaning the week numbers to apply the recurrence to. 
Week numbers - have the meaning described in ISO8601, that is, the first week of - the year is that containing at least four days of the new year. - :param byweekday: - If given, it must be either an integer (0 == MO), a sequence of - integers, one of the weekday constants (MO, TU, etc), or a sequence - of these constants. When given, these variables will define the - weekdays where the recurrence will be applied. It's also possible to - use an argument n for the weekday instances, which will mean the nth - occurrence of this weekday in the period. For example, with MONTHLY, - or with YEARLY and BYMONTH, using FR(+1) in byweekday will specify the - first friday of the month where the recurrence happens. Notice that in - the RFC documentation, this is specified as BYDAY, but was renamed to - avoid the ambiguity of that keyword. - :param byhour: - If given, it must be either an integer, or a sequence of integers, - meaning the hours to apply the recurrence to. - :param byminute: - If given, it must be either an integer, or a sequence of integers, - meaning the minutes to apply the recurrence to. - :param bysecond: - If given, it must be either an integer, or a sequence of integers, - meaning the seconds to apply the recurrence to. - :param cache: - If given, it must be a boolean value specifying to enable or disable - caching of results. If you will use the same rrule instance multiple - times, enabling caching will improve the performance considerably. - """ - def __init__(self, freq, dtstart=None, - interval=1, wkst=None, count=None, until=None, bysetpos=None, - bymonth=None, bymonthday=None, byyearday=None, byeaster=None, - byweekno=None, byweekday=None, - byhour=None, byminute=None, bysecond=None, - cache=False): - super(rrule, self).__init__(cache) - global easter - if not dtstart: - if until and until.tzinfo: - dtstart = datetime.datetime.now(tz=until.tzinfo).replace(microsecond=0) - else: - dtstart = datetime.datetime.now().replace(microsecond=0) - elif not isinstance(dtstart, datetime.datetime): - dtstart = datetime.datetime.fromordinal(dtstart.toordinal()) - else: - dtstart = dtstart.replace(microsecond=0) - self._dtstart = dtstart - self._tzinfo = dtstart.tzinfo - self._freq = freq - self._interval = interval - self._count = count - - # Cache the original byxxx rules, if they are provided, as the _byxxx - # attributes do not necessarily map to the inputs, and this can be - # a problem in generating the strings. Only store things if they've - # been supplied (the string retrieval will just use .get()) - self._original_rule = {} - - if until and not isinstance(until, datetime.datetime): - until = datetime.datetime.fromordinal(until.toordinal()) - self._until = until - - if self._dtstart and self._until: - if (self._dtstart.tzinfo is not None) != (self._until.tzinfo is not None): - # According to RFC5545 Section 3.3.10: - # https://tools.ietf.org/html/rfc5545#section-3.3.10 - # - # > If the "DTSTART" property is specified as a date with UTC - # > time or a date with local time and time zone reference, - # > then the UNTIL rule part MUST be specified as a date with - # > UTC time. - raise ValueError( - 'RRULE UNTIL values must be specified in UTC when DTSTART ' - 'is timezone-aware' - ) - - if count is not None and until: - warn("Using both 'count' and 'until' is inconsistent with RFC 5545" - " and has been deprecated in dateutil. 
Future versions will " - "raise an error.", DeprecationWarning) - - if wkst is None: - self._wkst = calendar.firstweekday() - elif isinstance(wkst, integer_types): - self._wkst = wkst - else: - self._wkst = wkst.weekday - - if bysetpos is None: - self._bysetpos = None - elif isinstance(bysetpos, integer_types): - if bysetpos == 0 or not (-366 <= bysetpos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - self._bysetpos = (bysetpos,) - else: - self._bysetpos = tuple(bysetpos) - for pos in self._bysetpos: - if pos == 0 or not (-366 <= pos <= 366): - raise ValueError("bysetpos must be between 1 and 366, " - "or between -366 and -1") - - if self._bysetpos: - self._original_rule['bysetpos'] = self._bysetpos - - if (byweekno is None and byyearday is None and bymonthday is None and - byweekday is None and byeaster is None): - if freq == YEARLY: - if bymonth is None: - bymonth = dtstart.month - self._original_rule['bymonth'] = None - bymonthday = dtstart.day - self._original_rule['bymonthday'] = None - elif freq == MONTHLY: - bymonthday = dtstart.day - self._original_rule['bymonthday'] = None - elif freq == WEEKLY: - byweekday = dtstart.weekday() - self._original_rule['byweekday'] = None - - # bymonth - if bymonth is None: - self._bymonth = None - else: - if isinstance(bymonth, integer_types): - bymonth = (bymonth,) - - self._bymonth = tuple(sorted(set(bymonth))) - - if 'bymonth' not in self._original_rule: - self._original_rule['bymonth'] = self._bymonth - - # byyearday - if byyearday is None: - self._byyearday = None - else: - if isinstance(byyearday, integer_types): - byyearday = (byyearday,) - - self._byyearday = tuple(sorted(set(byyearday))) - self._original_rule['byyearday'] = self._byyearday - - # byeaster - if byeaster is not None: - if not easter: - from dateutil import easter - if isinstance(byeaster, integer_types): - self._byeaster = (byeaster,) - else: - self._byeaster = tuple(sorted(byeaster)) - - self._original_rule['byeaster'] = self._byeaster - else: - self._byeaster = None - - # bymonthday - if bymonthday is None: - self._bymonthday = () - self._bynmonthday = () - else: - if isinstance(bymonthday, integer_types): - bymonthday = (bymonthday,) - - bymonthday = set(bymonthday) # Ensure it's unique - - self._bymonthday = tuple(sorted(x for x in bymonthday if x > 0)) - self._bynmonthday = tuple(sorted(x for x in bymonthday if x < 0)) - - # Storing positive numbers first, then negative numbers - if 'bymonthday' not in self._original_rule: - self._original_rule['bymonthday'] = tuple( - itertools.chain(self._bymonthday, self._bynmonthday)) - - # byweekno - if byweekno is None: - self._byweekno = None - else: - if isinstance(byweekno, integer_types): - byweekno = (byweekno,) - - self._byweekno = tuple(sorted(set(byweekno))) - - self._original_rule['byweekno'] = self._byweekno - - # byweekday / bynweekday - if byweekday is None: - self._byweekday = None - self._bynweekday = None - else: - # If it's one of the valid non-sequence types, convert to a - # single-element sequence before the iterator that builds the - # byweekday set. 
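
As the default-derivation code above implies, a `YEARLY` rule with no `by*` arguments pins month and day from `dtstart`; combined with the RFC note that invalid instances are skipped, a leap-day start recurs only in leap years. A sketch:

```
import datetime
from dateutil.rrule import rrule, YEARLY

# No by* rules given, so YEARLY derives bymonth/bymonthday from dtstart (Feb 29):
list(rrule(YEARLY, count=3, dtstart=datetime.datetime(2020, 2, 29)))
# [datetime.datetime(2020, 2, 29, 0, 0),
#  datetime.datetime(2024, 2, 29, 0, 0),    non-leap years yield no instance
#  datetime.datetime(2028, 2, 29, 0, 0)]
```
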
- if isinstance(byweekday, integer_types) or hasattr(byweekday, "n"): - byweekday = (byweekday,) - - self._byweekday = set() - self._bynweekday = set() - for wday in byweekday: - if isinstance(wday, integer_types): - self._byweekday.add(wday) - elif not wday.n or freq > MONTHLY: - self._byweekday.add(wday.weekday) - else: - self._bynweekday.add((wday.weekday, wday.n)) - - if not self._byweekday: - self._byweekday = None - elif not self._bynweekday: - self._bynweekday = None - - if self._byweekday is not None: - self._byweekday = tuple(sorted(self._byweekday)) - orig_byweekday = [weekday(x) for x in self._byweekday] - else: - orig_byweekday = () - - if self._bynweekday is not None: - self._bynweekday = tuple(sorted(self._bynweekday)) - orig_bynweekday = [weekday(*x) for x in self._bynweekday] - else: - orig_bynweekday = () - - if 'byweekday' not in self._original_rule: - self._original_rule['byweekday'] = tuple(itertools.chain( - orig_byweekday, orig_bynweekday)) - - # byhour - if byhour is None: - if freq < HOURLY: - self._byhour = {dtstart.hour} - else: - self._byhour = None - else: - if isinstance(byhour, integer_types): - byhour = (byhour,) - - if freq == HOURLY: - self._byhour = self.__construct_byset(start=dtstart.hour, - byxxx=byhour, - base=24) - else: - self._byhour = set(byhour) - - self._byhour = tuple(sorted(self._byhour)) - self._original_rule['byhour'] = self._byhour - - # byminute - if byminute is None: - if freq < MINUTELY: - self._byminute = {dtstart.minute} - else: - self._byminute = None - else: - if isinstance(byminute, integer_types): - byminute = (byminute,) - - if freq == MINUTELY: - self._byminute = self.__construct_byset(start=dtstart.minute, - byxxx=byminute, - base=60) - else: - self._byminute = set(byminute) - - self._byminute = tuple(sorted(self._byminute)) - self._original_rule['byminute'] = self._byminute - - # bysecond - if bysecond is None: - if freq < SECONDLY: - self._bysecond = ((dtstart.second,)) - else: - self._bysecond = None - else: - if isinstance(bysecond, integer_types): - bysecond = (bysecond,) - - self._bysecond = set(bysecond) - - if freq == SECONDLY: - self._bysecond = self.__construct_byset(start=dtstart.second, - byxxx=bysecond, - base=60) - else: - self._bysecond = set(bysecond) - - self._bysecond = tuple(sorted(self._bysecond)) - self._original_rule['bysecond'] = self._bysecond - - if self._freq >= HOURLY: - self._timeset = None - else: - self._timeset = [] - for hour in self._byhour: - for minute in self._byminute: - for second in self._bysecond: - self._timeset.append( - datetime.time(hour, minute, second, - tzinfo=self._tzinfo)) - self._timeset.sort() - self._timeset = tuple(self._timeset) - - def __str__(self): - """ - Output a string that would generate this RRULE if passed to rrulestr. - This is mostly compatible with RFC5545, except for the - dateutil-specific extension BYEASTER. 
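
A sketch of the string form `__str__` produces; note that defaults derived from `dtstart` are stored as `None` in `_original_rule`, so they are not echoed back:

```
import datetime
from dateutil.rrule import rrule, rrulestr, MONTHLY

r = rrule(MONTHLY, interval=2, count=4, dtstart=datetime.datetime(2020, 1, 15))
print(r)
# DTSTART:20200115T000000
# RRULE:FREQ=MONTHLY;INTERVAL=2;COUNT=4
str(rrulestr(str(r))) == str(r)   # the string form should round-trip
```
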
- """ - - output = [] - h, m, s = [None] * 3 - if self._dtstart: - output.append(self._dtstart.strftime('DTSTART:%Y%m%dT%H%M%S')) - h, m, s = self._dtstart.timetuple()[3:6] - - parts = ['FREQ=' + FREQNAMES[self._freq]] - if self._interval != 1: - parts.append('INTERVAL=' + str(self._interval)) - - if self._wkst: - parts.append('WKST=' + repr(weekday(self._wkst))[0:2]) - - if self._count is not None: - parts.append('COUNT=' + str(self._count)) - - if self._until: - parts.append(self._until.strftime('UNTIL=%Y%m%dT%H%M%S')) - - if self._original_rule.get('byweekday') is not None: - # The str() method on weekday objects doesn't generate - # RFC5545-compliant strings, so we should modify that. - original_rule = dict(self._original_rule) - wday_strings = [] - for wday in original_rule['byweekday']: - if wday.n: - wday_strings.append('{n:+d}{wday}'.format( - n=wday.n, - wday=repr(wday)[0:2])) - else: - wday_strings.append(repr(wday)) - - original_rule['byweekday'] = wday_strings - else: - original_rule = self._original_rule - - partfmt = '{name}={vals}' - for name, key in [('BYSETPOS', 'bysetpos'), - ('BYMONTH', 'bymonth'), - ('BYMONTHDAY', 'bymonthday'), - ('BYYEARDAY', 'byyearday'), - ('BYWEEKNO', 'byweekno'), - ('BYDAY', 'byweekday'), - ('BYHOUR', 'byhour'), - ('BYMINUTE', 'byminute'), - ('BYSECOND', 'bysecond'), - ('BYEASTER', 'byeaster')]: - value = original_rule.get(key) - if value: - parts.append(partfmt.format(name=name, vals=(','.join(str(v) - for v in value)))) - - output.append('RRULE:' + ';'.join(parts)) - return '\n'.join(output) - - def replace(self, **kwargs): - """Return new rrule with same attributes except for those attributes given new - values by whichever keyword arguments are specified.""" - new_kwargs = {"interval": self._interval, - "count": self._count, - "dtstart": self._dtstart, - "freq": self._freq, - "until": self._until, - "wkst": self._wkst, - "cache": False if self._cache is None else True } - new_kwargs.update(self._original_rule) - new_kwargs.update(kwargs) - return rrule(**new_kwargs) - - def _iter(self): - year, month, day, hour, minute, second, weekday, yearday, _ = \ - self._dtstart.timetuple() - - # Some local variables to speed things up a bit - freq = self._freq - interval = self._interval - wkst = self._wkst - until = self._until - bymonth = self._bymonth - byweekno = self._byweekno - byyearday = self._byyearday - byweekday = self._byweekday - byeaster = self._byeaster - bymonthday = self._bymonthday - bynmonthday = self._bynmonthday - bysetpos = self._bysetpos - byhour = self._byhour - byminute = self._byminute - bysecond = self._bysecond - - ii = _iterinfo(self) - ii.rebuild(year, month) - - getdayset = {YEARLY: ii.ydayset, - MONTHLY: ii.mdayset, - WEEKLY: ii.wdayset, - DAILY: ii.ddayset, - HOURLY: ii.ddayset, - MINUTELY: ii.ddayset, - SECONDLY: ii.ddayset}[freq] - - if freq < HOURLY: - timeset = self._timeset - else: - gettimeset = {HOURLY: ii.htimeset, - MINUTELY: ii.mtimeset, - SECONDLY: ii.stimeset}[freq] - if ((freq >= HOURLY and - self._byhour and hour not in self._byhour) or - (freq >= MINUTELY and - self._byminute and minute not in self._byminute) or - (freq >= SECONDLY and - self._bysecond and second not in self._bysecond)): - timeset = () - else: - timeset = gettimeset(hour, minute, second) - - total = 0 - count = self._count - while True: - # Get dayset with the right frequency - dayset, start, end = getdayset(year, month, day) - - # Do the "hard" work ;-) - filtered = False - for i in dayset[start:end]: - if ((bymonth and ii.mmask[i] not in 
bymonth) or - (byweekno and not ii.wnomask[i]) or - (byweekday and ii.wdaymask[i] not in byweekday) or - (ii.nwdaymask and not ii.nwdaymask[i]) or - (byeaster and not ii.eastermask[i]) or - ((bymonthday or bynmonthday) and - ii.mdaymask[i] not in bymonthday and - ii.nmdaymask[i] not in bynmonthday) or - (byyearday and - ((i < ii.yearlen and i+1 not in byyearday and - -ii.yearlen+i not in byyearday) or - (i >= ii.yearlen and i+1-ii.yearlen not in byyearday and - -ii.nextyearlen+i-ii.yearlen not in byyearday)))): - dayset[i] = None - filtered = True - - # Output results - if bysetpos and timeset: - poslist = [] - for pos in bysetpos: - if pos < 0: - daypos, timepos = divmod(pos, len(timeset)) - else: - daypos, timepos = divmod(pos-1, len(timeset)) - try: - i = [x for x in dayset[start:end] - if x is not None][daypos] - time = timeset[timepos] - except IndexError: - pass - else: - date = datetime.date.fromordinal(ii.yearordinal+i) - res = datetime.datetime.combine(date, time) - if res not in poslist: - poslist.append(res) - poslist.sort() - for res in poslist: - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - if count is not None: - count -= 1 - if count < 0: - self._len = total - return - total += 1 - yield res - else: - for i in dayset[start:end]: - if i is not None: - date = datetime.date.fromordinal(ii.yearordinal + i) - for time in timeset: - res = datetime.datetime.combine(date, time) - if until and res > until: - self._len = total - return - elif res >= self._dtstart: - if count is not None: - count -= 1 - if count < 0: - self._len = total - return - - total += 1 - yield res - - # Handle frequency and interval - fixday = False - if freq == YEARLY: - year += interval - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == MONTHLY: - month += interval - if month > 12: - div, mod = divmod(month, 12) - month = mod - year += div - if month == 0: - month = 12 - year -= 1 - if year > datetime.MAXYEAR: - self._len = total - return - ii.rebuild(year, month) - elif freq == WEEKLY: - if wkst > weekday: - day += -(weekday+1+(6-wkst))+self._interval*7 - else: - day += -(weekday-wkst)+self._interval*7 - weekday = wkst - fixday = True - elif freq == DAILY: - day += interval - fixday = True - elif freq == HOURLY: - if filtered: - # Jump to one iteration before next day - hour += ((23-hour)//interval)*interval - - if byhour: - ndays, hour = self.__mod_distance(value=hour, - byxxx=self._byhour, - base=24) - else: - ndays, hour = divmod(hour+interval, 24) - - if ndays: - day += ndays - fixday = True - - timeset = gettimeset(hour, minute, second) - elif freq == MINUTELY: - if filtered: - # Jump to one iteration before next day - minute += ((1439-(hour*60+minute))//interval)*interval - - valid = False - rep_rate = (24*60) - for j in range(rep_rate // gcd(interval, rep_rate)): - if byminute: - nhours, minute = \ - self.__mod_distance(value=minute, - byxxx=self._byminute, - base=60) - else: - nhours, minute = divmod(minute+interval, 60) - - div, hour = divmod(hour+nhours, 24) - if div: - day += div - fixday = True - filtered = False - - if not byhour or hour in byhour: - valid = True - break - - if not valid: - raise ValueError('Invalid combination of interval and ' + - 'byhour resulting in empty rule.') - - timeset = gettimeset(hour, minute, second) - elif freq == SECONDLY: - if filtered: - # Jump to one iteration before next day - second += (((86399 - (hour * 3600 + minute * 60 + second)) - // interval) * interval) - - 
rep_rate = (24 * 3600) - valid = False - for j in range(0, rep_rate // gcd(interval, rep_rate)): - if bysecond: - nminutes, second = \ - self.__mod_distance(value=second, - byxxx=self._bysecond, - base=60) - else: - nminutes, second = divmod(second+interval, 60) - - div, minute = divmod(minute+nminutes, 60) - if div: - hour += div - div, hour = divmod(hour, 24) - if div: - day += div - fixday = True - - if ((not byhour or hour in byhour) and - (not byminute or minute in byminute) and - (not bysecond or second in bysecond)): - valid = True - break - - if not valid: - raise ValueError('Invalid combination of interval, ' + - 'byhour and byminute resulting in empty' + - ' rule.') - - timeset = gettimeset(hour, minute, second) - - if fixday and day > 28: - daysinmonth = calendar.monthrange(year, month)[1] - if day > daysinmonth: - while day > daysinmonth: - day -= daysinmonth - month += 1 - if month == 13: - month = 1 - year += 1 - if year > datetime.MAXYEAR: - self._len = total - return - daysinmonth = calendar.monthrange(year, month)[1] - ii.rebuild(year, month) - - def __construct_byset(self, start, byxxx, base): - """ - If a `BYXXX` sequence is passed to the constructor at the same level as - `FREQ` (e.g. `FREQ=HOURLY,BYHOUR={2,4,7},INTERVAL=3`), there are some - specifications which cannot be reached given some starting conditions. - - This occurs whenever the interval is not coprime with the base of a - given unit and the difference between the starting position and the - ending position is not coprime with the greatest common denominator - between the interval and the base. For example, with a FREQ of hourly - starting at 17:00 and an interval of 4, the only valid values for - BYHOUR would be {21, 1, 5, 9, 13, 17}, because 4 and 24 are not - coprime. - - :param start: - Specifies the starting position. - :param byxxx: - An iterable containing the list of allowed values. - :param base: - The largest allowable value for the specified frequency (e.g. - 24 hours, 60 minutes). - - This does not preserve the type of the iterable, returning a set, since - the values should be unique and the order is irrelevant, this will - speed up later lookups. - - In the event of an empty set, raises a :exception:`ValueError`, as this - results in an empty rrule. - """ - - cset = set() - - # Support a single byxxx value. - if isinstance(byxxx, integer_types): - byxxx = (byxxx, ) - - for num in byxxx: - i_gcd = gcd(self._interval, base) - # Use divmod rather than % because we need to wrap negative nums. - if i_gcd == 1 or divmod(num - start, i_gcd)[1] == 0: - cset.add(num) - - if len(cset) == 0: - raise ValueError("Invalid rrule byxxx generates an empty set.") - - return cset - - def __mod_distance(self, value, byxxx, base): - """ - Calculates the next value in a sequence where the `FREQ` parameter is - specified along with a `BYXXX` parameter at the same "level" - (e.g. `HOURLY` specified with `BYHOUR`). - - :param value: - The old value of the component. - :param byxxx: - The `BYXXX` set, which should have been generated by - `rrule._construct_byset`, or something else which checks that a - valid rule is present. - :param base: - The largest allowable value for the specified frequency (e.g. - 24 hours, 60 minutes). - - If a valid value is not found after `base` iterations (the maximum - number before the sequence would start to repeat), this raises a - :exception:`ValueError`, as no valid values were found. 
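
A sketch of the coprimality constraint documented above: with `FREQ=HOURLY`, `INTERVAL=4`, and a 17:00 start, only hours reachable in steps of 4 (mod 24) are valid `BYHOUR` values:

```
import datetime
from dateutil.rrule import rrule, HOURLY

start = datetime.datetime(2020, 1, 1, 17)   # reachable hours: {17, 21, 1, 5, 9, 13}
rrule(HOURLY, interval=4, byhour=21, count=2, dtstart=start)   # valid: 21 is reachable
rrule(HOURLY, interval=4, byhour=18, dtstart=start)
# ValueError: Invalid rrule byxxx generates an empty set.
```
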
- - This returns a tuple of `divmod(n*interval, base)`, where `n` is the - smallest number of `interval` repetitions until the next specified - value in `byxxx` is found. - """ - accumulator = 0 - for ii in range(1, base + 1): - # Using divmod() over % to account for negative intervals - div, value = divmod(value + self._interval, base) - accumulator += div - if value in byxxx: - return (accumulator, value) - - -class _iterinfo(object): - __slots__ = ["rrule", "lastyear", "lastmonth", - "yearlen", "nextyearlen", "yearordinal", "yearweekday", - "mmask", "mrange", "mdaymask", "nmdaymask", - "wdaymask", "wnomask", "nwdaymask", "eastermask"] - - def __init__(self, rrule): - for attr in self.__slots__: - setattr(self, attr, None) - self.rrule = rrule - - def rebuild(self, year, month): - # Every mask is 7 days longer to handle cross-year weekly periods. - rr = self.rrule - if year != self.lastyear: - self.yearlen = 365 + calendar.isleap(year) - self.nextyearlen = 365 + calendar.isleap(year + 1) - firstyday = datetime.date(year, 1, 1) - self.yearordinal = firstyday.toordinal() - self.yearweekday = firstyday.weekday() - - wday = datetime.date(year, 1, 1).weekday() - if self.yearlen == 365: - self.mmask = M365MASK - self.mdaymask = MDAY365MASK - self.nmdaymask = NMDAY365MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M365RANGE - else: - self.mmask = M366MASK - self.mdaymask = MDAY366MASK - self.nmdaymask = NMDAY366MASK - self.wdaymask = WDAYMASK[wday:] - self.mrange = M366RANGE - - if not rr._byweekno: - self.wnomask = None - else: - self.wnomask = [0]*(self.yearlen+7) - # no1wkst = firstwkst = self.wdaymask.index(rr._wkst) - no1wkst = firstwkst = (7-self.yearweekday+rr._wkst) % 7 - if no1wkst >= 4: - no1wkst = 0 - # Number of days in the year, plus the days we got - # from last year. - wyearlen = self.yearlen+(self.yearweekday-rr._wkst) % 7 - else: - # Number of days in the year, minus the days we - # left in last year. - wyearlen = self.yearlen-no1wkst - div, mod = divmod(wyearlen, 7) - numweeks = div+mod//4 - for n in rr._byweekno: - if n < 0: - n += numweeks+1 - if not (0 < n <= numweeks): - continue - if n > 1: - i = no1wkst+(n-1)*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - else: - i = no1wkst - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if 1 in rr._byweekno: - # Check week number 1 of next year as well - # TODO: Check -numweeks for next year. - i = no1wkst+numweeks*7 - if no1wkst != firstwkst: - i -= 7-firstwkst - if i < self.yearlen: - # If week starts in next year, we - # don't care about it. - for j in range(7): - self.wnomask[i] = 1 - i += 1 - if self.wdaymask[i] == rr._wkst: - break - if no1wkst: - # Check last week number of last year as - # well. If no1wkst is 0, either the year - # started on week start, or week number 1 - # got days from last year, so there are no - # days from last year's last week number in - # this year. 
- if -1 not in rr._byweekno: - lyearweekday = datetime.date(year-1, 1, 1).weekday() - lno1wkst = (7-lyearweekday+rr._wkst) % 7 - lyearlen = 365+calendar.isleap(year-1) - if lno1wkst >= 4: - lno1wkst = 0 - lnumweeks = 52+(lyearlen + - (lyearweekday-rr._wkst) % 7) % 7//4 - else: - lnumweeks = 52+(self.yearlen-no1wkst) % 7//4 - else: - lnumweeks = -1 - if lnumweeks in rr._byweekno: - for i in range(no1wkst): - self.wnomask[i] = 1 - - if (rr._bynweekday and (month != self.lastmonth or - year != self.lastyear)): - ranges = [] - if rr._freq == YEARLY: - if rr._bymonth: - for month in rr._bymonth: - ranges.append(self.mrange[month-1:month+1]) - else: - ranges = [(0, self.yearlen)] - elif rr._freq == MONTHLY: - ranges = [self.mrange[month-1:month+1]] - if ranges: - # Weekly frequency won't get here, so we may not - # care about cross-year weekly periods. - self.nwdaymask = [0]*self.yearlen - for first, last in ranges: - last -= 1 - for wday, n in rr._bynweekday: - if n < 0: - i = last+(n+1)*7 - i -= (self.wdaymask[i]-wday) % 7 - else: - i = first+(n-1)*7 - i += (7-self.wdaymask[i]+wday) % 7 - if first <= i <= last: - self.nwdaymask[i] = 1 - - if rr._byeaster: - self.eastermask = [0]*(self.yearlen+7) - eyday = easter.easter(year).toordinal()-self.yearordinal - for offset in rr._byeaster: - self.eastermask[eyday+offset] = 1 - - self.lastyear = year - self.lastmonth = month - - def ydayset(self, year, month, day): - return list(range(self.yearlen)), 0, self.yearlen - - def mdayset(self, year, month, day): - dset = [None]*self.yearlen - start, end = self.mrange[month-1:month+1] - for i in range(start, end): - dset[i] = i - return dset, start, end - - def wdayset(self, year, month, day): - # We need to handle cross-year weeks here. - dset = [None]*(self.yearlen+7) - i = datetime.date(year, month, day).toordinal()-self.yearordinal - start = i - for j in range(7): - dset[i] = i - i += 1 - # if (not (0 <= i < self.yearlen) or - # self.wdaymask[i] == self.rrule._wkst): - # This will cross the year boundary, if necessary. - if self.wdaymask[i] == self.rrule._wkst: - break - return dset, start, i - - def ddayset(self, year, month, day): - dset = [None] * self.yearlen - i = datetime.date(year, month, day).toordinal() - self.yearordinal - dset[i] = i - return dset, i, i + 1 - - def htimeset(self, hour, minute, second): - tset = [] - rr = self.rrule - for minute in rr._byminute: - for second in rr._bysecond: - tset.append(datetime.time(hour, minute, second, - tzinfo=rr._tzinfo)) - tset.sort() - return tset - - def mtimeset(self, hour, minute, second): - tset = [] - rr = self.rrule - for second in rr._bysecond: - tset.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo)) - tset.sort() - return tset - - def stimeset(self, hour, minute, second): - return (datetime.time(hour, minute, second, - tzinfo=self.rrule._tzinfo),) - - -class rruleset(rrulebase): - """ The rruleset type allows more complex recurrence setups, mixing - multiple rules, dates, exclusion rules, and exclusion dates. The type - constructor takes the following keyword arguments: - - :param cache: If True, caching of results will be enabled, improving - performance of multiple queries considerably. 
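A minimal usage sketch for the rruleset class documented above (assumes python-dateutil is installed; the dates are arbitrary):

```
from datetime import datetime
from dateutil.rrule import rrule, rruleset, DAILY

rset = rruleset()
rset.rrule(rrule(DAILY, count=5, dtstart=datetime(2020, 1, 1)))
rset.exdate(datetime(2020, 1, 3))  # exclude a single occurrence

print(list(rset))  # Jan 1, 2, 4 and 5 -- Jan 3 is filtered out
```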
""" - - class _genitem(object): - def __init__(self, genlist, gen): - try: - self.dt = advance_iterator(gen) - genlist.append(self) - except StopIteration: - pass - self.genlist = genlist - self.gen = gen - - def __next__(self): - try: - self.dt = advance_iterator(self.gen) - except StopIteration: - if self.genlist[0] is self: - heapq.heappop(self.genlist) - else: - self.genlist.remove(self) - heapq.heapify(self.genlist) - - next = __next__ - - def __lt__(self, other): - return self.dt < other.dt - - def __gt__(self, other): - return self.dt > other.dt - - def __eq__(self, other): - return self.dt == other.dt - - def __ne__(self, other): - return self.dt != other.dt - - def __init__(self, cache=False): - super(rruleset, self).__init__(cache) - self._rrule = [] - self._rdate = [] - self._exrule = [] - self._exdate = [] - - @_invalidates_cache - def rrule(self, rrule): - """ Include the given :py:class:`rrule` instance in the recurrence set - generation. """ - self._rrule.append(rrule) - - @_invalidates_cache - def rdate(self, rdate): - """ Include the given :py:class:`datetime` instance in the recurrence - set generation. """ - self._rdate.append(rdate) - - @_invalidates_cache - def exrule(self, exrule): - """ Include the given rrule instance in the recurrence set exclusion - list. Dates which are part of the given recurrence rules will not - be generated, even if some inclusive rrule or rdate matches them. - """ - self._exrule.append(exrule) - - @_invalidates_cache - def exdate(self, exdate): - """ Include the given datetime instance in the recurrence set - exclusion list. Dates included that way will not be generated, - even if some inclusive rrule or rdate matches them. """ - self._exdate.append(exdate) - - def _iter(self): - rlist = [] - self._rdate.sort() - self._genitem(rlist, iter(self._rdate)) - for gen in [iter(x) for x in self._rrule]: - self._genitem(rlist, gen) - exlist = [] - self._exdate.sort() - self._genitem(exlist, iter(self._exdate)) - for gen in [iter(x) for x in self._exrule]: - self._genitem(exlist, gen) - lastdt = None - total = 0 - heapq.heapify(rlist) - heapq.heapify(exlist) - while rlist: - ritem = rlist[0] - if not lastdt or lastdt != ritem.dt: - while exlist and exlist[0] < ritem: - exitem = exlist[0] - advance_iterator(exitem) - if exlist and exlist[0] is exitem: - heapq.heapreplace(exlist, exitem) - if not exlist or ritem != exlist[0]: - total += 1 - yield ritem.dt - lastdt = ritem.dt - advance_iterator(ritem) - if rlist and rlist[0] is ritem: - heapq.heapreplace(rlist, ritem) - self._len = total - - - - -class _rrulestr(object): - """ Parses a string representation of a recurrence rule or set of - recurrence rules. - - :param s: - Required, a string defining one or more recurrence rules. - - :param dtstart: - If given, used as the default recurrence start if not specified in the - rule string. - - :param cache: - If set ``True`` caching of results will be enabled, improving - performance of multiple queries considerably. - - :param unfold: - If set ``True`` indicates that a rule string is split over more - than one line and should be joined before processing. - - :param forceset: - If set ``True`` forces a :class:`dateutil.rrule.rruleset` to - be returned. - - :param compatible: - If set ``True`` forces ``unfold`` and ``forceset`` to be ``True``. - - :param ignoretz: - If set ``True``, time zones in parsed strings are ignored and a naive - :class:`datetime.datetime` object is returned. 
- - :param tzids: - If given, a callable or mapping used to retrieve a - :class:`datetime.tzinfo` from a string representation. - Defaults to :func:`dateutil.tz.gettz`. - - :param tzinfos: - Additional time zone names / aliases which may be present in a string - representation. See :func:`dateutil.parser.parse` for more - information. - - :return: - Returns a :class:`dateutil.rrule.rruleset` or - :class:`dateutil.rrule.rrule` - """ - - _freq_map = {"YEARLY": YEARLY, - "MONTHLY": MONTHLY, - "WEEKLY": WEEKLY, - "DAILY": DAILY, - "HOURLY": HOURLY, - "MINUTELY": MINUTELY, - "SECONDLY": SECONDLY} - - _weekday_map = {"MO": 0, "TU": 1, "WE": 2, "TH": 3, - "FR": 4, "SA": 5, "SU": 6} - - def _handle_int(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = int(value) - - def _handle_int_list(self, rrkwargs, name, value, **kwargs): - rrkwargs[name.lower()] = [int(x) for x in value.split(',')] - - _handle_INTERVAL = _handle_int - _handle_COUNT = _handle_int - _handle_BYSETPOS = _handle_int_list - _handle_BYMONTH = _handle_int_list - _handle_BYMONTHDAY = _handle_int_list - _handle_BYYEARDAY = _handle_int_list - _handle_BYEASTER = _handle_int_list - _handle_BYWEEKNO = _handle_int_list - _handle_BYHOUR = _handle_int_list - _handle_BYMINUTE = _handle_int_list - _handle_BYSECOND = _handle_int_list - - def _handle_FREQ(self, rrkwargs, name, value, **kwargs): - rrkwargs["freq"] = self._freq_map[value] - - def _handle_UNTIL(self, rrkwargs, name, value, **kwargs): - global parser - if not parser: - from dateutil import parser - try: - rrkwargs["until"] = parser.parse(value, - ignoretz=kwargs.get("ignoretz"), - tzinfos=kwargs.get("tzinfos")) - except ValueError: - raise ValueError("invalid until date") - - def _handle_WKST(self, rrkwargs, name, value, **kwargs): - rrkwargs["wkst"] = self._weekday_map[value] - - def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs): - """ - Two ways to specify this: +1MO or MO(+1) - """ - l = [] - for wday in value.split(','): - if '(' in wday: - # If it's of the form TH(+1), etc. - splt = wday.split('(') - w = splt[0] - n = int(splt[1][:-1]) - elif len(wday): - # If it's of the form +1MO - for i in range(len(wday)): - if wday[i] not in '+-0123456789': - break - n = wday[:i] or None - w = wday[i:] - if n: - n = int(n) - else: - raise ValueError("Invalid (empty) BYDAY specification.") - - l.append(weekdays[self._weekday_map[w]](n)) - rrkwargs["byweekday"] = l - - _handle_BYDAY = _handle_BYWEEKDAY - - def _parse_rfc_rrule(self, line, - dtstart=None, - cache=False, - ignoretz=False, - tzinfos=None): - if line.find(':') != -1: - name, value = line.split(':') - if name != "RRULE": - raise ValueError("unknown parameter name") - else: - value = line - rrkwargs = {} - for pair in value.split(';'): - name, value = pair.split('=') - name = name.upper() - value = value.upper() - try: - getattr(self, "_handle_"+name)(rrkwargs, name, value, - ignoretz=ignoretz, - tzinfos=tzinfos) - except AttributeError: - raise ValueError("unknown parameter '%s'" % name) - except (KeyError, ValueError): - raise ValueError("invalid '%s': %s" % (name, value)) - return rrule(dtstart=dtstart, cache=cache, **rrkwargs) - - def _parse_date_value(self, date_value, parms, rule_tzids, - ignoretz, tzids, tzinfos): - global parser - if not parser: - from dateutil import parser - - datevals = [] - value_found = False - TZID = None - - for parm in parms: - if parm.startswith("TZID="): - try: - tzkey = rule_tzids[parm.split('TZID=')[-1]] - except KeyError: - continue - if tzids is None: - from . 
import tz - tzlookup = tz.gettz - elif callable(tzids): - tzlookup = tzids - else: - tzlookup = getattr(tzids, 'get', None) - if tzlookup is None: - msg = ('tzids must be a callable, mapping, or None, ' - 'not %s' % tzids) - raise ValueError(msg) - - TZID = tzlookup(tzkey) - continue - - # RFC 5445 3.8.2.4: The VALUE parameter is optional, but may be found - # only once. - if parm not in {"VALUE=DATE-TIME", "VALUE=DATE"}: - raise ValueError("unsupported parm: " + parm) - else: - if value_found: - msg = ("Duplicate value parameter found in: " + parm) - raise ValueError(msg) - value_found = True - - for datestr in date_value.split(','): - date = parser.parse(datestr, ignoretz=ignoretz, tzinfos=tzinfos) - if TZID is not None: - if date.tzinfo is None: - date = date.replace(tzinfo=TZID) - else: - raise ValueError('DTSTART/EXDATE specifies multiple timezone') - datevals.append(date) - - return datevals - - def _parse_rfc(self, s, - dtstart=None, - cache=False, - unfold=False, - forceset=False, - compatible=False, - ignoretz=False, - tzids=None, - tzinfos=None): - global parser - if compatible: - forceset = True - unfold = True - - TZID_NAMES = dict(map( - lambda x: (x.upper(), x), - re.findall('TZID=(?P[^:]+):', s) - )) - s = s.upper() - if not s.strip(): - raise ValueError("empty string") - if unfold: - lines = s.splitlines() - i = 0 - while i < len(lines): - line = lines[i].rstrip() - if not line: - del lines[i] - elif i > 0 and line[0] == " ": - lines[i-1] += line[1:] - del lines[i] - else: - i += 1 - else: - lines = s.split() - if (not forceset and len(lines) == 1 and (s.find(':') == -1 or - s.startswith('RRULE:'))): - return self._parse_rfc_rrule(lines[0], cache=cache, - dtstart=dtstart, ignoretz=ignoretz, - tzinfos=tzinfos) - else: - rrulevals = [] - rdatevals = [] - exrulevals = [] - exdatevals = [] - for line in lines: - if not line: - continue - if line.find(':') == -1: - name = "RRULE" - value = line - else: - name, value = line.split(':', 1) - parms = name.split(';') - if not parms: - raise ValueError("empty property name") - name = parms[0] - parms = parms[1:] - if name == "RRULE": - for parm in parms: - raise ValueError("unsupported RRULE parm: "+parm) - rrulevals.append(value) - elif name == "RDATE": - for parm in parms: - if parm != "VALUE=DATE-TIME": - raise ValueError("unsupported RDATE parm: "+parm) - rdatevals.append(value) - elif name == "EXRULE": - for parm in parms: - raise ValueError("unsupported EXRULE parm: "+parm) - exrulevals.append(value) - elif name == "EXDATE": - exdatevals.extend( - self._parse_date_value(value, parms, - TZID_NAMES, ignoretz, - tzids, tzinfos) - ) - elif name == "DTSTART": - dtvals = self._parse_date_value(value, parms, TZID_NAMES, - ignoretz, tzids, tzinfos) - if len(dtvals) != 1: - raise ValueError("Multiple DTSTART values specified:" + - value) - dtstart = dtvals[0] - else: - raise ValueError("unsupported property: "+name) - if (forceset or len(rrulevals) > 1 or rdatevals - or exrulevals or exdatevals): - if not parser and (rdatevals or exdatevals): - from dateutil import parser - rset = rruleset(cache=cache) - for value in rrulevals: - rset.rrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in rdatevals: - for datestr in value.split(','): - rset.rdate(parser.parse(datestr, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exrulevals: - rset.exrule(self._parse_rfc_rrule(value, dtstart=dtstart, - ignoretz=ignoretz, - tzinfos=tzinfos)) - for value in exdatevals: - rset.exdate(value) - if 
compatible and dtstart: - rset.rdate(dtstart) - return rset - else: - return self._parse_rfc_rrule(rrulevals[0], - dtstart=dtstart, - cache=cache, - ignoretz=ignoretz, - tzinfos=tzinfos) - - def __call__(self, s, **kwargs): - return self._parse_rfc(s, **kwargs) - - -rrulestr = _rrulestr() - -# vim:ts=4:sw=4:et diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/__init__.py b/venv/lib/python3.7/site-packages/dateutil/tz/__init__.py deleted file mode 100644 index af1352c..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/tz/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# -*- coding: utf-8 -*- -from .tz import * -from .tz import __doc__ - -__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange", - "tzstr", "tzical", "tzwin", "tzwinlocal", "gettz", - "enfold", "datetime_ambiguous", "datetime_exists", - "resolve_imaginary", "UTC", "DeprecatedTzFormatWarning"] - - -class DeprecatedTzFormatWarning(Warning): - """Warning raised when time zones are parsed from deprecated formats.""" diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 764c429..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/_common.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/_common.cpython-37.pyc deleted file mode 100644 index b90d0d9..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/_common.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/_factories.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/_factories.cpython-37.pyc deleted file mode 100644 index 717044f..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/_factories.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/tz.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/tz.cpython-37.pyc deleted file mode 100644 index b4e8335..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/tz.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/win.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/win.cpython-37.pyc deleted file mode 100644 index 1c7ac4b..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/tz/__pycache__/win.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/_common.py b/venv/lib/python3.7/site-packages/dateutil/tz/_common.py deleted file mode 100644 index e6ac118..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/tz/_common.py +++ /dev/null @@ -1,419 +0,0 @@ -from six import PY2 - -from functools import wraps - -from datetime import datetime, timedelta, tzinfo - - -ZERO = timedelta(0) - -__all__ = ['tzname_in_python2', 'enfold'] - - -def tzname_in_python2(namefunc): - """Change unicode output into bytestrings in Python 2 - - tzname() API changed in Python 3. 
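Stepping back to the `rrulestr` parser removed just above: a short usage sketch (assumes python-dateutil is installed; the rule string is arbitrary):

```
from dateutil.rrule import rrulestr

rule = rrulestr("DTSTART:20200101T090000\n"
                "RRULE:FREQ=WEEKLY;BYDAY=MO,WE;COUNT=4")
for dt in rule:
    print(dt)
# 2020-01-01 09:00 (Wed), 2020-01-06 (Mon), 2020-01-08 (Wed), 2020-01-13 (Mon)
```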
It used to return bytes, but was changed - to unicode strings - """ - if PY2: - @wraps(namefunc) - def adjust_encoding(*args, **kwargs): - name = namefunc(*args, **kwargs) - if name is not None: - name = name.encode() - - return name - - return adjust_encoding - else: - return namefunc - - -# The following is adapted from Alexander Belopolsky's tz library -# https://github.com/abalkin/tz -if hasattr(datetime, 'fold'): - # This is the pre-python 3.6 fold situation - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - return dt.replace(fold=fold) - -else: - class _DatetimeWithFold(datetime): - """ - This is a class designed to provide a PEP 495-compliant interface for - Python versions before 3.6. It is used only for dates in a fold, so - the ``fold`` attribute is fixed at ``1``. - - .. versionadded:: 2.6.0 - """ - __slots__ = () - - def replace(self, *args, **kwargs): - """ - Return a datetime with the same attributes, except for those - attributes given new values by whichever keyword arguments are - specified. Note that tzinfo=None can be specified to create a naive - datetime from an aware datetime with no conversion of date and time - data. - - This is reimplemented in ``_DatetimeWithFold`` because pypy3 will - return a ``datetime.datetime`` even if ``fold`` is unchanged. - """ - argnames = ( - 'year', 'month', 'day', 'hour', 'minute', 'second', - 'microsecond', 'tzinfo' - ) - - for arg, argname in zip(args, argnames): - if argname in kwargs: - raise TypeError('Duplicate argument: {}'.format(argname)) - - kwargs[argname] = arg - - for argname in argnames: - if argname not in kwargs: - kwargs[argname] = getattr(self, argname) - - dt_class = self.__class__ if kwargs.get('fold', 1) else datetime - - return dt_class(**kwargs) - - @property - def fold(self): - return 1 - - def enfold(dt, fold=1): - """ - Provides a unified interface for assigning the ``fold`` attribute to - datetimes both before and after the implementation of PEP-495. - - :param fold: - The value for the ``fold`` attribute in the returned datetime. This - should be either 0 or 1. - - :return: - Returns an object for which ``getattr(dt, 'fold', 0)`` returns - ``fold`` for all versions of Python. In versions prior to - Python 3.6, this is a ``_DatetimeWithFold`` object, which is a - subclass of :py:class:`datetime.datetime` with the ``fold`` - attribute added, if ``fold`` is 1. - - .. versionadded:: 2.6.0 - """ - if getattr(dt, 'fold', 0) == fold: - return dt - - args = dt.timetuple()[:6] - args += (dt.microsecond, dt.tzinfo) - - if fold: - return _DatetimeWithFold(*args) - else: - return datetime(*args) - - -def _validate_fromutc_inputs(f): - """ - The CPython version of ``fromutc`` checks that the input is a ``datetime`` - object and that ``self`` is attached as its ``tzinfo``. 
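A sketch of `enfold` in action, assuming python-dateutil is installed and the IANA zone database is available on the host:

```
from datetime import datetime
from dateutil import tz

NYC = tz.gettz("America/New_York")
# 01:30 on 2020-11-01 occurs twice in New York; enfold() picks the repeat.
first = datetime(2020, 11, 1, 1, 30, tzinfo=NYC)
second = tz.enfold(first)  # fold=1
print(first.utcoffset(), second.utcoffset())
# -1 day, 20:00:00 (EDT) vs -1 day, 19:00:00 (EST)
```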
- """ - @wraps(f) - def fromutc(self, dt): - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - return f(self, dt) - - return fromutc - - -class _tzinfo(tzinfo): - """ - Base class for all ``dateutil`` ``tzinfo`` objects. - """ - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - - dt = dt.replace(tzinfo=self) - - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dt = wall_0.replace(tzinfo=None) == wall_1.replace(tzinfo=None) - - return same_dt and not same_offset - - def _fold_status(self, dt_utc, dt_wall): - """ - Determine the fold status of a "wall" datetime, given a representation - of the same datetime as a (naive) UTC datetime. This is calculated based - on the assumption that ``dt.utcoffset() - dt.dst()`` is constant for all - datetimes, and that this offset is the actual number of hours separating - ``dt_utc`` and ``dt_wall``. - - :param dt_utc: - Representation of the datetime as UTC - - :param dt_wall: - Representation of the datetime as "wall time". This parameter must - either have a `fold` attribute or have a fold-naive - :class:`datetime.tzinfo` attached, otherwise the calculation may - fail. - """ - if self.is_ambiguous(dt_wall): - delta_wall = dt_wall - dt_utc - _fold = int(delta_wall == (dt_utc.utcoffset() - dt_utc.dst())) - else: - _fold = 0 - - return _fold - - def _fold(self, dt): - return getattr(dt, 'fold', 0) - - def _fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). - - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - - # Re-implement the algorithm from Python's datetime.py - dtoff = dt.utcoffset() - if dtoff is None: - raise ValueError("fromutc() requires a non-None utcoffset() " - "result") - - # The original datetime.py code assumes that `dst()` defaults to - # zero during ambiguous times. PEP 495 inverts this presumption, so - # for pre-PEP 495 versions of python, we need to tweak the algorithm. - dtdst = dt.dst() - if dtdst is None: - raise ValueError("fromutc() requires a non-None dst() result") - delta = dtoff - dtdst - - dt += delta - # Set fold=1 so we can default to being in the fold for - # ambiguous dates. - dtdst = enfold(dt, fold=1).dst() - if dtdst is None: - raise ValueError("fromutc(): dt.dst gave inconsistent " - "results; cannot convert") - return dt + dtdst - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Given a timezone-aware datetime in a given timezone, calculates a - timezone-aware datetime in a new timezone. - - Since this is the one time that we *know* we have an unambiguous - datetime object, we take this opportunity to determine whether the - datetime is ambiguous and in a "fold" state (e.g. if it's the first - occurrence, chronologically, of the ambiguous datetime). 
- - :param dt: - A timezone-aware :class:`datetime.datetime` object. - """ - dt_wall = self._fromutc(dt) - - # Calculate the fold status given the two datetimes. - _fold = self._fold_status(dt, dt_wall) - - # Set the default fold value for ambiguous dates - return enfold(dt_wall, fold=_fold) - - -class tzrangebase(_tzinfo): - """ - This is an abstract base class for time zones represented by an annual - transition into and out of DST. Child classes should implement the following - methods: - - * ``__init__(self, *args, **kwargs)`` - * ``transitions(self, year)`` - this is expected to return a tuple of - datetimes representing the DST on and off transitions in standard - time. - - A fully initialized ``tzrangebase`` subclass should also provide the - following attributes: - * ``hasdst``: Boolean whether or not the zone uses DST. - * ``_dst_offset`` / ``_std_offset``: :class:`datetime.timedelta` objects - representing the respective UTC offsets. - * ``_dst_abbr`` / ``_std_abbr``: Strings representing the timezone short - abbreviations in DST and STD, respectively. - * ``_hasdst``: Whether or not the zone has DST. - - .. versionadded:: 2.6.0 - """ - def __init__(self): - raise NotImplementedError('tzrangebase is an abstract base class') - - def utcoffset(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - isdst = self._isdst(dt) - - if isdst is None: - return None - elif isdst: - return self._dst_base_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - if self._isdst(dt): - return self._dst_abbr - else: - return self._std_abbr - - def fromutc(self, dt): - """ Given a datetime in UTC, return local time """ - if not isinstance(dt, datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # Get transitions - if there are none, fixed offset - transitions = self.transitions(dt.year) - if transitions is None: - return dt + self.utcoffset(dt) - - # Get the transition times in UTC - dston, dstoff = transitions - - dston -= self._std_offset - dstoff -= self._std_offset - - utc_transitions = (dston, dstoff) - dt_utc = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt_utc, utc_transitions) - - if isdst: - dt_wall = dt + self._dst_offset - else: - dt_wall = dt + self._std_offset - - _fold = int(not isdst and self.is_ambiguous(dt_wall)) - - return enfold(dt_wall, fold=_fold) - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. 
versionadded:: 2.6.0 - """ - if not self.hasdst: - return False - - start, end = self.transitions(dt.year) - - dt = dt.replace(tzinfo=None) - return (end <= dt < end + self._dst_base_offset) - - def _isdst(self, dt): - if not self.hasdst: - return False - elif dt is None: - return None - - transitions = self.transitions(dt.year) - - if transitions is None: - return False - - dt = dt.replace(tzinfo=None) - - isdst = self._naive_isdst(dt, transitions) - - # Handle ambiguous dates - if not isdst and self.is_ambiguous(dt): - return not self._fold(dt) - else: - return isdst - - def _naive_isdst(self, dt, transitions): - dston, dstoff = transitions - - dt = dt.replace(tzinfo=None) - - if dston < dstoff: - isdst = dston <= dt < dstoff - else: - isdst = not dstoff <= dt < dston - - return isdst - - @property - def _dst_base_offset(self): - return self._dst_offset - self._std_offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(...)" % self.__class__.__name__ - - __reduce__ = object.__reduce__ diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/_factories.py b/venv/lib/python3.7/site-packages/dateutil/tz/_factories.py deleted file mode 100644 index f8a6589..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/tz/_factories.py +++ /dev/null @@ -1,80 +0,0 @@ -from datetime import timedelta -import weakref -from collections import OrderedDict - -from six.moves import _thread - - -class _TzSingleton(type): - def __init__(cls, *args, **kwargs): - cls.__instance = None - super(_TzSingleton, cls).__init__(*args, **kwargs) - - def __call__(cls): - if cls.__instance is None: - cls.__instance = super(_TzSingleton, cls).__call__() - return cls.__instance - - -class _TzFactory(type): - def instance(cls, *args, **kwargs): - """Alternate constructor that returns a fresh instance""" - return type.__call__(cls, *args, **kwargs) - - -class _TzOffsetFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - cls._cache_lock = _thread.allocate_lock() - - def __call__(cls, name, offset): - if isinstance(offset, timedelta): - key = (name, offset.total_seconds()) - else: - key = (name, offset) - - instance = cls.__instances.get(key, None) - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(name, offset)) - - # This lock may not be necessary in Python 3. See GH issue #901 - with cls._cache_lock: - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - - -class _TzStrFactory(_TzFactory): - def __init__(cls, *args, **kwargs): - cls.__instances = weakref.WeakValueDictionary() - cls.__strong_cache = OrderedDict() - cls.__strong_cache_size = 8 - - cls.__cache_lock = _thread.allocate_lock() - - def __call__(cls, s, posix_offset=False): - key = (s, posix_offset) - instance = cls.__instances.get(key, None) - - if instance is None: - instance = cls.__instances.setdefault(key, - cls.instance(s, posix_offset)) - - # This lock may not be necessary in Python 3. 
See GH issue #901 - with cls.__cache_lock: - cls.__strong_cache[key] = cls.__strong_cache.pop(key, instance) - - # Remove an item if the strong cache is overpopulated - if len(cls.__strong_cache) > cls.__strong_cache_size: - cls.__strong_cache.popitem(last=False) - - return instance - diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/tz.py b/venv/lib/python3.7/site-packages/dateutil/tz/tz.py deleted file mode 100644 index af81e88..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/tz/tz.py +++ /dev/null @@ -1,1849 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers timezone implementations subclassing the abstract -:py:class:`datetime.tzinfo` type. There are classes to handle tzfile format -files (usually are in :file:`/etc/localtime`, :file:`/usr/share/zoneinfo`, -etc), TZ environment string (in all known formats), given ranges (with help -from relative deltas), local machine timezone, fixed offset timezone, and UTC -timezone. -""" -import datetime -import struct -import time -import sys -import os -import bisect -import weakref -from collections import OrderedDict - -import six -from six import string_types -from six.moves import _thread -from ._common import tzname_in_python2, _tzinfo -from ._common import tzrangebase, enfold -from ._common import _validate_fromutc_inputs - -from ._factories import _TzSingleton, _TzOffsetFactory -from ._factories import _TzStrFactory -try: - from .win import tzwin, tzwinlocal -except ImportError: - tzwin = tzwinlocal = None - -# For warning about rounding tzinfo -from warnings import warn - -ZERO = datetime.timedelta(0) -EPOCH = datetime.datetime.utcfromtimestamp(0) -EPOCHORDINAL = EPOCH.toordinal() - - -@six.add_metaclass(_TzSingleton) -class tzutc(datetime.tzinfo): - """ - This is a tzinfo object that represents the UTC time zone. - - **Examples:** - - .. doctest:: - - >>> from datetime import * - >>> from dateutil.tz import * - - >>> datetime.now() - datetime.datetime(2003, 9, 27, 9, 40, 1, 521290) - - >>> datetime.now(tzutc()) - datetime.datetime(2003, 9, 27, 12, 40, 12, 156379, tzinfo=tzutc()) - - >>> datetime.now(tzutc()).tzname() - 'UTC' - - .. versionchanged:: 2.7.0 - ``tzutc()`` is now a singleton, so the result of ``tzutc()`` will - always return the same object. - - .. doctest:: - - >>> from dateutil.tz import tzutc, UTC - >>> tzutc() is tzutc() - True - >>> tzutc() is UTC - True - """ - def utcoffset(self, dt): - return ZERO - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return "UTC" - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - return False - - @_validate_fromutc_inputs - def fromutc(self, dt): - """ - Fast track version of fromutc() returns the original ``dt`` object for - any valid :py:class:`datetime.datetime` object. - """ - return dt - - def __eq__(self, other): - if not isinstance(other, (tzutc, tzoffset)): - return NotImplemented - - return (isinstance(other, tzutc) or - (isinstance(other, tzoffset) and other._offset == ZERO)) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - - -#: Convenience constant providing a :class:`tzutc()` instance -#: -#: .. 
versionadded:: 2.7.0 -UTC = tzutc() - - -@six.add_metaclass(_TzOffsetFactory) -class tzoffset(datetime.tzinfo): - """ - A simple class for representing a fixed offset from UTC. - - :param name: - The timezone name, to be returned when ``tzname()`` is called. - :param offset: - The time zone offset in seconds, or (since version 2.6.0, represented - as a :py:class:`datetime.timedelta` object). - """ - def __init__(self, name, offset): - self._name = name - - try: - # Allow a timedelta - offset = offset.total_seconds() - except (TypeError, AttributeError): - pass - - self._offset = datetime.timedelta(seconds=_get_supported_offset(offset)) - - def utcoffset(self, dt): - return self._offset - - def dst(self, dt): - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._name - - @_validate_fromutc_inputs - def fromutc(self, dt): - return dt + self._offset - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - return False - - def __eq__(self, other): - if not isinstance(other, tzoffset): - return NotImplemented - - return self._offset == other._offset - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(%s, %s)" % (self.__class__.__name__, - repr(self._name), - int(self._offset.total_seconds())) - - __reduce__ = object.__reduce__ - - -class tzlocal(_tzinfo): - """ - A :class:`tzinfo` subclass built around the ``time`` timezone functions. - """ - def __init__(self): - super(tzlocal, self).__init__() - - self._std_offset = datetime.timedelta(seconds=-time.timezone) - if time.daylight: - self._dst_offset = datetime.timedelta(seconds=-time.altzone) - else: - self._dst_offset = self._std_offset - - self._dst_saved = self._dst_offset - self._std_offset - self._hasdst = bool(self._dst_saved) - self._tznames = tuple(time.tzname) - - def utcoffset(self, dt): - if dt is None and self._hasdst: - return None - - if self._isdst(dt): - return self._dst_offset - else: - return self._std_offset - - def dst(self, dt): - if dt is None and self._hasdst: - return None - - if self._isdst(dt): - return self._dst_offset - self._std_offset - else: - return ZERO - - @tzname_in_python2 - def tzname(self, dt): - return self._tznames[self._isdst(dt)] - - def is_ambiguous(self, dt): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - naive_dst = self._naive_is_dst(dt) - return (not naive_dst and - (naive_dst != self._naive_is_dst(dt - self._dst_saved))) - - def _naive_is_dst(self, dt): - timestamp = _datetime_to_timestamp(dt) - return time.localtime(timestamp + time.timezone).tm_isdst - - def _isdst(self, dt, fold_naive=True): - # We can't use mktime here. It is unstable when deciding if - # the hour near to a change is DST or not. 
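For the two fixed-offset classes above, a minimal sketch (assumes python-dateutil >= 2.7.0, where `tzutc()` became a singleton and `tz.UTC` was added):

```
from datetime import datetime, timedelta
from dateutil import tz

assert tz.tzutc() is tz.tzutc()  # singleton, per the 2.7.0 note
assert tz.UTC is tz.tzutc()

BRT = tz.tzoffset("BRT", timedelta(hours=-3))
print(datetime(2020, 6, 1, 12, tzinfo=BRT).isoformat())
# 2020-06-01T12:00:00-03:00
```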
- # - # timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour, - # dt.minute, dt.second, dt.weekday(), 0, -1)) - # return time.localtime(timestamp).tm_isdst - # - # The code above yields the following result: - # - # >>> import tz, datetime - # >>> t = tz.tzlocal() - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRDT' - # >>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname() - # 'BRST' - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRST' - # >>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname() - # 'BRDT' - # >>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname() - # 'BRDT' - # - # Here is a more stable implementation: - # - if not self._hasdst: - return False - - # Check for ambiguous times: - dstval = self._naive_is_dst(dt) - fold = getattr(dt, 'fold', None) - - if self.is_ambiguous(dt): - if fold is not None: - return not self._fold(dt) - else: - return True - - return dstval - - def __eq__(self, other): - if isinstance(other, tzlocal): - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset) - elif isinstance(other, tzutc): - return (not self._hasdst and - self._tznames[0] in {'UTC', 'GMT'} and - self._std_offset == ZERO) - elif isinstance(other, tzoffset): - return (not self._hasdst and - self._tznames[0] == other._name and - self._std_offset == other._offset) - else: - return NotImplemented - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s()" % self.__class__.__name__ - - __reduce__ = object.__reduce__ - - -class _ttinfo(object): - __slots__ = ["offset", "delta", "isdst", "abbr", - "isstd", "isgmt", "dstoffset"] - - def __init__(self): - for attr in self.__slots__: - setattr(self, attr, None) - - def __repr__(self): - l = [] - for attr in self.__slots__: - value = getattr(self, attr) - if value is not None: - l.append("%s=%s" % (attr, repr(value))) - return "%s(%s)" % (self.__class__.__name__, ", ".join(l)) - - def __eq__(self, other): - if not isinstance(other, _ttinfo): - return NotImplemented - - return (self.offset == other.offset and - self.delta == other.delta and - self.isdst == other.isdst and - self.abbr == other.abbr and - self.isstd == other.isstd and - self.isgmt == other.isgmt and - self.dstoffset == other.dstoffset) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __getstate__(self): - state = {} - for name in self.__slots__: - state[name] = getattr(self, name, None) - return state - - def __setstate__(self, state): - for name in self.__slots__: - if name in state: - setattr(self, name, state[name]) - - -class _tzfile(object): - """ - Lightweight class for holding the relevant transition and time zone - information read from binary tzfiles. - """ - attrs = ['trans_list', 'trans_list_utc', 'trans_idx', 'ttinfo_list', - 'ttinfo_std', 'ttinfo_dst', 'ttinfo_before', 'ttinfo_first'] - - def __init__(self, **kwargs): - for attr in self.attrs: - setattr(self, attr, kwargs.get(attr, None)) - - -class tzfile(_tzinfo): - """ - This is a ``tzinfo`` subclass that allows one to use the ``tzfile(5)`` - format timezone files to extract current and historical zone information. - - :param fileobj: - This can be an opened file stream or a file name that the time zone - information can be read from. - - :param filename: - This is an optional parameter specifying the source of the time zone - information in the event that ``fileobj`` is a file object. 
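And `tzlocal`, which wraps the `time` module's view of the host zone, is a one-liner to try (output depends on the machine it runs on):

```
from datetime import datetime
from dateutil import tz

now = datetime.now(tz.tzlocal())
print(now.tzname(), now.utcoffset())  # e.g. CET 1:00:00
```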
If omitted - and ``fileobj`` is a file stream, this parameter will be set either to - ``fileobj``'s ``name`` attribute or to ``repr(fileobj)``. - - See `Sources for Time Zone and Daylight Saving Time Data - `_ for more information. - Time zone files can be compiled from the `IANA Time Zone database files - `_ with the `zic time zone compiler - `_ - - .. note:: - - Only construct a ``tzfile`` directly if you have a specific timezone - file on disk that you want to read into a Python ``tzinfo`` object. - If you want to get a ``tzfile`` representing a specific IANA zone, - (e.g. ``'America/New_York'``), you should call - :func:`dateutil.tz.gettz` with the zone identifier. - - - **Examples:** - - Using the US Eastern time zone as an example, we can see that a ``tzfile`` - provides time zone information for the standard Daylight Saving offsets: - - .. testsetup:: tzfile - - from dateutil.tz import gettz - from datetime import datetime - - .. doctest:: tzfile - - >>> NYC = gettz('America/New_York') - >>> NYC - tzfile('/usr/share/zoneinfo/America/New_York') - - >>> print(datetime(2016, 1, 3, tzinfo=NYC)) # EST - 2016-01-03 00:00:00-05:00 - - >>> print(datetime(2016, 7, 7, tzinfo=NYC)) # EDT - 2016-07-07 00:00:00-04:00 - - - The ``tzfile`` structure contains a fully history of the time zone, - so historical dates will also have the right offsets. For example, before - the adoption of the UTC standards, New York used local solar mean time: - - .. doctest:: tzfile - - >>> print(datetime(1901, 4, 12, tzinfo=NYC)) # LMT - 1901-04-12 00:00:00-04:56 - - And during World War II, New York was on "Eastern War Time", which was a - state of permanent daylight saving time: - - .. doctest:: tzfile - - >>> print(datetime(1944, 2, 7, tzinfo=NYC)) # EWT - 1944-02-07 00:00:00-04:00 - - """ - - def __init__(self, fileobj, filename=None): - super(tzfile, self).__init__() - - file_opened_here = False - if isinstance(fileobj, string_types): - self._filename = fileobj - fileobj = open(fileobj, 'rb') - file_opened_here = True - elif filename is not None: - self._filename = filename - elif hasattr(fileobj, "name"): - self._filename = fileobj.name - else: - self._filename = repr(fileobj) - - if fileobj is not None: - if not file_opened_here: - fileobj = _nullcontext(fileobj) - - with fileobj as file_stream: - tzobj = self._read_tzfile(file_stream) - - self._set_tzdata(tzobj) - - def _set_tzdata(self, tzobj): - """ Set the time zone data of this object from a _tzfile object """ - # Copy the relevant attributes over as private attributes - for attr in _tzfile.attrs: - setattr(self, '_' + attr, getattr(tzobj, attr)) - - def _read_tzfile(self, fileobj): - out = _tzfile() - - # From tzfile(5): - # - # The time zone information files used by tzset(3) - # begin with the magic characters "TZif" to identify - # them as time zone information files, followed by - # sixteen bytes reserved for future use, followed by - # six four-byte values of type long, written in a - # ``standard'' byte order (the high-order byte - # of the value is written first). - if fileobj.read(4).decode() != "TZif": - raise ValueError("magic not found") - - fileobj.read(16) - - ( - # The number of UTC/local indicators stored in the file. - ttisgmtcnt, - - # The number of standard/wall indicators stored in the file. - ttisstdcnt, - - # The number of leap seconds for which data is - # stored in the file. - leapcnt, - - # The number of "transition times" for which data - # is stored in the file. 
- timecnt, - - # The number of "local time types" for which data - # is stored in the file (must not be zero). - typecnt, - - # The number of characters of "time zone - # abbreviation strings" stored in the file. - charcnt, - - ) = struct.unpack(">6l", fileobj.read(24)) - - # The above header is followed by tzh_timecnt four-byte - # values of type long, sorted in ascending order. - # These values are written in ``standard'' byte order. - # Each is used as a transition time (as returned by - # time(2)) at which the rules for computing local time - # change. - - if timecnt: - out.trans_list_utc = list(struct.unpack(">%dl" % timecnt, - fileobj.read(timecnt*4))) - else: - out.trans_list_utc = [] - - # Next come tzh_timecnt one-byte values of type unsigned - # char; each one tells which of the different types of - # ``local time'' types described in the file is associated - # with the same-indexed transition time. These values - # serve as indices into an array of ttinfo structures that - # appears next in the file. - - if timecnt: - out.trans_idx = struct.unpack(">%dB" % timecnt, - fileobj.read(timecnt)) - else: - out.trans_idx = [] - - # Each ttinfo structure is written as a four-byte value - # for tt_gmtoff of type long, in a standard byte - # order, followed by a one-byte value for tt_isdst - # and a one-byte value for tt_abbrind. In each - # structure, tt_gmtoff gives the number of - # seconds to be added to UTC, tt_isdst tells whether - # tm_isdst should be set by localtime(3), and - # tt_abbrind serves as an index into the array of - # time zone abbreviation characters that follow the - # ttinfo structure(s) in the file. - - ttinfo = [] - - for i in range(typecnt): - ttinfo.append(struct.unpack(">lbb", fileobj.read(6))) - - abbr = fileobj.read(charcnt).decode() - - # Then there are tzh_leapcnt pairs of four-byte - # values, written in standard byte order; the - # first value of each pair gives the time (as - # returned by time(2)) at which a leap second - # occurs; the second gives the total number of - # leap seconds to be applied after the given time. - # The pairs of values are sorted in ascending order - # by time. - - # Not used, for now (but seek for correct file position) - if leapcnt: - fileobj.seek(leapcnt * 8, os.SEEK_CUR) - - # Then there are tzh_ttisstdcnt standard/wall - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as standard - # time or wall clock time, and are used when - # a time zone file is used in handling POSIX-style - # time zone environment variables. - - if ttisstdcnt: - isstd = struct.unpack(">%db" % ttisstdcnt, - fileobj.read(ttisstdcnt)) - - # Finally, there are tzh_ttisgmtcnt UTC/local - # indicators, each stored as a one-byte value; - # they tell whether the transition times associated - # with local time types were specified as UTC or - # local time, and are used when a time zone file - # is used in handling POSIX-style time zone envi- - # ronment variables. 
- - if ttisgmtcnt: - isgmt = struct.unpack(">%db" % ttisgmtcnt, - fileobj.read(ttisgmtcnt)) - - # Build ttinfo list - out.ttinfo_list = [] - for i in range(typecnt): - gmtoff, isdst, abbrind = ttinfo[i] - gmtoff = _get_supported_offset(gmtoff) - tti = _ttinfo() - tti.offset = gmtoff - tti.dstoffset = datetime.timedelta(0) - tti.delta = datetime.timedelta(seconds=gmtoff) - tti.isdst = isdst - tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)] - tti.isstd = (ttisstdcnt > i and isstd[i] != 0) - tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0) - out.ttinfo_list.append(tti) - - # Replace ttinfo indexes for ttinfo objects. - out.trans_idx = [out.ttinfo_list[idx] for idx in out.trans_idx] - - # Set standard, dst, and before ttinfos. before will be - # used when a given time is before any transitions, - # and will be set to the first non-dst ttinfo, or to - # the first dst, if all of them are dst. - out.ttinfo_std = None - out.ttinfo_dst = None - out.ttinfo_before = None - if out.ttinfo_list: - if not out.trans_list_utc: - out.ttinfo_std = out.ttinfo_first = out.ttinfo_list[0] - else: - for i in range(timecnt-1, -1, -1): - tti = out.trans_idx[i] - if not out.ttinfo_std and not tti.isdst: - out.ttinfo_std = tti - elif not out.ttinfo_dst and tti.isdst: - out.ttinfo_dst = tti - - if out.ttinfo_std and out.ttinfo_dst: - break - else: - if out.ttinfo_dst and not out.ttinfo_std: - out.ttinfo_std = out.ttinfo_dst - - for tti in out.ttinfo_list: - if not tti.isdst: - out.ttinfo_before = tti - break - else: - out.ttinfo_before = out.ttinfo_list[0] - - # Now fix transition times to become relative to wall time. - # - # I'm not sure about this. In my tests, the tz source file - # is setup to wall time, and in the binary file isstd and - # isgmt are off, so it should be in wall time. OTOH, it's - # always in gmt time. Let me know if you have comments - # about this. - lastdst = None - lastoffset = None - lastdstoffset = None - lastbaseoffset = None - out.trans_list = [] - - for i, tti in enumerate(out.trans_idx): - offset = tti.offset - dstoffset = 0 - - if lastdst is not None: - if tti.isdst: - if not lastdst: - dstoffset = offset - lastoffset - - if not dstoffset and lastdstoffset: - dstoffset = lastdstoffset - - tti.dstoffset = datetime.timedelta(seconds=dstoffset) - lastdstoffset = dstoffset - - # If a time zone changes its base offset during a DST transition, - # then you need to adjust by the previous base offset to get the - # transition time in local time. Otherwise you use the current - # base offset. Ideally, I would have some mathematical proof of - # why this is true, but I haven't really thought about it enough. - baseoffset = offset - dstoffset - adjustment = baseoffset - if (lastbaseoffset is not None and baseoffset != lastbaseoffset - and tti.isdst != lastdst): - # The base DST has changed - adjustment = lastbaseoffset - - lastdst = tti.isdst - lastoffset = offset - lastbaseoffset = baseoffset - - out.trans_list.append(out.trans_list_utc[i] + adjustment) - - out.trans_idx = tuple(out.trans_idx) - out.trans_list = tuple(out.trans_list) - out.trans_list_utc = tuple(out.trans_list_utc) - - return out - - def _find_last_transition(self, dt, in_utc=False): - # If there's no list, there are no transitions to find - if not self._trans_list: - return None - - timestamp = _datetime_to_timestamp(dt) - - # Find where the timestamp fits in the transition list - if the - # timestamp is a transition time, it's part of the "after" period. 
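The `bisect_right` convention used by `_find_last_transition` above ("a timestamp equal to a transition belongs to the *after* period"), shown in isolation with made-up timestamps:

```
import bisect

trans_list = [100, 200, 300]  # hypothetical transition timestamps
idx = bisect.bisect_right(trans_list, 200) - 1
print(idx)  # 1 -> the period that starts at t=200
```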
- trans_list = self._trans_list_utc if in_utc else self._trans_list - idx = bisect.bisect_right(trans_list, timestamp) - - # We want to know when the previous transition was, so subtract off 1 - return idx - 1 - - def _get_ttinfo(self, idx): - # For no list or after the last transition, default to _ttinfo_std - if idx is None or (idx + 1) >= len(self._trans_list): - return self._ttinfo_std - - # If there is a list and the time is before it, return _ttinfo_before - if idx < 0: - return self._ttinfo_before - - return self._trans_idx[idx] - - def _find_ttinfo(self, dt): - idx = self._resolve_ambiguous_time(dt) - - return self._get_ttinfo(idx) - - def fromutc(self, dt): - """ - The ``tzfile`` implementation of :py:func:`datetime.tzinfo.fromutc`. - - :param dt: - A :py:class:`datetime.datetime` object. - - :raises TypeError: - Raised if ``dt`` is not a :py:class:`datetime.datetime` object. - - :raises ValueError: - Raised if this is called with a ``dt`` which does not have this - ``tzinfo`` attached. - - :return: - Returns a :py:class:`datetime.datetime` object representing the - wall time in ``self``'s time zone. - """ - # These isinstance checks are in datetime.tzinfo, so we'll preserve - # them, even if we don't care about duck typing. - if not isinstance(dt, datetime.datetime): - raise TypeError("fromutc() requires a datetime argument") - - if dt.tzinfo is not self: - raise ValueError("dt.tzinfo is not self") - - # First treat UTC as wall time and get the transition we're in. - idx = self._find_last_transition(dt, in_utc=True) - tti = self._get_ttinfo(idx) - - dt_out = dt + datetime.timedelta(seconds=tti.offset) - - fold = self.is_ambiguous(dt_out, idx=idx) - - return enfold(dt_out, fold=int(fold)) - - def is_ambiguous(self, dt, idx=None): - """ - Whether or not the "wall time" of a given datetime is ambiguous in this - zone. - - :param dt: - A :py:class:`datetime.datetime`, naive or time zone aware. - - - :return: - Returns ``True`` if ambiguous, ``False`` otherwise. - - .. versionadded:: 2.6.0 - """ - if idx is None: - idx = self._find_last_transition(dt) - - # Calculate the difference in offsets from current to previous - timestamp = _datetime_to_timestamp(dt) - tti = self._get_ttinfo(idx) - - if idx is None or idx <= 0: - return False - - od = self._get_ttinfo(idx - 1).offset - tti.offset - tt = self._trans_list[idx] # Transition time - - return timestamp < tt + od - - def _resolve_ambiguous_time(self, dt): - idx = self._find_last_transition(dt) - - # If we have no transitions, return the index - _fold = self._fold(dt) - if idx is None or idx == 0: - return idx - - # If it's ambiguous and we're in a fold, shift to a different index. - idx_offset = int(not _fold and self.is_ambiguous(dt, idx)) - - return idx - idx_offset - - def utcoffset(self, dt): - if dt is None: - return None - - if not self._ttinfo_std: - return ZERO - - return self._find_ttinfo(dt).delta - - def dst(self, dt): - if dt is None: - return None - - if not self._ttinfo_dst: - return ZERO - - tti = self._find_ttinfo(dt) - - if not tti.isdst: - return ZERO - - # The documentation says that utcoffset()-dst() must - # be constant for every dt. 
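The invariant noted in that last comment (`utcoffset() - dst()` is the constant standard offset) can be spot-checked; same zone-database assumption as above:

```
from datetime import datetime
from dateutil import tz

NYC = tz.gettz("America/New_York")
winter = datetime(2020, 1, 15, tzinfo=NYC)
summer = datetime(2020, 7, 15, tzinfo=NYC)

# Both lines print -05:00 (rendered as '-1 day, 19:00:00').
print(winter.utcoffset() - winter.dst())
print(summer.utcoffset() - summer.dst())
```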
- return tti.dstoffset - - @tzname_in_python2 - def tzname(self, dt): - if not self._ttinfo_std or dt is None: - return None - return self._find_ttinfo(dt).abbr - - def __eq__(self, other): - if not isinstance(other, tzfile): - return NotImplemented - return (self._trans_list == other._trans_list and - self._trans_idx == other._trans_idx and - self._ttinfo_list == other._ttinfo_list) - - __hash__ = None - - def __ne__(self, other): - return not (self == other) - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._filename)) - - def __reduce__(self): - return self.__reduce_ex__(None) - - def __reduce_ex__(self, protocol): - return (self.__class__, (None, self._filename), self.__dict__) - - -class tzrange(tzrangebase): - """ - The ``tzrange`` object is a time zone specified by a set of offsets and - abbreviations, equivalent to the way the ``TZ`` variable can be specified - in POSIX-like systems, but using Python delta objects to specify DST - start, end and offsets. - - :param stdabbr: - The abbreviation for standard time (e.g. ``'EST'``). - - :param stdoffset: - An integer or :class:`datetime.timedelta` object or equivalent - specifying the base offset from UTC. - - If unspecified, +00:00 is used. - - :param dstabbr: - The abbreviation for DST / "Summer" time (e.g. ``'EDT'``). - - If specified, with no other DST information, DST is assumed to occur - and the default behavior or ``dstoffset``, ``start`` and ``end`` is - used. If unspecified and no other DST information is specified, it - is assumed that this zone has no DST. - - If this is unspecified and other DST information is *is* specified, - DST occurs in the zone but the time zone abbreviation is left - unchanged. - - :param dstoffset: - A an integer or :class:`datetime.timedelta` object or equivalent - specifying the UTC offset during DST. If unspecified and any other DST - information is specified, it is assumed to be the STD offset +1 hour. - - :param start: - A :class:`relativedelta.relativedelta` object or equivalent specifying - the time and time of year that daylight savings time starts. To - specify, for example, that DST starts at 2AM on the 2nd Sunday in - March, pass: - - ``relativedelta(hours=2, month=3, day=1, weekday=SU(+2))`` - - If unspecified and any other DST information is specified, the default - value is 2 AM on the first Sunday in April. - - :param end: - A :class:`relativedelta.relativedelta` object or equivalent - representing the time and time of year that daylight savings time - ends, with the same specification method as in ``start``. One note is - that this should point to the first time in the *standard* zone, so if - a transition occurs at 2AM in the DST zone and the clocks are set back - 1 hour to 1AM, set the ``hours`` parameter to +1. - - - **Examples:** - - .. testsetup:: tzrange - - from dateutil.tz import tzrange, tzstr - - .. doctest:: tzrange - - >>> tzstr('EST5EDT') == tzrange("EST", -18000, "EDT") - True - - >>> from dateutil.relativedelta import * - >>> range1 = tzrange("EST", -18000, "EDT") - >>> range2 = tzrange("EST", -18000, "EDT", -14400, - ... relativedelta(hours=+2, month=4, day=1, - ... weekday=SU(+1)), - ... relativedelta(hours=+1, month=10, day=31, - ... 
weekday=SU(-1))) - >>> tzstr('EST5EDT') == range1 == range2 - True - - """ - def __init__(self, stdabbr, stdoffset=None, - dstabbr=None, dstoffset=None, - start=None, end=None): - - global relativedelta - from dateutil import relativedelta - - self._std_abbr = stdabbr - self._dst_abbr = dstabbr - - try: - stdoffset = stdoffset.total_seconds() - except (TypeError, AttributeError): - pass - - try: - dstoffset = dstoffset.total_seconds() - except (TypeError, AttributeError): - pass - - if stdoffset is not None: - self._std_offset = datetime.timedelta(seconds=stdoffset) - else: - self._std_offset = ZERO - - if dstoffset is not None: - self._dst_offset = datetime.timedelta(seconds=dstoffset) - elif dstabbr and stdoffset is not None: - self._dst_offset = self._std_offset + datetime.timedelta(hours=+1) - else: - self._dst_offset = ZERO - - if dstabbr and start is None: - self._start_delta = relativedelta.relativedelta( - hours=+2, month=4, day=1, weekday=relativedelta.SU(+1)) - else: - self._start_delta = start - - if dstabbr and end is None: - self._end_delta = relativedelta.relativedelta( - hours=+1, month=10, day=31, weekday=relativedelta.SU(-1)) - else: - self._end_delta = end - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = bool(self._start_delta) - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. - - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. - """ - if not self.hasdst: - return None - - base_year = datetime.datetime(year, 1, 1) - - start = base_year + self._start_delta - end = base_year + self._end_delta - - return (start, end) - - def __eq__(self, other): - if not isinstance(other, tzrange): - return NotImplemented - - return (self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr and - self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._start_delta == other._start_delta and - self._end_delta == other._end_delta) - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -@six.add_metaclass(_TzStrFactory) -class tzstr(tzrange): - """ - ``tzstr`` objects are time zone objects specified by a time-zone string as - it would be passed to a ``TZ`` variable on POSIX-style systems (see - the `GNU C Library: TZ Variable`_ for more details). - - There is one notable exception, which is that POSIX-style time zones use an - inverted offset format, so normally ``GMT+3`` would be parsed as an offset - 3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an - offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX - behavior, pass a ``True`` value to ``posix_offset``. - - The :class:`tzrange` object provides the same functionality, but is - specified using :class:`relativedelta.relativedelta` objects. rather than - strings. - - :param s: - A time zone string in ``TZ`` variable format. This can be a - :class:`bytes` (2.x: :class:`str`), :class:`str` (2.x: - :class:`unicode`) or a stream emitting unicode characters - (e.g. :class:`StringIO`). - - :param posix_offset: - Optional. 
If set to ``True``, interpret strings such as ``GMT+3`` or - ``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the - POSIX standard. - - .. caution:: - - Prior to version 2.7.0, this function also supported time zones - in the format: - - * ``EST5EDT,4,0,6,7200,10,0,26,7200,3600`` - * ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600`` - - This format is non-standard and has been deprecated; this function - will raise a :class:`DeprecatedTZFormatWarning` until - support is removed in a future version. - - .. _`GNU C Library: TZ Variable`: - https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html - """ - def __init__(self, s, posix_offset=False): - global parser - from dateutil.parser import _parser as parser - - self._s = s - - res = parser._parsetz(s) - if res is None or res.any_unused_tokens: - raise ValueError("unknown string format") - - # Here we break the compatibility with the TZ variable handling. - # GMT-3 actually *means* the timezone -3. - if res.stdabbr in ("GMT", "UTC") and not posix_offset: - res.stdoffset *= -1 - - # We must initialize it first, since _delta() needs - # _std_offset and _dst_offset set. Use False in start/end - # to avoid building it two times. - tzrange.__init__(self, res.stdabbr, res.stdoffset, - res.dstabbr, res.dstoffset, - start=False, end=False) - - if not res.dstabbr: - self._start_delta = None - self._end_delta = None - else: - self._start_delta = self._delta(res.start) - if self._start_delta: - self._end_delta = self._delta(res.end, isend=1) - - self.hasdst = bool(self._start_delta) - - def _delta(self, x, isend=0): - from dateutil import relativedelta - kwargs = {} - if x.month is not None: - kwargs["month"] = x.month - if x.weekday is not None: - kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week) - if x.week > 0: - kwargs["day"] = 1 - else: - kwargs["day"] = 31 - elif x.day: - kwargs["day"] = x.day - elif x.yday is not None: - kwargs["yearday"] = x.yday - elif x.jyday is not None: - kwargs["nlyearday"] = x.jyday - if not kwargs: - # Default is to start on first sunday of april, and end - # on last sunday of october. - if not isend: - kwargs["month"] = 4 - kwargs["day"] = 1 - kwargs["weekday"] = relativedelta.SU(+1) - else: - kwargs["month"] = 10 - kwargs["day"] = 31 - kwargs["weekday"] = relativedelta.SU(-1) - if x.time is not None: - kwargs["seconds"] = x.time - else: - # Default is 2AM. - kwargs["seconds"] = 7200 - if isend: - # Convert to standard time, to follow the documented way - # of working with the extra hour. See the documentation - # of the tzinfo class. 
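Stepping out of the source for a moment: the sign inversion described in the ``tzstr`` docstring above is quick to verify. A sketch, assuming the vendored dateutil (dates arbitrary):

```
from datetime import datetime, timedelta
from dateutil import tz

dt = datetime(2020, 1, 1)

# POSIX reads 'UTC+3' as 3 hours *behind* UTC; tzstr inverts that by default...
assert tz.tzstr('UTC+3').utcoffset(dt) == timedelta(hours=3)
# ...while posix_offset=True keeps the literal POSIX interpretation.
assert tz.tzstr('UTC+3', posix_offset=True).utcoffset(dt) == timedelta(hours=-3)

# Full TZ rule strings with explicit M-format transitions parse as well.
print(tz.tzstr('EST5EDT,M3.2.0/2,M11.1.0/2').tzname(datetime(2020, 7, 1)))  # EDT
```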
-            delta = self._dst_offset - self._std_offset
-            kwargs["seconds"] -= delta.seconds + delta.days * 86400
-        return relativedelta.relativedelta(**kwargs)
-
-    def __repr__(self):
-        return "%s(%s)" % (self.__class__.__name__, repr(self._s))
-
-
-class _tzicalvtzcomp(object):
-    def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
-                 tzname=None, rrule=None):
-        self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
-        self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
-        self.tzoffsetdiff = self.tzoffsetto - self.tzoffsetfrom
-        self.isdst = isdst
-        self.tzname = tzname
-        self.rrule = rrule
-
-
-class _tzicalvtz(_tzinfo):
-    def __init__(self, tzid, comps=[]):
-        super(_tzicalvtz, self).__init__()
-
-        self._tzid = tzid
-        self._comps = comps
-        self._cachedate = []
-        self._cachecomp = []
-        self._cache_lock = _thread.allocate_lock()
-
-    def _find_comp(self, dt):
-        if len(self._comps) == 1:
-            return self._comps[0]
-
-        dt = dt.replace(tzinfo=None)
-
-        try:
-            with self._cache_lock:
-                return self._cachecomp[self._cachedate.index(
-                    (dt, self._fold(dt)))]
-        except ValueError:
-            pass
-
-        lastcompdt = None
-        lastcomp = None
-
-        for comp in self._comps:
-            compdt = self._find_compdt(comp, dt)
-
-            if compdt and (not lastcompdt or lastcompdt < compdt):
-                lastcompdt = compdt
-                lastcomp = comp
-
-        if not lastcomp:
-            # RFC says nothing about what to do when a given
-            # time is before the first onset date. We'll look for the
-            # first standard component, or the first component, if
-            # none is found.
-            for comp in self._comps:
-                if not comp.isdst:
-                    lastcomp = comp
-                    break
-            else:
-                lastcomp = self._comps[0]
-
-        with self._cache_lock:
-            self._cachedate.insert(0, (dt, self._fold(dt)))
-            self._cachecomp.insert(0, lastcomp)
-
-            if len(self._cachedate) > 10:
-                self._cachedate.pop()
-                self._cachecomp.pop()
-
-        return lastcomp
-
-    def _find_compdt(self, comp, dt):
-        if comp.tzoffsetdiff < ZERO and self._fold(dt):
-            dt -= comp.tzoffsetdiff
-
-        compdt = comp.rrule.before(dt, inc=True)
-
-        return compdt
-
-    def utcoffset(self, dt):
-        if dt is None:
-            return None
-
-        return self._find_comp(dt).tzoffsetto
-
-    def dst(self, dt):
-        comp = self._find_comp(dt)
-        if comp.isdst:
-            return comp.tzoffsetdiff
-        else:
-            return ZERO
-
-    @tzname_in_python2
-    def tzname(self, dt):
-        return self._find_comp(dt).tzname
-
-    def __repr__(self):
-        return "<tzicalvtz %s>" % repr(self._tzid)
-
-    __reduce__ = object.__reduce__
-
-
-class tzical(object):
-    """
-    This object is designed to parse an iCalendar-style ``VTIMEZONE`` structure
-    as set out in `RFC 5545`_ Section 4.6.5 into one or more `tzinfo` objects.
-
-    :param `fileobj`:
-        A file or stream in iCalendar format, which should be UTF-8 encoded
-        with CRLF endings.
-
-    .. _`RFC 5545`: https://tools.ietf.org/html/rfc5545
-    """
-    def __init__(self, fileobj):
-        global rrule
-        from dateutil import rrule
-
-        if isinstance(fileobj, string_types):
-            self._s = fileobj
-            # ical should be encoded in UTF-8 with CRLF
-            fileobj = open(fileobj, 'r')
-        else:
-            self._s = getattr(fileobj, 'name', repr(fileobj))
-            fileobj = _nullcontext(fileobj)
-
-        self._vtz = {}
-
-        with fileobj as fobj:
-            self._parse_rfc(fobj.read())
-
-    def keys(self):
-        """
-        Retrieves the available time zones as a list.
-        """
-        return list(self._vtz.keys())
-
-    def get(self, tzid=None):
-        """
-        Retrieve a :py:class:`datetime.tzinfo` object by its ``tzid``.
-
-        :param tzid:
-            If there is exactly one time zone available, omitting ``tzid``
-            or passing :py:const:`None` value returns it. Otherwise a valid
-            key (which can be retrieved from :func:`keys`) is required.
-
-        :raises ValueError:
-            Raised if ``tzid`` is not specified but there are either more
-            or fewer than one zone defined.
-
-        :returns:
-            Returns either a :py:class:`datetime.tzinfo` object representing
-            the relevant time zone or :py:const:`None` if the ``tzid`` was
-            not found.
-        """
-        if tzid is None:
-            if len(self._vtz) == 0:
-                raise ValueError("no timezones defined")
-            elif len(self._vtz) > 1:
-                raise ValueError("more than one timezone available")
-            tzid = next(iter(self._vtz))
-
-        return self._vtz.get(tzid)
-
-    def _parse_offset(self, s):
-        s = s.strip()
-        if not s:
-            raise ValueError("empty offset")
-        if s[0] in ('+', '-'):
-            signal = (-1, +1)[s[0] == '+']
-            s = s[1:]
-        else:
-            signal = +1
-        if len(s) == 4:
-            return (int(s[:2]) * 3600 + int(s[2:]) * 60) * signal
-        elif len(s) == 6:
-            return (int(s[:2]) * 3600 + int(s[2:4]) * 60 + int(s[4:])) * signal
-        else:
-            raise ValueError("invalid offset: " + s)
-
-    def _parse_rfc(self, s):
-        lines = s.splitlines()
-        if not lines:
-            raise ValueError("empty string")
-
-        # Unfold
-        i = 0
-        while i < len(lines):
-            line = lines[i].rstrip()
-            if not line:
-                del lines[i]
-            elif i > 0 and line[0] == " ":
-                lines[i-1] += line[1:]
-                del lines[i]
-            else:
-                i += 1
-
-        tzid = None
-        comps = []
-        invtz = False
-        comptype = None
-        for line in lines:
-            if not line:
-                continue
-            name, value = line.split(':', 1)
-            parms = name.split(';')
-            if not parms:
-                raise ValueError("empty property name")
-            name = parms[0].upper()
-            parms = parms[1:]
-            if invtz:
-                if name == "BEGIN":
-                    if value in ("STANDARD", "DAYLIGHT"):
-                        # Process component
-                        pass
-                    else:
-                        raise ValueError("unknown component: "+value)
-                    comptype = value
-                    founddtstart = False
-                    tzoffsetfrom = None
-                    tzoffsetto = None
-                    rrulelines = []
-                    tzname = None
-                elif name == "END":
-                    if value == "VTIMEZONE":
-                        if comptype:
-                            raise ValueError("component not closed: "+comptype)
-                        if not tzid:
-                            raise ValueError("mandatory TZID not found")
-                        if not comps:
-                            raise ValueError(
-                                "at least one component is needed")
-                        # Process vtimezone
-                        self._vtz[tzid] = _tzicalvtz(tzid, comps)
-                        invtz = False
-                    elif value == comptype:
-                        if not founddtstart:
-                            raise ValueError("mandatory DTSTART not found")
-                        if tzoffsetfrom is None:
-                            raise ValueError(
-                                "mandatory TZOFFSETFROM not found")
-                        if tzoffsetto is None:
-                            raise ValueError(
-                                "mandatory TZOFFSETTO not found")
-                        # Process component
-                        rr = None
-                        if rrulelines:
-                            rr = rrule.rrulestr("\n".join(rrulelines),
-                                                compatible=True,
-                                                ignoretz=True,
-                                                cache=True)
-                        comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
-                                              (comptype == "DAYLIGHT"),
-                                              tzname, rr)
-                        comps.append(comp)
-                        comptype = None
-                    else:
-                        raise ValueError("invalid component end: "+value)
-                elif comptype:
-                    if name == "DTSTART":
-                        # DTSTART in VTIMEZONE takes a subset of valid RRULE
-                        # values under RFC 5545.
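As an aside, the parser above can be exercised end to end on a hand-rolled calendar. A sketch with a hypothetical single-component ``VTIMEZONE`` (zone id and rule invented):

```
import io
from datetime import datetime
from dateutil.tz import tzical

VTZ = "\r\n".join([
    "BEGIN:VTIMEZONE",
    "TZID:Example/Zone",
    "BEGIN:STANDARD",
    "DTSTART:19671029T020000",
    "RRULE:FREQ=YEARLY;BYMONTH=10;BYDAY=-1SU",
    "TZOFFSETFROM:-0400",
    "TZOFFSETTO:-0500",
    "TZNAME:EST",
    "END:STANDARD",
    "END:VTIMEZONE",
])

ical = tzical(io.StringIO(VTZ))
print(ical.keys())                           # ['Example/Zone']
zone = ical.get()                            # tzid optional: only one zone
print(zone.utcoffset(datetime(2020, 1, 1)))  # -1 day, 19:00:00 (UTC-5)
```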
- for parm in parms: - if parm != 'VALUE=DATE-TIME': - msg = ('Unsupported DTSTART param in ' + - 'VTIMEZONE: ' + parm) - raise ValueError(msg) - rrulelines.append(line) - founddtstart = True - elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"): - rrulelines.append(line) - elif name == "TZOFFSETFROM": - if parms: - raise ValueError( - "unsupported %s parm: %s " % (name, parms[0])) - tzoffsetfrom = self._parse_offset(value) - elif name == "TZOFFSETTO": - if parms: - raise ValueError( - "unsupported TZOFFSETTO parm: "+parms[0]) - tzoffsetto = self._parse_offset(value) - elif name == "TZNAME": - if parms: - raise ValueError( - "unsupported TZNAME parm: "+parms[0]) - tzname = value - elif name == "COMMENT": - pass - else: - raise ValueError("unsupported property: "+name) - else: - if name == "TZID": - if parms: - raise ValueError( - "unsupported TZID parm: "+parms[0]) - tzid = value - elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"): - pass - else: - raise ValueError("unsupported property: "+name) - elif name == "BEGIN" and value == "VTIMEZONE": - tzid = None - comps = [] - invtz = True - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, repr(self._s)) - - -if sys.platform != "win32": - TZFILES = ["/etc/localtime", "localtime"] - TZPATHS = ["/usr/share/zoneinfo", - "/usr/lib/zoneinfo", - "/usr/share/lib/zoneinfo", - "/etc/zoneinfo"] -else: - TZFILES = [] - TZPATHS = [] - - -def __get_gettz(): - tzlocal_classes = (tzlocal,) - if tzwinlocal is not None: - tzlocal_classes += (tzwinlocal,) - - class GettzFunc(object): - """ - Retrieve a time zone object from a string representation - - This function is intended to retrieve the :py:class:`tzinfo` subclass - that best represents the time zone that would be used if a POSIX - `TZ variable`_ were set to the same value. - - If no argument or an empty string is passed to ``gettz``, local time - is returned: - - .. code-block:: python3 - - >>> gettz() - tzfile('/etc/localtime') - - This function is also the preferred way to map IANA tz database keys - to :class:`tzfile` objects: - - .. code-block:: python3 - - >>> gettz('Pacific/Kiritimati') - tzfile('/usr/share/zoneinfo/Pacific/Kiritimati') - - On Windows, the standard is extended to include the Windows-specific - zone names provided by the operating system: - - .. code-block:: python3 - - >>> gettz('Egypt Standard Time') - tzwin('Egypt Standard Time') - - Passing a GNU ``TZ`` style string time zone specification returns a - :class:`tzstr` object: - - .. code-block:: python3 - - >>> gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') - tzstr('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3') - - :param name: - A time zone name (IANA, or, on Windows, Windows keys), location of - a ``tzfile(5)`` zoneinfo file or ``TZ`` variable style time zone - specifier. An empty string, no argument or ``None`` is interpreted - as local time. - - :return: - Returns an instance of one of ``dateutil``'s :py:class:`tzinfo` - subclasses. - - .. versionchanged:: 2.7.0 - - After version 2.7.0, any two calls to ``gettz`` using the same - input strings will return the same object: - - .. code-block:: python3 - - >>> tz.gettz('America/Chicago') is tz.gettz('America/Chicago') - True - - In addition to improving performance, this ensures that - `"same zone" semantics`_ are used for datetimes in the same zone. - - - .. _`TZ variable`: - https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html - - .. 
_`"same zone" semantics`: - https://blog.ganssle.io/articles/2018/02/aware-datetime-arithmetic.html - """ - def __init__(self): - - self.__instances = weakref.WeakValueDictionary() - self.__strong_cache_size = 8 - self.__strong_cache = OrderedDict() - self._cache_lock = _thread.allocate_lock() - - def __call__(self, name=None): - with self._cache_lock: - rv = self.__instances.get(name, None) - - if rv is None: - rv = self.nocache(name=name) - if not (name is None - or isinstance(rv, tzlocal_classes) - or rv is None): - # tzlocal is slightly more complicated than the other - # time zone providers because it depends on environment - # at construction time, so don't cache that. - # - # We also cannot store weak references to None, so we - # will also not store that. - self.__instances[name] = rv - else: - # No need for strong caching, return immediately - return rv - - self.__strong_cache[name] = self.__strong_cache.pop(name, rv) - - if len(self.__strong_cache) > self.__strong_cache_size: - self.__strong_cache.popitem(last=False) - - return rv - - def set_cache_size(self, size): - with self._cache_lock: - self.__strong_cache_size = size - while len(self.__strong_cache) > size: - self.__strong_cache.popitem(last=False) - - def cache_clear(self): - with self._cache_lock: - self.__instances = weakref.WeakValueDictionary() - self.__strong_cache.clear() - - @staticmethod - def nocache(name=None): - """A non-cached version of gettz""" - tz = None - if not name: - try: - name = os.environ["TZ"] - except KeyError: - pass - if name is None or name == ":": - for filepath in TZFILES: - if not os.path.isabs(filepath): - filename = filepath - for path in TZPATHS: - filepath = os.path.join(path, filename) - if os.path.isfile(filepath): - break - else: - continue - if os.path.isfile(filepath): - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = tzlocal() - else: - try: - if name.startswith(":"): - name = name[1:] - except TypeError as e: - if isinstance(name, bytes): - new_msg = "gettz argument should be str, not bytes" - six.raise_from(TypeError(new_msg), e) - else: - raise - if os.path.isabs(name): - if os.path.isfile(name): - tz = tzfile(name) - else: - tz = None - else: - for path in TZPATHS: - filepath = os.path.join(path, name) - if not os.path.isfile(filepath): - filepath = filepath.replace(' ', '_') - if not os.path.isfile(filepath): - continue - try: - tz = tzfile(filepath) - break - except (IOError, OSError, ValueError): - pass - else: - tz = None - if tzwin is not None: - try: - tz = tzwin(name) - except (WindowsError, UnicodeEncodeError): - # UnicodeEncodeError is for Python 2.7 compat - tz = None - - if not tz: - from dateutil.zoneinfo import get_zonefile_instance - tz = get_zonefile_instance().get(name) - - if not tz: - for c in name: - # name is not a tzstr unless it has at least - # one offset. For short values of "name", an - # explicit for loop seems to be the fastest way - # To determine if a string contains a digit - if c in "0123456789": - try: - tz = tzstr(name) - except ValueError: - pass - break - else: - if name in ("GMT", "UTC"): - tz = UTC - elif name in time.tzname: - tz = tzlocal() - return tz - - return GettzFunc() - - -gettz = __get_gettz() -del __get_gettz - - -def datetime_exists(dt, tz=None): - """ - Given a datetime and a time zone, determine whether or not a given datetime - would fall in a gap. - - :param dt: - A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` - is provided.) 
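A short sketch of the cache behavior described in the docstring above, assuming the system zoneinfo (or the bundled tarball) can resolve the key:

```
from dateutil import tz

nyc = tz.gettz('America/New_York')

# Since 2.7.0, equal inputs return the identical object, preserving
# "same zone" semantics in datetime arithmetic.
assert tz.gettz('America/New_York') is nyc

# A GNU TZ-style specifier falls through to tzstr parsing.
print(tz.gettz('AEST-10AEDT-11,M10.1.0/2,M4.1.0/3'))

# The strong-reference cache is adjustable, per the methods above.
tz.gettz.set_cache_size(16)
tz.gettz.cache_clear()
```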
- - :param tz: - A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If - ``None`` or not provided, the datetime's own time zone will be used. - - :return: - Returns a boolean value whether or not the "wall time" exists in - ``tz``. - - .. versionadded:: 2.7.0 - """ - if tz is None: - if dt.tzinfo is None: - raise ValueError('Datetime is naive and no time zone provided.') - tz = dt.tzinfo - - dt = dt.replace(tzinfo=None) - - # This is essentially a test of whether or not the datetime can survive - # a round trip to UTC. - dt_rt = dt.replace(tzinfo=tz).astimezone(UTC).astimezone(tz) - dt_rt = dt_rt.replace(tzinfo=None) - - return dt == dt_rt - - -def datetime_ambiguous(dt, tz=None): - """ - Given a datetime and a time zone, determine whether or not a given datetime - is ambiguous (i.e if there are two times differentiated only by their DST - status). - - :param dt: - A :class:`datetime.datetime` (whose time zone will be ignored if ``tz`` - is provided.) - - :param tz: - A :class:`datetime.tzinfo` with support for the ``fold`` attribute. If - ``None`` or not provided, the datetime's own time zone will be used. - - :return: - Returns a boolean value whether or not the "wall time" is ambiguous in - ``tz``. - - .. versionadded:: 2.6.0 - """ - if tz is None: - if dt.tzinfo is None: - raise ValueError('Datetime is naive and no time zone provided.') - - tz = dt.tzinfo - - # If a time zone defines its own "is_ambiguous" function, we'll use that. - is_ambiguous_fn = getattr(tz, 'is_ambiguous', None) - if is_ambiguous_fn is not None: - try: - return tz.is_ambiguous(dt) - except Exception: - pass - - # If it doesn't come out and tell us it's ambiguous, we'll just check if - # the fold attribute has any effect on this particular date and time. - dt = dt.replace(tzinfo=tz) - wall_0 = enfold(dt, fold=0) - wall_1 = enfold(dt, fold=1) - - same_offset = wall_0.utcoffset() == wall_1.utcoffset() - same_dst = wall_0.dst() == wall_1.dst() - - return not (same_offset and same_dst) - - -def resolve_imaginary(dt): - """ - Given a datetime that may be imaginary, return an existing datetime. - - This function assumes that an imaginary datetime represents what the - wall time would be in a zone had the offset transition not occurred, so - it will always fall forward by the transition's change in offset. - - .. doctest:: - - >>> from dateutil import tz - >>> from datetime import datetime - >>> NYC = tz.gettz('America/New_York') - >>> print(tz.resolve_imaginary(datetime(2017, 3, 12, 2, 30, tzinfo=NYC))) - 2017-03-12 03:30:00-04:00 - - >>> KIR = tz.gettz('Pacific/Kiritimati') - >>> print(tz.resolve_imaginary(datetime(1995, 1, 1, 12, 30, tzinfo=KIR))) - 1995-01-02 12:30:00+14:00 - - As a note, :func:`datetime.astimezone` is guaranteed to produce a valid, - existing datetime, so a round-trip to and from UTC is sufficient to get - an extant datetime, however, this generally "falls back" to an earlier time - rather than falling forward to the STD side (though no guarantees are made - about this behavior). - - :param dt: - A :class:`datetime.datetime` which may or may not exist. - - :return: - Returns an existing :class:`datetime.datetime`. If ``dt`` was not - imaginary, the datetime returned is guaranteed to be the same object - passed to the function. - - .. 
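Both predicates are easy to exercise on the 2017 US transitions; the dates below are chosen to hit the gap and the fold, and assume 'America/New_York' resolves:

```
from datetime import datetime
from dateutil import tz

NYC = tz.gettz('America/New_York')

# 2:30 AM on 2017-03-12 was skipped by spring-forward: it does not exist.
print(tz.datetime_exists(datetime(2017, 3, 12, 2, 30), NYC))     # False

# 1:30 AM on 2017-11-05 happened twice at fall-back: it is ambiguous.
print(tz.datetime_ambiguous(datetime(2017, 11, 5, 1, 30), NYC))  # True
```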
versionadded:: 2.7.0 - """ - if dt.tzinfo is not None and not datetime_exists(dt): - - curr_offset = (dt + datetime.timedelta(hours=24)).utcoffset() - old_offset = (dt - datetime.timedelta(hours=24)).utcoffset() - - dt += curr_offset - old_offset - - return dt - - -def _datetime_to_timestamp(dt): - """ - Convert a :class:`datetime.datetime` object to an epoch timestamp in - seconds since January 1, 1970, ignoring the time zone. - """ - return (dt.replace(tzinfo=None) - EPOCH).total_seconds() - - -if sys.version_info >= (3, 6): - def _get_supported_offset(second_offset): - return second_offset -else: - def _get_supported_offset(second_offset): - # For python pre-3.6, round to full-minutes if that's not the case. - # Python's datetime doesn't accept sub-minute timezones. Check - # http://python.org/sf/1447945 or https://bugs.python.org/issue5288 - # for some information. - old_offset = second_offset - calculated_offset = 60 * ((second_offset + 30) // 60) - return calculated_offset - - -try: - # Python 3.7 feature - from contextlib import nullcontext as _nullcontext -except ImportError: - class _nullcontext(object): - """ - Class for wrapping contexts so that they are passed through in a - with statement. - """ - def __init__(self, context): - self.context = context - - def __enter__(self): - return self.context - - def __exit__(*args, **kwargs): - pass - -# vim:ts=4:sw=4:et diff --git a/venv/lib/python3.7/site-packages/dateutil/tz/win.py b/venv/lib/python3.7/site-packages/dateutil/tz/win.py deleted file mode 100644 index cde07ba..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/tz/win.py +++ /dev/null @@ -1,370 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module provides an interface to the native time zone data on Windows, -including :py:class:`datetime.tzinfo` implementations. - -Attempting to import this module on a non-Windows platform will raise an -:py:obj:`ImportError`. -""" -# This code was originally contributed by Jeffrey Harris. -import datetime -import struct - -from six.moves import winreg -from six import text_type - -try: - import ctypes - from ctypes import wintypes -except ValueError: - # ValueError is raised on non-Windows systems for some horrible reason. - raise ImportError("Running tzwin on non-Windows system") - -from ._common import tzrangebase - -__all__ = ["tzwin", "tzwinlocal", "tzres"] - -ONEWEEK = datetime.timedelta(7) - -TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones" -TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones" -TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation" - - -def _settzkeyname(): - handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) - try: - winreg.OpenKey(handle, TZKEYNAMENT).Close() - TZKEYNAME = TZKEYNAMENT - except WindowsError: - TZKEYNAME = TZKEYNAME9X - handle.Close() - return TZKEYNAME - - -TZKEYNAME = _settzkeyname() - - -class tzres(object): - """ - Class for accessing ``tzres.dll``, which contains timezone name related - resources. - - .. 
versionadded:: 2.5.0 - """ - p_wchar = ctypes.POINTER(wintypes.WCHAR) # Pointer to a wide char - - def __init__(self, tzres_loc='tzres.dll'): - # Load the user32 DLL so we can load strings from tzres - user32 = ctypes.WinDLL('user32') - - # Specify the LoadStringW function - user32.LoadStringW.argtypes = (wintypes.HINSTANCE, - wintypes.UINT, - wintypes.LPWSTR, - ctypes.c_int) - - self.LoadStringW = user32.LoadStringW - self._tzres = ctypes.WinDLL(tzres_loc) - self.tzres_loc = tzres_loc - - def load_name(self, offset): - """ - Load a timezone name from a DLL offset (integer). - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.load_name(112)) - 'Eastern Standard Time' - - :param offset: - A positive integer value referring to a string from the tzres dll. - - .. note:: - - Offsets found in the registry are generally of the form - ``@tzres.dll,-114``. The offset in this case is 114, not -114. - - """ - resource = self.p_wchar() - lpBuffer = ctypes.cast(ctypes.byref(resource), wintypes.LPWSTR) - nchar = self.LoadStringW(self._tzres._handle, offset, lpBuffer, 0) - return resource[:nchar] - - def name_from_string(self, tzname_str): - """ - Parse strings as returned from the Windows registry into the time zone - name as defined in the registry. - - >>> from dateutil.tzwin import tzres - >>> tzr = tzres() - >>> print(tzr.name_from_string('@tzres.dll,-251')) - 'Dateline Daylight Time' - >>> print(tzr.name_from_string('Eastern Standard Time')) - 'Eastern Standard Time' - - :param tzname_str: - A timezone name string as returned from a Windows registry key. - - :return: - Returns the localized timezone string from tzres.dll if the string - is of the form `@tzres.dll,-offset`, else returns the input string. - """ - if not tzname_str.startswith('@'): - return tzname_str - - name_splt = tzname_str.split(',-') - try: - offset = int(name_splt[1]) - except: - raise ValueError("Malformed timezone string.") - - return self.load_name(offset) - - -class tzwinbase(tzrangebase): - """tzinfo class based on win32's timezones available in the registry.""" - def __init__(self): - raise NotImplementedError('tzwinbase is an abstract base class') - - def __eq__(self, other): - # Compare on all relevant dimensions, including name. - if not isinstance(other, tzwinbase): - return NotImplemented - - return (self._std_offset == other._std_offset and - self._dst_offset == other._dst_offset and - self._stddayofweek == other._stddayofweek and - self._dstdayofweek == other._dstdayofweek and - self._stdweeknumber == other._stdweeknumber and - self._dstweeknumber == other._dstweeknumber and - self._stdhour == other._stdhour and - self._dsthour == other._dsthour and - self._stdminute == other._stdminute and - self._dstminute == other._dstminute and - self._std_abbr == other._std_abbr and - self._dst_abbr == other._dst_abbr) - - @staticmethod - def list(): - """Return a list of all time zones known to the system.""" - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZKEYNAME) as tzkey: - result = [winreg.EnumKey(tzkey, i) - for i in range(winreg.QueryInfoKey(tzkey)[0])] - return result - - def display(self): - """ - Return the display name of the time zone. - """ - return self._display - - def transitions(self, year): - """ - For a given year, get the DST on and off transition times, expressed - always on the standard time side. For zones with no transitions, this - function returns ``None``. 
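The ``@tzres.dll,-offset`` convention handled above does not need ctypes to demonstrate; a platform-independent sketch of just the string handling (helper name hypothetical):

```
def split_tzres_ref(tzname_str):
    # '@tzres.dll,-114' -> ('tzres.dll', 114); plain names pass through.
    if not tzname_str.startswith('@'):
        return None, tzname_str
    dll, _, offset = tzname_str[1:].partition(',-')
    if not offset.isdigit():
        raise ValueError("Malformed timezone string.")
    return dll, int(offset)

print(split_tzres_ref('@tzres.dll,-114'))        # ('tzres.dll', 114)
print(split_tzres_ref('Eastern Standard Time'))  # (None, 'Eastern Standard Time')
```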
- - :param year: - The year whose transitions you would like to query. - - :return: - Returns a :class:`tuple` of :class:`datetime.datetime` objects, - ``(dston, dstoff)`` for zones with an annual DST transition, or - ``None`` for fixed offset zones. - """ - - if not self.hasdst: - return None - - dston = picknthweekday(year, self._dstmonth, self._dstdayofweek, - self._dsthour, self._dstminute, - self._dstweeknumber) - - dstoff = picknthweekday(year, self._stdmonth, self._stddayofweek, - self._stdhour, self._stdminute, - self._stdweeknumber) - - # Ambiguous dates default to the STD side - dstoff -= self._dst_base_offset - - return dston, dstoff - - def _get_hasdst(self): - return self._dstmonth != 0 - - @property - def _dst_base_offset(self): - return self._dst_base_offset_ - - -class tzwin(tzwinbase): - """ - Time zone object created from the zone info in the Windows registry - - These are similar to :py:class:`dateutil.tz.tzrange` objects in that - the time zone data is provided in the format of a single offset rule - for either 0 or 2 time zone transitions per year. - - :param: name - The name of a Windows time zone key, e.g. "Eastern Standard Time". - The full list of keys can be retrieved with :func:`tzwin.list`. - """ - - def __init__(self, name): - self._name = name - - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - tzkeyname = text_type("{kn}\\{name}").format(kn=TZKEYNAME, name=name) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - keydict = valuestodict(tzkey) - - self._std_abbr = keydict["Std"] - self._dst_abbr = keydict["Dlt"] - - self._display = keydict["Display"] - - # See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm - tup = struct.unpack("=3l16h", keydict["TZI"]) - stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1 - dstoffset = stdoffset-tup[2] # + DaylightBias * -1 - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # for the meaning see the win32 TIME_ZONE_INFORMATION structure docs - # http://msdn.microsoft.com/en-us/library/windows/desktop/ms725481(v=vs.85).aspx - (self._stdmonth, - self._stddayofweek, # Sunday = 0 - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[4:9] - - (self._dstmonth, - self._dstdayofweek, # Sunday = 0 - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[12:17] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwin(%s)" % repr(self._name) - - def __reduce__(self): - return (self.__class__, (self._name,)) - - -class tzwinlocal(tzwinbase): - """ - Class representing the local time zone information in the Windows registry - - While :class:`dateutil.tz.tzlocal` makes system calls (via the :mod:`time` - module) to retrieve time zone information, ``tzwinlocal`` retrieves the - rules directly from the Windows registry and creates an object like - :class:`dateutil.tz.tzwin`. - - Because Windows does not have an equivalent of :func:`time.tzset`, on - Windows, :class:`dateutil.tz.tzlocal` instances will always reflect the - time zone settings *at the time that the process was started*, meaning - changes to the machine's time zone settings during the run of a program - on Windows will **not** be reflected by :class:`dateutil.tz.tzlocal`. - Because ``tzwinlocal`` reads the registry directly, it is unaffected by - this issue. 
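The ``=3l16h`` unpacking used above can be tried on a synthetic registry blob; the values below mimic US Eastern and are invented for illustration:

```
import struct

# Bias, StandardBias, DaylightBias (minutes), then two 8-short SYSTEMTIME
# records: standard start (Nov, 1st Sunday, 02:00) and DST start
# (Mar, 2nd Sunday, 02:00), with wYear=0 for recurring rules.
tzi = struct.pack("=3l16h",
                  300, 0, -60,
                  0, 11, 0, 1, 2, 0, 0, 0,
                  0, 3, 0, 2, 2, 0, 0, 0)

tup = struct.unpack("=3l16h", tzi)
stdoffset = -tup[0] - tup[1]    # -(Bias + StandardBias) -> -300 (UTC-5)
dstoffset = stdoffset - tup[2]  # minus DaylightBias      -> -240 (UTC-4)
print(stdoffset, dstoffset)     # -300 -240
```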
- """ - def __init__(self): - with winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE) as handle: - with winreg.OpenKey(handle, TZLOCALKEYNAME) as tzlocalkey: - keydict = valuestodict(tzlocalkey) - - self._std_abbr = keydict["StandardName"] - self._dst_abbr = keydict["DaylightName"] - - try: - tzkeyname = text_type('{kn}\\{sn}').format(kn=TZKEYNAME, - sn=self._std_abbr) - with winreg.OpenKey(handle, tzkeyname) as tzkey: - _keydict = valuestodict(tzkey) - self._display = _keydict["Display"] - except OSError: - self._display = None - - stdoffset = -keydict["Bias"]-keydict["StandardBias"] - dstoffset = stdoffset-keydict["DaylightBias"] - - self._std_offset = datetime.timedelta(minutes=stdoffset) - self._dst_offset = datetime.timedelta(minutes=dstoffset) - - # For reasons unclear, in this particular key, the day of week has been - # moved to the END of the SYSTEMTIME structure. - tup = struct.unpack("=8h", keydict["StandardStart"]) - - (self._stdmonth, - self._stdweeknumber, # Last = 5 - self._stdhour, - self._stdminute) = tup[1:5] - - self._stddayofweek = tup[7] - - tup = struct.unpack("=8h", keydict["DaylightStart"]) - - (self._dstmonth, - self._dstweeknumber, # Last = 5 - self._dsthour, - self._dstminute) = tup[1:5] - - self._dstdayofweek = tup[7] - - self._dst_base_offset_ = self._dst_offset - self._std_offset - self.hasdst = self._get_hasdst() - - def __repr__(self): - return "tzwinlocal()" - - def __str__(self): - # str will return the standard name, not the daylight name. - return "tzwinlocal(%s)" % repr(self._std_abbr) - - def __reduce__(self): - return (self.__class__, ()) - - -def picknthweekday(year, month, dayofweek, hour, minute, whichweek): - """ dayofweek == 0 means Sunday, whichweek 5 means last instance """ - first = datetime.datetime(year, month, 1, hour, minute) - - # This will work if dayofweek is ISO weekday (1-7) or Microsoft-style (0-6), - # Because 7 % 7 = 0 - weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7) + 1) - wd = weekdayone + ((whichweek - 1) * ONEWEEK) - if (wd.month != month): - wd -= ONEWEEK - - return wd - - -def valuestodict(key): - """Convert a registry key's values to a dictionary.""" - dout = {} - size = winreg.QueryInfoKey(key)[1] - tz_res = None - - for i in range(size): - key_name, value, dtype = winreg.EnumValue(key, i) - if dtype == winreg.REG_DWORD or dtype == winreg.REG_DWORD_LITTLE_ENDIAN: - # If it's a DWORD (32-bit integer), it's stored as unsigned - convert - # that to a proper signed integer - if value & (1 << 31): - value = value - (1 << 32) - elif dtype == winreg.REG_SZ: - # If it's a reference to the tzres DLL, load the actual string - if value.startswith('@tzres'): - tz_res = tz_res or tzres() - value = tz_res.name_from_string(value) - - value = value.rstrip('\x00') # Remove trailing nulls - - dout[key_name] = value - - return dout diff --git a/venv/lib/python3.7/site-packages/dateutil/tzwin.py b/venv/lib/python3.7/site-packages/dateutil/tzwin.py deleted file mode 100644 index cebc673..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/tzwin.py +++ /dev/null @@ -1,2 +0,0 @@ -# tzwin has moved to dateutil.tz.win -from .tz.win import * diff --git a/venv/lib/python3.7/site-packages/dateutil/utils.py b/venv/lib/python3.7/site-packages/dateutil/utils.py deleted file mode 100644 index 44d9c99..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/utils.py +++ /dev/null @@ -1,71 +0,0 @@ -# -*- coding: utf-8 -*- -""" -This module offers general convenience and utility functions for dealing with -datetimes. - -.. 
versionadded:: 2.7.0
-"""
-from __future__ import unicode_literals
-
-from datetime import datetime, time
-
-
-def today(tzinfo=None):
-    """
-    Returns a :py:class:`datetime` representing the current day at midnight
-
-    :param tzinfo:
-        The time zone to attach (also used to determine the current day).
-
-    :return:
-        A :py:class:`datetime.datetime` object representing the current day
-        at midnight.
-    """
-
-    dt = datetime.now(tzinfo)
-    return datetime.combine(dt.date(), time(0, tzinfo=tzinfo))
-
-
-def default_tzinfo(dt, tzinfo):
-    """
-    Sets the ``tzinfo`` parameter on naive datetimes only
-
-    This is useful for example when you are provided a datetime that may have
-    either an implicit or explicit time zone, such as when parsing a time zone
-    string.
-
-    .. doctest::
-
-        >>> from dateutil.tz import tzoffset
-        >>> from dateutil.parser import parse
-        >>> from dateutil.utils import default_tzinfo
-        >>> dflt_tz = tzoffset("EST", -18000)
-        >>> print(default_tzinfo(parse('2014-01-01 12:30 UTC'), dflt_tz))
-        2014-01-01 12:30:00+00:00
-        >>> print(default_tzinfo(parse('2014-01-01 12:30'), dflt_tz))
-        2014-01-01 12:30:00-05:00
-
-    :param dt:
-        The datetime on which to replace the time zone
-
-    :param tzinfo:
-        The :py:class:`datetime.tzinfo` subclass instance to assign to
-        ``dt`` if (and only if) it is naive.
-
-    :return:
-        Returns an aware :py:class:`datetime.datetime`.
-    """
-    if dt.tzinfo is not None:
-        return dt
-    else:
-        return dt.replace(tzinfo=tzinfo)
-
-
-def within_delta(dt1, dt2, delta):
-    """
-    Useful for comparing two datetimes that may have a negligible difference
-    to be considered equal.
-    """
-    delta = abs(delta)
-    difference = dt1 - dt2
-    return -delta <= difference <= delta
diff --git a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__init__.py b/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__init__.py
deleted file mode 100644
index 34f11ad..0000000
--- a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__init__.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# -*- coding: utf-8 -*-
-import warnings
-import json
-
-from tarfile import TarFile
-from pkgutil import get_data
-from io import BytesIO
-
-from dateutil.tz import tzfile as _tzfile
-
-__all__ = ["get_zonefile_instance", "gettz", "gettz_db_metadata"]
-
-ZONEFILENAME = "dateutil-zoneinfo.tar.gz"
-METADATA_FN = 'METADATA'
-
-
-class tzfile(_tzfile):
-    def __reduce__(self):
-        return (gettz, (self._filename,))
-
-
-def getzoneinfofile_stream():
-    try:
-        return BytesIO(get_data(__name__, ZONEFILENAME))
-    except IOError as e:  # TODO switch to FileNotFoundError?
-        warnings.warn("I/O error({0}): {1}".format(e.errno, e.strerror))
-        return None
-
-
-class ZoneInfoFile(object):
-    def __init__(self, zonefile_stream=None):
-        if zonefile_stream is not None:
-            with TarFile.open(fileobj=zonefile_stream) as tf:
-                self.zones = {zf.name: tzfile(tf.extractfile(zf), filename=zf.name)
-                              for zf in tf.getmembers()
-                              if zf.isfile() and zf.name != METADATA_FN}
-                # deal with links: They'll point to their parent object. Less
-                # waste of memory
-                links = {zl.name: self.zones[zl.linkname]
-                         for zl in tf.getmembers() if
-                         zl.islnk() or zl.issym()}
-                self.zones.update(links)
-                try:
-                    metadata_json = tf.extractfile(tf.getmember(METADATA_FN))
-                    metadata_str = metadata_json.read().decode('UTF-8')
-                    self.metadata = json.loads(metadata_str)
-                except KeyError:
-                    # no metadata in tar file
-                    self.metadata = None
-        else:
-            self.zones = {}
-            self.metadata = None
-
-    def get(self, name, default=None):
-        """
-        Wrapper for :func:`ZoneInfoFile.zones.get`.
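The tolerance helper above is simple to demonstrate; a small sketch (timestamps arbitrary):

```
from datetime import datetime, timedelta
from dateutil.utils import within_delta

a = datetime(2020, 1, 1, 12, 0, 0)
b = a + timedelta(milliseconds=150)

# True: the difference is inside the allowed slack.
print(within_delta(a, b, timedelta(seconds=1)))
# The sign of delta is irrelevant; abs() is taken internally.
print(within_delta(a, b, timedelta(seconds=-1)))
```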
This is a convenience method - for retrieving zones from the zone dictionary. - - :param name: - The name of the zone to retrieve. (Generally IANA zone names) - - :param default: - The value to return in the event of a missing key. - - .. versionadded:: 2.6.0 - - """ - return self.zones.get(name, default) - - -# The current API has gettz as a module function, although in fact it taps into -# a stateful class. So as a workaround for now, without changing the API, we -# will create a new "global" class instance the first time a user requests a -# timezone. Ugly, but adheres to the api. -# -# TODO: Remove after deprecation period. -_CLASS_ZONE_INSTANCE = [] - - -def get_zonefile_instance(new_instance=False): - """ - This is a convenience function which provides a :class:`ZoneInfoFile` - instance using the data provided by the ``dateutil`` package. By default, it - caches a single instance of the ZoneInfoFile object and returns that. - - :param new_instance: - If ``True``, a new instance of :class:`ZoneInfoFile` is instantiated and - used as the cached instance for the next call. Otherwise, new instances - are created only as necessary. - - :return: - Returns a :class:`ZoneInfoFile` object. - - .. versionadded:: 2.6 - """ - if new_instance: - zif = None - else: - zif = getattr(get_zonefile_instance, '_cached_instance', None) - - if zif is None: - zif = ZoneInfoFile(getzoneinfofile_stream()) - - get_zonefile_instance._cached_instance = zif - - return zif - - -def gettz(name): - """ - This retrieves a time zone from the local zoneinfo tarball that is packaged - with dateutil. - - :param name: - An IANA-style time zone name, as found in the zoneinfo file. - - :return: - Returns a :class:`dateutil.tz.tzfile` time zone object. - - .. warning:: - It is generally inadvisable to use this function, and it is only - provided for API compatibility with earlier versions. This is *not* - equivalent to ``dateutil.tz.gettz()``, which selects an appropriate - time zone based on the inputs, favoring system zoneinfo. This is ONLY - for accessing the dateutil-specific zoneinfo (which may be out of - date compared to the system zoneinfo). - - .. deprecated:: 2.6 - If you need to use a specific zoneinfofile over the system zoneinfo, - instantiate a :class:`dateutil.zoneinfo.ZoneInfoFile` object and call - :func:`dateutil.zoneinfo.ZoneInfoFile.get(name)` instead. - - Use :func:`get_zonefile_instance` to retrieve an instance of the - dateutil-provided zoneinfo. - """ - warnings.warn("zoneinfo.gettz() will be removed in future versions, " - "to use the dateutil-provided zoneinfo files, instantiate a " - "ZoneInfoFile object and use ZoneInfoFile.zones.get() " - "instead. See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].zones.get(name) - - -def gettz_db_metadata(): - """ Get the zonefile metadata - - See `zonefile_metadata`_ - - :returns: - A dictionary with the database metadata - - .. deprecated:: 2.6 - See deprecation warning in :func:`zoneinfo.gettz`. To get metadata, - query the attribute ``zoneinfo.ZoneInfoFile.metadata``. - """ - warnings.warn("zoneinfo.gettz_db_metadata() will be removed in future " - "versions, to use the dateutil-provided zoneinfo files, " - "ZoneInfoFile object and query the 'metadata' attribute " - "instead. 
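A sketch of the recommended access path described above; the 'tzversion' metadata key is assumed from the shipped METADATA file and may differ:

```
from dateutil.zoneinfo import get_zonefile_instance

zif = get_zonefile_instance()

# The bundled tarball is independent of (and may lag) system zoneinfo.
print(sorted(zif.zones)[:3])        # a few IANA keys
print(zif.get('America/New_York'))  # a dateutil tzfile, or None
print(zif.metadata and zif.metadata.get('tzversion'))
```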
See the documentation for details.", - DeprecationWarning) - - if len(_CLASS_ZONE_INSTANCE) == 0: - _CLASS_ZONE_INSTANCE.append(ZoneInfoFile(getzoneinfofile_stream())) - return _CLASS_ZONE_INSTANCE[0].metadata diff --git a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index 55b77a7..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-37.pyc b/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-37.pyc deleted file mode 100644 index 8f62cdc..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/__pycache__/rebuild.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz b/venv/lib/python3.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz deleted file mode 100644 index 89e8351..0000000 Binary files a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/dateutil-zoneinfo.tar.gz and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/rebuild.py b/venv/lib/python3.7/site-packages/dateutil/zoneinfo/rebuild.py deleted file mode 100644 index 78f0d1a..0000000 --- a/venv/lib/python3.7/site-packages/dateutil/zoneinfo/rebuild.py +++ /dev/null @@ -1,53 +0,0 @@ -import logging -import os -import tempfile -import shutil -import json -from subprocess import check_call -from tarfile import TarFile - -from dateutil.zoneinfo import METADATA_FN, ZONEFILENAME - - -def rebuild(filename, tag=None, format="gz", zonegroups=[], metadata=None): - """Rebuild the internal timezone info in dateutil/zoneinfo/zoneinfo*tar* - - filename is the timezone tarball from ``ftp.iana.org/tz``. - - """ - tmpdir = tempfile.mkdtemp() - zonedir = os.path.join(tmpdir, "zoneinfo") - moduledir = os.path.dirname(__file__) - try: - with TarFile.open(filename) as tf: - for name in zonegroups: - tf.extract(name, tmpdir) - filepaths = [os.path.join(tmpdir, n) for n in zonegroups] - try: - check_call(["zic", "-d", zonedir] + filepaths) - except OSError as e: - _print_on_nosuchfile(e) - raise - # write metadata file - with open(os.path.join(zonedir, METADATA_FN), 'w') as f: - json.dump(metadata, f, indent=4, sort_keys=True) - target = os.path.join(moduledir, ZONEFILENAME) - with TarFile.open(target, "w:%s" % format) as tf: - for entry in os.listdir(zonedir): - entrypath = os.path.join(zonedir, entry) - tf.add(entrypath, entry) - finally: - shutil.rmtree(tmpdir) - - -def _print_on_nosuchfile(e): - """Print helpful troubleshooting message - - e is an exception raised by subprocess.check_call() - - """ - if e.errno == 2: - logging.error( - "Could not find zic. 
Perhaps you need to install " - "libc-bin or some other package that provides it, " - "or it's not in your PATH?") diff --git a/venv/lib/python3.7/site-packages/easy_install.py b/venv/lib/python3.7/site-packages/easy_install.py deleted file mode 100644 index d87e984..0000000 --- a/venv/lib/python3.7/site-packages/easy_install.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Run the EasyInstall command""" - -if __name__ == '__main__': - from setuptools.command.easy_install import main - main() diff --git a/venv/lib/python3.7/site-packages/editor.py b/venv/lib/python3.7/site-packages/editor.py deleted file mode 100755 index 6fc73f1..0000000 --- a/venv/lib/python3.7/site-packages/editor.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python -"""Tools for invoking editors programmatically.""" - -from __future__ import print_function - -import sys -import locale -import os.path -import subprocess -import tempfile -from distutils.spawn import find_executable - - -__all__ = [ - 'edit', - 'get_editor', - 'EditorError', -] - -__version__ = '1.0.4' - - -class EditorError(RuntimeError): - pass - - -def get_default_editors(): - # TODO: Make platform-specific - return [ - 'editor', - 'vim', - 'emacs', - 'nano', - ] - - -def get_editor_args(editor): - if editor in ['vim', 'gvim', 'vim.basic', 'vim.tiny']: - return ['-f', '-o'] - - elif editor == 'emacs': - return ['-nw'] - - elif editor == 'gedit': - return ['-w', '--new-window'] - - elif editor == 'nano': - return ['-R'] - - else: - return [] - - -def get_editor(): - # Get the editor from the environment. Prefer VISUAL to EDITOR - editor = os.environ.get('VISUAL') or os.environ.get('EDITOR') - if editor: - return editor - - # None found in the environment. Fallback to platform-specific defaults. - for ed in get_default_editors(): - path = find_executable(ed) - if path is not None: - return path - - raise EditorError("Unable to find a viable editor on this system." - "Please consider setting your $EDITOR variable") - - -def get_tty_filename(): - if sys.platform == 'win32': - return 'CON:' - return '/dev/tty' - - -def edit(filename=None, contents=None, use_tty=None, suffix=''): - editor = get_editor() - args = [editor] + get_editor_args(os.path.basename(os.path.realpath(editor))) - - if use_tty is None: - use_tty = sys.stdin.isatty() and not sys.stdout.isatty() - - if filename is None: - tmp = tempfile.NamedTemporaryFile(suffix=suffix) - filename = tmp.name - - if contents is not None: - # For python3 only. 
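A hypothetical invocation of ``rebuild`` above; the tarball name, zone groups and metadata are placeholders, ``zic`` must be on ``PATH``, and the call overwrites the tarball inside the installed package:

```
from dateutil.zoneinfo.rebuild import rebuild

# Rebuild dateutil's bundled zoneinfo from an IANA tzdata release
# (download from ftp.iana.org/tz; filename is a placeholder).
rebuild("tzdata-latest.tar.gz",
        zonegroups=["northamerica", "europe", "etcetera"],
        metadata={"tzversion": "placeholder"})
```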
If str is passed instead of bytes, encode default - if hasattr(contents, 'encode'): - contents = contents.encode() - - with open(filename, mode='wb') as f: - f.write(contents) - - args += [filename] - - stdout = None - if use_tty: - stdout = open(get_tty_filename(), 'wb') - - proc = subprocess.Popen(args, close_fds=True, stdout=stdout) - proc.communicate() - - with open(filename, mode='rb') as f: - return f.read() - - -def _get_editor(ns): - print(get_editor()) - - -def _edit(ns): - contents = ns.contents - if contents is not None: - contents = contents.encode(locale.getpreferredencoding()) - print(edit(filename=ns.path, contents=contents)) - - -if __name__ == '__main__': - import argparse - ap = argparse.ArgumentParser() - sp = ap.add_subparsers() - - cmd = sp.add_parser('get-editor') - cmd.set_defaults(cmd=_get_editor) - - cmd = sp.add_parser('edit') - cmd.set_defaults(cmd=_edit) - cmd.add_argument('path', type=str, nargs='?') - cmd.add_argument('--contents', type=str) - - ns = ap.parse_args() - ns.cmd(ns) diff --git a/venv/lib/python3.7/site-packages/flask/__init__.py b/venv/lib/python3.7/site-packages/flask/__init__.py deleted file mode 100644 index 687475b..0000000 --- a/venv/lib/python3.7/site-packages/flask/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask - ~~~~~ - - A microframework based on Werkzeug. It's extensively documented - and follows best practice patterns. - - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" -# utilities we import from Werkzeug and Jinja2 that are unused -# in the module but are exported as public interface. -from jinja2 import escape -from jinja2 import Markup -from werkzeug.exceptions import abort -from werkzeug.utils import redirect - -from . import json -from ._compat import json_available -from .app import Flask -from .app import Request -from .app import Response -from .blueprints import Blueprint -from .config import Config -from .ctx import after_this_request -from .ctx import copy_current_request_context -from .ctx import has_app_context -from .ctx import has_request_context -from .globals import _app_ctx_stack -from .globals import _request_ctx_stack -from .globals import current_app -from .globals import g -from .globals import request -from .globals import session -from .helpers import flash -from .helpers import get_flashed_messages -from .helpers import get_template_attribute -from .helpers import make_response -from .helpers import safe_join -from .helpers import send_file -from .helpers import send_from_directory -from .helpers import stream_with_context -from .helpers import url_for -from .json import jsonify -from .signals import appcontext_popped -from .signals import appcontext_pushed -from .signals import appcontext_tearing_down -from .signals import before_render_template -from .signals import got_request_exception -from .signals import message_flashed -from .signals import request_finished -from .signals import request_started -from .signals import request_tearing_down -from .signals import signals_available -from .signals import template_rendered -from .templating import render_template -from .templating import render_template_string - -__version__ = "1.1.1" diff --git a/venv/lib/python3.7/site-packages/flask/__main__.py b/venv/lib/python3.7/site-packages/flask/__main__.py deleted file mode 100644 index f61dbc0..0000000 --- a/venv/lib/python3.7/site-packages/flask/__main__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.__main__ - ~~~~~~~~~~~~~~ - - Alias for flask.run 
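The module above boils down to two calls; an interactive sketch that blocks until the spawned editor exits and requires $VISUAL/$EDITOR (or a stock editor) to exist:

```
from editor import edit, get_editor

print(get_editor())  # the editor binary that would be launched

# Seed a temp file, open it in the editor, return the saved bytes.
result = edit(contents="# fill me in\n")
print(result.decode())
```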
for the command line. - - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" - -if __name__ == "__main__": - from .cli import main - - main(as_module=True) diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index b863322..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/__main__.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/__main__.cpython-37.pyc deleted file mode 100644 index ac2116a..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/__main__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/_compat.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/_compat.cpython-37.pyc deleted file mode 100644 index 5f93bcf..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/_compat.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/app.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/app.cpython-37.pyc deleted file mode 100644 index d85ce9a..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/app.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/blueprints.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/blueprints.cpython-37.pyc deleted file mode 100644 index 2b8ae3b..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/blueprints.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/cli.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/cli.cpython-37.pyc deleted file mode 100644 index d1963d6..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/cli.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/config.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/config.cpython-37.pyc deleted file mode 100644 index 5238c54..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/config.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/ctx.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/ctx.cpython-37.pyc deleted file mode 100644 index d4ab145..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/ctx.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/debughelpers.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/debughelpers.cpython-37.pyc deleted file mode 100644 index 6b15c51..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/debughelpers.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/globals.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/globals.cpython-37.pyc deleted file mode 100644 index b1e459a..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/globals.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/helpers.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/helpers.cpython-37.pyc deleted file mode 100644 index 
d75fe0a..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/helpers.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/logging.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/logging.cpython-37.pyc deleted file mode 100644 index dfc5dce..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/logging.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/sessions.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/sessions.cpython-37.pyc deleted file mode 100644 index 1c8ce94..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/sessions.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/signals.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/signals.cpython-37.pyc deleted file mode 100644 index baaa386..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/signals.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/templating.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/templating.cpython-37.pyc deleted file mode 100644 index f60fce7..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/templating.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/testing.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/testing.cpython-37.pyc deleted file mode 100644 index 15e79ee..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/testing.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/views.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/views.cpython-37.pyc deleted file mode 100644 index 2789c58..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/views.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/__pycache__/wrappers.cpython-37.pyc b/venv/lib/python3.7/site-packages/flask/__pycache__/wrappers.cpython-37.pyc deleted file mode 100644 index c754c57..0000000 Binary files a/venv/lib/python3.7/site-packages/flask/__pycache__/wrappers.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/flask/_compat.py b/venv/lib/python3.7/site-packages/flask/_compat.py deleted file mode 100644 index 76c442c..0000000 --- a/venv/lib/python3.7/site-packages/flask/_compat.py +++ /dev/null @@ -1,145 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask._compat - ~~~~~~~~~~~~~ - - Some py2/py3 compatibility support based on a stripped down - version of six so we don't have to depend on a specific version - of it. 
-
-    :copyright: 2010 Pallets
-    :license: BSD-3-Clause
-"""
-import sys
-
-PY2 = sys.version_info[0] == 2
-_identity = lambda x: x
-
-try:  # Python 2
-    text_type = unicode
-    string_types = (str, unicode)
-    integer_types = (int, long)
-except NameError:  # Python 3
-    text_type = str
-    string_types = (str,)
-    integer_types = (int,)
-
-if not PY2:
-    iterkeys = lambda d: iter(d.keys())
-    itervalues = lambda d: iter(d.values())
-    iteritems = lambda d: iter(d.items())
-
-    from inspect import getfullargspec as getargspec
-    from io import StringIO
-    import collections.abc as collections_abc
-
-    def reraise(tp, value, tb=None):
-        if value.__traceback__ is not tb:
-            raise value.with_traceback(tb)
-        raise value
-
-    implements_to_string = _identity
-
-else:
-    iterkeys = lambda d: d.iterkeys()
-    itervalues = lambda d: d.itervalues()
-    iteritems = lambda d: d.iteritems()
-
-    from inspect import getargspec
-    from cStringIO import StringIO
-    import collections as collections_abc
-
-    exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
-
-    def implements_to_string(cls):
-        cls.__unicode__ = cls.__str__
-        cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
-        return cls
-
-
-def with_metaclass(meta, *bases):
-    """Create a base class with a metaclass."""
-    # This requires a bit of explanation: the basic idea is to make a
-    # dummy metaclass for one level of class instantiation that replaces
-    # itself with the actual metaclass.
-    class metaclass(type):
-        def __new__(metacls, name, this_bases, d):
-            return meta(name, bases, d)
-
-    return type.__new__(metaclass, "temporary_class", (), {})
-
-
-# Certain versions of pypy have a bug where clearing the exception stack
-# breaks the __exit__ function in a very peculiar way. The second level of
-# exception blocks is necessary because pypy seems to forget to check if an
-# exception happened until the next bytecode instruction?
-#
-# Relevant PyPy bugfix commit:
-# https://bitbucket.org/pypy/pypy/commits/77ecf91c635a287e88e60d8ddb0f4e9df4003301
-# According to ronan on #pypy IRC, it is released in PyPy2 2.3 and later
-# versions.
-#
-# Ubuntu 14.04 has PyPy 2.2.1, which does exhibit this bug.
-BROKEN_PYPY_CTXMGR_EXIT = False
-if hasattr(sys, "pypy_version_info"):
-
-    class _Mgr(object):
-        def __enter__(self):
-            return self
-
-        def __exit__(self, *args):
-            if hasattr(sys, "exc_clear"):
-                # Python 3 (PyPy3) doesn't have exc_clear
-                sys.exc_clear()
-
-    try:
-        try:
-            with _Mgr():
-                raise AssertionError()
-        except:  # noqa: B001
-            # We intentionally use a bare except here. See the comment above
-            # regarding a pypy bug as to why.
-            raise
-    except TypeError:
-        BROKEN_PYPY_CTXMGR_EXIT = True
-    except AssertionError:
-        pass
-
-
-try:
-    from os import fspath
-except ImportError:
-    # Backwards compatibility as proposed in PEP 0519:
-    # https://www.python.org/dev/peps/pep-0519/#backwards-compatibility
-    def fspath(path):
-        return path.__fspath__() if hasattr(path, "__fspath__") else path
-
-
-class _DeprecatedBool(object):
-    def __init__(self, name, version, value):
-        self.message = "'{}' is deprecated and will be removed in version {}.".format(
-            name, version
-        )
-        self.value = value
-
-    def _warn(self):
-        import warnings
-
-        warnings.warn(self.message, DeprecationWarning, stacklevel=2)
-
-    def __eq__(self, other):
-        self._warn()
-        return other == self.value
-
-    def __ne__(self, other):
-        self._warn()
-        return other != self.value
-
-    def __bool__(self):
-        self._warn()
-        return self.value
-
-    __nonzero__ = __bool__
-
-
-json_available = _DeprecatedBool("flask.json_available", "2.0.0", True)
diff --git a/venv/lib/python3.7/site-packages/flask/app.py b/venv/lib/python3.7/site-packages/flask/app.py
deleted file mode 100644
index e596fe5..0000000
--- a/venv/lib/python3.7/site-packages/flask/app.py
+++ /dev/null
@@ -1,2466 +0,0 @@
import json -from ._compat import integer_types -from ._compat import reraise -from ._compat import string_types -from ._compat import text_type -from .config import Config -from .config import ConfigAttribute -from .ctx import _AppCtxGlobals -from .ctx import AppContext -from .ctx import RequestContext -from .globals import _request_ctx_stack -from .globals import g -from .globals import request -from .globals import session -from .helpers import _endpoint_from_view_func -from .helpers import _PackageBoundObject -from .helpers import find_package -from .helpers import get_debug_flag -from .helpers import get_env -from .helpers import get_flashed_messages -from .helpers import get_load_dotenv -from .helpers import locked_cached_property -from .helpers import url_for -from .json import jsonify -from .logging import create_logger -from .sessions import SecureCookieSessionInterface -from .signals import appcontext_tearing_down -from .signals import got_request_exception -from .signals import request_finished -from .signals import request_started -from .signals import request_tearing_down -from .templating import _default_template_ctx_processor -from .templating import DispatchingJinjaLoader -from .templating import Environment -from .wrappers import Request -from .wrappers import Response - -# a singleton sentinel value for parameter defaults -_sentinel = object() - - -def _make_timedelta(value): - if not isinstance(value, timedelta): - return timedelta(seconds=value) - return value - - -def setupmethod(f): - """Wraps a method so that it performs a check in debug mode if the - first request was already handled. - """ - - def wrapper_func(self, *args, **kwargs): - if self.debug and self._got_first_request: - raise AssertionError( - "A setup function was called after the " - "first request was handled. This usually indicates a bug " - "in the application where a module was not imported " - "and decorators or other functionality was called too late.\n" - "To fix this make sure to import all your view modules, " - "database models and everything related at a central place " - "before the application starts serving requests." - ) - return f(self, *args, **kwargs) - - return update_wrapper(wrapper_func, f) - - -class Flask(_PackageBoundObject): - """The flask object implements a WSGI application and acts as the central - object. It is passed the name of the module or package of the - application. Once it is created it will act as a central registry for - the view functions, the URL rules, template configuration and much more. - - The name of the package is used to resolve resources from inside the - package or the folder the module is contained in depending on if the - package parameter resolves to an actual python package (a folder with - an :file:`__init__.py` file inside) or a standard module (just a ``.py`` file). - - For more information about resource loading, see :func:`open_resource`. - - Usually you create a :class:`Flask` instance in your main module or - in the :file:`__init__.py` file of your package like this:: - - from flask import Flask - app = Flask(__name__) - - .. admonition:: About the First Parameter - - The idea of the first parameter is to give Flask an idea of what - belongs to your application. This name is used to find resources - on the filesystem, can be used by extensions to improve debugging - information and a lot more. - - So it's important what you provide there. If you are using a single - module, `__name__` is always the correct value. 
If you however are - using a package, it's usually recommended to hardcode the name of - your package there. - - For example if your application is defined in :file:`yourapplication/app.py` - you should create it with one of the two versions below:: - - app = Flask('yourapplication') - app = Flask(__name__.split('.')[0]) - - Why is that? The application will work even with `__name__`, thanks - to how resources are looked up. However it will make debugging more - painful. Certain extensions can make assumptions based on the - import name of your application. For example the Flask-SQLAlchemy - extension will look for the code in your application that triggered - an SQL query in debug mode. If the import name is not properly set - up, that debugging information is lost. (For example it would only - pick up SQL queries in `yourapplication.app` and not - `yourapplication.views.frontend`) - - .. versionadded:: 0.7 - The `static_url_path`, `static_folder`, and `template_folder` - parameters were added. - - .. versionadded:: 0.8 - The `instance_path` and `instance_relative_config` parameters were - added. - - .. versionadded:: 0.11 - The `root_path` parameter was added. - - .. versionadded:: 1.0 - The ``host_matching`` and ``static_host`` parameters were added. - - .. versionadded:: 1.0 - The ``subdomain_matching`` parameter was added. Subdomain - matching needs to be enabled manually now. Setting - :data:`SERVER_NAME` does not implicitly enable it. - - :param import_name: the name of the application package - :param static_url_path: can be used to specify a different path for the - static files on the web. Defaults to the name - of the `static_folder` folder. - :param static_folder: the folder with static files that should be served - at `static_url_path`. Defaults to the ``'static'`` - folder in the root path of the application. - :param static_host: the host to use when adding the static route. - Defaults to None. Required when using ``host_matching=True`` - with a ``static_folder`` configured. - :param host_matching: set ``url_map.host_matching`` attribute. - Defaults to False. - :param subdomain_matching: consider the subdomain relative to - :data:`SERVER_NAME` when matching routes. Defaults to False. - :param template_folder: the folder that contains the templates that should - be used by the application. Defaults to - ``'templates'`` folder in the root path of the - application. - :param instance_path: An alternative instance path for the application. - By default the folder ``'instance'`` next to the - package or module is assumed to be the instance - path. - :param instance_relative_config: if set to ``True`` relative filenames - for loading the config are assumed to - be relative to the instance path instead - of the application root. - :param root_path: Flask by default will automatically calculate the path - to the root of the application. In certain situations - this cannot be achieved (for instance if the package - is a Python 3 namespace package) and needs to be - manually defined. - """ - - #: The class that is used for request objects. See :class:`~flask.Request` - #: for more information. - request_class = Request - - #: The class that is used for response objects. See - #: :class:`~flask.Response` for more information. - response_class = Response - - #: The class that is used for the Jinja environment. - #: - #: .. versionadded:: 0.11 - jinja_environment = Environment - - #: The class that is used for the :data:`~flask.g` instance. 
- #: - #: Example use cases for a custom class: - #: - #: 1. Store arbitrary attributes on flask.g. - #: 2. Add a property for lazy per-request database connectors. - #: 3. Return None instead of AttributeError on unexpected attributes. - #: 4. Raise exception if an unexpected attr is set, a "controlled" flask.g. - #: - #: In Flask 0.9 this property was called `request_globals_class` but it - #: was changed in 0.10 to :attr:`app_ctx_globals_class` because the - #: flask.g object is now application context scoped. - #: - #: .. versionadded:: 0.10 - app_ctx_globals_class = _AppCtxGlobals - - #: The class that is used for the ``config`` attribute of this app. - #: Defaults to :class:`~flask.Config`. - #: - #: Example use cases for a custom class: - #: - #: 1. Default values for certain config options. - #: 2. Access to config values through attributes in addition to keys. - #: - #: .. versionadded:: 0.11 - config_class = Config - - #: The testing flag. Set this to ``True`` to enable the test mode of - #: Flask extensions (and in the future probably also Flask itself). - #: For example this might activate test helpers that have an - #: additional runtime cost which should not be enabled by default. - #: - #: If this is enabled and PROPAGATE_EXCEPTIONS is not changed from the - #: default it's implicitly enabled. - #: - #: This attribute can also be configured from the config with the - #: ``TESTING`` configuration key. Defaults to ``False``. - testing = ConfigAttribute("TESTING") - - #: If a secret key is set, cryptographic components can use this to - #: sign cookies and other things. Set this to a complex random value - #: when you want to use the secure cookie for instance. - #: - #: This attribute can also be configured from the config with the - #: :data:`SECRET_KEY` configuration key. Defaults to ``None``. - secret_key = ConfigAttribute("SECRET_KEY") - - #: The secure cookie uses this for the name of the session cookie. - #: - #: This attribute can also be configured from the config with the - #: ``SESSION_COOKIE_NAME`` configuration key. Defaults to ``'session'`` - session_cookie_name = ConfigAttribute("SESSION_COOKIE_NAME") - - #: A :class:`~datetime.timedelta` which is used to set the expiration - #: date of a permanent session. The default is 31 days which makes a - #: permanent session survive for roughly one month. - #: - #: This attribute can also be configured from the config with the - #: ``PERMANENT_SESSION_LIFETIME`` configuration key. Defaults to - #: ``timedelta(days=31)`` - permanent_session_lifetime = ConfigAttribute( - "PERMANENT_SESSION_LIFETIME", get_converter=_make_timedelta - ) - - #: A :class:`~datetime.timedelta` which is used as default cache_timeout - #: for the :func:`send_file` functions. The default is 12 hours. - #: - #: This attribute can also be configured from the config with the - #: ``SEND_FILE_MAX_AGE_DEFAULT`` configuration key. This configuration - #: variable can also be set with an integer value used as seconds. - #: Defaults to ``timedelta(hours=12)`` - send_file_max_age_default = ConfigAttribute( - "SEND_FILE_MAX_AGE_DEFAULT", get_converter=_make_timedelta - ) - - #: Enable this if you want to use the X-Sendfile feature. Keep in - #: mind that the server has to support this. This only affects files - #: sent with the :func:`send_file` method. - #: - #: .. versionadded:: 0.2 - #: - #: This attribute can also be configured from the config with the - #: ``USE_X_SENDFILE`` configuration key. Defaults to ``False``. 
- use_x_sendfile = ConfigAttribute("USE_X_SENDFILE") - - #: The JSON encoder class to use. Defaults to :class:`~flask.json.JSONEncoder`. - #: - #: .. versionadded:: 0.10 - json_encoder = json.JSONEncoder - - #: The JSON decoder class to use. Defaults to :class:`~flask.json.JSONDecoder`. - #: - #: .. versionadded:: 0.10 - json_decoder = json.JSONDecoder - - #: Options that are passed to the Jinja environment in - #: :meth:`create_jinja_environment`. Changing these options after - #: the environment is created (accessing :attr:`jinja_env`) will - #: have no effect. - #: - #: .. versionchanged:: 1.1.0 - #: This is a ``dict`` instead of an ``ImmutableDict`` to allow - #: easier configuration. - #: - jinja_options = {"extensions": ["jinja2.ext.autoescape", "jinja2.ext.with_"]} - - #: Default configuration parameters. - default_config = ImmutableDict( - { - "ENV": None, - "DEBUG": None, - "TESTING": False, - "PROPAGATE_EXCEPTIONS": None, - "PRESERVE_CONTEXT_ON_EXCEPTION": None, - "SECRET_KEY": None, - "PERMANENT_SESSION_LIFETIME": timedelta(days=31), - "USE_X_SENDFILE": False, - "SERVER_NAME": None, - "APPLICATION_ROOT": "/", - "SESSION_COOKIE_NAME": "session", - "SESSION_COOKIE_DOMAIN": None, - "SESSION_COOKIE_PATH": None, - "SESSION_COOKIE_HTTPONLY": True, - "SESSION_COOKIE_SECURE": False, - "SESSION_COOKIE_SAMESITE": None, - "SESSION_REFRESH_EACH_REQUEST": True, - "MAX_CONTENT_LENGTH": None, - "SEND_FILE_MAX_AGE_DEFAULT": timedelta(hours=12), - "TRAP_BAD_REQUEST_ERRORS": None, - "TRAP_HTTP_EXCEPTIONS": False, - "EXPLAIN_TEMPLATE_LOADING": False, - "PREFERRED_URL_SCHEME": "http", - "JSON_AS_ASCII": True, - "JSON_SORT_KEYS": True, - "JSONIFY_PRETTYPRINT_REGULAR": False, - "JSONIFY_MIMETYPE": "application/json", - "TEMPLATES_AUTO_RELOAD": None, - "MAX_COOKIE_SIZE": 4093, - } - ) - - #: The rule object to use for URL rules created. This is used by - #: :meth:`add_url_rule`. Defaults to :class:`werkzeug.routing.Rule`. - #: - #: .. versionadded:: 0.7 - url_rule_class = Rule - - #: The map object to use for storing the URL rules and routing - #: configuration parameters. Defaults to :class:`werkzeug.routing.Map`. - #: - #: .. versionadded:: 1.1.0 - url_map_class = Map - - #: the test client that is used with when `test_client` is used. - #: - #: .. versionadded:: 0.7 - test_client_class = None - - #: The :class:`~click.testing.CliRunner` subclass, by default - #: :class:`~flask.testing.FlaskCliRunner` that is used by - #: :meth:`test_cli_runner`. Its ``__init__`` method should take a - #: Flask app object as the first argument. - #: - #: .. versionadded:: 1.0 - test_cli_runner_class = None - - #: the session interface to use. By default an instance of - #: :class:`~flask.sessions.SecureCookieSessionInterface` is used here. - #: - #: .. versionadded:: 0.8 - session_interface = SecureCookieSessionInterface() - - # TODO remove the next three attrs when Sphinx :inherited-members: works - # https://github.com/sphinx-doc/sphinx/issues/741 - - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. 
- root_path = None - - def __init__( - self, - import_name, - static_url_path=None, - static_folder="static", - static_host=None, - host_matching=False, - subdomain_matching=False, - template_folder="templates", - instance_path=None, - instance_relative_config=False, - root_path=None, - ): - _PackageBoundObject.__init__( - self, import_name, template_folder=template_folder, root_path=root_path - ) - - self.static_url_path = static_url_path - self.static_folder = static_folder - - if instance_path is None: - instance_path = self.auto_find_instance_path() - elif not os.path.isabs(instance_path): - raise ValueError( - "If an instance path is provided it must be absolute." - " A relative path was given instead." - ) - - #: Holds the path to the instance folder. - #: - #: .. versionadded:: 0.8 - self.instance_path = instance_path - - #: The configuration dictionary as :class:`Config`. This behaves - #: exactly like a regular dictionary but supports additional methods - #: to load a config from files. - self.config = self.make_config(instance_relative_config) - - #: A dictionary of all view functions registered. The keys will - #: be function names which are also used to generate URLs and - #: the values are the function objects themselves. - #: To register a view function, use the :meth:`route` decorator. - self.view_functions = {} - - #: A dictionary of all registered error handlers. The key is ``None`` - #: for error handlers active on the application, otherwise the key is - #: the name of the blueprint. Each key points to another dictionary - #: where the key is the status code of the http exception. The - #: special key ``None`` points to a list of tuples where the first item - #: is the class for the instance check and the second the error handler - #: function. - #: - #: To register an error handler, use the :meth:`errorhandler` - #: decorator. - self.error_handler_spec = {} - - #: A list of functions that are called when :meth:`url_for` raises a - #: :exc:`~werkzeug.routing.BuildError`. Each function registered here - #: is called with `error`, `endpoint` and `values`. If a function - #: returns ``None`` or raises a :exc:`BuildError` the next function is - #: tried. - #: - #: .. versionadded:: 0.9 - self.url_build_error_handlers = [] - - #: A dictionary with lists of functions that will be called at the - #: beginning of each request. The key of the dictionary is the name of - #: the blueprint this function is active for, or ``None`` for all - #: requests. To register a function, use the :meth:`before_request` - #: decorator. - self.before_request_funcs = {} - - #: A list of functions that will be called at the beginning of the - #: first request to this instance. To register a function, use the - #: :meth:`before_first_request` decorator. - #: - #: .. versionadded:: 0.8 - self.before_first_request_funcs = [] - - #: A dictionary with lists of functions that should be called after - #: each request. The key of the dictionary is the name of the blueprint - #: this function is active for, ``None`` for all requests. This can for - #: example be used to close database connections. To register a function - #: here, use the :meth:`after_request` decorator. - self.after_request_funcs = {} - - #: A dictionary with lists of functions that are called after - #: each request, even if an exception has occurred. The key of the - #: dictionary is the name of the blueprint this function is active for, - #: ``None`` for all requests. 
These functions are not allowed to modify - #: the request, and their return values are ignored. If an exception - #: occurred while processing the request, it gets passed to each - #: teardown_request function. To register a function here, use the - #: :meth:`teardown_request` decorator. - #: - #: .. versionadded:: 0.7 - self.teardown_request_funcs = {} - - #: A list of functions that are called when the application context - #: is destroyed. Since the application context is also torn down - #: if the request ends this is the place to store code that disconnects - #: from databases. - #: - #: .. versionadded:: 0.9 - self.teardown_appcontext_funcs = [] - - #: A dictionary with lists of functions that are called before the - #: :attr:`before_request_funcs` functions. The key of the dictionary is - #: the name of the blueprint this function is active for, or ``None`` - #: for all requests. To register a function, use - #: :meth:`url_value_preprocessor`. - #: - #: .. versionadded:: 0.7 - self.url_value_preprocessors = {} - - #: A dictionary with lists of functions that can be used as URL value - #: preprocessors. The key ``None`` here is used for application wide - #: callbacks, otherwise the key is the name of the blueprint. - #: Each of these functions has the chance to modify the dictionary - #: of URL values before they are used as the keyword arguments of the - #: view function. For each function registered this one should also - #: provide a :meth:`url_defaults` function that adds the parameters - #: automatically again that were removed that way. - #: - #: .. versionadded:: 0.7 - self.url_default_functions = {} - - #: A dictionary with list of functions that are called without argument - #: to populate the template context. The key of the dictionary is the - #: name of the blueprint this function is active for, ``None`` for all - #: requests. Each returns a dictionary that the template context is - #: updated with. To register a function here, use the - #: :meth:`context_processor` decorator. - self.template_context_processors = {None: [_default_template_ctx_processor]} - - #: A list of shell context processor functions that should be run - #: when a shell context is created. - #: - #: .. versionadded:: 0.11 - self.shell_context_processors = [] - - #: all the attached blueprints in a dictionary by name. Blueprints - #: can be attached multiple times so this dictionary does not tell - #: you how often they got attached. - #: - #: .. versionadded:: 0.7 - self.blueprints = {} - self._blueprint_order = [] - - #: a place where extensions can store application specific state. For - #: example this is where an extension could store database engines and - #: similar things. For backwards compatibility extensions should register - #: themselves like this:: - #: - #: if not hasattr(app, 'extensions'): - #: app.extensions = {} - #: app.extensions['extensionname'] = SomeObject() - #: - #: The key must match the name of the extension module. For example in - #: case of a "Flask-Foo" extension in `flask_foo`, the key would be - #: ``'foo'``. - #: - #: .. versionadded:: 0.7 - self.extensions = {} - - #: The :class:`~werkzeug.routing.Map` for this instance. You can use - #: this to change the routing converters after the class was created - #: but before any routes are connected. 
Example:: - #: - #: from werkzeug.routing import BaseConverter - #: - #: class ListConverter(BaseConverter): - #: def to_python(self, value): - #: return value.split(',') - #: def to_url(self, values): - #: return ','.join(super(ListConverter, self).to_url(value) - #: for value in values) - #: - #: app = Flask(__name__) - #: app.url_map.converters['list'] = ListConverter - self.url_map = self.url_map_class() - - self.url_map.host_matching = host_matching - self.subdomain_matching = subdomain_matching - - # tracks internally if the application already handled at least one - # request. - self._got_first_request = False - self._before_request_lock = Lock() - - # Add a static route using the provided static_url_path, static_host, - # and static_folder if there is a configured static_folder. - # Note we do this without checking if static_folder exists. - # For one, it might be created while the server is running (e.g. during - # development). Also, Google App Engine stores static files somewhere - if self.has_static_folder: - assert ( - bool(static_host) == host_matching - ), "Invalid static_host/host_matching combination" - self.add_url_rule( - self.static_url_path + "/", - endpoint="static", - host=static_host, - view_func=self.send_static_file, - ) - - # Set the name of the Click group in case someone wants to add - # the app's commands to another CLI tool. - self.cli.name = self.name - - @locked_cached_property - def name(self): - """The name of the application. This is usually the import name - with the difference that it's guessed from the run file if the - import name is main. This name is used as a display name when - Flask needs the name of the application. It can be set and overridden - to change the value. - - .. versionadded:: 0.8 - """ - if self.import_name == "__main__": - fn = getattr(sys.modules["__main__"], "__file__", None) - if fn is None: - return "__main__" - return os.path.splitext(os.path.basename(fn))[0] - return self.import_name - - @property - def propagate_exceptions(self): - """Returns the value of the ``PROPAGATE_EXCEPTIONS`` configuration - value in case it's set, otherwise a sensible default is returned. - - .. versionadded:: 0.7 - """ - rv = self.config["PROPAGATE_EXCEPTIONS"] - if rv is not None: - return rv - return self.testing or self.debug - - @property - def preserve_context_on_exception(self): - """Returns the value of the ``PRESERVE_CONTEXT_ON_EXCEPTION`` - configuration value in case it's set, otherwise a sensible default - is returned. - - .. versionadded:: 0.7 - """ - rv = self.config["PRESERVE_CONTEXT_ON_EXCEPTION"] - if rv is not None: - return rv - return self.debug - - @locked_cached_property - def logger(self): - """A standard Python :class:`~logging.Logger` for the app, with - the same name as :attr:`name`. - - In debug mode, the logger's :attr:`~logging.Logger.level` will - be set to :data:`~logging.DEBUG`. - - If there are no handlers configured, a default handler will be - added. See :doc:`/logging` for more information. - - .. versionchanged:: 1.1.0 - The logger takes the same name as :attr:`name` rather than - hard-coding ``"flask.app"``. - - .. versionchanged:: 1.0.0 - Behavior was simplified. The logger is always named - ``"flask.app"``. The level is only set during configuration, - it doesn't check ``app.debug`` each time. Only one format is - used, not different ones depending on ``app.debug``. No - handlers are removed, and a handler is only added if no - handlers are already configured. - - .. 
versionadded:: 0.3 - """ - return create_logger(self) - - @locked_cached_property - def jinja_env(self): - """The Jinja environment used to load templates. - - The environment is created the first time this property is - accessed. Changing :attr:`jinja_options` after that will have no - effect. - """ - return self.create_jinja_environment() - - @property - def got_first_request(self): - """This attribute is set to ``True`` if the application started - handling the first request. - - .. versionadded:: 0.8 - """ - return self._got_first_request - - def make_config(self, instance_relative=False): - """Used to create the config attribute by the Flask constructor. - The `instance_relative` parameter is passed in from the constructor - of Flask (there named `instance_relative_config`) and indicates if - the config should be relative to the instance path or the root path - of the application. - - .. versionadded:: 0.8 - """ - root_path = self.root_path - if instance_relative: - root_path = self.instance_path - defaults = dict(self.default_config) - defaults["ENV"] = get_env() - defaults["DEBUG"] = get_debug_flag() - return self.config_class(root_path, defaults) - - def auto_find_instance_path(self): - """Tries to locate the instance path if it was not provided to the - constructor of the application class. It will basically calculate - the path to a folder named ``instance`` next to your main file or - the package. - - .. versionadded:: 0.8 - """ - prefix, package_path = find_package(self.import_name) - if prefix is None: - return os.path.join(package_path, "instance") - return os.path.join(prefix, "var", self.name + "-instance") - - def open_instance_resource(self, resource, mode="rb"): - """Opens a resource from the application's instance folder - (:attr:`instance_path`). Otherwise works like - :meth:`open_resource`. Instance resources can also be opened for - writing. - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. - :param mode: resource file opening mode, default is 'rb'. - """ - return open(os.path.join(self.instance_path, resource), mode) - - @property - def templates_auto_reload(self): - """Reload templates when they are changed. Used by - :meth:`create_jinja_environment`. - - This attribute can be configured with :data:`TEMPLATES_AUTO_RELOAD`. If - not set, it will be enabled in debug mode. - - .. versionadded:: 1.0 - This property was added but the underlying config and behavior - already existed. - """ - rv = self.config["TEMPLATES_AUTO_RELOAD"] - return rv if rv is not None else self.debug - - @templates_auto_reload.setter - def templates_auto_reload(self, value): - self.config["TEMPLATES_AUTO_RELOAD"] = value - - def create_jinja_environment(self): - """Create the Jinja environment based on :attr:`jinja_options` - and the various Jinja-related methods of the app. Changing - :attr:`jinja_options` after this will have no effect. Also adds - Flask-related globals and filters to the environment. - - .. versionchanged:: 0.11 - ``Environment.auto_reload`` set in accordance with - ``TEMPLATES_AUTO_RELOAD`` configuration option. - - .. 
versionadded:: 0.5 - """ - options = dict(self.jinja_options) - - if "autoescape" not in options: - options["autoescape"] = self.select_jinja_autoescape - - if "auto_reload" not in options: - options["auto_reload"] = self.templates_auto_reload - - rv = self.jinja_environment(self, **options) - rv.globals.update( - url_for=url_for, - get_flashed_messages=get_flashed_messages, - config=self.config, - # request, session and g are normally added with the - # context processor for efficiency reasons but for imported - # templates we also want the proxies in there. - request=request, - session=session, - g=g, - ) - rv.filters["tojson"] = json.tojson_filter - return rv - - def create_global_jinja_loader(self): - """Creates the loader for the Jinja2 environment. Can be used to - override just the loader and keeping the rest unchanged. It's - discouraged to override this function. Instead one should override - the :meth:`jinja_loader` function instead. - - The global loader dispatches between the loaders of the application - and the individual blueprints. - - .. versionadded:: 0.7 - """ - return DispatchingJinjaLoader(self) - - def select_jinja_autoescape(self, filename): - """Returns ``True`` if autoescaping should be active for the given - template name. If no template name is given, returns `True`. - - .. versionadded:: 0.5 - """ - if filename is None: - return True - return filename.endswith((".html", ".htm", ".xml", ".xhtml")) - - def update_template_context(self, context): - """Update the template context with some commonly used variables. - This injects request, session, config and g into the template - context as well as everything template context processors want - to inject. Note that the as of Flask 0.6, the original values - in the context will not be overridden if a context processor - decides to return a value with the same key. - - :param context: the context as a dictionary that is updated in place - to add extra variables. - """ - funcs = self.template_context_processors[None] - reqctx = _request_ctx_stack.top - if reqctx is not None: - bp = reqctx.request.blueprint - if bp is not None and bp in self.template_context_processors: - funcs = chain(funcs, self.template_context_processors[bp]) - orig_ctx = context.copy() - for func in funcs: - context.update(func()) - # make sure the original values win. This makes it possible to - # easier add new variables in context processors without breaking - # existing views. - context.update(orig_ctx) - - def make_shell_context(self): - """Returns the shell context for an interactive shell for this - application. This runs all the registered shell context - processors. - - .. versionadded:: 0.11 - """ - rv = {"app": self, "g": g} - for processor in self.shell_context_processors: - rv.update(processor()) - return rv - - #: What environment the app is running in. Flask and extensions may - #: enable behaviors based on the environment, such as enabling debug - #: mode. This maps to the :data:`ENV` config key. This is set by the - #: :envvar:`FLASK_ENV` environment variable and may not behave as - #: expected if set in code. - #: - #: **Do not enable development when deploying in production.** - #: - #: Default: ``'production'`` - env = ConfigAttribute("ENV") - - @property - def debug(self): - """Whether debug mode is enabled. When using ``flask run`` to start - the development server, an interactive debugger will be shown for - unhandled exceptions, and the server will be reloaded when code - changes. This maps to the :data:`DEBUG` config key. 
This is - enabled when :attr:`env` is ``'development'`` and is overridden - by the ``FLASK_DEBUG`` environment variable. It may not behave as - expected if set in code. - - **Do not enable debug mode when deploying in production.** - - Default: ``True`` if :attr:`env` is ``'development'``, or - ``False`` otherwise. - """ - return self.config["DEBUG"] - - @debug.setter - def debug(self, value): - self.config["DEBUG"] = value - self.jinja_env.auto_reload = self.templates_auto_reload - - def run(self, host=None, port=None, debug=None, load_dotenv=True, **options): - """Runs the application on a local development server. - - Do not use ``run()`` in a production setting. It is not intended to - meet security and performance requirements for a production server. - Instead, see :ref:`deployment` for WSGI server recommendations. - - If the :attr:`debug` flag is set the server will automatically reload - for code changes and show a debugger in case an exception happened. - - If you want to run the application in debug mode, but disable the - code execution on the interactive debugger, you can pass - ``use_evalex=False`` as parameter. This will keep the debugger's - traceback screen active, but disable code execution. - - It is not recommended to use this function for development with - automatic reloading as this is badly supported. Instead you should - be using the :command:`flask` command line script's ``run`` support. - - .. admonition:: Keep in Mind - - Flask will suppress any server error with a generic error page - unless it is in debug mode. As such to enable just the - interactive debugger without the code reloading, you have to - invoke :meth:`run` with ``debug=True`` and ``use_reloader=False``. - Setting ``use_debugger`` to ``True`` without being in debug mode - won't catch any exceptions because there won't be any to - catch. - - :param host: the hostname to listen on. Set this to ``'0.0.0.0'`` to - have the server available externally as well. Defaults to - ``'127.0.0.1'`` or the host in the ``SERVER_NAME`` config variable - if present. - :param port: the port of the webserver. Defaults to ``5000`` or the - port defined in the ``SERVER_NAME`` config variable if present. - :param debug: if given, enable or disable debug mode. See - :attr:`debug`. - :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` - files to set environment variables. Will also change the working - directory to the directory containing the first file found. - :param options: the options to be forwarded to the underlying Werkzeug - server. See :func:`werkzeug.serving.run_simple` for more - information. - - .. versionchanged:: 1.0 - If installed, python-dotenv will be used to load environment - variables from :file:`.env` and :file:`.flaskenv` files. - - If set, the :envvar:`FLASK_ENV` and :envvar:`FLASK_DEBUG` - environment variables will override :attr:`env` and - :attr:`debug`. - - Threaded mode is enabled by default. - - .. versionchanged:: 0.10 - The default port is now picked from the ``SERVER_NAME`` - variable. - """ - # Change this into a no-op if the server is invoked from the - # command line. Have a look at cli.py for more information. 
- if os.environ.get("FLASK_RUN_FROM_CLI") == "true": - from .debughelpers import explain_ignored_app_run - - explain_ignored_app_run() - return - - if get_load_dotenv(load_dotenv): - cli.load_dotenv() - - # if set, let env vars override previous values - if "FLASK_ENV" in os.environ: - self.env = get_env() - self.debug = get_debug_flag() - elif "FLASK_DEBUG" in os.environ: - self.debug = get_debug_flag() - - # debug passed to method overrides all other sources - if debug is not None: - self.debug = bool(debug) - - _host = "127.0.0.1" - _port = 5000 - server_name = self.config.get("SERVER_NAME") - sn_host, sn_port = None, None - - if server_name: - sn_host, _, sn_port = server_name.partition(":") - - host = host or sn_host or _host - # pick the first value that's not None (0 is allowed) - port = int(next((p for p in (port, sn_port) if p is not None), _port)) - - options.setdefault("use_reloader", self.debug) - options.setdefault("use_debugger", self.debug) - options.setdefault("threaded", True) - - cli.show_server_banner(self.env, self.debug, self.name, False) - - from werkzeug.serving import run_simple - - try: - run_simple(host, port, self, **options) - finally: - # reset the first request information if the development server - # reset normally. This makes it possible to restart the server - # without reloader and that stuff from an interactive shell. - self._got_first_request = False - - def test_client(self, use_cookies=True, **kwargs): - """Creates a test client for this application. For information - about unit testing head over to :ref:`testing`. - - Note that if you are testing for assertions or exceptions in your - application code, you must set ``app.testing = True`` in order for the - exceptions to propagate to the test client. Otherwise, the exception - will be handled by the application (not visible to the test client) and - the only indication of an AssertionError or other exception will be a - 500 status code response to the test client. See the :attr:`testing` - attribute. For example:: - - app.testing = True - client = app.test_client() - - The test client can be used in a ``with`` block to defer the closing down - of the context until the end of the ``with`` block. This is useful if - you want to access the context locals for testing:: - - with app.test_client() as c: - rv = c.get('/?vodka=42') - assert request.args['vodka'] == '42' - - Additionally, you may pass optional keyword arguments that will then - be passed to the application's :attr:`test_client_class` constructor. - For example:: - - from flask.testing import FlaskClient - - class CustomClient(FlaskClient): - def __init__(self, *args, **kwargs): - self._authentication = kwargs.pop("authentication") - super(CustomClient,self).__init__( *args, **kwargs) - - app.test_client_class = CustomClient - client = app.test_client(authentication='Basic ....') - - See :class:`~flask.testing.FlaskClient` for more information. - - .. versionchanged:: 0.4 - added support for ``with`` block usage for the client. - - .. versionadded:: 0.7 - The `use_cookies` parameter was added as well as the ability - to override the client to be used by setting the - :attr:`test_client_class` attribute. - - .. versionchanged:: 0.11 - Added `**kwargs` to support passing additional keyword arguments to - the constructor of :attr:`test_client_class`. 
- """ - cls = self.test_client_class - if cls is None: - from .testing import FlaskClient as cls - return cls(self, self.response_class, use_cookies=use_cookies, **kwargs) - - def test_cli_runner(self, **kwargs): - """Create a CLI runner for testing CLI commands. - See :ref:`testing-cli`. - - Returns an instance of :attr:`test_cli_runner_class`, by default - :class:`~flask.testing.FlaskCliRunner`. The Flask app object is - passed as the first argument. - - .. versionadded:: 1.0 - """ - cls = self.test_cli_runner_class - - if cls is None: - from .testing import FlaskCliRunner as cls - - return cls(self, **kwargs) - - def open_session(self, request): - """Creates or opens a new session. Default implementation stores all - session data in a signed cookie. This requires that the - :attr:`secret_key` is set. Instead of overriding this method - we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.open_session`` - instead. - - :param request: an instance of :attr:`request_class`. - """ - - warnings.warn( - DeprecationWarning( - '"open_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.open_session" instead.' - ) - ) - return self.session_interface.open_session(self, request) - - def save_session(self, session, response): - """Saves the session if it needs updates. For the default - implementation, check :meth:`open_session`. Instead of overriding this - method we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.save_session`` - instead. - - :param session: the session to be saved (a - :class:`~werkzeug.contrib.securecookie.SecureCookie` - object) - :param response: an instance of :attr:`response_class` - """ - - warnings.warn( - DeprecationWarning( - '"save_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.save_session" instead.' - ) - ) - return self.session_interface.save_session(self, session, response) - - def make_null_session(self): - """Creates a new instance of a missing session. Instead of overriding - this method we recommend replacing the :class:`session_interface`. - - .. deprecated: 1.0 - Will be removed in 1.1. Use ``session_interface.make_null_session`` - instead. - - .. versionadded:: 0.7 - """ - - warnings.warn( - DeprecationWarning( - '"make_null_session" is deprecated and will be removed in 1.1. Use' - ' "session_interface.make_null_session" instead.' - ) - ) - return self.session_interface.make_null_session(self) - - @setupmethod - def register_blueprint(self, blueprint, **options): - """Register a :class:`~flask.Blueprint` on the application. Keyword - arguments passed to this method will override the defaults set on the - blueprint. - - Calls the blueprint's :meth:`~flask.Blueprint.register` method after - recording the blueprint in the application's :attr:`blueprints`. - - :param blueprint: The blueprint to register. - :param url_prefix: Blueprint routes will be prefixed with this. - :param subdomain: Blueprint routes will match on this subdomain. - :param url_defaults: Blueprint routes will use these default values for - view arguments. - :param options: Additional keyword arguments are passed to - :class:`~flask.blueprints.BlueprintSetupState`. They can be - accessed in :meth:`~flask.Blueprint.record` callbacks. - - .. 
versionadded:: 0.7 - """ - first_registration = False - - if blueprint.name in self.blueprints: - assert self.blueprints[blueprint.name] is blueprint, ( - "A name collision occurred between blueprints %r and %r. Both" - ' share the same name "%s". Blueprints that are created on the' - " fly need unique names." - % (blueprint, self.blueprints[blueprint.name], blueprint.name) - ) - else: - self.blueprints[blueprint.name] = blueprint - self._blueprint_order.append(blueprint) - first_registration = True - - blueprint.register(self, options, first_registration) - - def iter_blueprints(self): - """Iterates over all blueprints by the order they were registered. - - .. versionadded:: 0.11 - """ - return iter(self._blueprint_order) - - @setupmethod - def add_url_rule( - self, - rule, - endpoint=None, - view_func=None, - provide_automatic_options=None, - **options - ): - """Connects a URL rule. Works exactly like the :meth:`route` - decorator. If a view_func is provided it will be registered with the - endpoint. - - Basically this example:: - - @app.route('/') - def index(): - pass - - Is equivalent to the following:: - - def index(): - pass - app.add_url_rule('/', 'index', index) - - If the view_func is not provided you will need to connect the endpoint - to a view function like so:: - - app.view_functions['index'] = index - - Internally :meth:`route` invokes :meth:`add_url_rule` so if you want - to customize the behavior via subclassing you only need to change - this method. - - For more information refer to :ref:`url-route-registrations`. - - .. versionchanged:: 0.2 - `view_func` parameter added. - - .. versionchanged:: 0.6 - ``OPTIONS`` is added automatically as method. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param view_func: the function to call when serving a request to the - provided endpoint - :param provide_automatic_options: controls whether the ``OPTIONS`` - method should be added automatically. This can also be controlled - by setting the ``view_func.provide_automatic_options = False`` - before adding the rule. - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (``GET``, ``POST`` etc.). By default a rule - just listens for ``GET`` (and implicitly ``HEAD``). - Starting with Flask 0.6, ``OPTIONS`` is implicitly - added and handled by the standard request handling. - """ - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - options["endpoint"] = endpoint - methods = options.pop("methods", None) - - # if the methods are not given and the view_func object knows its - # methods we can use that instead. If neither exists, we go with - # a tuple of only ``GET`` as default. - if methods is None: - methods = getattr(view_func, "methods", None) or ("GET",) - if isinstance(methods, string_types): - raise TypeError( - "Allowed methods have to be iterables of strings, " - 'for example: @app.route(..., methods=["POST"])' - ) - methods = set(item.upper() for item in methods) - - # Methods that should always be added - required_methods = set(getattr(view_func, "required_methods", ())) - - # starting with Flask 0.8 the view_func object can disable and - # force-enable the automatic options handling. 
- if provide_automatic_options is None: - provide_automatic_options = getattr( - view_func, "provide_automatic_options", None - ) - - if provide_automatic_options is None: - if "OPTIONS" not in methods: - provide_automatic_options = True - required_methods.add("OPTIONS") - else: - provide_automatic_options = False - - # Add the required methods now. - methods |= required_methods - - rule = self.url_rule_class(rule, methods=methods, **options) - rule.provide_automatic_options = provide_automatic_options - - self.url_map.add(rule) - if view_func is not None: - old_func = self.view_functions.get(endpoint) - if old_func is not None and old_func != view_func: - raise AssertionError( - "View function mapping is overwriting an " - "existing endpoint function: %s" % endpoint - ) - self.view_functions[endpoint] = view_func - - def route(self, rule, **options): - """A decorator that is used to register a view function for a - given URL rule. This does the same thing as :meth:`add_url_rule` - but is intended for decorator usage:: - - @app.route('/') - def index(): - return 'Hello World' - - For more information refer to :ref:`url-route-registrations`. - - :param rule: the URL rule as string - :param endpoint: the endpoint for the registered URL rule. Flask - itself assumes the name of the view function as - endpoint - :param options: the options to be forwarded to the underlying - :class:`~werkzeug.routing.Rule` object. A change - to Werkzeug is handling of method options. methods - is a list of methods this rule should be limited - to (``GET``, ``POST`` etc.). By default a rule - just listens for ``GET`` (and implicitly ``HEAD``). - Starting with Flask 0.6, ``OPTIONS`` is implicitly - added and handled by the standard request handling. - """ - - def decorator(f): - endpoint = options.pop("endpoint", None) - self.add_url_rule(rule, endpoint, f, **options) - return f - - return decorator - - @setupmethod - def endpoint(self, endpoint): - """A decorator to register a function as an endpoint. - Example:: - - @app.endpoint('example.endpoint') - def example(): - return "example" - - :param endpoint: the name of the endpoint - """ - - def decorator(f): - self.view_functions[endpoint] = f - return f - - return decorator - - @staticmethod - def _get_exc_class_and_code(exc_class_or_code): - """Get the exception class being handled. For HTTP status codes - or ``HTTPException`` subclasses, return both the exception and - status code. - - :param exc_class_or_code: Any exception class, or an HTTP status - code as an integer. - """ - if isinstance(exc_class_or_code, integer_types): - exc_class = default_exceptions[exc_class_or_code] - else: - exc_class = exc_class_or_code - - assert issubclass(exc_class, Exception) - - if issubclass(exc_class, HTTPException): - return exc_class, exc_class.code - else: - return exc_class, None - - @setupmethod - def errorhandler(self, code_or_exception): - """Register a function to handle errors by code or exception class. - - A decorator that is used to register a function given an - error code. Example:: - - @app.errorhandler(404) - def page_not_found(error): - return 'This page does not exist', 404 - - You can also register handlers for arbitrary exceptions:: - - @app.errorhandler(DatabaseError) - def special_exception_handler(error): - return 'Database connection failed', 500 - - .. versionadded:: 0.7 - Use :meth:`register_error_handler` instead of modifying - :attr:`error_handler_spec` directly, for application wide error - handlers. - - .. 
versionadded:: 0.7 - One can now additionally also register custom exception types - that do not necessarily have to be a subclass of the - :class:`~werkzeug.exceptions.HTTPException` class. - - :param code_or_exception: the code as integer for the handler, or - an arbitrary exception - """ - - def decorator(f): - self._register_error_handler(None, code_or_exception, f) - return f - - return decorator - - @setupmethod - def register_error_handler(self, code_or_exception, f): - """Alternative error attach function to the :meth:`errorhandler` - decorator that is more straightforward to use for non decorator - usage. - - .. versionadded:: 0.7 - """ - self._register_error_handler(None, code_or_exception, f) - - @setupmethod - def _register_error_handler(self, key, code_or_exception, f): - """ - :type key: None|str - :type code_or_exception: int|T<=Exception - :type f: callable - """ - if isinstance(code_or_exception, HTTPException): # old broken behavior - raise ValueError( - "Tried to register a handler for an exception instance {0!r}." - " Handlers can only be registered for exception classes or" - " HTTP error codes.".format(code_or_exception) - ) - - try: - exc_class, code = self._get_exc_class_and_code(code_or_exception) - except KeyError: - raise KeyError( - "'{0}' is not a recognized HTTP error code. Use a subclass of" - " HTTPException with that code instead.".format(code_or_exception) - ) - - handlers = self.error_handler_spec.setdefault(key, {}).setdefault(code, {}) - handlers[exc_class] = f - - @setupmethod - def template_filter(self, name=None): - """A decorator that is used to register custom template filter. - You can specify a name for the filter, otherwise the function - name will be used. Example:: - - @app.template_filter() - def reverse(s): - return s[::-1] - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - - def decorator(f): - self.add_template_filter(f, name=name) - return f - - return decorator - - @setupmethod - def add_template_filter(self, f, name=None): - """Register a custom template filter. Works exactly like the - :meth:`template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - self.jinja_env.filters[name or f.__name__] = f - - @setupmethod - def template_test(self, name=None): - """A decorator that is used to register custom template test. - You can specify a name for the test, otherwise the function - name will be used. Example:: - - @app.template_test() - def is_prime(n): - if n == 2: - return True - for i in range(2, int(math.ceil(math.sqrt(n))) + 1): - if n % i == 0: - return False - return True - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - - def decorator(f): - self.add_template_test(f, name=name) - return f - - return decorator - - @setupmethod - def add_template_test(self, f, name=None): - """Register a custom template test. Works exactly like the - :meth:`template_test` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - self.jinja_env.tests[name or f.__name__] = f - - @setupmethod - def template_global(self, name=None): - """A decorator that is used to register a custom template global function. - You can specify a name for the global function, otherwise the function - name will be used. Example:: - - @app.template_global() - def double(n): - return 2 * n - - .. 
versionadded:: 0.10 - - :param name: the optional name of the global function, otherwise the - function name will be used. - """ - - def decorator(f): - self.add_template_global(f, name=name) - return f - - return decorator - - @setupmethod - def add_template_global(self, f, name=None): - """Register a custom template global function. Works exactly like the - :meth:`template_global` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the global function, otherwise the - function name will be used. - """ - self.jinja_env.globals[name or f.__name__] = f - - @setupmethod - def before_request(self, f): - """Registers a function to run before each request. - - For example, this can be used to open a database connection, or to load - the logged in user from the session. - - The function will be called without any arguments. If it returns a - non-None value, the value is handled as if it was the return value from - the view, and further request handling is stopped. - """ - self.before_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def before_first_request(self, f): - """Registers a function to be run before the first request to this - instance of the application. - - The function will be called without any arguments and its return - value is ignored. - - .. versionadded:: 0.8 - """ - self.before_first_request_funcs.append(f) - return f - - @setupmethod - def after_request(self, f): - """Register a function to be run after each request. - - Your function must take one parameter, an instance of - :attr:`response_class` and return a new response object or the - same (see :meth:`process_response`). - - As of Flask 0.7 this function might not be executed at the end of the - request in case an unhandled exception occurred. - """ - self.after_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_request(self, f): - """Register a function to be run at the end of each request, - regardless of whether there was an exception or not. These functions - are executed when the request context is popped, even if not an - actual request was performed. - - Example:: - - ctx = app.test_request_context() - ctx.push() - ... - ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the request context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Generally teardown functions must take every necessary step to avoid - that they will fail. If they do execute code that might fail they - will have to surround the execution of these code by try/except - statements and log occurring errors. - - When a teardown function was called because of an exception it will - be passed an error object. - - The return values of teardown functions are ignored. - - .. admonition:: Debug Note - - In debug mode Flask will not tear down a request on an exception - immediately. Instead it will keep it alive so that the interactive - debugger can still access it. This behavior can be controlled - by the ``PRESERVE_CONTEXT_ON_EXCEPTION`` configuration variable. - """ - self.teardown_request_funcs.setdefault(None, []).append(f) - return f - - @setupmethod - def teardown_appcontext(self, f): - """Registers a function to be called when the application context - ends. These functions are typically also called when the request - context is popped. - - Example:: - - ctx = app.app_context() - ctx.push() - ... 
- ctx.pop() - - When ``ctx.pop()`` is executed in the above example, the teardown - functions are called just before the app context moves from the - stack of active contexts. This becomes relevant if you are using - such constructs in tests. - - Since a request context typically also manages an application - context it would also be called when you pop a request context. - - When a teardown function was called because of an unhandled exception - it will be passed an error object. If an :meth:`errorhandler` is - registered, it will handle the exception and the teardown will not - receive it. - - The return values of teardown functions are ignored. - - .. versionadded:: 0.9 - """ - self.teardown_appcontext_funcs.append(f) - return f - - @setupmethod - def context_processor(self, f): - """Registers a template context processor function.""" - self.template_context_processors[None].append(f) - return f - - @setupmethod - def shell_context_processor(self, f): - """Registers a shell context processor function. - - .. versionadded:: 0.11 - """ - self.shell_context_processors.append(f) - return f - - @setupmethod - def url_value_preprocessor(self, f): - """Register a URL value preprocessor function for all view - functions in the application. These functions will be called before the - :meth:`before_request` functions. - - The function can modify the values captured from the matched URL before - they are passed to the view. For example, this can be used to pop a - common language code value and place it in ``g`` rather than pass it to - every view. - - The function is passed the endpoint name and values dict. The return - value is ignored. - """ - self.url_value_preprocessors.setdefault(None, []).append(f) - return f - - @setupmethod - def url_defaults(self, f): - """Callback function for URL defaults for all view functions of the - application. It's called with the endpoint and values and should - update the values passed in place. - """ - self.url_default_functions.setdefault(None, []).append(f) - return f - - def _find_error_handler(self, e): - """Return a registered error handler for an exception in this order: - blueprint handler for a specific code, app handler for a specific code, - blueprint handler for an exception class, app handler for an exception - class, or ``None`` if a suitable handler is not found. - """ - exc_class, code = self._get_exc_class_and_code(type(e)) - - for name, c in ( - (request.blueprint, code), - (None, code), - (request.blueprint, None), - (None, None), - ): - handler_map = self.error_handler_spec.setdefault(name, {}).get(c) - - if not handler_map: - continue - - for cls in exc_class.__mro__: - handler = handler_map.get(cls) - - if handler is not None: - return handler - - def handle_http_exception(self, e): - """Handles an HTTP exception. By default this will invoke the - registered error handlers and fall back to returning the - exception as response. - - .. versionchanged:: 1.0.3 - ``RoutingException``, used internally for actions such as - slash redirects during routing, is not passed to error - handlers. - - .. versionchanged:: 1.0 - Exceptions are looked up by code *and* by MRO, so - ``HTTPException`` subclasses can be handled with a catch-all - handler for the base ``HTTPException``. - - .. versionadded:: 0.3 - """ - # Proxy exceptions don't have error codes. 
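# A minimal sketch of the handler registration and lookup order described in
# _find_error_handler above, assuming a Flask 1.x app: handlers can be keyed
# by HTTP status code or by exception class, and class-based handlers are
# matched via the exception's MRO. The names here are illustrative only.
from flask import Flask, jsonify

app = Flask(__name__)

@app.errorhandler(404)
def handle_404(e):
    # registered under the status code 404
    return jsonify(error="not found"), 404

class PaymentRequired(Exception):
    # hypothetical application-specific exception
    pass

@app.errorhandler(PaymentRequired)
def handle_payment_required(e):
    # registered under the exception class; also fires for subclasses
    return jsonify(error=str(e)), 402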
We want to always return - # those unchanged as errors - if e.code is None: - return e - - # RoutingExceptions are used internally to trigger routing - # actions, such as slash redirects raising RequestRedirect. They - # are not raised or handled in user code. - if isinstance(e, RoutingException): - return e - - handler = self._find_error_handler(e) - if handler is None: - return e - return handler(e) - - def trap_http_exception(self, e): - """Checks if an HTTP exception should be trapped or not. By default - this will return ``False`` for all exceptions except for a bad request - key error if ``TRAP_BAD_REQUEST_ERRORS`` is set to ``True``. It - also returns ``True`` if ``TRAP_HTTP_EXCEPTIONS`` is set to ``True``. - - This is called for all HTTP exceptions raised by a view function. - If it returns ``True`` for any exception the error handler for this - exception is not called and it shows up as regular exception in the - traceback. This is helpful for debugging implicitly raised HTTP - exceptions. - - .. versionchanged:: 1.0 - Bad request errors are not trapped by default in debug mode. - - .. versionadded:: 0.8 - """ - if self.config["TRAP_HTTP_EXCEPTIONS"]: - return True - - trap_bad_request = self.config["TRAP_BAD_REQUEST_ERRORS"] - - # if unset, trap key errors in debug mode - if ( - trap_bad_request is None - and self.debug - and isinstance(e, BadRequestKeyError) - ): - return True - - if trap_bad_request: - return isinstance(e, BadRequest) - - return False - - def handle_user_exception(self, e): - """This method is called whenever an exception occurs that - should be handled. A special case is :class:`~werkzeug - .exceptions.HTTPException` which is forwarded to the - :meth:`handle_http_exception` method. This function will either - return a response value or reraise the exception with the same - traceback. - - .. versionchanged:: 1.0 - Key errors raised from request data like ``form`` show the - bad key in debug mode rather than a generic bad request - message. - - .. versionadded:: 0.7 - """ - exc_type, exc_value, tb = sys.exc_info() - assert exc_value is e - # ensure not to trash sys.exc_info() at that point in case someone - # wants the traceback preserved in handle_http_exception. Of course - # we cannot prevent users from trashing it themselves in a custom - # trap_http_exception method so that's their fault then. - - if isinstance(e, BadRequestKeyError): - if self.debug or self.config["TRAP_BAD_REQUEST_ERRORS"]: - e.show_exception = True - - # Werkzeug < 0.15 doesn't add the KeyError to the 400 - # message, add it in manually. - # TODO: clean up once Werkzeug >= 0.15.5 is required - if e.args[0] not in e.get_description(): - e.description = "KeyError: '{}'".format(*e.args) - elif not hasattr(BadRequestKeyError, "show_exception"): - e.args = () - - if isinstance(e, HTTPException) and not self.trap_http_exception(e): - return self.handle_http_exception(e) - - handler = self._find_error_handler(e) - - if handler is None: - reraise(exc_type, exc_value, tb) - return handler(e) - - def handle_exception(self, e): - """Handle an exception that did not have an error handler - associated with it, or that was raised from an error handler. - This always causes a 500 ``InternalServerError``. - - Always sends the :data:`got_request_exception` signal. - - If :attr:`propagate_exceptions` is ``True``, such as in debug - mode, the error will be re-raised so that the debugger can - display it. 
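# Hedged sketch of the trapping config described above: with these flags set,
# HTTP errors raised in a view surface as ordinary tracebacks (handy with the
# interactive debugger) instead of being passed to registered error handlers.
from flask import Flask, request

app = Flask(__name__)
app.config["TRAP_HTTP_EXCEPTIONS"] = True      # trap every HTTPException
app.config["TRAP_BAD_REQUEST_ERRORS"] = True   # trap BadRequestKeyError too

@app.route("/needs-key", methods=["POST"])
def needs_key():
    # request.form["missing"] raises BadRequestKeyError; with the flags above
    # it propagates as a traceback rather than becoming a plain 400 response.
    return request.form["missing"]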
Otherwise, the original exception is logged, and - an :exc:`~werkzeug.exceptions.InternalServerError` is returned. - - If an error handler is registered for ``InternalServerError`` or - ``500``, it will be used. For consistency, the handler will - always receive the ``InternalServerError``. The original - unhandled exception is available as ``e.original_exception``. - - .. note:: - Prior to Werkzeug 1.0.0, ``InternalServerError`` will not - always have an ``original_exception`` attribute. Use - ``getattr(e, "original_exception", None)`` to simulate the - behavior for compatibility. - - .. versionchanged:: 1.1.0 - Always passes the ``InternalServerError`` instance to the - handler, setting ``original_exception`` to the unhandled - error. - - .. versionchanged:: 1.1.0 - ``after_request`` functions and other finalization are done - even for the default 500 response when there is no handler. - - .. versionadded:: 0.3 - """ - exc_type, exc_value, tb = sys.exc_info() - got_request_exception.send(self, exception=e) - - if self.propagate_exceptions: - # if we want to repropagate the exception, we can attempt to - # raise it with the whole traceback in case we can do that - # (the function was actually called from the except part) - # otherwise, we just raise the error again - if exc_value is e: - reraise(exc_type, exc_value, tb) - else: - raise e - - self.log_exception((exc_type, exc_value, tb)) - server_error = InternalServerError() - # TODO: pass as param when Werkzeug>=1.0.0 is required - # TODO: also remove note about this from docstring and docs - server_error.original_exception = e - handler = self._find_error_handler(server_error) - - if handler is not None: - server_error = handler(server_error) - - return self.finalize_request(server_error, from_error_handler=True) - - def log_exception(self, exc_info): - """Logs an exception. This is called by :meth:`handle_exception` - if debugging is disabled and right before the handler is called. - The default implementation logs the exception as an error on the - :attr:`logger`. - - .. versionadded:: 0.8 - """ - self.logger.error( - "Exception on %s [%s]" % (request.path, request.method), exc_info=exc_info - ) - - def raise_routing_exception(self, request): - """Exceptions that are recorded during routing are reraised with - this method. During debug we do not reraise redirect requests - for non ``GET``, ``HEAD``, or ``OPTIONS`` requests; we raise - a different error instead to help debug the situation. - - :internal: - """ - if ( - not self.debug - or not isinstance(request.routing_exception, RequestRedirect) - or request.method in ("GET", "HEAD", "OPTIONS") - ): - raise request.routing_exception - - from .debughelpers import FormDataRoutingRedirect - - raise FormDataRoutingRedirect(request) - - def dispatch_request(self): - """Does the request dispatching. Matches the URL and returns the - return value of the view or error handler. This does not have to - be a response object. In order to convert the return value to a - proper response object, call :func:`make_response`. - - .. versionchanged:: 0.7 - This no longer does the exception handling; this code was - moved to the new :meth:`full_dispatch_request`. 
- """ - req = _request_ctx_stack.top.request - if req.routing_exception is not None: - self.raise_routing_exception(req) - rule = req.url_rule - # if we provide automatic options for this URL and the - # request came with the OPTIONS method, reply automatically - if ( - getattr(rule, "provide_automatic_options", False) - and req.method == "OPTIONS" - ): - return self.make_default_options_response() - # otherwise dispatch to the handler for that endpoint - return self.view_functions[rule.endpoint](**req.view_args) - - def full_dispatch_request(self): - """Dispatches the request and on top of that performs request - pre and postprocessing as well as HTTP exception catching and - error handling. - - .. versionadded:: 0.7 - """ - self.try_trigger_before_first_request_functions() - try: - request_started.send(self) - rv = self.preprocess_request() - if rv is None: - rv = self.dispatch_request() - except Exception as e: - rv = self.handle_user_exception(e) - return self.finalize_request(rv) - - def finalize_request(self, rv, from_error_handler=False): - """Given the return value from a view function this finalizes - the request by converting it into a response and invoking the - postprocessing functions. This is invoked for both normal - request dispatching as well as error handlers. - - Because this means that it might be called as a result of a - failure a special safe mode is available which can be enabled - with the `from_error_handler` flag. If enabled, failures in - response processing will be logged and otherwise ignored. - - :internal: - """ - response = self.make_response(rv) - try: - response = self.process_response(response) - request_finished.send(self, response=response) - except Exception: - if not from_error_handler: - raise - self.logger.exception( - "Request finalizing failed with an error while handling an error" - ) - return response - - def try_trigger_before_first_request_functions(self): - """Called before each request and will ensure that it triggers - the :attr:`before_first_request_funcs` and only exactly once per - application instance (which means process usually). - - :internal: - """ - if self._got_first_request: - return - with self._before_request_lock: - if self._got_first_request: - return - for func in self.before_first_request_funcs: - func() - self._got_first_request = True - - def make_default_options_response(self): - """This method is called to create the default ``OPTIONS`` response. - This can be changed through subclassing to change the default - behavior of ``OPTIONS`` responses. - - .. versionadded:: 0.7 - """ - adapter = _request_ctx_stack.top.url_adapter - if hasattr(adapter, "allowed_methods"): - methods = adapter.allowed_methods() - else: - # fallback for Werkzeug < 0.7 - methods = [] - try: - adapter.match(method="--") - except MethodNotAllowed as e: - methods = e.valid_methods - except HTTPException: - pass - rv = self.response_class() - rv.allow.update(methods) - return rv - - def should_ignore_error(self, error): - """This is called to figure out if an error should be ignored - or not as far as the teardown system is concerned. If this - function returns ``True`` then the teardown handlers will not be - passed the error. - - .. versionadded:: 0.10 - """ - return False - - def make_response(self, rv): - """Convert the return value from a view function to an instance of - :attr:`response_class`. - - :param rv: the return value from the view function. The view function - must return a response. 
Returning ``None``, or the view ending - without returning, is not allowed. The following types are allowed - for ``view_rv``: - - ``str`` (``unicode`` in Python 2) - A response object is created with the string encoded to UTF-8 - as the body. - - ``bytes`` (``str`` in Python 2) - A response object is created with the bytes as the body. - - ``dict`` - A dictionary that will be jsonify'd before being returned. - - ``tuple`` - Either ``(body, status, headers)``, ``(body, status)``, or - ``(body, headers)``, where ``body`` is any of the other types - allowed here, ``status`` is a string or an integer, and - ``headers`` is a dictionary or a list of ``(key, value)`` - tuples. If ``body`` is a :attr:`response_class` instance, - ``status`` overwrites the existing value and ``headers`` are - extended. - - :attr:`response_class` - The object is returned unchanged. - - other :class:`~werkzeug.wrappers.Response` class - The object is coerced to :attr:`response_class`. - - :func:`callable` - The function is called as a WSGI application. The result is - used to create a response object. - - .. versionchanged:: 0.9 - Previously a tuple was interpreted as the arguments for the - response object. - """ - - status = headers = None - - # unpack tuple returns - if isinstance(rv, tuple): - len_rv = len(rv) - - # a 3-tuple is unpacked directly - if len_rv == 3: - rv, status, headers = rv - # decide if a 2-tuple has status or headers - elif len_rv == 2: - if isinstance(rv[1], (Headers, dict, tuple, list)): - rv, headers = rv - else: - rv, status = rv - # other sized tuples are not allowed - else: - raise TypeError( - "The view function did not return a valid response tuple." - " The tuple must have the form (body, status, headers)," - " (body, status), or (body, headers)." - ) - - # the body must not be None - if rv is None: - raise TypeError( - "The view function did not return a valid response. The" - " function either returned None or ended without a return" - " statement." - ) - - # make sure the body is an instance of the response class - if not isinstance(rv, self.response_class): - if isinstance(rv, (text_type, bytes, bytearray)): - # let the response class set the status and headers instead of - # waiting to do it manually, so that the class can handle any - # special logic - rv = self.response_class(rv, status=status, headers=headers) - status = headers = None - elif isinstance(rv, dict): - rv = jsonify(rv) - elif isinstance(rv, BaseResponse) or callable(rv): - # evaluate a WSGI callable, or coerce a different response - # class to the correct type - try: - rv = self.response_class.force_type(rv, request.environ) - except TypeError as e: - new_error = TypeError( - "{e}\nThe view function did not return a valid" - " response. The return type must be a string, dict, tuple," - " Response instance, or WSGI callable, but it was a" - " {rv.__class__.__name__}.".format(e=e, rv=rv) - ) - reraise(TypeError, new_error, sys.exc_info()[2]) - else: - raise TypeError( - "The view function did not return a valid" - " response. 
The return type must be a string, dict, tuple," - " Response instance, or WSGI callable, but it was a" - " {rv.__class__.__name__}.".format(rv=rv) - ) - - # prefer the status if it was provided - if status is not None: - if isinstance(status, (text_type, bytes, bytearray)): - rv.status = status - else: - rv.status_code = status - - # extend existing headers with provided headers - if headers: - rv.headers.extend(headers) - - return rv - - def create_url_adapter(self, request): - """Creates a URL adapter for the given request. The URL adapter - is created at a point where the request context is not yet set - up so the request is passed explicitly. - - .. versionadded:: 0.6 - - .. versionchanged:: 0.9 - This can now also be called without a request object when the - URL adapter is created for the application context. - - .. versionchanged:: 1.0 - :data:`SERVER_NAME` no longer implicitly enables subdomain - matching. Use :attr:`subdomain_matching` instead. - """ - if request is not None: - # If subdomain matching is disabled (the default), use the - # default subdomain in all cases. This should be the default - # in Werkzeug but it currently does not have that feature. - subdomain = ( - (self.url_map.default_subdomain or None) - if not self.subdomain_matching - else None - ) - return self.url_map.bind_to_environ( - request.environ, - server_name=self.config["SERVER_NAME"], - subdomain=subdomain, - ) - # We need at the very least the server name to be set for this - # to work. - if self.config["SERVER_NAME"] is not None: - return self.url_map.bind( - self.config["SERVER_NAME"], - script_name=self.config["APPLICATION_ROOT"], - url_scheme=self.config["PREFERRED_URL_SCHEME"], - ) - - def inject_url_defaults(self, endpoint, values): - """Injects the URL defaults for the given endpoint directly into - the values dictionary passed. This is used internally and - automatically called on URL building. - - .. versionadded:: 0.7 - """ - funcs = self.url_default_functions.get(None, ()) - if "." in endpoint: - bp = endpoint.rsplit(".", 1)[0] - funcs = chain(funcs, self.url_default_functions.get(bp, ())) - for func in funcs: - func(endpoint, values) - - def handle_url_build_error(self, error, endpoint, values): - """Handle :class:`~werkzeug.routing.BuildError` on :meth:`url_for`. - """ - exc_type, exc_value, tb = sys.exc_info() - for handler in self.url_build_error_handlers: - try: - rv = handler(error, endpoint, values) - if rv is not None: - return rv - except BuildError as e: - # make error available outside except block (py3) - error = e - - # At this point we want to reraise the exception. If the error is - # still the same one we can reraise it with the original traceback, - # otherwise we raise it from here. - if error is exc_value: - reraise(exc_type, exc_value, tb) - raise error - - def preprocess_request(self): - """Called before the request is dispatched. Calls - :attr:`url_value_preprocessors` registered with the app and the - current blueprint (if any). Then calls :attr:`before_request_funcs` - registered with the app and the blueprint. - - If any :meth:`before_request` handler returns a non-None value, the - value is handled as if it was the return value from the view, and - further request handling is stopped. 
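# The return shapes accepted by make_response, as enumerated in its docstring
# above (str/bytes body, dict -> jsonify, 2-/3-tuples) -- a small sketch,
# assuming the Flask 1.1 semantics of the code being removed here:
from flask import Flask

app = Flask(__name__)

@app.route("/text")
def text():
    return "hello"                           # str body, implicit 200

@app.route("/json")
def as_json():
    return {"status": "ok"}                  # dict is jsonify'd

@app.route("/created")
def created():
    return "made it", 201, {"X-Token": "1"}  # (body, status, headers)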
- """ - - bp = _request_ctx_stack.top.request.blueprint - - funcs = self.url_value_preprocessors.get(None, ()) - if bp is not None and bp in self.url_value_preprocessors: - funcs = chain(funcs, self.url_value_preprocessors[bp]) - for func in funcs: - func(request.endpoint, request.view_args) - - funcs = self.before_request_funcs.get(None, ()) - if bp is not None and bp in self.before_request_funcs: - funcs = chain(funcs, self.before_request_funcs[bp]) - for func in funcs: - rv = func() - if rv is not None: - return rv - - def process_response(self, response): - """Can be overridden in order to modify the response object - before it's sent to the WSGI server. By default this will - call all the :meth:`after_request` decorated functions. - - .. versionchanged:: 0.5 - As of Flask 0.5 the functions registered for after request - execution are called in reverse order of registration. - - :param response: a :attr:`response_class` object. - :return: a new response object or the same, has to be an - instance of :attr:`response_class`. - """ - ctx = _request_ctx_stack.top - bp = ctx.request.blueprint - funcs = ctx._after_request_functions - if bp is not None and bp in self.after_request_funcs: - funcs = chain(funcs, reversed(self.after_request_funcs[bp])) - if None in self.after_request_funcs: - funcs = chain(funcs, reversed(self.after_request_funcs[None])) - for handler in funcs: - response = handler(response) - if not self.session_interface.is_null_session(ctx.session): - self.session_interface.save_session(self, ctx.session, response) - return response - - def do_teardown_request(self, exc=_sentinel): - """Called after the request is dispatched and the response is - returned, right before the request context is popped. - - This calls all functions decorated with - :meth:`teardown_request`, and :meth:`Blueprint.teardown_request` - if a blueprint handled the request. Finally, the - :data:`request_tearing_down` signal is sent. - - This is called by - :meth:`RequestContext.pop() `, - which may be delayed during testing to maintain access to - resources. - - :param exc: An unhandled exception raised while dispatching the - request. Detected from the current exception information if - not passed. Passed to each teardown function. - - .. versionchanged:: 0.9 - Added the ``exc`` argument. - """ - if exc is _sentinel: - exc = sys.exc_info()[1] - funcs = reversed(self.teardown_request_funcs.get(None, ())) - bp = _request_ctx_stack.top.request.blueprint - if bp is not None and bp in self.teardown_request_funcs: - funcs = chain(funcs, reversed(self.teardown_request_funcs[bp])) - for func in funcs: - func(exc) - request_tearing_down.send(self, exc=exc) - - def do_teardown_appcontext(self, exc=_sentinel): - """Called right before the application context is popped. - - When handling a request, the application context is popped - after the request context. See :meth:`do_teardown_request`. - - This calls all functions decorated with - :meth:`teardown_appcontext`. Then the - :data:`appcontext_tearing_down` signal is sent. - - This is called by - :meth:`AppContext.pop() `. - - .. versionadded:: 0.9 - """ - if exc is _sentinel: - exc = sys.exc_info()[1] - for func in reversed(self.teardown_appcontext_funcs): - func(exc) - appcontext_tearing_down.send(self, exc=exc) - - def app_context(self): - """Create an :class:`~flask.ctx.AppContext`. Use as a ``with`` - block to push the context, which will make :data:`current_app` - point at this application. 
- - An application context is automatically pushed by - :meth:`RequestContext.push() ` - when handling a request, and when running a CLI command. Use - this to manually create a context outside of these situations. - - :: - - with app.app_context(): - init_db() - - See :doc:`/appcontext`. - - .. versionadded:: 0.9 - """ - return AppContext(self) - - def request_context(self, environ): - """Create a :class:`~flask.ctx.RequestContext` representing a - WSGI environment. Use a ``with`` block to push the context, - which will make :data:`request` point at this request. - - See :doc:`/reqcontext`. - - Typically you should not call this from your own code. A request - context is automatically pushed by the :meth:`wsgi_app` when - handling a request. Use :meth:`test_request_context` to create - an environment and context instead of this method. - - :param environ: a WSGI environment - """ - return RequestContext(self, environ) - - def test_request_context(self, *args, **kwargs): - """Create a :class:`~flask.ctx.RequestContext` for a WSGI - environment created from the given values. This is mostly useful - during testing, where you may want to run a function that uses - request data without dispatching a full request. - - See :doc:`/reqcontext`. - - Use a ``with`` block to push the context, which will make - :data:`request` point at the request for the created - environment. :: - - with test_request_context(...): - generate_report() - - When using the shell, it may be easier to push and pop the - context manually to avoid indentation. :: - - ctx = app.test_request_context(...) - ctx.push() - ... - ctx.pop() - - Takes the same arguments as Werkzeug's - :class:`~werkzeug.test.EnvironBuilder`, with some defaults from - the application. See the linked Werkzeug docs for most of the - available arguments. Flask-specific behavior is listed here. - - :param path: URL path being requested. - :param base_url: Base URL where the app is being served, which - ``path`` is relative to. If not given, built from - :data:`PREFERRED_URL_SCHEME`, ``subdomain``, - :data:`SERVER_NAME`, and :data:`APPLICATION_ROOT`. - :param subdomain: Subdomain name to append to - :data:`SERVER_NAME`. - :param url_scheme: Scheme to use instead of - :data:`PREFERRED_URL_SCHEME`. - :param data: The request body, either as a string or a dict of - form keys and values. - :param json: If given, this is serialized as JSON and passed as - ``data``. Also defaults ``content_type`` to - ``application/json``. - :param args: other positional arguments passed to - :class:`~werkzeug.test.EnvironBuilder`. - :param kwargs: other keyword arguments passed to - :class:`~werkzeug.test.EnvironBuilder`. - """ - from .testing import EnvironBuilder - - builder = EnvironBuilder(self, *args, **kwargs) - - try: - return self.request_context(builder.get_environ()) - finally: - builder.close() - - def wsgi_app(self, environ, start_response): - """The actual WSGI application. This is not implemented in - :meth:`__call__` so that middlewares can be applied without - losing a reference to the app object. Instead of doing this:: - - app = MyMiddleware(app) - - It's a better idea to do this instead:: - - app.wsgi_app = MyMiddleware(app.wsgi_app) - - Then you still have the original application object around and - can continue to call methods on it. - - .. versionchanged:: 0.7 - Teardown events for the request and app contexts are called - even if an unhandled error occurs. Other events may not be - called depending on when an error occurs during dispatch. 
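# Sketch of test_request_context as documented above: build a synthetic
# request environment and run code against it without dispatching a request.
from flask import Flask, request

app = Flask(__name__)

with app.test_request_context("/report?user=42", method="GET"):
    # `request` is now bound to the synthetic environment
    assert request.path == "/report"
    assert request.args["user"] == "42"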
- See :ref:`callbacks-and-errors`. - - :param environ: A WSGI environment. - :param start_response: A callable accepting a status code, - a list of headers, and an optional exception context to - start the response. - """ - ctx = self.request_context(environ) - error = None - try: - try: - ctx.push() - response = self.full_dispatch_request() - except Exception as e: - error = e - response = self.handle_exception(e) - except: # noqa: B001 - error = sys.exc_info()[1] - raise - return response(environ, start_response) - finally: - if self.should_ignore_error(error): - error = None - ctx.auto_pop(error) - - def __call__(self, environ, start_response): - """The WSGI server calls the Flask application object as the - WSGI application. This calls :meth:`wsgi_app` which can be - wrapped to applying middleware.""" - return self.wsgi_app(environ, start_response) - - def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, self.name) diff --git a/venv/lib/python3.7/site-packages/flask/blueprints.py b/venv/lib/python3.7/site-packages/flask/blueprints.py deleted file mode 100644 index 8978104..0000000 --- a/venv/lib/python3.7/site-packages/flask/blueprints.py +++ /dev/null @@ -1,569 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.blueprints - ~~~~~~~~~~~~~~~~ - - Blueprints are the recommended way to implement larger or more - pluggable applications in Flask 0.7 and later. - - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" -from functools import update_wrapper - -from .helpers import _endpoint_from_view_func -from .helpers import _PackageBoundObject - -# a singleton sentinel value for parameter defaults -_sentinel = object() - - -class BlueprintSetupState(object): - """Temporary holder object for registering a blueprint with the - application. An instance of this class is created by the - :meth:`~flask.Blueprint.make_setup_state` method and later passed - to all register callback functions. - """ - - def __init__(self, blueprint, app, options, first_registration): - #: a reference to the current application - self.app = app - - #: a reference to the blueprint that created this setup state. - self.blueprint = blueprint - - #: a dictionary with all options that were passed to the - #: :meth:`~flask.Flask.register_blueprint` method. - self.options = options - - #: as blueprints can be registered multiple times with the - #: application and not everything wants to be registered - #: multiple times on it, this attribute can be used to figure - #: out if the blueprint was registered in the past already. - self.first_registration = first_registration - - subdomain = self.options.get("subdomain") - if subdomain is None: - subdomain = self.blueprint.subdomain - - #: The subdomain that the blueprint should be active for, ``None`` - #: otherwise. - self.subdomain = subdomain - - url_prefix = self.options.get("url_prefix") - if url_prefix is None: - url_prefix = self.blueprint.url_prefix - #: The prefix that should be used for all URLs defined on the - #: blueprint. - self.url_prefix = url_prefix - - #: A dictionary with URL defaults that is added to each and every - #: URL that was defined with the blueprint. - self.url_defaults = dict(self.blueprint.url_values_defaults) - self.url_defaults.update(self.options.get("url_defaults", ())) - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """A helper method to register a rule (and optionally a view function) - to the application. The endpoint is automatically prefixed with the - blueprint's name. 
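# The wrapping pattern recommended in the wsgi_app docstring above keeps the
# Flask object intact while layering WSGI middleware. A hedged sketch with a
# hypothetical middleware class (not part of Flask):
from flask import Flask

class LowercasePathMiddleware(object):
    def __init__(self, wsgi_app):
        self.wsgi_app = wsgi_app

    def __call__(self, environ, start_response):
        # normalise the path before Flask routes the request
        environ["PATH_INFO"] = environ.get("PATH_INFO", "").lower()
        return self.wsgi_app(environ, start_response)

app = Flask(__name__)
app.wsgi_app = LowercasePathMiddleware(app.wsgi_app)  # app methods still usable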
- """ - if self.url_prefix is not None: - if rule: - rule = "/".join((self.url_prefix.rstrip("/"), rule.lstrip("/"))) - else: - rule = self.url_prefix - options.setdefault("subdomain", self.subdomain) - if endpoint is None: - endpoint = _endpoint_from_view_func(view_func) - defaults = self.url_defaults - if "defaults" in options: - defaults = dict(defaults, **options.pop("defaults")) - self.app.add_url_rule( - rule, - "%s.%s" % (self.blueprint.name, endpoint), - view_func, - defaults=defaults, - **options - ) - - -class Blueprint(_PackageBoundObject): - """Represents a blueprint, a collection of routes and other - app-related functions that can be registered on a real application - later. - - A blueprint is an object that allows defining application functions - without requiring an application object ahead of time. It uses the - same decorators as :class:`~flask.Flask`, but defers the need for an - application by recording them for later registration. - - Decorating a function with a blueprint creates a deferred function - that is called with :class:`~flask.blueprints.BlueprintSetupState` - when the blueprint is registered on an application. - - See :ref:`blueprints` for more information. - - .. versionchanged:: 1.1.0 - Blueprints have a ``cli`` group to register nested CLI commands. - The ``cli_group`` parameter controls the name of the group under - the ``flask`` command. - - .. versionadded:: 0.7 - - :param name: The name of the blueprint. Will be prepended to each - endpoint name. - :param import_name: The name of the blueprint package, usually - ``__name__``. This helps locate the ``root_path`` for the - blueprint. - :param static_folder: A folder with static files that should be - served by the blueprint's static route. The path is relative to - the blueprint's root path. Blueprint static files are disabled - by default. - :param static_url_path: The url to serve static files from. - Defaults to ``static_folder``. If the blueprint does not have - a ``url_prefix``, the app's static route will take precedence, - and the blueprint's static files won't be accessible. - :param template_folder: A folder with templates that should be added - to the app's template search path. The path is relative to the - blueprint's root path. Blueprint templates are disabled by - default. Blueprint templates have a lower precedence than those - in the app's templates folder. - :param url_prefix: A path to prepend to all of the blueprint's URLs, - to make them distinct from the rest of the app's routes. - :param subdomain: A subdomain that blueprint routes will match on by - default. - :param url_defaults: A dict of default values that blueprint routes - will receive by default. - :param root_path: By default, the blueprint will automatically this - based on ``import_name``. In certain situations this automatic - detection can fail, so the path can be specified manually - instead. - """ - - warn_on_modifications = False - _got_registered_once = False - - #: Blueprint local JSON decoder class to use. - #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_encoder`. - json_encoder = None - #: Blueprint local JSON decoder class to use. - #: Set to ``None`` to use the app's :class:`~flask.app.Flask.json_decoder`. - json_decoder = None - - # TODO remove the next three attrs when Sphinx :inherited-members: works - # https://github.com/sphinx-doc/sphinx/issues/741 - - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. 
- import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. - root_path = None - - def __init__( - self, - name, - import_name, - static_folder=None, - static_url_path=None, - template_folder=None, - url_prefix=None, - subdomain=None, - url_defaults=None, - root_path=None, - cli_group=_sentinel, - ): - _PackageBoundObject.__init__( - self, import_name, template_folder, root_path=root_path - ) - self.name = name - self.url_prefix = url_prefix - self.subdomain = subdomain - self.static_folder = static_folder - self.static_url_path = static_url_path - self.deferred_functions = [] - if url_defaults is None: - url_defaults = {} - self.url_values_defaults = url_defaults - self.cli_group = cli_group - - def record(self, func): - """Registers a function that is called when the blueprint is - registered on the application. This function is called with the - state as argument as returned by the :meth:`make_setup_state` - method. - """ - if self._got_registered_once and self.warn_on_modifications: - from warnings import warn - - warn( - Warning( - "The blueprint was already registered once " - "but is getting modified now. These changes " - "will not show up." - ) - ) - self.deferred_functions.append(func) - - def record_once(self, func): - """Works like :meth:`record` but wraps the function in another - function that will ensure the function is only called once. If the - blueprint is registered a second time on the application, the - function passed is not called. - """ - - def wrapper(state): - if state.first_registration: - func(state) - - return self.record(update_wrapper(wrapper, func)) - - def make_setup_state(self, app, options, first_registration=False): - """Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState` - object that is later passed to the register callback functions. - Subclasses can override this to return a subclass of the setup state. - """ - return BlueprintSetupState(self, app, options, first_registration) - - def register(self, app, options, first_registration=False): - """Called by :meth:`Flask.register_blueprint` to register all views - and callbacks registered on the blueprint with the application. Creates - a :class:`.BlueprintSetupState` and calls each :meth:`record` callback - with it. - - :param app: The application this blueprint is being registered with. - :param options: Keyword arguments forwarded from - :meth:`~Flask.register_blueprint`. - :param first_registration: Whether this is the first time this - blueprint has been registered on the application. - """ - self._got_registered_once = True - state = self.make_setup_state(app, options, first_registration) - - if self.has_static_folder: - state.add_url_rule( - self.static_url_path + "/", - view_func=self.send_static_file, - endpoint="static", - ) - - for deferred in self.deferred_functions: - deferred(state) - - cli_resolved_group = options.get("cli_group", self.cli_group) - - if not self.cli.commands: - return - - if cli_resolved_group is None: - app.cli.commands.update(self.cli.commands) - elif cli_resolved_group is _sentinel: - self.cli.name = self.name - app.cli.add_command(self.cli) - else: - self.cli.name = cli_resolved_group - app.cli.add_command(self.cli) - - def route(self, rule, **options): - """Like :meth:`Flask.route` but for a blueprint. 
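# Sketch of the deferred-registration model implemented by record()/register()
# above: decorators on a Blueprint only record callbacks, which replay when
# the blueprint is registered on an app. Names are illustrative.
from flask import Blueprint, Flask

bp = Blueprint("admin", __name__, url_prefix="/admin")

@bp.route("/users")
def users():
    return "user list"

app = Flask(__name__)
app.register_blueprint(bp)   # callbacks replay; endpoint becomes "admin.users"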
The endpoint for the - :func:`url_for` function is prefixed with the name of the blueprint. - """ - - def decorator(f): - endpoint = options.pop("endpoint", f.__name__) - self.add_url_rule(rule, endpoint, f, **options) - return f - - return decorator - - def add_url_rule(self, rule, endpoint=None, view_func=None, **options): - """Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for - the :func:`url_for` function is prefixed with the name of the blueprint. - """ - if endpoint: - assert "." not in endpoint, "Blueprint endpoints should not contain dots" - if view_func and hasattr(view_func, "__name__"): - assert ( - "." not in view_func.__name__ - ), "Blueprint view function name should not contain dots" - self.record(lambda s: s.add_url_rule(rule, endpoint, view_func, **options)) - - def endpoint(self, endpoint): - """Like :meth:`Flask.endpoint` but for a blueprint. This does not - prefix the endpoint with the blueprint name, this has to be done - explicitly by the user of this method. If the endpoint is prefixed - with a `.` it will be registered to the current blueprint, otherwise - it's an application independent endpoint. - """ - - def decorator(f): - def register_endpoint(state): - state.app.view_functions[endpoint] = f - - self.record_once(register_endpoint) - return f - - return decorator - - def app_template_filter(self, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.template_filter` but for a blueprint. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - - def decorator(f): - self.add_app_template_filter(f, name=name) - return f - - return decorator - - def add_app_template_filter(self, f, name=None): - """Register a custom template filter, available application wide. Like - :meth:`Flask.add_template_filter` but for a blueprint. Works exactly - like the :meth:`app_template_filter` decorator. - - :param name: the optional name of the filter, otherwise the - function name will be used. - """ - - def register_template(state): - state.app.jinja_env.filters[name or f.__name__] = f - - self.record_once(register_template) - - def app_template_test(self, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.template_test` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - - def decorator(f): - self.add_app_template_test(f, name=name) - return f - - return decorator - - def add_app_template_test(self, f, name=None): - """Register a custom template test, available application wide. Like - :meth:`Flask.add_template_test` but for a blueprint. Works exactly - like the :meth:`app_template_test` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the test, otherwise the - function name will be used. - """ - - def register_template(state): - state.app.jinja_env.tests[name or f.__name__] = f - - self.record_once(register_template) - - def app_template_global(self, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.template_global` but for a blueprint. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. 
- """ - - def decorator(f): - self.add_app_template_global(f, name=name) - return f - - return decorator - - def add_app_template_global(self, f, name=None): - """Register a custom template global, available application wide. Like - :meth:`Flask.add_template_global` but for a blueprint. Works exactly - like the :meth:`app_template_global` decorator. - - .. versionadded:: 0.10 - - :param name: the optional name of the global, otherwise the - function name will be used. - """ - - def register_template(state): - state.app.jinja_env.globals[name or f.__name__] = f - - self.record_once(register_template) - - def before_request(self, f): - """Like :meth:`Flask.before_request` but for a blueprint. This function - is only executed before each request that is handled by a function of - that blueprint. - """ - self.record_once( - lambda s: s.app.before_request_funcs.setdefault(self.name, []).append(f) - ) - return f - - def before_app_request(self, f): - """Like :meth:`Flask.before_request`. Such a function is executed - before each request, even if outside of a blueprint. - """ - self.record_once( - lambda s: s.app.before_request_funcs.setdefault(None, []).append(f) - ) - return f - - def before_app_first_request(self, f): - """Like :meth:`Flask.before_first_request`. Such a function is - executed before the first request to the application. - """ - self.record_once(lambda s: s.app.before_first_request_funcs.append(f)) - return f - - def after_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. This function - is only executed after each request that is handled by a function of - that blueprint. - """ - self.record_once( - lambda s: s.app.after_request_funcs.setdefault(self.name, []).append(f) - ) - return f - - def after_app_request(self, f): - """Like :meth:`Flask.after_request` but for a blueprint. Such a function - is executed after each request, even if outside of the blueprint. - """ - self.record_once( - lambda s: s.app.after_request_funcs.setdefault(None, []).append(f) - ) - return f - - def teardown_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. This - function is only executed when tearing down requests handled by a - function of that blueprint. Teardown request functions are executed - when the request context is popped, even when no actual request was - performed. - """ - self.record_once( - lambda s: s.app.teardown_request_funcs.setdefault(self.name, []).append(f) - ) - return f - - def teardown_app_request(self, f): - """Like :meth:`Flask.teardown_request` but for a blueprint. Such a - function is executed when tearing down each request, even if outside of - the blueprint. - """ - self.record_once( - lambda s: s.app.teardown_request_funcs.setdefault(None, []).append(f) - ) - return f - - def context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. This - function is only executed for requests handled by a blueprint. - """ - self.record_once( - lambda s: s.app.template_context_processors.setdefault( - self.name, [] - ).append(f) - ) - return f - - def app_context_processor(self, f): - """Like :meth:`Flask.context_processor` but for a blueprint. Such a - function is executed each request, even if outside of the blueprint. - """ - self.record_once( - lambda s: s.app.template_context_processors.setdefault(None, []).append(f) - ) - return f - - def app_errorhandler(self, code): - """Like :meth:`Flask.errorhandler` but for a blueprint. 
This - handler is used for all requests, even if outside of the blueprint. - """ - - def decorator(f): - self.record_once(lambda s: s.app.errorhandler(code)(f)) - return f - - return decorator - - def url_value_preprocessor(self, f): - """Registers a function as URL value preprocessor for this - blueprint. It's called before the view functions are called and - can modify the url values provided. - """ - self.record_once( - lambda s: s.app.url_value_preprocessors.setdefault(self.name, []).append(f) - ) - return f - - def url_defaults(self, f): - """Callback function for URL defaults for this blueprint. It's called - with the endpoint and values and should update the values passed - in place. - """ - self.record_once( - lambda s: s.app.url_default_functions.setdefault(self.name, []).append(f) - ) - return f - - def app_url_value_preprocessor(self, f): - """Same as :meth:`url_value_preprocessor` but application wide. - """ - self.record_once( - lambda s: s.app.url_value_preprocessors.setdefault(None, []).append(f) - ) - return f - - def app_url_defaults(self, f): - """Same as :meth:`url_defaults` but application wide. - """ - self.record_once( - lambda s: s.app.url_default_functions.setdefault(None, []).append(f) - ) - return f - - def errorhandler(self, code_or_exception): - """Registers an error handler that becomes active for this blueprint - only. Please be aware that routing does not happen local to a - blueprint so an error handler for 404 usually is not handled by - a blueprint unless it is caused inside a view function. Another - special case is the 500 internal server error which is always looked - up from the application. - - Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator - of the :class:`~flask.Flask` object. - """ - - def decorator(f): - self.record_once( - lambda s: s.app._register_error_handler(self.name, code_or_exception, f) - ) - return f - - return decorator - - def register_error_handler(self, code_or_exception, f): - """Non-decorator version of the :meth:`errorhandler` error attach - function, akin to the :meth:`~flask.Flask.register_error_handler` - application-wide function of the :class:`~flask.Flask` object but - for error handlers limited to this blueprint. - - .. versionadded:: 0.11 - """ - self.record_once( - lambda s: s.app._register_error_handler(self.name, code_or_exception, f) - ) diff --git a/venv/lib/python3.7/site-packages/flask/cli.py b/venv/lib/python3.7/site-packages/flask/cli.py deleted file mode 100644 index 1158545..0000000 --- a/venv/lib/python3.7/site-packages/flask/cli.py +++ /dev/null @@ -1,970 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.cli - ~~~~~~~~~ - - A simple command line application to run flask apps. 
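# The scoping caveat from the blueprint errorhandler docstring above, as a
# sketch: a blueprint-local 404 handler only fires for errors raised inside
# this blueprint's own views; use app_errorhandler for application-wide scope.
from flask import Blueprint, abort

bp = Blueprint("api", __name__, url_prefix="/api")

@bp.errorhandler(404)
def api_not_found(e):
    return {"error": "not found"}, 404   # only for 404s raised in api views

@bp.route("/item/<int:item_id>")
def item(item_id):
    abort(404)   # handled by api_not_found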
- - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" -from __future__ import print_function - -import ast -import inspect -import os -import platform -import re -import sys -import traceback -from functools import update_wrapper -from operator import attrgetter -from threading import Lock -from threading import Thread - -import click -from werkzeug.utils import import_string - -from ._compat import getargspec -from ._compat import itervalues -from ._compat import reraise -from ._compat import text_type -from .globals import current_app -from .helpers import get_debug_flag -from .helpers import get_env -from .helpers import get_load_dotenv - -try: - import dotenv -except ImportError: - dotenv = None - -try: - import ssl -except ImportError: - ssl = None - - -class NoAppException(click.UsageError): - """Raised if an application cannot be found or loaded.""" - - -def find_best_app(script_info, module): - """Given a module instance this tries to find the best possible - application in the module or raises an exception. - """ - from . import Flask - - # Search for the most common names first. - for attr_name in ("app", "application"): - app = getattr(module, attr_name, None) - - if isinstance(app, Flask): - return app - - # Otherwise find the only object that is a Flask instance. - matches = [v for v in itervalues(module.__dict__) if isinstance(v, Flask)] - - if len(matches) == 1: - return matches[0] - elif len(matches) > 1: - raise NoAppException( - 'Detected multiple Flask applications in module "{module}". Use ' - '"FLASK_APP={module}:name" to specify the correct ' - "one.".format(module=module.__name__) - ) - - # Search for app factory functions. - for attr_name in ("create_app", "make_app"): - app_factory = getattr(module, attr_name, None) - - if inspect.isfunction(app_factory): - try: - app = call_factory(script_info, app_factory) - - if isinstance(app, Flask): - return app - except TypeError: - if not _called_with_wrong_args(app_factory): - raise - raise NoAppException( - 'Detected factory "{factory}" in module "{module}", but ' - "could not call it without arguments. Use " - "\"FLASK_APP='{module}:{factory}(args)'\" to specify " - "arguments.".format(factory=attr_name, module=module.__name__) - ) - - raise NoAppException( - 'Failed to find Flask application or factory in module "{module}". ' - 'Use "FLASK_APP={module}:name to specify one.'.format(module=module.__name__) - ) - - -def call_factory(script_info, app_factory, arguments=()): - """Takes an app factory, a ``script_info` object and optionally a tuple - of arguments. Checks for the existence of a script_info argument and calls - the app_factory depending on that and the arguments provided. - """ - args_spec = getargspec(app_factory) - arg_names = args_spec.args - arg_defaults = args_spec.defaults - - if "script_info" in arg_names: - return app_factory(*arguments, script_info=script_info) - elif arguments: - return app_factory(*arguments) - elif not arguments and len(arg_names) == 1 and arg_defaults is None: - return app_factory(script_info) - - return app_factory() - - -def _called_with_wrong_args(factory): - """Check whether calling a function raised a ``TypeError`` because - the call failed or because something in the factory raised the - error. 
- - :param factory: the factory function that was called - :return: true if the call failed - """ - tb = sys.exc_info()[2] - - try: - while tb is not None: - if tb.tb_frame.f_code is factory.__code__: - # in the factory, it was called successfully - return False - - tb = tb.tb_next - - # didn't reach the factory - return True - finally: - # explicitly delete tb as it is circular referenced - # https://docs.python.org/2/library/sys.html#sys.exc_info - del tb - - -def find_app_by_string(script_info, module, app_name): - """Checks if the given string is a variable name or a function. If it is a - function, it checks for specified arguments and whether it takes a - ``script_info`` argument and calls the function with the appropriate - arguments. - """ - from . import Flask - - match = re.match(r"^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$", app_name) - - if not match: - raise NoAppException( - '"{name}" is not a valid variable name or function ' - "expression.".format(name=app_name) - ) - - name, args = match.groups() - - try: - attr = getattr(module, name) - except AttributeError as e: - raise NoAppException(e.args[0]) - - if inspect.isfunction(attr): - if args: - try: - args = ast.literal_eval("({args},)".format(args=args)) - except (ValueError, SyntaxError) as e: - raise NoAppException( - "Could not parse the arguments in " - '"{app_name}".'.format(e=e, app_name=app_name) - ) - else: - args = () - - try: - app = call_factory(script_info, attr, args) - except TypeError as e: - if not _called_with_wrong_args(attr): - raise - - raise NoAppException( - '{e}\nThe factory "{app_name}" in module "{module}" could not ' - "be called with the specified arguments.".format( - e=e, app_name=app_name, module=module.__name__ - ) - ) - else: - app = attr - - if isinstance(app, Flask): - return app - - raise NoAppException( - "A valid Flask application was not obtained from " - '"{module}:{app_name}".'.format(module=module.__name__, app_name=app_name) - ) - - -def prepare_import(path): - """Given a filename this will try to calculate the python path, add it - to the search path and return the actual module name that is expected. - """ - path = os.path.realpath(path) - - fname, ext = os.path.splitext(path) - if ext == ".py": - path = fname - - if os.path.basename(path) == "__init__": - path = os.path.dirname(path) - - module_name = [] - - # move up until outside package structure (no __init__.py) - while True: - path, name = os.path.split(path) - module_name.append(name) - - if not os.path.exists(os.path.join(path, "__init__.py")): - break - - if sys.path[0] != path: - sys.path.insert(0, path) - - return ".".join(module_name[::-1]) - - -def locate_app(script_info, module_name, app_name, raise_if_not_found=True): - __traceback_hide__ = True # noqa: F841 - - try: - __import__(module_name) - except ImportError: - # Reraise the ImportError if it occurred within the imported module. - # Determine this by checking whether the trace has a depth > 1. 
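# find_best_app/find_app_by_string above look for "app"/"application" module
# globals or "create_app"/"make_app" factories, so a module laid out like this
# is discoverable via FLASK_APP=myproject or FLASK_APP="myproject:create_app('dev')".
# ("myproject" and the config key below are illustrative, not from the source.)
from flask import Flask

def create_app(config_name="default"):
    app = Flask(__name__)
    app.config["ENV_NAME"] = config_name   # hypothetical config key
    return app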
- if sys.exc_info()[-1].tb_next: - raise NoAppException( - 'While importing "{name}", an ImportError was raised:' - "\n\n{tb}".format(name=module_name, tb=traceback.format_exc()) - ) - elif raise_if_not_found: - raise NoAppException('Could not import "{name}".'.format(name=module_name)) - else: - return - - module = sys.modules[module_name] - - if app_name is None: - return find_best_app(script_info, module) - else: - return find_app_by_string(script_info, module, app_name) - - -def get_version(ctx, param, value): - if not value or ctx.resilient_parsing: - return - - import werkzeug - from . import __version__ - - message = "Python %(python)s\nFlask %(flask)s\nWerkzeug %(werkzeug)s" - click.echo( - message - % { - "python": platform.python_version(), - "flask": __version__, - "werkzeug": werkzeug.__version__, - }, - color=ctx.color, - ) - ctx.exit() - - -version_option = click.Option( - ["--version"], - help="Show the flask version", - expose_value=False, - callback=get_version, - is_flag=True, - is_eager=True, -) - - -class DispatchingApp(object): - """Special application that dispatches to a Flask application which - is imported by name in a background thread. If an error happens - it is recorded and shown as part of the WSGI handling which in case - of the Werkzeug debugger means that it shows up in the browser. - """ - - def __init__(self, loader, use_eager_loading=False): - self.loader = loader - self._app = None - self._lock = Lock() - self._bg_loading_exc_info = None - if use_eager_loading: - self._load_unlocked() - else: - self._load_in_background() - - def _load_in_background(self): - def _load_app(): - __traceback_hide__ = True # noqa: F841 - with self._lock: - try: - self._load_unlocked() - except Exception: - self._bg_loading_exc_info = sys.exc_info() - - t = Thread(target=_load_app, args=()) - t.start() - - def _flush_bg_loading_exception(self): - __traceback_hide__ = True # noqa: F841 - exc_info = self._bg_loading_exc_info - if exc_info is not None: - self._bg_loading_exc_info = None - reraise(*exc_info) - - def _load_unlocked(self): - __traceback_hide__ = True # noqa: F841 - self._app = rv = self.loader() - self._bg_loading_exc_info = None - return rv - - def __call__(self, environ, start_response): - __traceback_hide__ = True # noqa: F841 - if self._app is not None: - return self._app(environ, start_response) - self._flush_bg_loading_exception() - with self._lock: - if self._app is not None: - rv = self._app - else: - rv = self._load_unlocked() - return rv(environ, start_response) - - -class ScriptInfo(object): - """Helper object to deal with Flask applications. This is usually not - necessary to interface with as it's used internally in the dispatching - to click. In future versions of Flask this object will most likely play - a bigger role. Typically it's created automatically by the - :class:`FlaskGroup` but you can also manually create it and pass it - onwards as click object. - """ - - def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True): - #: Optionally the import path for the Flask application. - self.app_import_path = app_import_path or os.environ.get("FLASK_APP") - #: Optionally a function that is passed the script info to create - #: the instance of the application. - self.create_app = create_app - #: A dictionary with arbitrary data that can be associated with - #: this script info. 
- self.data = {} - self.set_debug_flag = set_debug_flag - self._loaded_app = None - - def load_app(self): - """Loads the Flask app (if not yet loaded) and returns it. Calling - this multiple times will just result in the already loaded app to - be returned. - """ - __traceback_hide__ = True # noqa: F841 - - if self._loaded_app is not None: - return self._loaded_app - - app = None - - if self.create_app is not None: - app = call_factory(self, self.create_app) - else: - if self.app_import_path: - path, name = ( - re.split(r":(?![\\/])", self.app_import_path, 1) + [None] - )[:2] - import_name = prepare_import(path) - app = locate_app(self, import_name, name) - else: - for path in ("wsgi.py", "app.py"): - import_name = prepare_import(path) - app = locate_app(self, import_name, None, raise_if_not_found=False) - - if app: - break - - if not app: - raise NoAppException( - "Could not locate a Flask application. You did not provide " - 'the "FLASK_APP" environment variable, and a "wsgi.py" or ' - '"app.py" module was not found in the current directory.' - ) - - if self.set_debug_flag: - # Update the app's debug flag through the descriptor so that - # other values repopulate as well. - app.debug = get_debug_flag() - - self._loaded_app = app - return app - - -pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True) - - -def with_appcontext(f): - """Wraps a callback so that it's guaranteed to be executed with the - script's application context. If callbacks are registered directly - to the ``app.cli`` object then they are wrapped with this function - by default unless it's disabled. - """ - - @click.pass_context - def decorator(__ctx, *args, **kwargs): - with __ctx.ensure_object(ScriptInfo).load_app().app_context(): - return __ctx.invoke(f, *args, **kwargs) - - return update_wrapper(decorator, f) - - -class AppGroup(click.Group): - """This works similar to a regular click :class:`~click.Group` but it - changes the behavior of the :meth:`command` decorator so that it - automatically wraps the functions in :func:`with_appcontext`. - - Not to be confused with :class:`FlaskGroup`. - """ - - def command(self, *args, **kwargs): - """This works exactly like the method of the same name on a regular - :class:`click.Group` but it wraps callbacks in :func:`with_appcontext` - unless it's disabled by passing ``with_appcontext=False``. - """ - wrap_for_ctx = kwargs.pop("with_appcontext", True) - - def decorator(f): - if wrap_for_ctx: - f = with_appcontext(f) - return click.Group.command(self, *args, **kwargs)(f) - - return decorator - - def group(self, *args, **kwargs): - """This works exactly like the method of the same name on a regular - :class:`click.Group` but it defaults the group class to - :class:`AppGroup`. - """ - kwargs.setdefault("cls", AppGroup) - return click.Group.group(self, *args, **kwargs) - - -class FlaskGroup(AppGroup): - """Special subclass of the :class:`AppGroup` group that supports - loading more commands from the configured Flask app. Normally a - developer does not have to interface with this class but there are - some very advanced use cases for which it makes sense to create an - instance of this. - - For information as of why this is useful see :ref:`custom-scripts`. - - :param add_default_commands: if this is True then the default run and - shell commands will be added. - :param add_version_option: adds the ``--version`` option. - :param create_app: an optional callback that is passed the script info and - returns the loaded app. 
- :param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv` - files to set environment variables. Will also change the working - directory to the directory containing the first file found. - :param set_debug_flag: Set the app's debug flag based on the active - environment - - .. versionchanged:: 1.0 - If installed, python-dotenv will be used to load environment variables - from :file:`.env` and :file:`.flaskenv` files. - """ - - def __init__( - self, - add_default_commands=True, - create_app=None, - add_version_option=True, - load_dotenv=True, - set_debug_flag=True, - **extra - ): - params = list(extra.pop("params", None) or ()) - - if add_version_option: - params.append(version_option) - - AppGroup.__init__(self, params=params, **extra) - self.create_app = create_app - self.load_dotenv = load_dotenv - self.set_debug_flag = set_debug_flag - - if add_default_commands: - self.add_command(run_command) - self.add_command(shell_command) - self.add_command(routes_command) - - self._loaded_plugin_commands = False - - def _load_plugin_commands(self): - if self._loaded_plugin_commands: - return - try: - import pkg_resources - except ImportError: - self._loaded_plugin_commands = True - return - - for ep in pkg_resources.iter_entry_points("flask.commands"): - self.add_command(ep.load(), ep.name) - self._loaded_plugin_commands = True - - def get_command(self, ctx, name): - self._load_plugin_commands() - - # We load built-in commands first as these should always be the - # same no matter what the app does. If the app does want to - # override this it needs to make a custom instance of this group - # and not attach the default commands. - # - # This also means that the script stays functional in case the - # application completely fails. - rv = AppGroup.get_command(self, ctx, name) - if rv is not None: - return rv - - info = ctx.ensure_object(ScriptInfo) - try: - rv = info.load_app().cli.get_command(ctx, name) - if rv is not None: - return rv - except NoAppException: - pass - - def list_commands(self, ctx): - self._load_plugin_commands() - - # The commands available is the list of both the application (if - # available) plus the builtin commands. - rv = set(click.Group.list_commands(self, ctx)) - info = ctx.ensure_object(ScriptInfo) - try: - rv.update(info.load_app().cli.list_commands(ctx)) - except Exception: - # Here we intentionally swallow all exceptions as we don't - # want the help page to break if the app does not exist. - # If someone attempts to use the command we try to create - # the app again and this will give us the error. - # However, we will not do so silently because that would confuse - # users. - traceback.print_exc() - return sorted(rv) - - def main(self, *args, **kwargs): - # Set a global flag that indicates that we were invoked from the - # command line interface. This is detected by Flask.run to make the - # call into a no-op. This is necessary to avoid ugly errors when the - # script that is loaded here also attempts to start a server. - os.environ["FLASK_RUN_FROM_CLI"] = "true" - - if get_load_dotenv(self.load_dotenv): - load_dotenv() - - obj = kwargs.get("obj") - - if obj is None: - obj = ScriptInfo( - create_app=self.create_app, set_debug_flag=self.set_debug_flag - ) - - kwargs["obj"] = obj - kwargs.setdefault("auto_envvar_prefix", "FLASK") - return super(FlaskGroup, self).main(*args, **kwargs) - - -def _path_is_ancestor(path, other): - """Take ``other`` and remove the length of ``path`` from it. Then join it - to ``path``. 
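
The `ScriptInfo.load_app` logic removed above is what resolves `FLASK_APP`, and `AppGroup`/`FlaskGroup` are what let an app expose extra `flask` subcommands. A minimal sketch of both, assuming a hypothetical `app.py` with a `create_app` factory and a made-up `hello` command (neither is from this repo):

```python
# app.py -- illustrative only; factory and command names are placeholders.
import click
from flask import Flask


def create_app():
    app = Flask(__name__)

    # Registered on app.cli (an AppGroup), so the callback is wrapped in
    # with_appcontext and runs with an application context pushed.
    @app.cli.command("hello")
    @click.argument("name")
    def hello(name):
        click.echo("Hello, %s!" % name)

    return app


# ScriptInfo.load_app resolves FLASK_APP in several forms:
#   FLASK_APP=app             -> import app, pick the Flask instance it defines
#   FLASK_APP=app:create_app  -> import app, call the named factory
# With FLASK_APP unset, wsgi.py and then app.py are tried in the current directory.
```

After exporting one of those forms, `flask hello world` would run the command inside an app context.
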
If it is the original value, ``path`` is an ancestor of - ``other``.""" - return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other - - -def load_dotenv(path=None): - """Load "dotenv" files in order of precedence to set environment variables. - - If an env var is already set it is not overwritten, so earlier files in the - list are preferred over later files. - - Changes the current working directory to the location of the first file - found, with the assumption that it is in the top level project directory - and will be where the Python path should import local packages from. - - This is a no-op if `python-dotenv`_ is not installed. - - .. _python-dotenv: https://github.com/theskumar/python-dotenv#readme - - :param path: Load the file at this location instead of searching. - :return: ``True`` if a file was loaded. - - .. versionchanged:: 1.1.0 - Returns ``False`` when python-dotenv is not installed, or when - the given path isn't a file. - - .. versionadded:: 1.0 - """ - if dotenv is None: - if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"): - click.secho( - " * Tip: There are .env or .flaskenv files present." - ' Do "pip install python-dotenv" to use them.', - fg="yellow", - err=True, - ) - - return False - - # if the given path specifies the actual file then return True, - # else False - if path is not None: - if os.path.isfile(path): - return dotenv.load_dotenv(path) - - return False - - new_dir = None - - for name in (".env", ".flaskenv"): - path = dotenv.find_dotenv(name, usecwd=True) - - if not path: - continue - - if new_dir is None: - new_dir = os.path.dirname(path) - - dotenv.load_dotenv(path) - - if new_dir and os.getcwd() != new_dir: - os.chdir(new_dir) - - return new_dir is not None # at least one file was located and loaded - - -def show_server_banner(env, debug, app_import_path, eager_loading): - """Show extra startup messages the first time the server is run, - ignoring the reloader. - """ - if os.environ.get("WERKZEUG_RUN_MAIN") == "true": - return - - if app_import_path is not None: - message = ' * Serving Flask app "{0}"'.format(app_import_path) - - if not eager_loading: - message += " (lazy loading)" - - click.echo(message) - - click.echo(" * Environment: {0}".format(env)) - - if env == "production": - click.secho( - " WARNING: This is a development server. " - "Do not use it in a production deployment.", - fg="red", - ) - click.secho(" Use a production WSGI server instead.", dim=True) - - if debug is not None: - click.echo(" * Debug mode: {0}".format("on" if debug else "off")) - - -class CertParamType(click.ParamType): - """Click option type for the ``--cert`` option. Allows either an - existing file, the string ``'adhoc'``, or an import for a - :class:`~ssl.SSLContext` object. 
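
`load_dotenv` above is why `flask run` can pick environment variables up from files instead of shell exports. A minimal sketch, assuming python-dotenv is installed and a `.flaskenv` such as the one in the comment exists in the project root:

```python
import os

# Hypothetical .flaskenv in the project root (requires `pip install python-dotenv`):
#
#     FLASK_APP=run.py
#     FLASK_ENV=development
#
from flask.cli import load_dotenv

loaded = load_dotenv()  # True if at least one .env/.flaskenv file was loaded
# Per the function above: .env wins over .flaskenv, already-set variables are
# never overwritten, and the working directory moves to the first file found.
print(loaded, os.environ.get("FLASK_ENV"))
```
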
- """ - - name = "path" - - def __init__(self): - self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True) - - def convert(self, value, param, ctx): - if ssl is None: - raise click.BadParameter( - 'Using "--cert" requires Python to be compiled with SSL support.', - ctx, - param, - ) - - try: - return self.path_type(value, param, ctx) - except click.BadParameter: - value = click.STRING(value, param, ctx).lower() - - if value == "adhoc": - try: - import OpenSSL # noqa: F401 - except ImportError: - raise click.BadParameter( - "Using ad-hoc certificates requires pyOpenSSL.", ctx, param - ) - - return value - - obj = import_string(value, silent=True) - - if sys.version_info < (2, 7, 9): - if obj: - return obj - else: - if isinstance(obj, ssl.SSLContext): - return obj - - raise - - -def _validate_key(ctx, param, value): - """The ``--key`` option must be specified when ``--cert`` is a file. - Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed. - """ - cert = ctx.params.get("cert") - is_adhoc = cert == "adhoc" - - if sys.version_info < (2, 7, 9): - is_context = cert and not isinstance(cert, (text_type, bytes)) - else: - is_context = isinstance(cert, ssl.SSLContext) - - if value is not None: - if is_adhoc: - raise click.BadParameter( - 'When "--cert" is "adhoc", "--key" is not used.', ctx, param - ) - - if is_context: - raise click.BadParameter( - 'When "--cert" is an SSLContext object, "--key is not used.', ctx, param - ) - - if not cert: - raise click.BadParameter('"--cert" must also be specified.', ctx, param) - - ctx.params["cert"] = cert, value - - else: - if cert and not (is_adhoc or is_context): - raise click.BadParameter('Required when using "--cert".', ctx, param) - - return value - - -class SeparatedPathType(click.Path): - """Click option type that accepts a list of values separated by the - OS's path separator (``:``, ``;`` on Windows). Each value is - validated as a :class:`click.Path` type. - """ - - def convert(self, value, param, ctx): - items = self.split_envvar_value(value) - super_convert = super(SeparatedPathType, self).convert - return [super_convert(item, param, ctx) for item in items] - - -@click.command("run", short_help="Run a development server.") -@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.") -@click.option("--port", "-p", default=5000, help="The port to bind to.") -@click.option( - "--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS." -) -@click.option( - "--key", - type=click.Path(exists=True, dir_okay=False, resolve_path=True), - callback=_validate_key, - expose_value=False, - help="The key file to use when specifying a certificate.", -) -@click.option( - "--reload/--no-reload", - default=None, - help="Enable or disable the reloader. By default the reloader " - "is active if debug is enabled.", -) -@click.option( - "--debugger/--no-debugger", - default=None, - help="Enable or disable the debugger. By default the debugger " - "is active if debug is enabled.", -) -@click.option( - "--eager-loading/--lazy-loader", - default=None, - help="Enable or disable eager loading. By default eager " - "loading is enabled if the reloader is disabled.", -) -@click.option( - "--with-threads/--without-threads", - default=True, - help="Enable or disable multithreading.", -) -@click.option( - "--extra-files", - default=None, - type=SeparatedPathType(), - help=( - "Extra files that trigger a reload on change. 
Multiple paths" - " are separated by '{}'.".format(os.path.pathsep) - ), -) -@pass_script_info -def run_command( - info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files -): - """Run a local development server. - - This server is for development purposes only. It does not provide - the stability, security, or performance of production WSGI servers. - - The reloader and debugger are enabled by default if - FLASK_ENV=development or FLASK_DEBUG=1. - """ - debug = get_debug_flag() - - if reload is None: - reload = debug - - if debugger is None: - debugger = debug - - if eager_loading is None: - eager_loading = not reload - - show_server_banner(get_env(), debug, info.app_import_path, eager_loading) - app = DispatchingApp(info.load_app, use_eager_loading=eager_loading) - - from werkzeug.serving import run_simple - - run_simple( - host, - port, - app, - use_reloader=reload, - use_debugger=debugger, - threaded=with_threads, - ssl_context=cert, - extra_files=extra_files, - ) - - -@click.command("shell", short_help="Run a shell in the app context.") -@with_appcontext -def shell_command(): - """Run an interactive Python shell in the context of a given - Flask application. The application will populate the default - namespace of this shell according to it's configuration. - - This is useful for executing small snippets of management code - without having to manually configure the application. - """ - import code - from .globals import _app_ctx_stack - - app = _app_ctx_stack.top.app - banner = "Python %s on %s\nApp: %s [%s]\nInstance: %s" % ( - sys.version, - sys.platform, - app.import_name, - app.env, - app.instance_path, - ) - ctx = {} - - # Support the regular Python interpreter startup script if someone - # is using it. - startup = os.environ.get("PYTHONSTARTUP") - if startup and os.path.isfile(startup): - with open(startup, "r") as f: - eval(compile(f.read(), startup, "exec"), ctx) - - ctx.update(app.make_shell_context()) - - code.interact(banner=banner, local=ctx) - - -@click.command("routes", short_help="Show the routes for the app.") -@click.option( - "--sort", - "-s", - type=click.Choice(("endpoint", "methods", "rule", "match")), - default="endpoint", - help=( - 'Method to sort routes by. "match" is the order that Flask will match ' - "routes when dispatching a request." 
- ), -) -@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.") -@with_appcontext -def routes_command(sort, all_methods): - """Show all registered routes with endpoints and methods.""" - - rules = list(current_app.url_map.iter_rules()) - if not rules: - click.echo("No routes were registered.") - return - - ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS")) - - if sort in ("endpoint", "rule"): - rules = sorted(rules, key=attrgetter(sort)) - elif sort == "methods": - rules = sorted(rules, key=lambda rule: sorted(rule.methods)) - - rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules] - - headers = ("Endpoint", "Methods", "Rule") - widths = ( - max(len(rule.endpoint) for rule in rules), - max(len(methods) for methods in rule_methods), - max(len(rule.rule) for rule in rules), - ) - widths = [max(len(h), w) for h, w in zip(headers, widths)] - row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths) - - click.echo(row.format(*headers).strip()) - click.echo(row.format(*("-" * width for width in widths))) - - for rule, methods in zip(rules, rule_methods): - click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip()) - - -cli = FlaskGroup( - help="""\ -A general utility script for Flask applications. - -Provides commands from Flask, extensions, and the application. Loads the -application defined in the FLASK_APP environment variable, or from a wsgi.py -file. Setting the FLASK_ENV environment variable to 'development' will enable -debug mode. - -\b - {prefix}{cmd} FLASK_APP=hello.py - {prefix}{cmd} FLASK_ENV=development - {prefix}flask run -""".format( - cmd="export" if os.name == "posix" else "set", - prefix="$ " if os.name == "posix" else "> ", - ) -) - - -def main(as_module=False): - cli.main(prog_name="python -m flask" if as_module else None) - - -if __name__ == "__main__": - main(as_module=True) diff --git a/venv/lib/python3.7/site-packages/flask/config.py b/venv/lib/python3.7/site-packages/flask/config.py deleted file mode 100644 index 809de33..0000000 --- a/venv/lib/python3.7/site-packages/flask/config.py +++ /dev/null @@ -1,269 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.config - ~~~~~~~~~~~~ - - Implements the configuration related objects. - - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" -import errno -import os -import types - -from werkzeug.utils import import_string - -from . import json -from ._compat import iteritems -from ._compat import string_types - - -class ConfigAttribute(object): - """Makes an attribute forward to the config""" - - def __init__(self, name, get_converter=None): - self.__name__ = name - self.get_converter = get_converter - - def __get__(self, obj, type=None): - if obj is None: - return self - rv = obj.config[self.__name__] - if self.get_converter is not None: - rv = self.get_converter(rv) - return rv - - def __set__(self, obj, value): - obj.config[self.__name__] = value - - -class Config(dict): - """Works exactly like a dict but provides ways to fill it from files - or special dictionaries. There are two common patterns to populate the - config. - - Either you can fill the config from a config file:: - - app.config.from_pyfile('yourconfig.cfg') - - Or alternatively you can define the configuration options in the - module that calls :meth:`from_object` or provide an import path to - a module that should be loaded. 
It is also possible to tell it to - use the same module and with that provide the configuration values - just before the call:: - - DEBUG = True - SECRET_KEY = 'development key' - app.config.from_object(__name__) - - In both cases (loading from any Python file or loading from modules), - only uppercase keys are added to the config. This makes it possible to use - lowercase values in the config file for temporary values that are not added - to the config or to define the config keys in the same file that implements - the application. - - Probably the most interesting way to load configurations is from an - environment variable pointing to a file:: - - app.config.from_envvar('YOURAPPLICATION_SETTINGS') - - In this case before launching the application you have to set this - environment variable to the file you want to use. On Linux and OS X - use the export statement:: - - export YOURAPPLICATION_SETTINGS='/path/to/config/file' - - On windows use `set` instead. - - :param root_path: path to which files are read relative from. When the - config object is created by the application, this is - the application's :attr:`~flask.Flask.root_path`. - :param defaults: an optional dictionary of default values - """ - - def __init__(self, root_path, defaults=None): - dict.__init__(self, defaults or {}) - self.root_path = root_path - - def from_envvar(self, variable_name, silent=False): - """Loads a configuration from an environment variable pointing to - a configuration file. This is basically just a shortcut with nicer - error messages for this line of code:: - - app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS']) - - :param variable_name: name of the environment variable - :param silent: set to ``True`` if you want silent failure for missing - files. - :return: bool. ``True`` if able to load config, ``False`` otherwise. - """ - rv = os.environ.get(variable_name) - if not rv: - if silent: - return False - raise RuntimeError( - "The environment variable %r is not set " - "and as such configuration could not be " - "loaded. Set this variable and make it " - "point to a configuration file" % variable_name - ) - return self.from_pyfile(rv, silent=silent) - - def from_pyfile(self, filename, silent=False): - """Updates the values in the config from a Python file. This function - behaves as if the file was imported as module with the - :meth:`from_object` function. - - :param filename: the filename of the config. This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to ``True`` if you want silent failure for missing - files. - - .. versionadded:: 0.7 - `silent` parameter. - """ - filename = os.path.join(self.root_path, filename) - d = types.ModuleType("config") - d.__file__ = filename - try: - with open(filename, mode="rb") as config_file: - exec(compile(config_file.read(), filename, "exec"), d.__dict__) - except IOError as e: - if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR): - return False - e.strerror = "Unable to load configuration file (%s)" % e.strerror - raise - self.from_object(d) - return True - - def from_object(self, obj): - """Updates the values from the given object. An object can be of one - of the following two types: - - - a string: in this case the object with that name will be imported - - an actual object reference: that object is used directly - - Objects are usually either modules or classes. :meth:`from_object` - loads only the uppercase attributes of the module/class. 
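
The `Config` methods being removed here are the standard way to layer configuration. A short sketch of the pattern the docstring recommends, with illustrative names (`DefaultConfig` and `APP_SETTINGS` are not from this repo):

```python
from flask import Flask


class DefaultConfig(object):
    DEBUG = False
    SECRET_KEY = "change-me"   # read by from_object: uppercase attribute
    internal_note = "ignored"  # skipped: not uppercase


app = Flask(__name__)
app.config.from_object(DefaultConfig)  # defaults, shipped with the code
# Real deployment settings come from a file named by an env var, if present:
app.config.from_envvar("APP_SETTINGS", silent=True)
```

With `silent=True`, a missing `APP_SETTINGS` variable simply leaves the defaults in place.
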
A ``dict`` - object will not work with :meth:`from_object` because the keys of a - ``dict`` are not attributes of the ``dict`` class. - - Example of module-based configuration:: - - app.config.from_object('yourapplication.default_config') - from yourapplication import default_config - app.config.from_object(default_config) - - Nothing is done to the object before loading. If the object is a - class and has ``@property`` attributes, it needs to be - instantiated before being passed to this method. - - You should not use this function to load the actual configuration but - rather configuration defaults. The actual config should be loaded - with :meth:`from_pyfile` and ideally from a location not within the - package because the package might be installed system wide. - - See :ref:`config-dev-prod` for an example of class-based configuration - using :meth:`from_object`. - - :param obj: an import name or object - """ - if isinstance(obj, string_types): - obj = import_string(obj) - for key in dir(obj): - if key.isupper(): - self[key] = getattr(obj, key) - - def from_json(self, filename, silent=False): - """Updates the values in the config from a JSON file. This function - behaves as if the JSON object was a dictionary and passed to the - :meth:`from_mapping` function. - - :param filename: the filename of the JSON file. This can either be an - absolute filename or a filename relative to the - root path. - :param silent: set to ``True`` if you want silent failure for missing - files. - - .. versionadded:: 0.11 - """ - filename = os.path.join(self.root_path, filename) - - try: - with open(filename) as json_file: - obj = json.loads(json_file.read()) - except IOError as e: - if silent and e.errno in (errno.ENOENT, errno.EISDIR): - return False - e.strerror = "Unable to load configuration file (%s)" % e.strerror - raise - return self.from_mapping(obj) - - def from_mapping(self, *mapping, **kwargs): - """Updates the config like :meth:`update` ignoring items with non-upper - keys. - - .. versionadded:: 0.11 - """ - mappings = [] - if len(mapping) == 1: - if hasattr(mapping[0], "items"): - mappings.append(mapping[0].items()) - else: - mappings.append(mapping[0]) - elif len(mapping) > 1: - raise TypeError( - "expected at most 1 positional argument, got %d" % len(mapping) - ) - mappings.append(kwargs.items()) - for mapping in mappings: - for (key, value) in mapping: - if key.isupper(): - self[key] = value - return True - - def get_namespace(self, namespace, lowercase=True, trim_namespace=True): - """Returns a dictionary containing a subset of configuration options - that match the specified namespace/prefix. Example usage:: - - app.config['IMAGE_STORE_TYPE'] = 'fs' - app.config['IMAGE_STORE_PATH'] = '/var/app/images' - app.config['IMAGE_STORE_BASE_URL'] = 'http://img.website.com' - image_store_config = app.config.get_namespace('IMAGE_STORE_') - - The resulting dictionary `image_store_config` would look like:: - - { - 'type': 'fs', - 'path': '/var/app/images', - 'base_url': 'http://img.website.com' - } - - This is often useful when configuration options map directly to - keyword arguments in functions or class constructors. - - :param namespace: a configuration namespace - :param lowercase: a flag indicating if the keys of the resulting - dictionary should be lowercase - :param trim_namespace: a flag indicating if the keys of the resulting - dictionary should not include the namespace - - .. 
versionadded:: 0.11
-        """
-        rv = {}
-        for k, v in iteritems(self):
-            if not k.startswith(namespace):
-                continue
-            if trim_namespace:
-                key = k[len(namespace) :]
-            else:
-                key = k
-            if lowercase:
-                key = key.lower()
-            rv[key] = v
-        return rv
-
-    def __repr__(self):
-        return "<%s %s>" % (self.__class__.__name__, dict.__repr__(self))
diff --git a/venv/lib/python3.7/site-packages/flask/ctx.py b/venv/lib/python3.7/site-packages/flask/ctx.py
deleted file mode 100644
index 172f6a0..0000000
--- a/venv/lib/python3.7/site-packages/flask/ctx.py
+++ /dev/null
@@ -1,475 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    flask.ctx
-    ~~~~~~~~~
-
-    Implements the objects required to keep the context.
-
-    :copyright: 2010 Pallets
-    :license: BSD-3-Clause
-"""
-import sys
-from functools import update_wrapper
-
-from werkzeug.exceptions import HTTPException
-
-from ._compat import BROKEN_PYPY_CTXMGR_EXIT
-from ._compat import reraise
-from .globals import _app_ctx_stack
-from .globals import _request_ctx_stack
-from .signals import appcontext_popped
-from .signals import appcontext_pushed
-
-
-# a singleton sentinel value for parameter defaults
-_sentinel = object()
-
-
-class _AppCtxGlobals(object):
-    """A plain object. Used as a namespace for storing data during an
-    application context.
-
-    Creating an app context automatically creates this object, which is
-    made available as the :data:`g` proxy.
-
-    .. describe:: 'key' in g
-
-        Check whether an attribute is present.
-
-        .. versionadded:: 0.10
-
-    .. describe:: iter(g)
-
-        Return an iterator over the attribute names.
-
-        .. versionadded:: 0.10
-    """
-
-    def get(self, name, default=None):
-        """Get an attribute by name, or a default value. Like
-        :meth:`dict.get`.
-
-        :param name: Name of attribute to get.
-        :param default: Value to return if the attribute is not present.
-
-        .. versionadded:: 0.10
-        """
-        return self.__dict__.get(name, default)
-
-    def pop(self, name, default=_sentinel):
-        """Get and remove an attribute by name. Like :meth:`dict.pop`.
-
-        :param name: Name of attribute to pop.
-        :param default: Value to return if the attribute is not present,
-            instead of raise a ``KeyError``.
-
-        .. versionadded:: 0.11
-        """
-        if default is _sentinel:
-            return self.__dict__.pop(name)
-        else:
-            return self.__dict__.pop(name, default)
-
-    def setdefault(self, name, default=None):
-        """Get the value of an attribute if it is present, otherwise
-        set and return a default value. Like :meth:`dict.setdefault`.
-
-        :param name: Name of attribute to get.
-        :param: default: Value to set and return if the attribute is not
-            present.
-
-        .. versionadded:: 0.11
-        """
-        return self.__dict__.setdefault(name, default)
-
-    def __contains__(self, item):
-        return item in self.__dict__
-
-    def __iter__(self):
-        return iter(self.__dict__)
-
-    def __repr__(self):
-        top = _app_ctx_stack.top
-        if top is not None:
-            return "<flask.g of %r>" % top.app.name
-        return object.__repr__(self)
-
-
-def after_this_request(f):
-    """Executes a function after this request. This is useful to modify
-    response objects. The function is passed the response object and has
-    to return the same or a new one.
-
-    Example::
-
-        @app.route('/')
-        def index():
-            @after_this_request
-            def add_header(response):
-                response.headers['X-Foo'] = 'Parachute'
-                return response
-            return 'Hello World!'
-
-    This is more useful if a function other than the view function wants to
-    modify a response. For instance think of a decorator that wants to add
-    some headers without converting the return value into a response object.
-
-    ..
versionadded:: 0.9 - """ - _request_ctx_stack.top._after_request_functions.append(f) - return f - - -def copy_current_request_context(f): - """A helper function that decorates a function to retain the current - request context. This is useful when working with greenlets. The moment - the function is decorated a copy of the request context is created and - then pushed when the function is called. The current session is also - included in the copied request context. - - Example:: - - import gevent - from flask import copy_current_request_context - - @app.route('/') - def index(): - @copy_current_request_context - def do_some_work(): - # do some work here, it can access flask.request or - # flask.session like you would otherwise in the view function. - ... - gevent.spawn(do_some_work) - return 'Regular response' - - .. versionadded:: 0.10 - """ - top = _request_ctx_stack.top - if top is None: - raise RuntimeError( - "This decorator can only be used at local scopes " - "when a request context is on the stack. For instance within " - "view functions." - ) - reqctx = top.copy() - - def wrapper(*args, **kwargs): - with reqctx: - return f(*args, **kwargs) - - return update_wrapper(wrapper, f) - - -def has_request_context(): - """If you have code that wants to test if a request context is there or - not this function can be used. For instance, you may want to take advantage - of request information if the request object is available, but fail - silently if it is unavailable. - - :: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and has_request_context(): - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - Alternatively you can also just test any of the context bound objects - (such as :class:`request` or :class:`g`) for truthness:: - - class User(db.Model): - - def __init__(self, username, remote_addr=None): - self.username = username - if remote_addr is None and request: - remote_addr = request.remote_addr - self.remote_addr = remote_addr - - .. versionadded:: 0.7 - """ - return _request_ctx_stack.top is not None - - -def has_app_context(): - """Works like :func:`has_request_context` but for the application - context. You can also just do a boolean check on the - :data:`current_app` object instead. - - .. versionadded:: 0.9 - """ - return _app_ctx_stack.top is not None - - -class AppContext(object): - """The application context binds an application object implicitly - to the current thread or greenlet, similar to how the - :class:`RequestContext` binds request information. The application - context is also implicitly created if a request context is created - but the application is not on top of the individual application - context. - """ - - def __init__(self, app): - self.app = app - self.url_adapter = app.create_url_adapter(None) - self.g = app.app_ctx_globals_class() - - # Like request context, app contexts can be pushed multiple times - # but there a basic "refcount" is enough to track them. 
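
`AppContext` above is what backs `with app.app_context():`. A minimal sketch of using it from a one-off script:

```python
from flask import Flask, current_app, g

app = Flask(__name__)

# Entering the block calls AppContext.push, so current_app and g resolve;
# leaving it calls AppContext.pop, which runs teardown_appcontext functions.
with app.app_context():
    g.task = "cleanup"  # stored on the _AppCtxGlobals instance shown above
    print(current_app.name)
```
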
- self._refcnt = 0 - - def push(self): - """Binds the app context to the current context.""" - self._refcnt += 1 - if hasattr(sys, "exc_clear"): - sys.exc_clear() - _app_ctx_stack.push(self) - appcontext_pushed.send(self.app) - - def pop(self, exc=_sentinel): - """Pops the app context.""" - try: - self._refcnt -= 1 - if self._refcnt <= 0: - if exc is _sentinel: - exc = sys.exc_info()[1] - self.app.do_teardown_appcontext(exc) - finally: - rv = _app_ctx_stack.pop() - assert rv is self, "Popped wrong app context. (%r instead of %r)" % (rv, self) - appcontext_popped.send(self.app) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.pop(exc_value) - - if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: - reraise(exc_type, exc_value, tb) - - -class RequestContext(object): - """The request context contains all request relevant information. It is - created at the beginning of the request and pushed to the - `_request_ctx_stack` and removed at the end of it. It will create the - URL adapter and request object for the WSGI environment provided. - - Do not attempt to use this class directly, instead use - :meth:`~flask.Flask.test_request_context` and - :meth:`~flask.Flask.request_context` to create this object. - - When the request context is popped, it will evaluate all the - functions registered on the application for teardown execution - (:meth:`~flask.Flask.teardown_request`). - - The request context is automatically popped at the end of the request - for you. In debug mode the request context is kept around if - exceptions happen so that interactive debuggers have a chance to - introspect the data. With 0.4 this can also be forced for requests - that did not fail and outside of ``DEBUG`` mode. By setting - ``'flask._preserve_context'`` to ``True`` on the WSGI environment the - context will not pop itself at the end of the request. This is used by - the :meth:`~flask.Flask.test_client` for example to implement the - deferred cleanup functionality. - - You might find this helpful for unittests where you need the - information from the context local around for a little longer. Make - sure to properly :meth:`~werkzeug.LocalStack.pop` the stack yourself in - that situation, otherwise your unittests will leak memory. - """ - - def __init__(self, app, environ, request=None, session=None): - self.app = app - if request is None: - request = app.request_class(environ) - self.request = request - self.url_adapter = None - try: - self.url_adapter = app.create_url_adapter(self.request) - except HTTPException as e: - self.request.routing_exception = e - self.flashes = None - self.session = session - - # Request contexts can be pushed multiple times and interleaved with - # other request contexts. Now only if the last level is popped we - # get rid of them. Additionally if an application context is missing - # one is created implicitly so for each level we add this information - self._implicit_app_ctx_stack = [] - - # indicator if the context was preserved. Next time another context - # is pushed the preserved context is popped. - self.preserved = False - - # remembers the exception for pop if there is one in case the context - # preservation kicks in. - self._preserved_exc = None - - # Functions that should be executed after the request on the response - # object. These will be called before the regular "after_request" - # functions. 
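
`RequestContext` is normally created for you per request; for tests the docstring points at `test_request_context`, sketched here with a made-up URL:

```python
from flask import Flask, request

app = Flask(__name__)

# test_request_context builds a RequestContext from a fake WSGI environ and,
# as described above, pushes an application context implicitly if needed.
with app.test_request_context("/items?page=2"):
    print(request.path)          # /items
    print(request.args["page"])  # 2
```
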
- self._after_request_functions = [] - - @property - def g(self): - return _app_ctx_stack.top.g - - @g.setter - def g(self, value): - _app_ctx_stack.top.g = value - - def copy(self): - """Creates a copy of this request context with the same request object. - This can be used to move a request context to a different greenlet. - Because the actual request object is the same this cannot be used to - move a request context to a different thread unless access to the - request object is locked. - - .. versionadded:: 0.10 - - .. versionchanged:: 1.1 - The current session object is used instead of reloading the original - data. This prevents `flask.session` pointing to an out-of-date object. - """ - return self.__class__( - self.app, - environ=self.request.environ, - request=self.request, - session=self.session, - ) - - def match_request(self): - """Can be overridden by a subclass to hook into the matching - of the request. - """ - try: - result = self.url_adapter.match(return_rule=True) - self.request.url_rule, self.request.view_args = result - except HTTPException as e: - self.request.routing_exception = e - - def push(self): - """Binds the request context to the current context.""" - # If an exception occurs in debug mode or if context preservation is - # activated under exception situations exactly one context stays - # on the stack. The rationale is that you want to access that - # information under debug situations. However if someone forgets to - # pop that context again we want to make sure that on the next push - # it's invalidated, otherwise we run at risk that something leaks - # memory. This is usually only a problem in test suite since this - # functionality is not active in production environments. - top = _request_ctx_stack.top - if top is not None and top.preserved: - top.pop(top._preserved_exc) - - # Before we push the request context we have to ensure that there - # is an application context. - app_ctx = _app_ctx_stack.top - if app_ctx is None or app_ctx.app != self.app: - app_ctx = self.app.app_context() - app_ctx.push() - self._implicit_app_ctx_stack.append(app_ctx) - else: - self._implicit_app_ctx_stack.append(None) - - if hasattr(sys, "exc_clear"): - sys.exc_clear() - - _request_ctx_stack.push(self) - - # Open the session at the moment that the request context is available. - # This allows a custom open_session method to use the request context. - # Only open a new session if this is the first time the request was - # pushed, otherwise stream_with_context loses the session. - if self.session is None: - session_interface = self.app.session_interface - self.session = session_interface.open_session(self.app, self.request) - - if self.session is None: - self.session = session_interface.make_null_session(self.app) - - if self.url_adapter is not None: - self.match_request() - - def pop(self, exc=_sentinel): - """Pops the request context and unbinds it by doing that. This will - also trigger the execution of functions registered by the - :meth:`~flask.Flask.teardown_request` decorator. - - .. versionchanged:: 0.9 - Added the `exc` argument. - """ - app_ctx = self._implicit_app_ctx_stack.pop() - - try: - clear_request = False - if not self._implicit_app_ctx_stack: - self.preserved = False - self._preserved_exc = None - if exc is _sentinel: - exc = sys.exc_info()[1] - self.app.do_teardown_request(exc) - - # If this interpreter supports clearing the exception information - # we do that now. 
This will only go into effect on Python 2.x, - # on 3.x it disappears automatically at the end of the exception - # stack. - if hasattr(sys, "exc_clear"): - sys.exc_clear() - - request_close = getattr(self.request, "close", None) - if request_close is not None: - request_close() - clear_request = True - finally: - rv = _request_ctx_stack.pop() - - # get rid of circular dependencies at the end of the request - # so that we don't require the GC to be active. - if clear_request: - rv.request.environ["werkzeug.request"] = None - - # Get rid of the app as well if necessary. - if app_ctx is not None: - app_ctx.pop(exc) - - assert rv is self, "Popped wrong request context. (%r instead of %r)" % ( - rv, - self, - ) - - def auto_pop(self, exc): - if self.request.environ.get("flask._preserve_context") or ( - exc is not None and self.app.preserve_context_on_exception - ): - self.preserved = True - self._preserved_exc = exc - else: - self.pop(exc) - - def __enter__(self): - self.push() - return self - - def __exit__(self, exc_type, exc_value, tb): - # do not pop the request stack if we are in debug mode and an - # exception happened. This will allow the debugger to still - # access the request object in the interactive shell. Furthermore - # the context can be force kept alive for the test client. - # See flask.testing for how this works. - self.auto_pop(exc_value) - - if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None: - reraise(exc_type, exc_value, tb) - - def __repr__(self): - return "<%s '%s' [%s] of %s>" % ( - self.__class__.__name__, - self.request.url, - self.request.method, - self.app.name, - ) diff --git a/venv/lib/python3.7/site-packages/flask/debughelpers.py b/venv/lib/python3.7/site-packages/flask/debughelpers.py deleted file mode 100644 index e475bd1..0000000 --- a/venv/lib/python3.7/site-packages/flask/debughelpers.py +++ /dev/null @@ -1,183 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.debughelpers - ~~~~~~~~~~~~~~~~~~ - - Various helpers to make the development experience better. - - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" -import os -from warnings import warn - -from ._compat import implements_to_string -from ._compat import text_type -from .app import Flask -from .blueprints import Blueprint -from .globals import _request_ctx_stack - - -class UnexpectedUnicodeError(AssertionError, UnicodeError): - """Raised in places where we want some better error reporting for - unexpected unicode or binary data. - """ - - -@implements_to_string -class DebugFilesKeyError(KeyError, AssertionError): - """Raised from request.files during debugging. The idea is that it can - provide a better error message than just a generic KeyError/BadRequest. - """ - - def __init__(self, request, key): - form_matches = request.form.getlist(key) - buf = [ - 'You tried to access the file "%s" in the request.files ' - "dictionary but it does not exist. The mimetype for the request " - 'is "%s" instead of "multipart/form-data" which means that no ' - "file contents were transmitted. To fix this error you should " - 'provide enctype="multipart/form-data" in your form.' - % (key, request.mimetype) - ] - if form_matches: - buf.append( - "\n\nThe browser instead transmitted some file names. 
" - "This was submitted: %s" % ", ".join('"%s"' % x for x in form_matches) - ) - self.msg = "".join(buf) - - def __str__(self): - return self.msg - - -class FormDataRoutingRedirect(AssertionError): - """This exception is raised by Flask in debug mode if it detects a - redirect caused by the routing system when the request method is not - GET, HEAD or OPTIONS. Reasoning: form data will be dropped. - """ - - def __init__(self, request): - exc = request.routing_exception - buf = [ - "A request was sent to this URL (%s) but a redirect was " - 'issued automatically by the routing system to "%s".' - % (request.url, exc.new_url) - ] - - # In case just a slash was appended we can be extra helpful - if request.base_url + "/" == exc.new_url.split("?")[0]: - buf.append( - " The URL was defined with a trailing slash so " - "Flask will automatically redirect to the URL " - "with the trailing slash if it was accessed " - "without one." - ) - - buf.append( - " Make sure to directly send your %s-request to this URL " - "since we can't make browsers or HTTP clients redirect " - "with form data reliably or without user interaction." % request.method - ) - buf.append("\n\nNote: this exception is only raised in debug mode") - AssertionError.__init__(self, "".join(buf).encode("utf-8")) - - -def attach_enctype_error_multidict(request): - """Since Flask 0.8 we're monkeypatching the files object in case a - request is detected that does not use multipart form data but the files - object is accessed. - """ - oldcls = request.files.__class__ - - class newcls(oldcls): - def __getitem__(self, key): - try: - return oldcls.__getitem__(self, key) - except KeyError: - if key not in request.form: - raise - raise DebugFilesKeyError(request, key) - - newcls.__name__ = oldcls.__name__ - newcls.__module__ = oldcls.__module__ - request.files.__class__ = newcls - - -def _dump_loader_info(loader): - yield "class: %s.%s" % (type(loader).__module__, type(loader).__name__) - for key, value in sorted(loader.__dict__.items()): - if key.startswith("_"): - continue - if isinstance(value, (tuple, list)): - if not all(isinstance(x, (str, text_type)) for x in value): - continue - yield "%s:" % key - for item in value: - yield " - %s" % item - continue - elif not isinstance(value, (str, text_type, int, float, bool)): - continue - yield "%s: %r" % (key, value) - - -def explain_template_loading_attempts(app, template, attempts): - """This should help developers understand what failed""" - info = ['Locating template "%s":' % template] - total_found = 0 - blueprint = None - reqctx = _request_ctx_stack.top - if reqctx is not None and reqctx.request.blueprint is not None: - blueprint = reqctx.request.blueprint - - for idx, (loader, srcobj, triple) in enumerate(attempts): - if isinstance(srcobj, Flask): - src_info = 'application "%s"' % srcobj.import_name - elif isinstance(srcobj, Blueprint): - src_info = 'blueprint "%s" (%s)' % (srcobj.name, srcobj.import_name) - else: - src_info = repr(srcobj) - - info.append("% 5d: trying loader of %s" % (idx + 1, src_info)) - - for line in _dump_loader_info(loader): - info.append(" %s" % line) - - if triple is None: - detail = "no match" - else: - detail = "found (%r)" % (triple[1] or "") - total_found += 1 - info.append(" -> %s" % detail) - - seems_fishy = False - if total_found == 0: - info.append("Error: the template could not be found.") - seems_fishy = True - elif total_found > 1: - info.append("Warning: multiple loaders returned a match for the template.") - seems_fishy = True - - if blueprint is not 
None and seems_fishy: - info.append( - " The template was looked up from an endpoint that " - 'belongs to the blueprint "%s".' % blueprint - ) - info.append(" Maybe you did not place a template in the right folder?") - info.append(" See http://flask.pocoo.org/docs/blueprints/#templates") - - app.logger.info("\n".join(info)) - - -def explain_ignored_app_run(): - if os.environ.get("WERKZEUG_RUN_MAIN") != "true": - warn( - Warning( - "Silently ignoring app.run() because the " - "application is run from the flask command line " - "executable. Consider putting app.run() behind an " - 'if __name__ == "__main__" guard to silence this ' - "warning." - ), - stacklevel=3, - ) diff --git a/venv/lib/python3.7/site-packages/flask/globals.py b/venv/lib/python3.7/site-packages/flask/globals.py deleted file mode 100644 index 6d32dcf..0000000 --- a/venv/lib/python3.7/site-packages/flask/globals.py +++ /dev/null @@ -1,62 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.globals - ~~~~~~~~~~~~~ - - Defines all the global objects that are proxies to the current - active context. - - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" -from functools import partial - -from werkzeug.local import LocalProxy -from werkzeug.local import LocalStack - - -_request_ctx_err_msg = """\ -Working outside of request context. - -This typically means that you attempted to use functionality that needed -an active HTTP request. Consult the documentation on testing for -information about how to avoid this problem.\ -""" -_app_ctx_err_msg = """\ -Working outside of application context. - -This typically means that you attempted to use functionality that needed -to interface with the current application object in some way. To solve -this, set up an application context with app.app_context(). See the -documentation for more information.\ -""" - - -def _lookup_req_object(name): - top = _request_ctx_stack.top - if top is None: - raise RuntimeError(_request_ctx_err_msg) - return getattr(top, name) - - -def _lookup_app_object(name): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError(_app_ctx_err_msg) - return getattr(top, name) - - -def _find_app(): - top = _app_ctx_stack.top - if top is None: - raise RuntimeError(_app_ctx_err_msg) - return top.app - - -# context locals -_request_ctx_stack = LocalStack() -_app_ctx_stack = LocalStack() -current_app = LocalProxy(_find_app) -request = LocalProxy(partial(_lookup_req_object, "request")) -session = LocalProxy(partial(_lookup_req_object, "session")) -g = LocalProxy(partial(_lookup_app_object, "g")) diff --git a/venv/lib/python3.7/site-packages/flask/helpers.py b/venv/lib/python3.7/site-packages/flask/helpers.py deleted file mode 100644 index 3f401a5..0000000 --- a/venv/lib/python3.7/site-packages/flask/helpers.py +++ /dev/null @@ -1,1153 +0,0 @@ -# -*- coding: utf-8 -*- -""" - flask.helpers - ~~~~~~~~~~~~~ - - Implements various helpers. 
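
`explain_ignored_app_run` above is the warning you get when `app.run()` sits at module level in a project started through the `flask` CLI or a WSGI server. The usual fix, sketched for a hypothetical entry point like this project's `run.py`:

```python
# run.py -- illustrative; assumes the Flask instance is importable as app:app,
# matching the gunicorn invocation used elsewhere in this repo.
from app import app

if __name__ == "__main__":
    # Only runs when executed directly with `python run.py`; under
    # `flask run` or gunicorn this guard keeps app.run() from firing.
    app.run(debug=True)
```
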
- - :copyright: 2010 Pallets - :license: BSD-3-Clause -""" -import io -import mimetypes -import os -import pkgutil -import posixpath -import socket -import sys -import unicodedata -from functools import update_wrapper -from threading import RLock -from time import time -from zlib import adler32 - -from jinja2 import FileSystemLoader -from werkzeug.datastructures import Headers -from werkzeug.exceptions import BadRequest -from werkzeug.exceptions import NotFound -from werkzeug.exceptions import RequestedRangeNotSatisfiable -from werkzeug.routing import BuildError -from werkzeug.urls import url_quote -from werkzeug.wsgi import wrap_file - -from ._compat import fspath -from ._compat import PY2 -from ._compat import string_types -from ._compat import text_type -from .globals import _app_ctx_stack -from .globals import _request_ctx_stack -from .globals import current_app -from .globals import request -from .globals import session -from .signals import message_flashed - -# sentinel -_missing = object() - - -# what separators does this operating system provide that are not a slash? -# this is used by the send_from_directory function to ensure that nobody is -# able to access files from outside the filesystem. -_os_alt_seps = list( - sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, "/") -) - - -def get_env(): - """Get the environment the app is running in, indicated by the - :envvar:`FLASK_ENV` environment variable. The default is - ``'production'``. - """ - return os.environ.get("FLASK_ENV") or "production" - - -def get_debug_flag(): - """Get whether debug mode should be enabled for the app, indicated - by the :envvar:`FLASK_DEBUG` environment variable. The default is - ``True`` if :func:`.get_env` returns ``'development'``, or ``False`` - otherwise. - """ - val = os.environ.get("FLASK_DEBUG") - - if not val: - return get_env() == "development" - - return val.lower() not in ("0", "false", "no") - - -def get_load_dotenv(default=True): - """Get whether the user has disabled loading dotenv files by setting - :envvar:`FLASK_SKIP_DOTENV`. The default is ``True``, load the - files. - - :param default: What to return if the env var isn't set. - """ - val = os.environ.get("FLASK_SKIP_DOTENV") - - if not val: - return default - - return val.lower() in ("0", "false", "no") - - -def _endpoint_from_view_func(view_func): - """Internal helper that returns the default endpoint for a given - function. This always is the function name. - """ - assert view_func is not None, "expected view func if endpoint is not provided." - return view_func.__name__ - - -def stream_with_context(generator_or_function): - """Request contexts disappear when the response is started on the server. - This is done for efficiency reasons and to make it less likely to encounter - memory leaks with badly written WSGI middlewares. The downside is that if - you are using streamed responses, the generator cannot access request bound - information any more. - - This function however can help you keep the context around for longer:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - @stream_with_context - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' - return Response(generate()) - - Alternatively it can also be used around a specific generator:: - - from flask import stream_with_context, request, Response - - @app.route('/stream') - def streamed_response(): - def generate(): - yield 'Hello ' - yield request.args['name'] - yield '!' 
- return Response(stream_with_context(generate())) - - .. versionadded:: 0.9 - """ - try: - gen = iter(generator_or_function) - except TypeError: - - def decorator(*args, **kwargs): - gen = generator_or_function(*args, **kwargs) - return stream_with_context(gen) - - return update_wrapper(decorator, generator_or_function) - - def generator(): - ctx = _request_ctx_stack.top - if ctx is None: - raise RuntimeError( - "Attempted to stream with context but " - "there was no context in the first place to keep around." - ) - with ctx: - # Dummy sentinel. Has to be inside the context block or we're - # not actually keeping the context around. - yield None - - # The try/finally is here so that if someone passes a WSGI level - # iterator in we're still running the cleanup logic. Generators - # don't need that because they are closed on their destruction - # automatically. - try: - for item in gen: - yield item - finally: - if hasattr(gen, "close"): - gen.close() - - # The trick is to start the generator. Then the code execution runs until - # the first dummy None is yielded at which point the context was already - # pushed. This item is discarded. Then when the iteration continues the - # real generator is executed. - wrapped_g = generator() - next(wrapped_g) - return wrapped_g - - -def make_response(*args): - """Sometimes it is necessary to set additional headers in a view. Because - views do not have to return response objects but can return a value that - is converted into a response object by Flask itself, it becomes tricky to - add headers to it. This function can be called instead of using a return - and you will get a response object which you can use to attach headers. - - If view looked like this and you want to add a new header:: - - def index(): - return render_template('index.html', foo=42) - - You can now do something like this:: - - def index(): - response = make_response(render_template('index.html', foo=42)) - response.headers['X-Parachutes'] = 'parachutes are cool' - return response - - This function accepts the very same arguments you can return from a - view function. This for example creates a response with a 404 error - code:: - - response = make_response(render_template('not_found.html'), 404) - - The other use case of this function is to force the return value of a - view function into a response which is helpful with view - decorators:: - - response = make_response(view_function()) - response.headers['X-Parachutes'] = 'parachutes are cool' - - Internally this function does the following things: - - - if no arguments are passed, it creates a new response argument - - if one argument is passed, :meth:`flask.Flask.make_response` - is invoked with it. - - if more than one argument is passed, the arguments are passed - to the :meth:`flask.Flask.make_response` function as tuple. - - .. versionadded:: 0.6 - """ - if not args: - return current_app.response_class() - if len(args) == 1: - args = args[0] - return current_app.make_response(args) - - -def url_for(endpoint, **values): - """Generates a URL to the given endpoint with the method provided. - - Variable arguments that are unknown to the target endpoint are appended - to the generated URL as query arguments. If the value of a query argument - is ``None``, the whole pair is skipped. In case blueprints are active - you can shortcut references to the same blueprint by prefixing the - local endpoint with a dot (``.``). 
-
-    This will reference the index function local to the current blueprint::
-
-        url_for('.index')
-
-    For more information, head over to the :ref:`Quickstart <quickstart>`.
-
-    Configuration values ``APPLICATION_ROOT`` and ``SERVER_NAME`` are only used when
-    generating URLs outside of a request context.
-
-    To integrate applications, :class:`Flask` has a hook to intercept URL build
-    errors through :attr:`Flask.url_build_error_handlers`. The `url_for`
-    function results in a :exc:`~werkzeug.routing.BuildError` when the current
-    app does not have a URL for the given endpoint and values. When it does, the
-    :data:`~flask.current_app` calls its :attr:`~Flask.url_build_error_handlers` if
-    it is not ``None``, which can return a string to use as the result of
-    `url_for` (instead of `url_for`'s default to raise the
-    :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
-    An example::
-
-        def external_url_handler(error, endpoint, values):
-            "Looks up an external URL when `url_for` cannot build a URL."
-            # This is an example of hooking the build_error_handler.
-            # Here, lookup_url is some utility function you've built
-            # which looks up the endpoint in some external URL registry.
-            url = lookup_url(endpoint, **values)
-            if url is None:
-                # External lookup did not have a URL.
-                # Re-raise the BuildError, in context of original traceback.
-                exc_type, exc_value, tb = sys.exc_info()
-                if exc_value is error:
-                    raise exc_type, exc_value, tb
-                else:
-                    raise error
-            # url_for will use this result, instead of raising BuildError.
-            return url
-
-        app.url_build_error_handlers.append(external_url_handler)
-
-    Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
-    `endpoint` and `values` are the arguments passed into `url_for`. Note
-    that this is for building URLs outside the current application, and not for
-    handling 404 NotFound errors.
-
-    .. versionadded:: 0.10
-       The `_scheme` parameter was added.
-
-    .. versionadded:: 0.9
-       The `_anchor` and `_method` parameters were added.
-
-    .. versionadded:: 0.9
-       Calls :meth:`Flask.handle_build_error` on
-       :exc:`~werkzeug.routing.BuildError`.
-
-    :param endpoint: the endpoint of the URL (name of the function)
-    :param values: the variable arguments of the URL rule
-    :param _external: if set to ``True``, an absolute URL is generated. Server
-      address can be changed via ``SERVER_NAME`` configuration variable which
-      falls back to the `Host` header, then to the IP and port of the request.
-    :param _scheme: a string specifying the desired URL scheme. The `_external`
-      parameter must be set to ``True`` or a :exc:`ValueError` is raised. The default
-      behavior uses the same scheme as the current request, or
-      ``PREFERRED_URL_SCHEME`` from the :ref:`app configuration <config>` if no
-      request context is available. As of Werkzeug 0.10, this also can be set
-      to an empty string to build protocol-relative URLs.
-    :param _anchor: if provided this is added as anchor to the URL.
-    :param _method: if provided this explicitly specifies an HTTP method.
-    """
-    appctx = _app_ctx_stack.top
-    reqctx = _request_ctx_stack.top
-
-    if appctx is None:
-        raise RuntimeError(
-            "Attempted to generate a URL without the application context being"
-            " pushed. This has to be executed when application context is"
-            " available."
-        )
-
-    # If request specific information is available we have some extra
-    # features that support "relative" URLs.
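
A compact sketch of the `url_for` parameters documented above; the endpoint and `SERVER_NAME` value are illustrative:

```python
from flask import Flask, url_for

app = Flask(__name__)
app.config["SERVER_NAME"] = "example.com"  # required for request-independent URLs


@app.route("/report/<int:report_id>")
def report(report_id):
    return "report %d" % report_id


with app.app_context():
    # Outside a request the app context's URL adapter is used and URLs are
    # external by default, per the code path above.
    print(url_for("report", report_id=7))                     # http://example.com/report/7
    print(url_for("report", report_id=7, _anchor="summary"))  # ...#summary
    print(url_for("report", report_id=7, _scheme="https", _external=True))
```
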
- if reqctx is not None: - url_adapter = reqctx.url_adapter - blueprint_name = request.blueprint - - if endpoint[:1] == ".": - if blueprint_name is not None: - endpoint = blueprint_name + endpoint - else: - endpoint = endpoint[1:] - - external = values.pop("_external", False) - - # Otherwise go with the url adapter from the appctx and make - # the URLs external by default. - else: - url_adapter = appctx.url_adapter - - if url_adapter is None: - raise RuntimeError( - "Application was not able to create a URL adapter for request" - " independent URL generation. You might be able to fix this by" - " setting the SERVER_NAME config variable." - ) - - external = values.pop("_external", True) - - anchor = values.pop("_anchor", None) - method = values.pop("_method", None) - scheme = values.pop("_scheme", None) - appctx.app.inject_url_defaults(endpoint, values) - - # This is not the best way to deal with this but currently the - # underlying Werkzeug router does not support overriding the scheme on - # a per build call basis. - old_scheme = None - if scheme is not None: - if not external: - raise ValueError("When specifying _scheme, _external must be True") - old_scheme = url_adapter.url_scheme - url_adapter.url_scheme = scheme - - try: - try: - rv = url_adapter.build( - endpoint, values, method=method, force_external=external - ) - finally: - if old_scheme is not None: - url_adapter.url_scheme = old_scheme - except BuildError as error: - # We need to inject the values again so that the app callback can - # deal with that sort of stuff. - values["_external"] = external - values["_anchor"] = anchor - values["_method"] = method - values["_scheme"] = scheme - return appctx.app.handle_url_build_error(error, endpoint, values) - - if anchor is not None: - rv += "#" + url_quote(anchor) - return rv - - -def get_template_attribute(template_name, attribute): - """Loads a macro (or variable) a template exports. This can be used to - invoke a macro from within Python code. If you for example have a - template named :file:`_cider.html` with the following contents: - - .. sourcecode:: html+jinja - - {% macro hello(name) %}Hello {{ name }}!{% endmacro %} - - You can access this from Python code like this:: - - hello = get_template_attribute('_cider.html', 'hello') - return hello('World') - - .. versionadded:: 0.2 - - :param template_name: the name of the template - :param attribute: the name of the variable of macro to access - """ - return getattr(current_app.jinja_env.get_template(template_name).module, attribute) - - -def flash(message, category="message"): - """Flashes a message to the next request. In order to remove the - flashed message from the session and to display it to the user, - the template has to call :func:`get_flashed_messages`. - - .. versionchanged:: 0.3 - `category` parameter added. - - :param message: the message to be flashed. - :param category: the category for the message. The following values - are recommended: ``'message'`` for any kind of message, - ``'error'`` for errors, ``'info'`` for information - messages and ``'warning'`` for warnings. However any - kind of string can be used as category. - """ - # Original implementation: - # - # session.setdefault('_flashes', []).append((category, message)) - # - # This assumed that changes made to mutable structures in the session are - # always in sync with the session object, which is not true for session - # implementations that use external storage for keeping their keys/values. 
- flashes = session.get("_flashes", []) - flashes.append((category, message)) - session["_flashes"] = flashes - message_flashed.send( - current_app._get_current_object(), message=message, category=category - ) - - -def get_flashed_messages(with_categories=False, category_filter=()): - """Pulls all flashed messages from the session and returns them. - Further calls in the same request to the function will return - the same messages. By default just the messages are returned, - but when `with_categories` is set to ``True``, the return value will - be a list of tuples in the form ``(category, message)`` instead. - - Filter the flashed messages to one or more categories by providing those - categories in `category_filter`. This allows rendering categories in - separate html blocks. The `with_categories` and `category_filter` - arguments are distinct: - - * `with_categories` controls whether categories are returned with message - text (``True`` gives a tuple, where ``False`` gives just the message text). - * `category_filter` filters the messages down to only those matching the - provided categories. - - See :ref:`message-flashing-pattern` for examples. - - .. versionchanged:: 0.3 - `with_categories` parameter added. - - .. versionchanged:: 0.9 - `category_filter` parameter added. - - :param with_categories: set to ``True`` to also receive categories. - :param category_filter: whitelist of categories to limit return values - """ - flashes = _request_ctx_stack.top.flashes - if flashes is None: - _request_ctx_stack.top.flashes = flashes = ( - session.pop("_flashes") if "_flashes" in session else [] - ) - if category_filter: - flashes = list(filter(lambda f: f[0] in category_filter, flashes)) - if not with_categories: - return [x[1] for x in flashes] - return flashes - - -def send_file( - filename_or_fp, - mimetype=None, - as_attachment=False, - attachment_filename=None, - add_etags=True, - cache_timeout=None, - conditional=False, - last_modified=None, -): - """Sends the contents of a file to the client. This will use the - most efficient method available and configured. By default it will - try to use the WSGI server's file_wrapper support. Alternatively - you can set the application's :attr:`~Flask.use_x_sendfile` attribute - to ``True`` to directly emit an ``X-Sendfile`` header. This however - requires support of the underlying webserver for ``X-Sendfile``. - - By default it will try to guess the mimetype for you, but you can - also explicitly provide one. For extra security you probably want - to send certain files as attachment (HTML for instance). The mimetype - guessing requires a `filename` or an `attachment_filename` to be - provided. - - ETags will also be attached automatically if a `filename` is provided. You - can turn this off by setting `add_etags=False`. - - If `conditional=True` and `filename` is provided, this method will try to - upgrade the response stream to support range requests. This will allow - the request to be answered with partial content response. - - Please never pass filenames to this function from user sources; - you should use :func:`send_from_directory` instead. - - .. versionadded:: 0.2 - - .. versionadded:: 0.5 - The `add_etags`, `cache_timeout` and `conditional` parameters were - added. The default behavior is now to attach etags. - - .. versionchanged:: 0.7 - mimetype guessing and etag support for file objects was - deprecated because it was unreliable. Pass a filename if you are - able to, otherwise attach an etag yourself. 
This functionality - will be removed in Flask 1.0 - - .. versionchanged:: 0.9 - cache_timeout pulls its default from application config, when None. - - .. versionchanged:: 0.12 - The filename is no longer automatically inferred from file objects. If - you want to use automatic mimetype and etag support, pass a filepath via - `filename_or_fp` or `attachment_filename`. - - .. versionchanged:: 0.12 - The `attachment_filename` is preferred over `filename` for MIME-type - detection. - - .. versionchanged:: 1.0 - UTF-8 filenames, as specified in `RFC 2231`_, are supported. - - .. _RFC 2231: https://tools.ietf.org/html/rfc2231#section-4 - - .. versionchanged:: 1.0.3 - Filenames are encoded with ASCII instead of Latin-1 for broader - compatibility with WSGI servers. - - .. versionchanged:: 1.1 - Filename may be a :class:`~os.PathLike` object. - - .. versionadded:: 1.1 - Partial content supports :class:`~io.BytesIO`. - - :param filename_or_fp: the filename of the file to send. - This is relative to the :attr:`~Flask.root_path` - if a relative path is specified. - Alternatively a file object might be provided in - which case ``X-Sendfile`` might not work and fall - back to the traditional method. Make sure that the - file pointer is positioned at the start of data to - send before calling :func:`send_file`. - :param mimetype: the mimetype of the file if provided. If a file path is - given, auto detection happens as fallback, otherwise an - error will be raised. - :param as_attachment: set to ``True`` if you want to send this file with - a ``Content-Disposition: attachment`` header. - :param attachment_filename: the filename for the attachment if it - differs from the file's filename. - :param add_etags: set to ``False`` to disable attaching of etags. - :param conditional: set to ``True`` to enable conditional responses. - - :param cache_timeout: the timeout in seconds for the headers. When ``None`` - (default), this value is set by - :meth:`~Flask.get_send_file_max_age` of - :data:`~flask.current_app`. - :param last_modified: set the ``Last-Modified`` header to this value, - a :class:`~datetime.datetime` or timestamp. - If a file was passed, this overrides its mtime. - """ - mtime = None - fsize = None - - if hasattr(filename_or_fp, "__fspath__"): - filename_or_fp = fspath(filename_or_fp) - - if isinstance(filename_or_fp, string_types): - filename = filename_or_fp - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - file = None - if attachment_filename is None: - attachment_filename = os.path.basename(filename) - else: - file = filename_or_fp - filename = None - - if mimetype is None: - if attachment_filename is not None: - mimetype = ( - mimetypes.guess_type(attachment_filename)[0] - or "application/octet-stream" - ) - - if mimetype is None: - raise ValueError( - "Unable to infer MIME-type because no filename is available. " - "Please set either `attachment_filename`, pass a filepath to " - "`filename_or_fp` or set your own MIME-type via `mimetype`." 
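# --- editor's sketch of send_file() with the parameters described above;
# --- the path and mimetype are assumed for illustration.
from flask import send_file

@app.route("/export")
def export():
    return send_file(
        "exports/report.pdf",            # relative to app.root_path
        mimetype="application/pdf",
        as_attachment=True,
        attachment_filename="report.pdf",
        conditional=True,                # enables range requests
    )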
- ) - - headers = Headers() - if as_attachment: - if attachment_filename is None: - raise TypeError("filename unavailable, required for sending as attachment") - - if not isinstance(attachment_filename, text_type): - attachment_filename = attachment_filename.decode("utf-8") - - try: - attachment_filename = attachment_filename.encode("ascii") - except UnicodeEncodeError: - filenames = { - "filename": unicodedata.normalize("NFKD", attachment_filename).encode( - "ascii", "ignore" - ), - "filename*": "UTF-8''%s" % url_quote(attachment_filename, safe=b""), - } - else: - filenames = {"filename": attachment_filename} - - headers.add("Content-Disposition", "attachment", **filenames) - - if current_app.use_x_sendfile and filename: - if file is not None: - file.close() - headers["X-Sendfile"] = filename - fsize = os.path.getsize(filename) - headers["Content-Length"] = fsize - data = None - else: - if file is None: - file = open(filename, "rb") - mtime = os.path.getmtime(filename) - fsize = os.path.getsize(filename) - headers["Content-Length"] = fsize - elif isinstance(file, io.BytesIO): - try: - fsize = file.getbuffer().nbytes - except AttributeError: - # Python 2 doesn't have getbuffer - fsize = len(file.getvalue()) - headers["Content-Length"] = fsize - data = wrap_file(request.environ, file) - - rv = current_app.response_class( - data, mimetype=mimetype, headers=headers, direct_passthrough=True - ) - - if last_modified is not None: - rv.last_modified = last_modified - elif mtime is not None: - rv.last_modified = mtime - - rv.cache_control.public = True - if cache_timeout is None: - cache_timeout = current_app.get_send_file_max_age(filename) - if cache_timeout is not None: - rv.cache_control.max_age = cache_timeout - rv.expires = int(time() + cache_timeout) - - if add_etags and filename is not None: - from warnings import warn - - try: - rv.set_etag( - "%s-%s-%s" - % ( - os.path.getmtime(filename), - os.path.getsize(filename), - adler32( - filename.encode("utf-8") - if isinstance(filename, text_type) - else filename - ) - & 0xFFFFFFFF, - ) - ) - except OSError: - warn( - "Access %s failed, maybe it does not exist, so ignore etags in " - "headers" % filename, - stacklevel=2, - ) - - if conditional: - try: - rv = rv.make_conditional(request, accept_ranges=True, complete_length=fsize) - except RequestedRangeNotSatisfiable: - if file is not None: - file.close() - raise - # make sure we don't send x-sendfile for servers that - # ignore the 304 status code for x-sendfile. - if rv.status_code == 304: - rv.headers.pop("x-sendfile", None) - return rv - - -def safe_join(directory, *pathnames): - """Safely join `directory` and zero or more untrusted `pathnames` - components. - - Example usage:: - - @app.route('/wiki/') - def wiki_page(filename): - filename = safe_join(app.config['WIKI_FOLDER'], filename) - with open(filename, 'rb') as fd: - content = fd.read() # Read and process the file content... - - :param directory: the trusted base directory. - :param pathnames: the untrusted pathnames relative to that directory. - :raises: :class:`~werkzeug.exceptions.NotFound` if one or more passed - paths fall out of its boundaries. - """ - - parts = [directory] - - for filename in pathnames: - if filename != "": - filename = posixpath.normpath(filename) - - if ( - any(sep in filename for sep in _os_alt_seps) - or os.path.isabs(filename) - or filename == ".." 
- or filename.startswith("../") - ): - raise NotFound() - - parts.append(filename) - - return posixpath.join(*parts) - - -def send_from_directory(directory, filename, **options): - """Send a file from a given directory with :func:`send_file`. This - is a secure way to quickly expose static files from an upload folder - or something similar. - - Example usage:: - - @app.route('/uploads/') - def download_file(filename): - return send_from_directory(app.config['UPLOAD_FOLDER'], - filename, as_attachment=True) - - .. admonition:: Sending files and Performance - - It is strongly recommended to activate either ``X-Sendfile`` support in - your webserver or (if no authentication happens) to tell the webserver - to serve files for the given path on its own without calling into the - web application for improved performance. - - .. versionadded:: 0.5 - - :param directory: the directory where all the files are stored. - :param filename: the filename relative to that directory to - download. - :param options: optional keyword arguments that are directly - forwarded to :func:`send_file`. - """ - filename = fspath(filename) - directory = fspath(directory) - filename = safe_join(directory, filename) - if not os.path.isabs(filename): - filename = os.path.join(current_app.root_path, filename) - try: - if not os.path.isfile(filename): - raise NotFound() - except (TypeError, ValueError): - raise BadRequest() - options.setdefault("conditional", True) - return send_file(filename, **options) - - -def get_root_path(import_name): - """Returns the path to a package or cwd if that cannot be found. This - returns the path of a package or the folder that contains a module. - - Not to be confused with the package path returned by :func:`find_package`. - """ - # Module already imported and has a file attribute. Use that first. - mod = sys.modules.get(import_name) - if mod is not None and hasattr(mod, "__file__"): - return os.path.dirname(os.path.abspath(mod.__file__)) - - # Next attempt: check the loader. - loader = pkgutil.get_loader(import_name) - - # Loader does not exist or we're referring to an unloaded main module - # or a main module without path (interactive sessions), go with the - # current working directory. - if loader is None or import_name == "__main__": - return os.getcwd() - - # For .egg, zipimporter does not have get_filename until Python 2.7. - # Some other loaders might exhibit the same behavior. - if hasattr(loader, "get_filename"): - filepath = loader.get_filename(import_name) - else: - # Fall back to imports. - __import__(import_name) - mod = sys.modules[import_name] - filepath = getattr(mod, "__file__", None) - - # If we don't have a filepath it might be because we are a - # namespace package. In this case we pick the root path from the - # first module that is contained in our package. - if filepath is None: - raise RuntimeError( - "No root path can be found for the provided " - 'module "%s". This can happen because the ' - "module came from an import hook that does " - "not provide file name information or because " - "it's a namespace package. In this case " - "the root path needs to be explicitly " - "provided." % import_name - ) - - # filepath is import_name.py for a module, or __init__.py for a package. - return os.path.dirname(os.path.abspath(filepath)) - - -def _matching_loader_thinks_module_is_package(loader, mod_name): - """Given the loader that loaded a module and the module this function - attempts to figure out if the given module is actually a package. 
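# --- editor's sketch of send_from_directory(), assuming an UPLOAD_FOLDER
# --- config key; note the <path:...> converter in the URL rule.
from flask import send_from_directory

@app.route("/uploads/<path:name>")
def download_file(name):
    return send_from_directory(app.config["UPLOAD_FOLDER"], name,
                               as_attachment=True)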
- """ - # If the loader can tell us if something is a package, we can - # directly ask the loader. - if hasattr(loader, "is_package"): - return loader.is_package(mod_name) - # importlib's namespace loaders do not have this functionality but - # all the modules it loads are packages, so we can take advantage of - # this information. - elif ( - loader.__class__.__module__ == "_frozen_importlib" - and loader.__class__.__name__ == "NamespaceLoader" - ): - return True - # Otherwise we need to fail with an error that explains what went - # wrong. - raise AttributeError( - ( - "%s.is_package() method is missing but is required by Flask of " - "PEP 302 import hooks. If you do not use import hooks and " - "you encounter this error please file a bug against Flask." - ) - % loader.__class__.__name__ - ) - - -def _find_package_path(root_mod_name): - """Find the path where the module's root exists in""" - if sys.version_info >= (3, 4): - import importlib.util - - try: - spec = importlib.util.find_spec(root_mod_name) - if spec is None: - raise ValueError("not found") - # ImportError: the machinery told us it does not exist - # ValueError: - # - the module name was invalid - # - the module name is __main__ - # - *we* raised `ValueError` due to `spec` being `None` - except (ImportError, ValueError): - pass # handled below - else: - # namespace package - if spec.origin in {"namespace", None}: - return os.path.dirname(next(iter(spec.submodule_search_locations))) - # a package (with __init__.py) - elif spec.submodule_search_locations: - return os.path.dirname(os.path.dirname(spec.origin)) - # just a normal module - else: - return os.path.dirname(spec.origin) - - # we were unable to find the `package_path` using PEP 451 loaders - loader = pkgutil.get_loader(root_mod_name) - if loader is None or root_mod_name == "__main__": - # import name is not found, or interactive/main module - return os.getcwd() - else: - # For .egg, zipimporter does not have get_filename until Python 2.7. - if hasattr(loader, "get_filename"): - filename = loader.get_filename(root_mod_name) - elif hasattr(loader, "archive"): - # zipimporter's loader.archive points to the .egg or .zip - # archive filename is dropped in call to dirname below. - filename = loader.archive - else: - # At least one loader is missing both get_filename and archive: - # Google App Engine's HardenedModulesHook - # - # Fall back to imports. - __import__(root_mod_name) - filename = sys.modules[root_mod_name].__file__ - package_path = os.path.abspath(os.path.dirname(filename)) - - # In case the root module is a package we need to chop of the - # rightmost part. This needs to go through a helper function - # because of python 3.3 namespace packages. - if _matching_loader_thinks_module_is_package(loader, root_mod_name): - package_path = os.path.dirname(package_path) - - return package_path - - -def find_package(import_name): - """Finds a package and returns the prefix (or None if the package is - not installed) as well as the folder that contains the package or - module as a tuple. The package path returned is the module that would - have to be added to the pythonpath in order to make it possible to - import the module. The prefix is the path below which a UNIX like - folder structure exists (lib, share etc.). 
- """ - root_mod_name, _, _ = import_name.partition(".") - package_path = _find_package_path(root_mod_name) - site_parent, site_folder = os.path.split(package_path) - py_prefix = os.path.abspath(sys.prefix) - if package_path.startswith(py_prefix): - return py_prefix, package_path - elif site_folder.lower() == "site-packages": - parent, folder = os.path.split(site_parent) - # Windows like installations - if folder.lower() == "lib": - base_dir = parent - # UNIX like installations - elif os.path.basename(parent).lower() == "lib": - base_dir = os.path.dirname(parent) - else: - base_dir = site_parent - return base_dir, package_path - return None, package_path - - -class locked_cached_property(object): - """A decorator that converts a function into a lazy property. The - function wrapped is called the first time to retrieve the result - and then that calculated result is used the next time you access - the value. Works like the one in Werkzeug but has a lock for - thread safety. - """ - - def __init__(self, func, name=None, doc=None): - self.__name__ = name or func.__name__ - self.__module__ = func.__module__ - self.__doc__ = doc or func.__doc__ - self.func = func - self.lock = RLock() - - def __get__(self, obj, type=None): - if obj is None: - return self - with self.lock: - value = obj.__dict__.get(self.__name__, _missing) - if value is _missing: - value = self.func(obj) - obj.__dict__[self.__name__] = value - return value - - -class _PackageBoundObject(object): - #: The name of the package or module that this app belongs to. Do not - #: change this once it is set by the constructor. - import_name = None - - #: Location of the template files to be added to the template lookup. - #: ``None`` if templates should not be added. - template_folder = None - - #: Absolute path to the package on the filesystem. Used to look up - #: resources contained in the package. - root_path = None - - def __init__(self, import_name, template_folder=None, root_path=None): - self.import_name = import_name - self.template_folder = template_folder - - if root_path is None: - root_path = get_root_path(self.import_name) - - self.root_path = root_path - self._static_folder = None - self._static_url_path = None - - # circular import - from .cli import AppGroup - - #: The Click command group for registration of CLI commands - #: on the application and associated blueprints. These commands - #: are accessible via the :command:`flask` command once the - #: application has been discovered and blueprints registered. - self.cli = AppGroup() - - @property - def static_folder(self): - """The absolute path to the configured static folder.""" - if self._static_folder is not None: - return os.path.join(self.root_path, self._static_folder) - - @static_folder.setter - def static_folder(self, value): - self._static_folder = value - - @property - def static_url_path(self): - """The URL prefix that the static route will be accessible from. - - If it was not configured during init, it is derived from - :attr:`static_folder`. - """ - if self._static_url_path is not None: - return self._static_url_path - - if self.static_folder is not None: - basename = os.path.basename(self.static_folder) - return ("/" + basename).rstrip("/") - - @static_url_path.setter - def static_url_path(self, value): - if value is not None: - value = value.rstrip("/") - - self._static_url_path = value - - @property - def has_static_folder(self): - """This is ``True`` if the package bound object's container has a - folder for static files. - - .. 
versionadded:: 0.5 - """ - return self.static_folder is not None - - @locked_cached_property - def jinja_loader(self): - """The Jinja loader for this package bound object. - - .. versionadded:: 0.5 - """ - if self.template_folder is not None: - return FileSystemLoader(os.path.join(self.root_path, self.template_folder)) - - def get_send_file_max_age(self, filename): - """Provides default cache_timeout for the :func:`send_file` functions. - - By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from - the configuration of :data:`~flask.current_app`. - - Static file functions such as :func:`send_from_directory` use this - function, and :func:`send_file` calls this function on - :data:`~flask.current_app` when the given cache_timeout is ``None``. If a - cache_timeout is given in :func:`send_file`, that timeout is used; - otherwise, this method is called. - - This allows subclasses to change the behavior when sending files based - on the filename. For example, to set the cache timeout for .js files - to 60 seconds:: - - class MyFlask(flask.Flask): - def get_send_file_max_age(self, name): - if name.lower().endswith('.js'): - return 60 - return flask.Flask.get_send_file_max_age(self, name) - - .. versionadded:: 0.9 - """ - return total_seconds(current_app.send_file_max_age_default) - - def send_static_file(self, filename): - """Function used internally to send static files from the static - folder to the browser. - - .. versionadded:: 0.5 - """ - if not self.has_static_folder: - raise RuntimeError("No static folder for this object") - # Ensure get_send_file_max_age is called in all cases. - # Here, we ensure get_send_file_max_age is called for Blueprints. - cache_timeout = self.get_send_file_max_age(filename) - return send_from_directory( - self.static_folder, filename, cache_timeout=cache_timeout - ) - - def open_resource(self, resource, mode="rb"): - """Opens a resource from the application's resource folder. To see - how this works, consider the following folder structure:: - - /myapplication.py - /schema.sql - /static - /style.css - /templates - /layout.html - /index.html - - If you want to open the :file:`schema.sql` file you would do the - following:: - - with app.open_resource('schema.sql') as f: - contents = f.read() - do_something_with(contents) - - :param resource: the name of the resource. To access resources within - subfolders use forward slashes as separator. - :param mode: Open file in this mode. Only reading is supported, - valid values are "r" (or "rt") and "rb". - """ - if mode not in {"r", "rt", "rb"}: - raise ValueError("Resources can only be opened for reading") - - return open(os.path.join(self.root_path, resource), mode) - - -def total_seconds(td): - """Returns the total seconds from a timedelta object. - - :param timedelta td: the timedelta to be converted in seconds - - :returns: number of seconds - :rtype: int - """ - return td.days * 60 * 60 * 24 + td.seconds - - -def is_ip(value): - """Determine if the given string is an IP address. - - Python 2 on Windows doesn't provide ``inet_pton``, so this only - checks IPv4 addresses in that environment. 
- - :param value: value to check - :type value: str - - :return: True if string is an IP address - :rtype: bool - """ - if PY2 and os.name == "nt": - try: - socket.inet_aton(value) - return True - except socket.error: - return False - - for family in (socket.AF_INET, socket.AF_INET6): - try: - socket.inet_pton(family, value) - except socket.error: - pass - else: - return True - - return False diff --git a/venv/lib/python3.7/site-packages/flask/json/__init__.py b/venv/lib/python3.7/site-packages/flask/json/__init__.py deleted file mode 100644 index a141068..0000000 --- a/venv/lib/python3.7/site-packages/flask/json/__init__.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -""" -flask.json -~~~~~~~~~~ - -:copyright: 2010 Pallets -:license: BSD-3-Clause -""" -import codecs -import io -import uuid -from datetime import date -from datetime import datetime - -from itsdangerous import json as _json -from jinja2 import Markup -from werkzeug.http import http_date - -from .._compat import PY2 -from .._compat import text_type -from ..globals import current_app -from ..globals import request - -try: - import dataclasses -except ImportError: - dataclasses = None - -# Figure out if simplejson escapes slashes. This behavior was changed -# from one version to another without reason. -_slash_escape = "\\/" not in _json.dumps("/") - - -__all__ = [ - "dump", - "dumps", - "load", - "loads", - "htmlsafe_dump", - "htmlsafe_dumps", - "JSONDecoder", - "JSONEncoder", - "jsonify", -] - - -def _wrap_reader_for_text(fp, encoding): - if isinstance(fp.read(0), bytes): - fp = io.TextIOWrapper(io.BufferedReader(fp), encoding) - return fp - - -def _wrap_writer_for_text(fp, encoding): - try: - fp.write("") - except TypeError: - fp = io.TextIOWrapper(fp, encoding) - return fp - - -class JSONEncoder(_json.JSONEncoder): - """The default Flask JSON encoder. This one extends the default - encoder by also supporting ``datetime``, ``UUID``, ``dataclasses``, - and ``Markup`` objects. - - ``datetime`` objects are serialized as RFC 822 datetime strings. - This is the same as the HTTP date format. - - In order to support more data types, override the :meth:`default` - method. - """ - - def default(self, o): - """Implement this method in a subclass such that it returns a - serializable object for ``o``, or calls the base implementation (to - raise a :exc:`TypeError`). - - For example, to support arbitrary iterators, you could implement - default like this:: - - def default(self, o): - try: - iterable = iter(o) - except TypeError: - pass - else: - return list(iterable) - return JSONEncoder.default(self, o) - """ - if isinstance(o, datetime): - return http_date(o.utctimetuple()) - if isinstance(o, date): - return http_date(o.timetuple()) - if isinstance(o, uuid.UUID): - return str(o) - if dataclasses and dataclasses.is_dataclass(o): - return dataclasses.asdict(o) - if hasattr(o, "__html__"): - return text_type(o.__html__()) - return _json.JSONEncoder.default(self, o) - - -class JSONDecoder(_json.JSONDecoder): - """The default JSON decoder. This one does not change the behavior from - the default simplejson decoder. Consult the :mod:`json` documentation - for more information. This decoder is not only used for the load - functions of this module but also :attr:`~flask.Request`. 
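# --- editor's sketch of extending the encoder as the docstring suggests;
# --- Decimal is an assumed example type.
from decimal import Decimal
from flask.json import JSONEncoder

class MyJSONEncoder(JSONEncoder):
    def default(self, o):
        if isinstance(o, Decimal):
            return str(o)                  # serialize Decimal as a string
        return JSONEncoder.default(self, o)

app.json_encoder = MyJSONEncoder           # picked up by jsonify()/dumps()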
- """ - - -def _dump_arg_defaults(kwargs, app=None): - """Inject default arguments for dump functions.""" - if app is None: - app = current_app - - if app: - bp = app.blueprints.get(request.blueprint) if request else None - kwargs.setdefault( - "cls", bp.json_encoder if bp and bp.json_encoder else app.json_encoder - ) - - if not app.config["JSON_AS_ASCII"]: - kwargs.setdefault("ensure_ascii", False) - - kwargs.setdefault("sort_keys", app.config["JSON_SORT_KEYS"]) - else: - kwargs.setdefault("sort_keys", True) - kwargs.setdefault("cls", JSONEncoder) - - -def _load_arg_defaults(kwargs, app=None): - """Inject default arguments for load functions.""" - if app is None: - app = current_app - - if app: - bp = app.blueprints.get(request.blueprint) if request else None - kwargs.setdefault( - "cls", bp.json_decoder if bp and bp.json_decoder else app.json_decoder - ) - else: - kwargs.setdefault("cls", JSONDecoder) - - -def detect_encoding(data): - """Detect which UTF codec was used to encode the given bytes. - - The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is - accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big - or little endian. Some editors or libraries may prepend a BOM. - - :param data: Bytes in unknown UTF encoding. - :return: UTF encoding name - """ - head = data[:4] - - if head[:3] == codecs.BOM_UTF8: - return "utf-8-sig" - - if b"\x00" not in head: - return "utf-8" - - if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE): - return "utf-32" - - if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE): - return "utf-16" - - if len(head) == 4: - if head[:3] == b"\x00\x00\x00": - return "utf-32-be" - - if head[::2] == b"\x00\x00": - return "utf-16-be" - - if head[1:] == b"\x00\x00\x00": - return "utf-32-le" - - if head[1::2] == b"\x00\x00": - return "utf-16-le" - - if len(head) == 2: - return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le" - - return "utf-8" - - -def dumps(obj, app=None, **kwargs): - """Serialize ``obj`` to a JSON-formatted string. If there is an - app context pushed, use the current app's configured encoder - (:attr:`~flask.Flask.json_encoder`), or fall back to the default - :class:`JSONEncoder`. - - Takes the same arguments as the built-in :func:`json.dumps`, and - does some extra configuration based on the application. If the - simplejson package is installed, it is preferred. - - :param obj: Object to serialize to JSON. - :param app: App instance to use to configure the JSON encoder. - Uses ``current_app`` if not given, and falls back to the default - encoder when not in an app context. - :param kwargs: Extra arguments passed to :func:`json.dumps`. - - .. versionchanged:: 1.0.3 - - ``app`` can be passed directly, rather than requiring an app - context for configuration. - """ - _dump_arg_defaults(kwargs, app=app) - encoding = kwargs.pop("encoding", None) - rv = _json.dumps(obj, **kwargs) - if encoding is not None and isinstance(rv, text_type): - rv = rv.encode(encoding) - return rv - - -def dump(obj, fp, app=None, **kwargs): - """Like :func:`dumps` but writes into a file object.""" - _dump_arg_defaults(kwargs, app=app) - encoding = kwargs.pop("encoding", None) - if encoding is not None: - fp = _wrap_writer_for_text(fp, encoding) - _json.dump(obj, fp, **kwargs) - - -def loads(s, app=None, **kwargs): - """Deserialize an object from a JSON-formatted string ``s``. If - there is an app context pushed, use the current app's configured - decoder (:attr:`~flask.Flask.json_decoder`), or fall back to the - default :class:`JSONDecoder`. 
- - Takes the same arguments as the built-in :func:`json.loads`, and - does some extra configuration based on the application. If the - simplejson package is installed, it is preferred. - - :param s: JSON string to deserialize. - :param app: App instance to use to configure the JSON decoder. - Uses ``current_app`` if not given, and falls back to the default - encoder when not in an app context. - :param kwargs: Extra arguments passed to :func:`json.dumps`. - - .. versionchanged:: 1.0.3 - - ``app`` can be passed directly, rather than requiring an app - context for configuration. - """ - _load_arg_defaults(kwargs, app=app) - if isinstance(s, bytes): - encoding = kwargs.pop("encoding", None) - if encoding is None: - encoding = detect_encoding(s) - s = s.decode(encoding) - return _json.loads(s, **kwargs) - - -def load(fp, app=None, **kwargs): - """Like :func:`loads` but reads from a file object.""" - _load_arg_defaults(kwargs, app=app) - if not PY2: - fp = _wrap_reader_for_text(fp, kwargs.pop("encoding", None) or "utf-8") - return _json.load(fp, **kwargs) - - -def htmlsafe_dumps(obj, **kwargs): - """Works exactly like :func:`dumps` but is safe for use in `` - - - - -
-""" -FOOTER = u"""\ - -
- -
-
-

Console Locked

-

- The console is locked and needs to be unlocked by entering the PIN. - You can find the PIN printed out on the standard output of your - shell that runs the server. -

-

PIN: - - -

-
-
- - -""" - -PAGE_HTML = ( - HEADER - + u"""\ -

%(exception_type)s

-
-

%(exception)s

-
-

Traceback (most recent call last)

-%(summary)s -
-
-

- - This is the Copy/Paste friendly version of the traceback. You can also paste this traceback into - a gist: - -

- -
-
-
- The debugger caught an exception in your WSGI application. You can now - look at the traceback which led to the error. - If you enable JavaScript you can also use additional features such as code - execution (if the evalex feature is enabled), automatic pasting of the - exceptions and much more. -
-""" - + FOOTER - + """ - -""" -) - -CONSOLE_HTML = ( - HEADER - + u"""\ -

Interactive Console

-
-In this console you can execute Python expressions in the context of the -application. The initial namespace was created by the debugger automatically. -
-
The Console requires JavaScript.
-""" - + FOOTER -) - -SUMMARY_HTML = u"""\ -
- %(title)s -
    %(frames)s
- %(description)s -
-""" - -FRAME_HTML = u"""\ -
-

File "%(filename)s", - line %(lineno)s, - in %(function_name)s

-
%(lines)s
-
-""" - -SOURCE_LINE_HTML = u"""\ - - %(lineno)s - %(code)s - -""" - - -def render_console_html(secret, evalex_trusted=True): - return CONSOLE_HTML % { - "evalex": "true", - "evalex_trusted": "true" if evalex_trusted else "false", - "console": "true", - "title": "Console", - "secret": secret, - "traceback_id": -1, - } - - -def get_current_traceback( - ignore_system_exceptions=False, show_hidden_frames=False, skip=0 -): - """Get the current exception info as `Traceback` object. Per default - calling this method will reraise system exceptions such as generator exit, - system exit or others. This behavior can be disabled by passing `False` - to the function as first parameter. - """ - exc_type, exc_value, tb = sys.exc_info() - if ignore_system_exceptions and exc_type in system_exceptions: - reraise(exc_type, exc_value, tb) - for _ in range_type(skip): - if tb.tb_next is None: - break - tb = tb.tb_next - tb = Traceback(exc_type, exc_value, tb) - if not show_hidden_frames: - tb.filter_hidden_frames() - return tb - - -class Line(object): - """Helper for the source renderer.""" - - __slots__ = ("lineno", "code", "in_frame", "current") - - def __init__(self, lineno, code): - self.lineno = lineno - self.code = code - self.in_frame = False - self.current = False - - @property - def classes(self): - rv = ["line"] - if self.in_frame: - rv.append("in-frame") - if self.current: - rv.append("current") - return rv - - def render(self): - return SOURCE_LINE_HTML % { - "classes": u" ".join(self.classes), - "lineno": self.lineno, - "code": escape(self.code), - } - - -class Traceback(object): - """Wraps a traceback.""" - - def __init__(self, exc_type, exc_value, tb): - self.exc_type = exc_type - self.exc_value = exc_value - self.tb = tb - - exception_type = exc_type.__name__ - if exc_type.__module__ not in {"builtins", "__builtin__", "exceptions"}: - exception_type = exc_type.__module__ + "." 
+ exception_type - self.exception_type = exception_type - - self.groups = [] - memo = set() - while True: - self.groups.append(Group(exc_type, exc_value, tb)) - memo.add(id(exc_value)) - if PY2: - break - exc_value = exc_value.__cause__ or exc_value.__context__ - if exc_value is None or id(exc_value) in memo: - break - exc_type = type(exc_value) - tb = exc_value.__traceback__ - self.groups.reverse() - self.frames = [frame for group in self.groups for frame in group.frames] - - def filter_hidden_frames(self): - """Remove the frames according to the paste spec.""" - for group in self.groups: - group.filter_hidden_frames() - - self.frames[:] = [frame for group in self.groups for frame in group.frames] - - @property - def is_syntax_error(self): - """Is it a syntax error?""" - return isinstance(self.exc_value, SyntaxError) - - @property - def exception(self): - """String representation of the final exception.""" - return self.groups[-1].exception - - def log(self, logfile=None): - """Log the ASCII traceback into a file object.""" - if logfile is None: - logfile = sys.stderr - tb = self.plaintext.rstrip() + u"\n" - logfile.write(to_native(tb, "utf-8", "replace")) - - def paste(self): - """Create a paste and return the paste id.""" - data = json.dumps( - { - "description": "Werkzeug Internal Server Error", - "public": False, - "files": {"traceback.txt": {"content": self.plaintext}}, - } - ).encode("utf-8") - try: - from urllib2 import urlopen - except ImportError: - from urllib.request import urlopen - rv = urlopen("https://api.github.com/gists", data=data) - resp = json.loads(rv.read().decode("utf-8")) - rv.close() - return {"url": resp["html_url"], "id": resp["id"]} - - def render_summary(self, include_title=True): - """Render the traceback for the interactive console.""" - title = "" - classes = ["traceback"] - if not self.frames: - classes.append("noframe-traceback") - frames = [] - else: - library_frames = sum(frame.is_library for frame in self.frames) - mark_lib = 0 < library_frames < len(self.frames) - frames = [group.render(mark_lib=mark_lib) for group in self.groups] - - if include_title: - if self.is_syntax_error: - title = u"Syntax Error" - else: - title = u"Traceback (most recent call last):" - - if self.is_syntax_error: - description_wrapper = u"
<pre class=syntaxerror>%s</pre>"
-        else:
-            description_wrapper = u"<blockquote>%s</blockquote>"
-
-        return SUMMARY_HTML % {
-            "classes": u" ".join(classes),
-            "title": u"<h3>%s</h3>
" % title if title else u"", - "frames": u"\n".join(frames), - "description": description_wrapper % escape(self.exception), - } - - def render_full(self, evalex=False, secret=None, evalex_trusted=True): - """Render the Full HTML page with the traceback info.""" - exc = escape(self.exception) - return PAGE_HTML % { - "evalex": "true" if evalex else "false", - "evalex_trusted": "true" if evalex_trusted else "false", - "console": "false", - "title": exc, - "exception": exc, - "exception_type": escape(self.exception_type), - "summary": self.render_summary(include_title=False), - "plaintext": escape(self.plaintext), - "plaintext_cs": re.sub("-{2,}", "-", self.plaintext), - "traceback_id": self.id, - "secret": secret, - } - - @cached_property - def plaintext(self): - return u"\n".join([group.render_text() for group in self.groups]) - - @property - def id(self): - return id(self) - - -class Group(object): - """A group of frames for an exception in a traceback. On Python 3, - if the exception has a ``__cause__`` or ``__context__``, there are - multiple exception groups. - """ - - def __init__(self, exc_type, exc_value, tb): - self.exc_type = exc_type - self.exc_value = exc_value - self.info = None - if not PY2: - if exc_value.__cause__ is not None: - self.info = ( - u"The above exception was the direct cause of the" - u" following exception" - ) - elif exc_value.__context__ is not None: - self.info = ( - u"During handling of the above exception, another" - u" exception occurred" - ) - - self.frames = [] - while tb is not None: - self.frames.append(Frame(exc_type, exc_value, tb)) - tb = tb.tb_next - - def filter_hidden_frames(self): - new_frames = [] - hidden = False - - for frame in self.frames: - hide = frame.hide - if hide in ("before", "before_and_this"): - new_frames = [] - hidden = False - if hide == "before_and_this": - continue - elif hide in ("reset", "reset_and_this"): - hidden = False - if hide == "reset_and_this": - continue - elif hide in ("after", "after_and_this"): - hidden = True - if hide == "after_and_this": - continue - elif hide or hidden: - continue - new_frames.append(frame) - - # if we only have one frame and that frame is from the codeop - # module, remove it. - if len(new_frames) == 1 and self.frames[0].module == "codeop": - del self.frames[:] - - # if the last frame is missing something went terrible wrong :( - elif self.frames[-1] in new_frames: - self.frames[:] = new_frames - - @property - def exception(self): - """String representation of the exception.""" - buf = traceback.format_exception_only(self.exc_type, self.exc_value) - rv = "".join(buf).strip() - return to_unicode(rv, "utf-8", "replace") - - def render(self, mark_lib=True): - out = [] - if self.info is not None: - out.append(u'
<li><div class="exc-divider">%s:</div>
    ' % self.info) - for frame in self.frames: - out.append( - u"%s" - % ( - u' title="%s"' % escape(frame.info) if frame.info else u"", - frame.render(mark_lib=mark_lib), - ) - ) - return u"\n".join(out) - - def render_text(self): - out = [] - if self.info is not None: - out.append(u"\n%s:\n" % self.info) - out.append(u"Traceback (most recent call last):") - for frame in self.frames: - out.append(frame.render_text()) - out.append(self.exception) - return u"\n".join(out) - - -class Frame(object): - """A single frame in a traceback.""" - - def __init__(self, exc_type, exc_value, tb): - self.lineno = tb.tb_lineno - self.function_name = tb.tb_frame.f_code.co_name - self.locals = tb.tb_frame.f_locals - self.globals = tb.tb_frame.f_globals - - fn = inspect.getsourcefile(tb) or inspect.getfile(tb) - if fn[-4:] in (".pyo", ".pyc"): - fn = fn[:-1] - # if it's a file on the file system resolve the real filename. - if os.path.isfile(fn): - fn = os.path.realpath(fn) - self.filename = to_unicode(fn, get_filesystem_encoding()) - self.module = self.globals.get("__name__", self.locals.get("__name__")) - self.loader = self.globals.get("__loader__", self.locals.get("__loader__")) - self.code = tb.tb_frame.f_code - - # support for paste's traceback extensions - self.hide = self.locals.get("__traceback_hide__", False) - info = self.locals.get("__traceback_info__") - if info is not None: - info = to_unicode(info, "utf-8", "replace") - self.info = info - - def render(self, mark_lib=True): - """Render a single frame in a traceback.""" - return FRAME_HTML % { - "id": self.id, - "filename": escape(self.filename), - "lineno": self.lineno, - "function_name": escape(self.function_name), - "lines": self.render_line_context(), - "library": "library" if mark_lib and self.is_library else "", - } - - @cached_property - def is_library(self): - return any( - self.filename.startswith(path) for path in sysconfig.get_paths().values() - ) - - def render_text(self): - return u' File "%s", line %s, in %s\n %s' % ( - self.filename, - self.lineno, - self.function_name, - self.current_line.strip(), - ) - - def render_line_context(self): - before, current, after = self.get_context_lines() - rv = [] - - def render_line(line, cls): - line = line.expandtabs().rstrip() - stripped_line = line.strip() - prefix = len(line) - len(stripped_line) - rv.append( - '
<pre class="line %s"><span class="ws">%s</span>%s</pre>
    ' - % (cls, " " * prefix, escape(stripped_line) or " ") - ) - - for line in before: - render_line(line, "before") - render_line(current, "current") - for line in after: - render_line(line, "after") - - return "\n".join(rv) - - def get_annotated_lines(self): - """Helper function that returns lines with extra information.""" - lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)] - - # find function definition and mark lines - if hasattr(self.code, "co_firstlineno"): - lineno = self.code.co_firstlineno - 1 - while lineno > 0: - if _funcdef_re.match(lines[lineno].code): - break - lineno -= 1 - try: - offset = len(inspect.getblock([x.code + "\n" for x in lines[lineno:]])) - except TokenError: - offset = 0 - for line in lines[lineno : lineno + offset]: - line.in_frame = True - - # mark current line - try: - lines[self.lineno - 1].current = True - except IndexError: - pass - - return lines - - def eval(self, code, mode="single"): - """Evaluate code in the context of the frame.""" - if isinstance(code, string_types): - if PY2 and isinstance(code, text_type): # noqa - code = UTF8_COOKIE + code.encode("utf-8") - code = compile(code, "", mode) - return eval(code, self.globals, self.locals) - - @cached_property - def sourcelines(self): - """The sourcecode of the file as list of unicode strings.""" - # get sourcecode from loader or file - source = None - if self.loader is not None: - try: - if hasattr(self.loader, "get_source"): - source = self.loader.get_source(self.module) - elif hasattr(self.loader, "get_source_by_code"): - source = self.loader.get_source_by_code(self.code) - except Exception: - # we munch the exception so that we don't cause troubles - # if the loader is broken. - pass - - if source is None: - try: - with open( - to_native(self.filename, get_filesystem_encoding()), mode="rb" - ) as f: - source = f.read() - except IOError: - return [] - - # already unicode? return right away - if isinstance(source, text_type): - return source.splitlines() - - # yes. 
it should be ascii, but we don't want to reject too many - # characters in the debugger if something breaks - charset = "utf-8" - if source.startswith(UTF8_COOKIE): - source = source[3:] - else: - for idx, match in enumerate(_line_re.finditer(source)): - match = _coding_re.search(match.group()) - if match is not None: - charset = match.group(1) - break - if idx > 1: - break - - # on broken cookies we fall back to utf-8 too - charset = to_native(charset) - try: - codecs.lookup(charset) - except LookupError: - charset = "utf-8" - - return source.decode(charset, "replace").splitlines() - - def get_context_lines(self, context=5): - before = self.sourcelines[self.lineno - context - 1 : self.lineno - 1] - past = self.sourcelines[self.lineno : self.lineno + context] - return (before, self.current_line, past) - - @property - def current_line(self): - try: - return self.sourcelines[self.lineno - 1] - except IndexError: - return u"" - - @cached_property - def console(self): - return Console(self.globals, self.locals) - - @property - def id(self): - return id(self) diff --git a/venv/lib/python3.7/site-packages/werkzeug/exceptions.py b/venv/lib/python3.7/site-packages/werkzeug/exceptions.py deleted file mode 100644 index 82e99c2..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/exceptions.py +++ /dev/null @@ -1,829 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.exceptions - ~~~~~~~~~~~~~~~~~~~ - - This module implements a number of Python exceptions you can raise from - within your views to trigger a standard non-200 response. - - - Usage Example - ------------- - - :: - - from werkzeug.wrappers import BaseRequest - from werkzeug.wsgi import responder - from werkzeug.exceptions import HTTPException, NotFound - - def view(request): - raise NotFound() - - @responder - def application(environ, start_response): - request = BaseRequest(environ) - try: - return view(request) - except HTTPException as e: - return e - - - As you can see from this example those exceptions are callable WSGI - applications. Because of Python 2.4 compatibility those do not extend - from the response objects but only from the python exception class. - - As a matter of fact they are not Werkzeug response objects. However you - can get a response object by calling ``get_response()`` on a HTTP - exception. - - Keep in mind that you have to pass an environment to ``get_response()`` - because some errors fetch additional information from the WSGI - environment. - - If you want to hook in a different exception page to say, a 404 status - code, you can add a second except for a specific subclass of an error:: - - @responder - def application(environ, start_response): - request = BaseRequest(environ) - try: - return view(request) - except NotFound, e: - return not_found(request) - except HTTPException, e: - return e - - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import sys -from datetime import datetime - -from ._compat import implements_to_string -from ._compat import integer_types -from ._compat import iteritems -from ._compat import text_type -from ._internal import _get_environ -from .utils import escape - - -@implements_to_string -class HTTPException(Exception): - """Baseclass for all HTTP exceptions. This exception can be called as WSGI - application to render a default error page or you can catch the subclasses - of it independently and render nicer error messages. 
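# --- editor's aside: the docstring example above uses Python 2 ``except``
# --- syntax; the same flow in Python 3 looks like this.
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.wrappers import BaseRequest

def view(request):
    raise NotFound()

def application(environ, start_response):
    request = BaseRequest(environ)
    try:
        response = view(request)
    except HTTPException as e:
        response = e                # HTTP exceptions are WSGI apps themselves
    return response(environ, start_response)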
- """ - - code = None - description = None - - def __init__(self, description=None, response=None): - super(HTTPException, self).__init__() - if description is not None: - self.description = description - self.response = response - - @classmethod - def wrap(cls, exception, name=None): - """Create an exception that is a subclass of the calling HTTP - exception and the ``exception`` argument. - - The first argument to the class will be passed to the - wrapped ``exception``, the rest to the HTTP exception. If - ``e.args`` is not empty and ``e.show_exception`` is ``True``, - the wrapped exception message is added to the HTTP error - description. - - .. versionchanged:: 0.15.5 - The ``show_exception`` attribute controls whether the - description includes the wrapped exception message. - - .. versionchanged:: 0.15.0 - The description includes the wrapped exception message. - """ - - class newcls(cls, exception): - _description = cls.description - show_exception = False - - def __init__(self, arg=None, *args, **kwargs): - super(cls, self).__init__(*args, **kwargs) - - if arg is None: - exception.__init__(self) - else: - exception.__init__(self, arg) - - @property - def description(self): - if self.show_exception: - return "{}\n{}: {}".format( - self._description, exception.__name__, exception.__str__(self) - ) - - return self._description - - @description.setter - def description(self, value): - self._description = value - - newcls.__module__ = sys._getframe(1).f_globals.get("__name__") - name = name or cls.__name__ + exception.__name__ - newcls.__name__ = newcls.__qualname__ = name - return newcls - - @property - def name(self): - """The status name.""" - from .http import HTTP_STATUS_CODES - - return HTTP_STATUS_CODES.get(self.code, "Unknown Error") - - def get_description(self, environ=None): - """Get the description.""" - return u"

<p>%s</p>" % escape(self.description).replace("\n", "<br>")
-
-    def get_body(self, environ=None):
-        """Get the HTML body."""
-        return text_type(
-            (
-                u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
-                u"<title>%(code)s %(name)s</title>\n"
-                u"<h1>%(name)s</h1>
    \n" - u"%(description)s\n" - ) - % { - "code": self.code, - "name": escape(self.name), - "description": self.get_description(environ), - } - ) - - def get_headers(self, environ=None): - """Get a list of headers.""" - return [("Content-Type", "text/html; charset=utf-8")] - - def get_response(self, environ=None): - """Get a response object. If one was passed to the exception - it's returned directly. - - :param environ: the optional environ for the request. This - can be used to modify the response depending - on how the request looked like. - :return: a :class:`Response` object or a subclass thereof. - """ - from .wrappers.response import Response - - if self.response is not None: - return self.response - if environ is not None: - environ = _get_environ(environ) - headers = self.get_headers(environ) - return Response(self.get_body(environ), self.code, headers) - - def __call__(self, environ, start_response): - """Call the exception as WSGI application. - - :param environ: the WSGI environment. - :param start_response: the response callable provided by the WSGI - server. - """ - response = self.get_response(environ) - return response(environ, start_response) - - def __str__(self): - code = self.code if self.code is not None else "???" - return "%s %s: %s" % (code, self.name, self.description) - - def __repr__(self): - code = self.code if self.code is not None else "???" - return "<%s '%s: %s'>" % (self.__class__.__name__, code, self.name) - - -class BadRequest(HTTPException): - """*400* `Bad Request` - - Raise if the browser sends something to the application the application - or server cannot handle. - """ - - code = 400 - description = ( - "The browser (or proxy) sent a request that this server could " - "not understand." - ) - - -class ClientDisconnected(BadRequest): - """Internal exception that is raised if Werkzeug detects a disconnected - client. Since the client is already gone at that point attempting to - send the error message to the client might not work and might ultimately - result in another exception in the server. Mainly this is here so that - it is silenced by default as far as Werkzeug is concerned. - - Since disconnections cannot be reliably detected and are unspecified - by WSGI to a large extent this might or might not be raised if a client - is gone. - - .. versionadded:: 0.8 - """ - - -class SecurityError(BadRequest): - """Raised if something triggers a security error. This is otherwise - exactly like a bad request error. - - .. versionadded:: 0.9 - """ - - -class BadHost(BadRequest): - """Raised if the submitted host is badly formatted. - - .. versionadded:: 0.11.2 - """ - - -class Unauthorized(HTTPException): - """*401* ``Unauthorized`` - - Raise if the user is not authorized to access a resource. - - The ``www_authenticate`` argument should be used to set the - ``WWW-Authenticate`` header. This is used for HTTP basic auth and - other schemes. Use :class:`~werkzeug.datastructures.WWWAuthenticate` - to create correctly formatted values. Strictly speaking a 401 - response is invalid if it doesn't provide at least one value for - this header, although real clients typically don't care. - - :param description: Override the default message used for the body - of the response. - :param www-authenticate: A single value, or list of values, for the - WWW-Authenticate header. - - .. versionchanged:: 0.15.3 - If the ``www_authenticate`` argument is not set, the - ``WWW-Authenticate`` header is not set. - - .. versionchanged:: 0.15.3 - The ``response`` argument was restored. 
- - .. versionchanged:: 0.15.1 - ``description`` was moved back as the first argument, restoring - its previous position. - - .. versionchanged:: 0.15.0 - ``www_authenticate`` was added as the first argument, ahead of - ``description``. - """ - - code = 401 - description = ( - "The server could not verify that you are authorized to access" - " the URL requested. You either supplied the wrong credentials" - " (e.g. a bad password), or your browser doesn't understand" - " how to supply the credentials required." - ) - - def __init__(self, description=None, response=None, www_authenticate=None): - HTTPException.__init__(self, description, response) - - if www_authenticate is not None: - if not isinstance(www_authenticate, (tuple, list)): - www_authenticate = (www_authenticate,) - - self.www_authenticate = www_authenticate - - def get_headers(self, environ=None): - headers = HTTPException.get_headers(self, environ) - if self.www_authenticate: - headers.append( - ("WWW-Authenticate", ", ".join([str(x) for x in self.www_authenticate])) - ) - return headers - - -class Forbidden(HTTPException): - """*403* `Forbidden` - - Raise if the user doesn't have the permission for the requested resource - but was authenticated. - """ - - code = 403 - description = ( - "You don't have the permission to access the requested" - " resource. It is either read-protected or not readable by the" - " server." - ) - - -class NotFound(HTTPException): - """*404* `Not Found` - - Raise if a resource does not exist and never existed. - """ - - code = 404 - description = ( - "The requested URL was not found on the server. If you entered" - " the URL manually please check your spelling and try again." - ) - - -class MethodNotAllowed(HTTPException): - """*405* `Method Not Allowed` - - Raise if the server used a method the resource does not handle. For - example `POST` if the resource is view only. Especially useful for REST. - - The first argument for this exception should be a list of allowed methods. - Strictly speaking the response would be invalid if you don't provide valid - methods in the header which you can do with that list. - """ - - code = 405 - description = "The method is not allowed for the requested URL." - - def __init__(self, valid_methods=None, description=None): - """Takes an optional list of valid http methods - starting with werkzeug 0.3 the list will be mandatory.""" - HTTPException.__init__(self, description) - self.valid_methods = valid_methods - - def get_headers(self, environ=None): - headers = HTTPException.get_headers(self, environ) - if self.valid_methods: - headers.append(("Allow", ", ".join(self.valid_methods))) - return headers - - -class NotAcceptable(HTTPException): - """*406* `Not Acceptable` - - Raise if the server can't return any content conforming to the - `Accept` headers of the client. - """ - - code = 406 - - description = ( - "The resource identified by the request is only capable of" - " generating response entities which have content" - " characteristics not acceptable according to the accept" - " headers sent in the request." - ) - - -class RequestTimeout(HTTPException): - """*408* `Request Timeout` - - Raise to signalize a timeout. - """ - - code = 408 - description = ( - "The server closed the network connection because the browser" - " didn't finish the request within the specified time." - ) - - -class Conflict(HTTPException): - """*409* `Conflict` - - Raise to signal that a request cannot be completed because it conflicts - with the current state on the server. - - .. 
versionadded:: 0.7 - """ - - code = 409 - description = ( - "A conflict happened while processing the request. The" - " resource might have been modified while the request was being" - " processed." - ) - - -class Gone(HTTPException): - """*410* `Gone` - - Raise if a resource existed previously and went away without new location. - """ - - code = 410 - description = ( - "The requested URL is no longer available on this server and" - " there is no forwarding address. If you followed a link from a" - " foreign page, please contact the author of this page." - ) - - -class LengthRequired(HTTPException): - """*411* `Length Required` - - Raise if the browser submitted data but no ``Content-Length`` header which - is required for the kind of processing the server does. - """ - - code = 411 - description = ( - "A request with this method requires a valid Content-" - "Length header." - ) - - -class PreconditionFailed(HTTPException): - """*412* `Precondition Failed` - - Status code used in combination with ``If-Match``, ``If-None-Match``, or - ``If-Unmodified-Since``. - """ - - code = 412 - description = ( - "The precondition on the request for the URL failed positive evaluation." - ) - - -class RequestEntityTooLarge(HTTPException): - """*413* `Request Entity Too Large` - - The status code one should return if the data submitted exceeded a given - limit. - """ - - code = 413 - description = "The data value transmitted exceeds the capacity limit." - - -class RequestURITooLarge(HTTPException): - """*414* `Request URI Too Large` - - Like *413* but for too long URLs. - """ - - code = 414 - description = ( - "The length of the requested URL exceeds the capacity limit for" - " this server. The request cannot be processed." - ) - - -class UnsupportedMediaType(HTTPException): - """*415* `Unsupported Media Type` - - The status code returned if the server is unable to handle the media type - the client transmitted. - """ - - code = 415 - description = ( - "The server does not support the media type transmitted in the request." - ) - - -class RequestedRangeNotSatisfiable(HTTPException): - """*416* `Requested Range Not Satisfiable` - - The client asked for an invalid part of the file. - - .. versionadded:: 0.7 - """ - - code = 416 - description = "The server cannot provide the requested range." - - def __init__(self, length=None, units="bytes", description=None): - """Takes an optional `Content-Range` header value based on ``length`` - parameter. - """ - HTTPException.__init__(self, description) - self.length = length - self.units = units - - def get_headers(self, environ=None): - headers = HTTPException.get_headers(self, environ) - if self.length is not None: - headers.append(("Content-Range", "%s */%d" % (self.units, self.length))) - return headers - - -class ExpectationFailed(HTTPException): - """*417* `Expectation Failed` - - The server cannot meet the requirements of the Expect request-header. - - .. versionadded:: 0.7 - """ - - code = 417 - description = "The server could not meet the requirements of the Expect header" - - -class ImATeapot(HTTPException): - """*418* `I'm a teapot` - - The server should return this if it is a teapot and someone attempted - to brew coffee with it. - - .. versionadded:: 0.7 - """ - - code = 418 - description = "This server is a teapot, not a coffee machine" - - -class UnprocessableEntity(HTTPException): - """*422* `Unprocessable Entity` - - Used if the request is well formed, but the instructions are otherwise - incorrect. 
- """ - - code = 422 - description = ( - "The request was well-formed but was unable to be followed due" - " to semantic errors." - ) - - -class Locked(HTTPException): - """*423* `Locked` - - Used if the resource that is being accessed is locked. - """ - - code = 423 - description = "The resource that is being accessed is locked." - - -class FailedDependency(HTTPException): - """*424* `Failed Dependency` - - Used if the method could not be performed on the resource - because the requested action depended on another action and that action failed. - """ - - code = 424 - description = ( - "The method could not be performed on the resource because the" - " requested action depended on another action and that action" - " failed." - ) - - -class PreconditionRequired(HTTPException): - """*428* `Precondition Required` - - The server requires this request to be conditional, typically to prevent - the lost update problem, which is a race condition between two or more - clients attempting to update a resource through PUT or DELETE. By requiring - each client to include a conditional header ("If-Match" or "If-Unmodified- - Since") with the proper value retained from a recent GET request, the - server ensures that each client has at least seen the previous revision of - the resource. - """ - - code = 428 - description = ( - "This request is required to be conditional; try using" - ' "If-Match" or "If-Unmodified-Since".' - ) - - -class _RetryAfter(HTTPException): - """Adds an optional ``retry_after`` parameter which will set the - ``Retry-After`` header. May be an :class:`int` number of seconds or - a :class:`~datetime.datetime`. - """ - - def __init__(self, description=None, response=None, retry_after=None): - super(_RetryAfter, self).__init__(description, response) - self.retry_after = retry_after - - def get_headers(self, environ=None): - headers = super(_RetryAfter, self).get_headers(environ) - - if self.retry_after: - if isinstance(self.retry_after, datetime): - from .http import http_date - - value = http_date(self.retry_after) - else: - value = str(self.retry_after) - - headers.append(("Retry-After", value)) - - return headers - - -class TooManyRequests(_RetryAfter): - """*429* `Too Many Requests` - - The server is limiting the rate at which this user receives - responses, and this request exceeds that rate. (The server may use - any convenient method to identify users and their request rates). - The server may include a "Retry-After" header to indicate how long - the user should wait before retrying. - - :param retry_after: If given, set the ``Retry-After`` header to this - value. May be an :class:`int` number of seconds or a - :class:`~datetime.datetime`. - - .. versionchanged:: 1.0 - Added ``retry_after`` parameter. - """ - - code = 429 - description = "This user has exceeded an allotted request count. Try again later." - - -class RequestHeaderFieldsTooLarge(HTTPException): - """*431* `Request Header Fields Too Large` - - The server refuses to process the request because the header fields are too - large. One or more individual fields may be too large, or the set of all - headers is too large. - """ - - code = 431 - description = "One or more header fields exceeds the maximum size." - - -class UnavailableForLegalReasons(HTTPException): - """*451* `Unavailable For Legal Reasons` - - This status code indicates that the server is denying access to the - resource as a consequence of a legal demand. - """ - - code = 451 - description = "Unavailable for legal reasons." 
- - -class InternalServerError(HTTPException): - """*500* `Internal Server Error` - - Raise if an internal server error occurred. This is a good fallback if an - unknown error occurred in the dispatcher. - - .. versionchanged:: 1.0.0 - Added the :attr:`original_exception` attribute. - """ - - code = 500 - description = ( - "The server encountered an internal error and was unable to" - " complete your request. Either the server is overloaded or" - " there is an error in the application." - ) - - def __init__(self, description=None, response=None, original_exception=None): - #: The original exception that caused this 500 error. Can be - #: used by frameworks to provide context when handling - #: unexpected errors. - self.original_exception = original_exception - super(InternalServerError, self).__init__( - description=description, response=response - ) - - -class NotImplemented(HTTPException): - """*501* `Not Implemented` - - Raise if the application does not support the action requested by the - browser. - """ - - code = 501 - description = "The server does not support the action requested by the browser." - - -class BadGateway(HTTPException): - """*502* `Bad Gateway` - - If you do proxying in your application you should return this status code - if you received an invalid response from the upstream server it accessed - in attempting to fulfill the request. - """ - - code = 502 - description = ( - "The proxy server received an invalid response from an upstream server." - ) - - -class ServiceUnavailable(_RetryAfter): - """*503* `Service Unavailable` - - Status code you should return if a service is temporarily - unavailable. - - :param retry_after: If given, set the ``Retry-After`` header to this - value. May be an :class:`int` number of seconds or a - :class:`~datetime.datetime`. - - .. versionchanged:: 1.0 - Added ``retry_after`` parameter. - """ - - code = 503 - description = ( - "The server is temporarily unable to service your request due" - " to maintenance downtime or capacity problems. Please try" - " again later." - ) - - -class GatewayTimeout(HTTPException): - """*504* `Gateway Timeout` - - Status code you should return if a connection to an upstream server - times out. - """ - - code = 504 - description = "The connection to an upstream server timed out." - - -class HTTPVersionNotSupported(HTTPException): - """*505* `HTTP Version Not Supported` - - The server does not support the HTTP protocol version used in the request. - """ - - code = 505 - description = ( - "The server does not support the HTTP protocol version used in the request." - ) - - -default_exceptions = {} -__all__ = ["HTTPException"] - - -def _find_exceptions(): - for _name, obj in iteritems(globals()): - try: - is_http_exception = issubclass(obj, HTTPException) - except TypeError: - is_http_exception = False - if not is_http_exception or obj.code is None: - continue - __all__.append(obj.__name__) - old_obj = default_exceptions.get(obj.code, None) - if old_obj is not None and issubclass(obj, old_obj): - continue - default_exceptions[obj.code] = obj - - -_find_exceptions() -del _find_exceptions - - -class Aborter(object): - """When passed a dict of code -> exception items it can be used as - callable that raises exceptions. If the first argument to the - callable is an integer it will be looked up in the mapping, if it's - a WSGI application it will be raised in a proxy exception. - - The rest of the arguments are forwarded to the exception constructor. 
- """ - - def __init__(self, mapping=None, extra=None): - if mapping is None: - mapping = default_exceptions - self.mapping = dict(mapping) - if extra is not None: - self.mapping.update(extra) - - def __call__(self, code, *args, **kwargs): - if not args and not kwargs and not isinstance(code, integer_types): - raise HTTPException(response=code) - if code not in self.mapping: - raise LookupError("no exception for %r" % code) - raise self.mapping[code](*args, **kwargs) - - -def abort(status, *args, **kwargs): - """Raises an :py:exc:`HTTPException` for the given status code or WSGI - application. - - If a status code is given, it will be looked up in the list of - exceptions and will raise that exception. If passed a WSGI application, - it will wrap it in a proxy WSGI exception and raise that:: - - abort(404) # 404 Not Found - abort(Response('Hello World')) - - """ - return _aborter(status, *args, **kwargs) - - -_aborter = Aborter() - -#: An exception that is used to signal both a :exc:`KeyError` and a -#: :exc:`BadRequest`. Used by many of the datastructures. -BadRequestKeyError = BadRequest.wrap(KeyError) diff --git a/venv/lib/python3.7/site-packages/werkzeug/filesystem.py b/venv/lib/python3.7/site-packages/werkzeug/filesystem.py deleted file mode 100644 index d016cae..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/filesystem.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.filesystem - ~~~~~~~~~~~~~~~~~~~ - - Various utilities for the local filesystem. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import codecs -import sys -import warnings - -# We do not trust traditional unixes. -has_likely_buggy_unicode_filesystem = ( - sys.platform.startswith("linux") or "bsd" in sys.platform -) - - -def _is_ascii_encoding(encoding): - """Given an encoding this figures out if the encoding is actually ASCII (which - is something we don't actually want in most cases). This is necessary - because ASCII comes under many names such as ANSI_X3.4-1968. - """ - if encoding is None: - return False - try: - return codecs.lookup(encoding).name == "ascii" - except LookupError: - return False - - -class BrokenFilesystemWarning(RuntimeWarning, UnicodeWarning): - """The warning used by Werkzeug to signal a broken filesystem. Will only be - used once per runtime.""" - - -_warned_about_filesystem_encoding = False - - -def get_filesystem_encoding(): - """Returns the filesystem encoding that should be used. Note that this is - different from the Python understanding of the filesystem encoding which - might be deeply flawed. Do not use this value against Python's unicode APIs - because it might be different. See :ref:`filesystem-encoding` for the exact - behavior. - - The concept of a filesystem encoding in generally is not something you - should rely on. As such if you ever need to use this function except for - writing wrapper code reconsider. 
- """ - global _warned_about_filesystem_encoding - rv = sys.getfilesystemencoding() - if has_likely_buggy_unicode_filesystem and not rv or _is_ascii_encoding(rv): - if not _warned_about_filesystem_encoding: - warnings.warn( - "Detected a misconfigured UNIX filesystem: Will use" - " UTF-8 as filesystem encoding instead of {0!r}".format(rv), - BrokenFilesystemWarning, - ) - _warned_about_filesystem_encoding = True - return "utf-8" - return rv diff --git a/venv/lib/python3.7/site-packages/werkzeug/formparser.py b/venv/lib/python3.7/site-packages/werkzeug/formparser.py deleted file mode 100644 index ffdb9b0..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/formparser.py +++ /dev/null @@ -1,584 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.formparser - ~~~~~~~~~~~~~~~~~~~ - - This module implements the form parsing. It supports url-encoded forms - as well as non-nested multipart uploads. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import codecs -import re -from functools import update_wrapper -from itertools import chain -from itertools import repeat -from itertools import tee - -from . import exceptions -from ._compat import BytesIO -from ._compat import text_type -from ._compat import to_native -from .datastructures import FileStorage -from .datastructures import Headers -from .datastructures import MultiDict -from .http import parse_options_header -from .urls import url_decode_stream -from .wsgi import get_content_length -from .wsgi import get_input_stream -from .wsgi import make_line_iter - -# there are some platforms where SpooledTemporaryFile is not available. -# In that case we need to provide a fallback. -try: - from tempfile import SpooledTemporaryFile -except ImportError: - from tempfile import TemporaryFile - - SpooledTemporaryFile = None - - -#: an iterator that yields empty strings -_empty_string_iter = repeat("") - -#: a regular expression for multipart boundaries -_multipart_boundary_re = re.compile("^[ -~]{0,200}[!-~]$") - -#: supported http encodings that are also available in python we support -#: for multipart messages. -_supported_multipart_encodings = frozenset(["base64", "quoted-printable"]) - - -def default_stream_factory( - total_content_length, filename, content_type, content_length=None -): - """The stream factory that is used per default.""" - max_size = 1024 * 500 - if SpooledTemporaryFile is not None: - return SpooledTemporaryFile(max_size=max_size, mode="wb+") - if total_content_length is None or total_content_length > max_size: - return TemporaryFile("wb+") - return BytesIO() - - -def parse_form_data( - environ, - stream_factory=None, - charset="utf-8", - errors="replace", - max_form_memory_size=None, - max_content_length=None, - cls=None, - silent=True, -): - """Parse the form data in the environ and return it as tuple in the form - ``(stream, form, files)``. You should only call this method if the - transport method is `POST`, `PUT`, or `PATCH`. - - If the mimetype of the data transmitted is `multipart/form-data` the - files multidict will be filled with `FileStorage` objects. If the - mimetype is unknown the input stream is wrapped and returned as first - argument, else the stream is empty. - - This is a shortcut for the common usage of :class:`FormDataParser`. - - Have a look at :ref:`dealing-with-request-data` for more details. - - .. versionadded:: 0.5 - The `max_form_memory_size`, `max_content_length` and - `cls` parameters were added. - - .. versionadded:: 0.5.1 - The optional `silent` flag was added. 
- - :param environ: the WSGI environment to be used for parsing. - :param stream_factory: An optional callable that returns a new read and - writeable file descriptor. This callable works - the same as :meth:`~BaseResponse._get_file_stream`. - :param charset: The character set for URL and url encoded form data. - :param errors: The encoding error behavior. - :param max_form_memory_size: the maximum number of bytes to be accepted for - in-memory stored form data. If the data - exceeds the value specified an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param max_content_length: If this is provided and the transmitted data - is longer than this value an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param silent: If set to False parsing errors will not be caught. - :return: A tuple in the form ``(stream, form, files)``. - """ - return FormDataParser( - stream_factory, - charset, - errors, - max_form_memory_size, - max_content_length, - cls, - silent, - ).parse_from_environ(environ) - - -def exhaust_stream(f): - """Helper decorator for methods that exhausts the stream on return.""" - - def wrapper(self, stream, *args, **kwargs): - try: - return f(self, stream, *args, **kwargs) - finally: - exhaust = getattr(stream, "exhaust", None) - if exhaust is not None: - exhaust() - else: - while 1: - chunk = stream.read(1024 * 64) - if not chunk: - break - - return update_wrapper(wrapper, f) - - -class FormDataParser(object): - """This class implements parsing of form data for Werkzeug. By itself - it can parse multipart and url encoded form data. It can be subclassed - and extended but for most mimetypes it is a better idea to use the - untouched stream and expose it as separate attributes on a request - object. - - .. versionadded:: 0.8 - - :param stream_factory: An optional callable that returns a new read and - writeable file descriptor. This callable works - the same as :meth:`~BaseResponse._get_file_stream`. - :param charset: The character set for URL and url encoded form data. - :param errors: The encoding error behavior. - :param max_form_memory_size: the maximum number of bytes to be accepted for - in-memory stored form data. If the data - exceeds the value specified an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param max_content_length: If this is provided and the transmitted data - is longer than this value an - :exc:`~exceptions.RequestEntityTooLarge` - exception is raised. - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param silent: If set to False parsing errors will not be caught. - """ - - def __init__( - self, - stream_factory=None, - charset="utf-8", - errors="replace", - max_form_memory_size=None, - max_content_length=None, - cls=None, - silent=True, - ): - if stream_factory is None: - stream_factory = default_stream_factory - self.stream_factory = stream_factory - self.charset = charset - self.errors = errors - self.max_form_memory_size = max_form_memory_size - self.max_content_length = max_content_length - if cls is None: - cls = MultiDict - self.cls = cls - self.silent = silent - - def get_parse_func(self, mimetype, options): - return self.parse_functions.get(mimetype) - - def parse_from_environ(self, environ): - """Parses the information from the environment as form data. 
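As a rough illustration of `parse_form_data`, a sketch against the 1.0-era API shown here; the environ keys below are roughly the minimum a WSGI server would provide:

```python
from io import BytesIO
from werkzeug.formparser import parse_form_data

body = b"name=watermark&kind=png"
environ = {
    "REQUEST_METHOD": "POST",
    "CONTENT_TYPE": "application/x-www-form-urlencoded",
    "CONTENT_LENGTH": str(len(body)),
    "wsgi.input": BytesIO(body),
}
stream, form, files = parse_form_data(environ)
print(form["name"])  # 'watermark'
print(dict(files))   # {} -- no file parts in a urlencoded request
```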
- - :param environ: the WSGI environment to be used for parsing. - :return: A tuple in the form ``(stream, form, files)``. - """ - content_type = environ.get("CONTENT_TYPE", "") - content_length = get_content_length(environ) - mimetype, options = parse_options_header(content_type) - return self.parse(get_input_stream(environ), mimetype, content_length, options) - - def parse(self, stream, mimetype, content_length, options=None): - """Parses the information from the given stream, mimetype, - content length and mimetype parameters. - - :param stream: an input stream - :param mimetype: the mimetype of the data - :param content_length: the content length of the incoming data - :param options: optional mimetype parameters (used for - the multipart boundary for instance) - :return: A tuple in the form ``(stream, form, files)``. - """ - if ( - self.max_content_length is not None - and content_length is not None - and content_length > self.max_content_length - ): - raise exceptions.RequestEntityTooLarge() - if options is None: - options = {} - - parse_func = self.get_parse_func(mimetype, options) - if parse_func is not None: - try: - return parse_func(self, stream, mimetype, content_length, options) - except ValueError: - if not self.silent: - raise - - return stream, self.cls(), self.cls() - - @exhaust_stream - def _parse_multipart(self, stream, mimetype, content_length, options): - parser = MultiPartParser( - self.stream_factory, - self.charset, - self.errors, - max_form_memory_size=self.max_form_memory_size, - cls=self.cls, - ) - boundary = options.get("boundary") - if boundary is None: - raise ValueError("Missing boundary") - if isinstance(boundary, text_type): - boundary = boundary.encode("ascii") - form, files = parser.parse(stream, boundary, content_length) - return stream, form, files - - @exhaust_stream - def _parse_urlencoded(self, stream, mimetype, content_length, options): - if ( - self.max_form_memory_size is not None - and content_length is not None - and content_length > self.max_form_memory_size - ): - raise exceptions.RequestEntityTooLarge() - form = url_decode_stream(stream, self.charset, errors=self.errors, cls=self.cls) - return stream, form, self.cls() - - #: mapping of mimetypes to parsing functions - parse_functions = { - "multipart/form-data": _parse_multipart, - "application/x-www-form-urlencoded": _parse_urlencoded, - "application/x-url-encoded": _parse_urlencoded, - } - - -def is_valid_multipart_boundary(boundary): - """Checks if the string given is a valid multipart boundary.""" - return _multipart_boundary_re.match(boundary) is not None - - -def _line_parse(line): - """Removes line ending characters and returns a tuple (`stripped_line`, - `is_terminated`). - """ - if line[-2:] in ["\r\n", b"\r\n"]: - return line[:-2], True - elif line[-1:] in ["\r", "\n", b"\r", b"\n"]: - return line[:-1], True - return line, False - - -def parse_multipart_headers(iterable): - """Parses multipart headers from an iterable that yields lines (including - the trailing newline symbol). The iterable has to be newline terminated. - - The iterable will stop at the line where the headers ended so it can be - further consumed. 
- - :param iterable: iterable of strings that are newline terminated - """ - result = [] - for line in iterable: - line = to_native(line) - line, line_terminated = _line_parse(line) - if not line_terminated: - raise ValueError("unexpected end of line in multipart header") - if not line: - break - elif line[0] in " \t" and result: - key, value = result[-1] - result[-1] = (key, value + "\n " + line[1:]) - else: - parts = line.split(":", 1) - if len(parts) == 2: - result.append((parts[0].strip(), parts[1].strip())) - - # we link the list to the headers, no need to create a copy, the - # list was not shared anyways. - return Headers(result) - - -_begin_form = "begin_form" -_begin_file = "begin_file" -_cont = "cont" -_end = "end" - - -class MultiPartParser(object): - def __init__( - self, - stream_factory=None, - charset="utf-8", - errors="replace", - max_form_memory_size=None, - cls=None, - buffer_size=64 * 1024, - ): - self.charset = charset - self.errors = errors - self.max_form_memory_size = max_form_memory_size - self.stream_factory = ( - default_stream_factory if stream_factory is None else stream_factory - ) - self.cls = MultiDict if cls is None else cls - - # make sure the buffer size is divisible by four so that we can base64 - # decode chunk by chunk - assert buffer_size % 4 == 0, "buffer size has to be divisible by 4" - # also the buffer size has to be at least 1024 bytes long or long headers - # will freak out the system - assert buffer_size >= 1024, "buffer size has to be at least 1KB" - - self.buffer_size = buffer_size - - def _fix_ie_filename(self, filename): - """Internet Explorer 6 transmits the full file name if a file is - uploaded. This function strips the full path if it thinks the - filename is Windows-like absolute. - """ - if filename[1:3] == ":\\" or filename[:2] == "\\\\": - return filename.split("\\")[-1] - return filename - - def _find_terminator(self, iterator): - """The terminator might have some additional newlines before it. - There is at least one application that sends additional newlines - before headers (the python setuptools package). 
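A sketch of `parse_multipart_headers` on a hand-built header block (1.0-era API; each line must be newline-terminated, and a blank line ends the block):

```python
from werkzeug.formparser import is_valid_multipart_boundary, parse_multipart_headers

print(is_valid_multipart_boundary("simple-boundary"))  # True

lines = iter([
    b'Content-Disposition: form-data; name="file"; filename="a.txt"\r\n',
    b"Content-Type: text/plain\r\n",
    b"\r\n",  # blank line terminates the header block
])
headers = parse_multipart_headers(lines)
print(headers["Content-Type"])  # 'text/plain'
```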
- """ - for line in iterator: - if not line: - break - line = line.strip() - if line: - return line - return b"" - - def fail(self, message): - raise ValueError(message) - - def get_part_encoding(self, headers): - transfer_encoding = headers.get("content-transfer-encoding") - if ( - transfer_encoding is not None - and transfer_encoding in _supported_multipart_encodings - ): - return transfer_encoding - - def get_part_charset(self, headers): - # Figure out input charset for current part - content_type = headers.get("content-type") - if content_type: - mimetype, ct_params = parse_options_header(content_type) - return ct_params.get("charset", self.charset) - return self.charset - - def start_file_streaming(self, filename, headers, total_content_length): - if isinstance(filename, bytes): - filename = filename.decode(self.charset, self.errors) - filename = self._fix_ie_filename(filename) - content_type = headers.get("content-type") - try: - content_length = int(headers["content-length"]) - except (KeyError, ValueError): - content_length = 0 - container = self.stream_factory( - total_content_length=total_content_length, - filename=filename, - content_type=content_type, - content_length=content_length, - ) - return filename, container - - def in_memory_threshold_reached(self, bytes): - raise exceptions.RequestEntityTooLarge() - - def validate_boundary(self, boundary): - if not boundary: - self.fail("Missing boundary") - if not is_valid_multipart_boundary(boundary): - self.fail("Invalid boundary: %s" % boundary) - if len(boundary) > self.buffer_size: # pragma: no cover - # this should never happen because we check for a minimum size - # of 1024 and boundaries may not be longer than 200. The only - # situation when this happens is for non debug builds where - # the assert is skipped. - self.fail("Boundary longer than buffer size") - - def parse_lines(self, file, boundary, content_length, cap_at_buffer=True): - """Generate parts of - ``('begin_form', (headers, name))`` - ``('begin_file', (headers, name, filename))`` - ``('cont', bytestring)`` - ``('end', None)`` - - Always obeys the grammar - parts = ( begin_form cont* end | - begin_file cont* end )* - """ - next_part = b"--" + boundary - last_part = next_part + b"--" - - iterator = chain( - make_line_iter( - file, - limit=content_length, - buffer_size=self.buffer_size, - cap_at_buffer=cap_at_buffer, - ), - _empty_string_iter, - ) - - terminator = self._find_terminator(iterator) - - if terminator == last_part: - return - elif terminator != next_part: - self.fail("Expected boundary at start of multipart data") - - while terminator != last_part: - headers = parse_multipart_headers(iterator) - - disposition = headers.get("content-disposition") - if disposition is None: - self.fail("Missing Content-Disposition header") - disposition, extra = parse_options_header(disposition) - transfer_encoding = self.get_part_encoding(headers) - name = extra.get("name") - filename = extra.get("filename") - - # if no content type is given we stream into memory. A list is - # used as a temporary container. - if filename is None: - yield _begin_form, (headers, name) - - # otherwise we parse the rest of the headers and ask the stream - # factory for something we can write in. 
- else: - yield _begin_file, (headers, name, filename) - - buf = b"" - for line in iterator: - if not line: - self.fail("unexpected end of stream") - - if line[:2] == b"--": - terminator = line.rstrip() - if terminator in (next_part, last_part): - break - - if transfer_encoding is not None: - if transfer_encoding == "base64": - transfer_encoding = "base64_codec" - try: - line = codecs.decode(line, transfer_encoding) - except Exception: - self.fail("could not decode transfer encoded chunk") - - # we have something in the buffer from the last iteration. - # this is usually a newline delimiter. - if buf: - yield _cont, buf - buf = b"" - - # If the line ends with windows CRLF we write everything except - # the last two bytes. In all other cases however we write - # everything except the last byte. If it was a newline, that's - # fine, otherwise it does not matter because we will write it - # the next iteration. this ensures we do not write the - # final newline into the stream. That way we do not have to - # truncate the stream. However we do have to make sure that - # if something else than a newline is in there we write it - # out. - if line[-2:] == b"\r\n": - buf = b"\r\n" - cutoff = -2 - else: - buf = line[-1:] - cutoff = -1 - yield _cont, line[:cutoff] - - else: # pragma: no cover - raise ValueError("unexpected end of part") - - # if we have a leftover in the buffer that is not a newline - # character we have to flush it, otherwise we will chop of - # certain values. - if buf not in (b"", b"\r", b"\n", b"\r\n"): - yield _cont, buf - - yield _end, None - - def parse_parts(self, file, boundary, content_length): - """Generate ``('file', (name, val))`` and - ``('form', (name, val))`` parts. - """ - in_memory = 0 - - for ellt, ell in self.parse_lines(file, boundary, content_length): - if ellt == _begin_file: - headers, name, filename = ell - is_file = True - guard_memory = False - filename, container = self.start_file_streaming( - filename, headers, content_length - ) - _write = container.write - - elif ellt == _begin_form: - headers, name = ell - is_file = False - container = [] - _write = container.append - guard_memory = self.max_form_memory_size is not None - - elif ellt == _cont: - _write(ell) - # if we write into memory and there is a memory size limit we - # count the number of bytes in memory and raise an exception if - # there is too much data in memory. - if guard_memory: - in_memory += len(ell) - if in_memory > self.max_form_memory_size: - self.in_memory_threshold_reached(in_memory) - - elif ellt == _end: - if is_file: - container.seek(0) - yield ( - "file", - (name, FileStorage(container, filename, name, headers=headers)), - ) - else: - part_charset = self.get_part_charset(headers) - yield ( - "form", - (name, b"".join(container).decode(part_charset, self.errors)), - ) - - def parse(self, file, boundary, content_length): - formstream, filestream = tee( - self.parse_parts(file, boundary, content_length), 2 - ) - form = (p[1] for p in formstream if p[0] == "form") - files = (p[1] for p in filestream if p[0] == "file") - return self.cls(form), self.cls(files) diff --git a/venv/lib/python3.7/site-packages/werkzeug/http.py b/venv/lib/python3.7/site-packages/werkzeug/http.py deleted file mode 100644 index b428cee..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/http.py +++ /dev/null @@ -1,1305 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.http - ~~~~~~~~~~~~~ - - Werkzeug comes with a bunch of utilities that help Werkzeug to deal with - HTTP data. 
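End to end, the deleted `MultiPartParser` can be driven directly; a minimal sketch with a hand-built multipart body (1.0-era API):

```python
from io import BytesIO
from werkzeug.formparser import MultiPartParser

body = (
    b"--boundary\r\n"
    b'Content-Disposition: form-data; name="field"\r\n'
    b"\r\n"
    b"value\r\n"
    b"--boundary--\r\n"
)
form, files = MultiPartParser().parse(BytesIO(body), b"boundary", len(body))
print(form["field"])  # 'value'
```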
Most of the classes and functions provided by this module are - used by the wrappers, but they are useful on their own, too, especially if - the response and request objects are not used. - - This covers some of the more HTTP centric features of WSGI, some other - utilities such as cookie handling are documented in the `werkzeug.utils` - module. - - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import base64 -import re -import warnings -from datetime import datetime -from datetime import timedelta -from hashlib import md5 -from time import gmtime -from time import time - -from ._compat import integer_types -from ._compat import iteritems -from ._compat import PY2 -from ._compat import string_types -from ._compat import text_type -from ._compat import to_bytes -from ._compat import to_unicode -from ._compat import try_coerce_native -from ._internal import _cookie_parse_impl -from ._internal import _cookie_quote -from ._internal import _make_cookie_domain - -try: - from email.utils import parsedate_tz -except ImportError: - from email.Utils import parsedate_tz - -try: - from urllib.request import parse_http_list as _parse_list_header - from urllib.parse import unquote_to_bytes as _unquote -except ImportError: - from urllib2 import parse_http_list as _parse_list_header - from urllib2 import unquote as _unquote - -_cookie_charset = "latin1" -_basic_auth_charset = "utf-8" -# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231 -_accept_re = re.compile( - r""" - ( # media-range capturing-parenthesis - [^\s;,]+ # type/subtype - (?:[ \t]*;[ \t]* # ";" - (?: # parameter non-capturing-parenthesis - [^\s;,q][^\s;,]* # token that doesn't start with "q" - | # or - q[^\s;,=][^\s;,]* # token that is more than just "q" - ) - )* # zero or more parameters - ) # end of media-range - (?:[ \t]*;[ \t]*q= # weight is a "q" parameter - (\d*(?:\.\d+)?) # qvalue capturing-parentheses - [^,]* # "extension" accept params: who cares? - )? # accept params are optional - """, - re.VERBOSE, -) -_token_chars = frozenset( - "!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ^_`abcdefghijklmnopqrstuvwxyz|~" -) -_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)') -_unsafe_header_chars = set('()<>@,;:"/[]?={} \t') -_option_header_piece_re = re.compile( - r""" - ;\s*,?\s* # newlines were replaced with commas - (?P - "[^"\\]*(?:\\.[^"\\]*)*" # quoted string - | - [^\s;,=*]+ # token - ) - (?:\*(?P\d+))? # *1, optional continuation index - \s* - (?: # optionally followed by =value - (?: # equals sign, possibly with encoding - \*\s*=\s* # * indicates extended notation - (?: # optional encoding - (?P[^\s]+?) - '(?P[^\s]*?)' - )? - | - =\s* # basic notation - ) - (?P - "[^"\\]*(?:\\.[^"\\]*)*" # quoted string - | - [^;,]+ # token - )? - )? 
- \s* - """, - flags=re.VERBOSE, -) -_option_header_start_mime_type = re.compile(r",\s*([^;,\s]+)([;,]\s*.+)?") - -_entity_headers = frozenset( - [ - "allow", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-md5", - "content-range", - "content-type", - "expires", - "last-modified", - ] -) -_hop_by_hop_headers = frozenset( - [ - "connection", - "keep-alive", - "proxy-authenticate", - "proxy-authorization", - "te", - "trailer", - "transfer-encoding", - "upgrade", - ] -) - - -HTTP_STATUS_CODES = { - 100: "Continue", - 101: "Switching Protocols", - 102: "Processing", - 103: "Early Hints", # see RFC 8297 - 200: "OK", - 201: "Created", - 202: "Accepted", - 203: "Non Authoritative Information", - 204: "No Content", - 205: "Reset Content", - 206: "Partial Content", - 207: "Multi Status", - 208: "Already Reported", # see RFC 5842 - 226: "IM Used", # see RFC 3229 - 300: "Multiple Choices", - 301: "Moved Permanently", - 302: "Found", - 303: "See Other", - 304: "Not Modified", - 305: "Use Proxy", - 306: "Switch Proxy", # unused - 307: "Temporary Redirect", - 308: "Permanent Redirect", - 400: "Bad Request", - 401: "Unauthorized", - 402: "Payment Required", # unused - 403: "Forbidden", - 404: "Not Found", - 405: "Method Not Allowed", - 406: "Not Acceptable", - 407: "Proxy Authentication Required", - 408: "Request Timeout", - 409: "Conflict", - 410: "Gone", - 411: "Length Required", - 412: "Precondition Failed", - 413: "Request Entity Too Large", - 414: "Request URI Too Long", - 415: "Unsupported Media Type", - 416: "Requested Range Not Satisfiable", - 417: "Expectation Failed", - 418: "I'm a teapot", # see RFC 2324 - 421: "Misdirected Request", # see RFC 7540 - 422: "Unprocessable Entity", - 423: "Locked", - 424: "Failed Dependency", - 425: "Too Early", # see RFC 8470 - 426: "Upgrade Required", - 428: "Precondition Required", # see RFC 6585 - 429: "Too Many Requests", - 431: "Request Header Fields Too Large", - 449: "Retry With", # proprietary MS extension - 451: "Unavailable For Legal Reasons", - 500: "Internal Server Error", - 501: "Not Implemented", - 502: "Bad Gateway", - 503: "Service Unavailable", - 504: "Gateway Timeout", - 505: "HTTP Version Not Supported", - 506: "Variant Also Negotiates", # see RFC 2295 - 507: "Insufficient Storage", - 508: "Loop Detected", # see RFC 5842 - 510: "Not Extended", - 511: "Network Authentication Failed", # see RFC 6585 -} - - -def wsgi_to_bytes(data): - """coerce wsgi unicode represented bytes to real ones""" - if isinstance(data, bytes): - return data - return data.encode("latin1") # XXX: utf8 fallback? - - -def bytes_to_wsgi(data): - assert isinstance(data, bytes), "data must be bytes" - if isinstance(data, str): - return data - else: - return data.decode("latin1") - - -def quote_header_value(value, extra_chars="", allow_token=True): - """Quote a header value if necessary. - - .. versionadded:: 0.5 - - :param value: the value to quote. - :param extra_chars: a list of extra characters to skip quoting. - :param allow_token: if this is enabled token values are returned - unchanged. - """ - if isinstance(value, bytes): - value = bytes_to_wsgi(value) - value = str(value) - if allow_token: - token_chars = _token_chars | set(extra_chars) - if set(value).issubset(token_chars): - return value - return '"%s"' % value.replace("\\", "\\\\").replace('"', '\\"') - - -def unquote_header_value(value, is_filename=False): - r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). 
- This does not use the real unquoting but what browsers are actually - using for quoting. - - .. versionadded:: 0.5 - - :param value: the header value to unquote. - """ - if value and value[0] == value[-1] == '"': - # this is not the real unquoting, but fixing this so that the - # RFC is met will result in bugs with internet explorer and - # probably some other browsers as well. IE for example is - # uploading files with "C:\foo\bar.txt" as filename - value = value[1:-1] - - # if this is a filename and the starting characters look like - # a UNC path, then just return the value without quotes. Using the - # replace sequence below on a UNC path has the effect of turning - # the leading double slash into a single slash and then - # _fix_ie_filename() doesn't work correctly. See #458. - if not is_filename or value[:2] != "\\\\": - return value.replace("\\\\", "\\").replace('\\"', '"') - return value - - -def dump_options_header(header, options): - """The reverse function to :func:`parse_options_header`. - - :param header: the header to dump - :param options: a dict of options to append. - """ - segments = [] - if header is not None: - segments.append(header) - for key, value in iteritems(options): - if value is None: - segments.append(key) - else: - segments.append("%s=%s" % (key, quote_header_value(value))) - return "; ".join(segments) - - -def dump_header(iterable, allow_token=True): - """Dump an HTTP header again. This is the reversal of - :func:`parse_list_header`, :func:`parse_set_header` and - :func:`parse_dict_header`. This also quotes strings that include an - equals sign unless you pass it as dict of key, value pairs. - - >>> dump_header({'foo': 'bar baz'}) - 'foo="bar baz"' - >>> dump_header(('foo', 'bar baz')) - 'foo, "bar baz"' - - :param iterable: the iterable or dict of values to quote. - :param allow_token: if set to `False` tokens as values are disallowed. - See :func:`quote_header_value` for more details. - """ - if isinstance(iterable, dict): - items = [] - for key, value in iteritems(iterable): - if value is None: - items.append(key) - else: - items.append( - "%s=%s" % (key, quote_header_value(value, allow_token=allow_token)) - ) - else: - items = [quote_header_value(x, allow_token=allow_token) for x in iterable] - return ", ".join(items) - - -def dump_csp_header(header): - """Dump a Content Security Policy header. - - These are structured into policies such as "default-src 'self'; - script-src 'self'". - - .. versionadded:: 1.0.0 - Support for Content Security Policy headers was added. - - """ - return "; ".join("%s %s" % (key, value) for key, value in iteritems(header)) - - -def parse_list_header(value): - """Parse lists as described by RFC 2068 Section 2. - - In particular, parse comma-separated lists where the elements of - the list may include quoted-strings. A quoted-string could - contain a comma. A non-quoted string could have quotes in the - middle. Quotes are removed automatically after parsing. - - It basically works like :func:`parse_set_header` just that items - may appear multiple times and case sensitivity is preserved. - - The return value is a standard :class:`list`: - - >>> parse_list_header('token, "quoted value"') - ['token', 'quoted value'] - - To create a header from the :class:`list` again, use the - :func:`dump_header` function. - - :param value: a string with a list header. 
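`dump_options_header` is the inverse of `parse_options_header` further down in this file and, unlike `dump_header`, has no doctest in the source; a small sketch (1.0-era API):

```python
from werkzeug.http import dump_options_header, parse_list_header

print(dump_options_header("text/html", {"charset": "utf-8"}))
# text/html; charset=utf-8
print(parse_list_header('token, "quoted value"'))
# ['token', 'quoted value']
```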
- :return: :class:`list` - """ - result = [] - for item in _parse_list_header(value): - if item[:1] == item[-1:] == '"': - item = unquote_header_value(item[1:-1]) - result.append(item) - return result - - -def parse_dict_header(value, cls=dict): - """Parse lists of key, value pairs as described by RFC 2068 Section 2 and - convert them into a python dict (or any other mapping object created from - the type with a dict like interface provided by the `cls` argument): - - >>> d = parse_dict_header('foo="is a fish", bar="as well"') - >>> type(d) is dict - True - >>> sorted(d.items()) - [('bar', 'as well'), ('foo', 'is a fish')] - - If there is no value for a key it will be `None`: - - >>> parse_dict_header('key_without_value') - {'key_without_value': None} - - To create a header from the :class:`dict` again, use the - :func:`dump_header` function. - - .. versionchanged:: 0.9 - Added support for `cls` argument. - - :param value: a string with a dict header. - :param cls: callable to use for storage of parsed results. - :return: an instance of `cls` - """ - result = cls() - if not isinstance(value, text_type): - # XXX: validate - value = bytes_to_wsgi(value) - for item in _parse_list_header(value): - if "=" not in item: - result[item] = None - continue - name, value = item.split("=", 1) - if value[:1] == value[-1:] == '"': - value = unquote_header_value(value[1:-1]) - result[name] = value - return result - - -def parse_options_header(value, multiple=False): - """Parse a ``Content-Type`` like header into a tuple with the content - type and the options: - - >>> parse_options_header('text/html; charset=utf8') - ('text/html', {'charset': 'utf8'}) - - This should not be used to parse ``Cache-Control`` like headers that use - a slightly different format. For these headers use the - :func:`parse_dict_header` function. - - .. versionchanged:: 0.15 - :rfc:`2231` parameter continuations are handled. - - .. versionadded:: 0.5 - - :param value: the header to parse. - :param multiple: Whether try to parse and return multiple MIME types - :return: (mimetype, options) or (mimetype, options, mimetype, options, …) - if multiple=True - """ - if not value: - return "", {} - - result = [] - - value = "," + value.replace("\n", ",") - while value: - match = _option_header_start_mime_type.match(value) - if not match: - break - result.append(match.group(1)) # mimetype - options = {} - # Parse options - rest = match.group(2) - continued_encoding = None - while rest: - optmatch = _option_header_piece_re.match(rest) - if not optmatch: - break - option, count, encoding, language, option_value = optmatch.groups() - # Continuations don't have to supply the encoding after the - # first line. If we're in a continuation, track the current - # encoding to use for subsequent lines. Reset it when the - # continuation ends. - if not count: - continued_encoding = None - else: - if not encoding: - encoding = continued_encoding - continued_encoding = encoding - option = unquote_header_value(option) - if option_value is not None: - option_value = unquote_header_value(option_value, option == "filename") - if encoding is not None: - option_value = _unquote(option_value).decode(encoding) - if count: - # Continuations append to the existing value. For - # simplicity, this ignores the possibility of - # out-of-order indices, which shouldn't happen anyway. 
- options[option] = options.get(option, "") + option_value - else: - options[option] = option_value - rest = rest[optmatch.end() :] - result.append(options) - if multiple is False: - return tuple(result) - value = rest - - return tuple(result) if result else ("", {}) - - -def parse_accept_header(value, cls=None): - """Parses an HTTP Accept-* header. This does not implement a complete - valid algorithm but one that supports at least value and quality - extraction. - - Returns a new :class:`Accept` object (basically a list of ``(value, quality)`` - tuples sorted by the quality with some additional accessor methods). - - The second parameter can be a subclass of :class:`Accept` that is created - with the parsed values and returned. - - :param value: the accept header string to be parsed. - :param cls: the wrapper class for the return value (can be - :class:`Accept` or a subclass thereof) - :return: an instance of `cls`. - """ - if cls is None: - cls = Accept - - if not value: - return cls(None) - - result = [] - for match in _accept_re.finditer(value): - quality = match.group(2) - if not quality: - quality = 1 - else: - quality = max(min(float(quality), 1), 0) - result.append((match.group(1), quality)) - return cls(result) - - -def parse_cache_control_header(value, on_update=None, cls=None): - """Parse a cache control header. The RFC differs between response and - request cache control, this method does not. It's your responsibility - to not use the wrong control statements. - - .. versionadded:: 0.5 - The `cls` was added. If not specified an immutable - :class:`~werkzeug.datastructures.RequestCacheControl` is returned. - - :param value: a cache control header to be parsed. - :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.CacheControl` - object is changed. - :param cls: the class for the returned object. By default - :class:`~werkzeug.datastructures.RequestCacheControl` is used. - :return: a `cls` object. - """ - if cls is None: - cls = RequestCacheControl - if not value: - return cls(None, on_update) - return cls(parse_dict_header(value), on_update) - - -def parse_csp_header(value, on_update=None, cls=None): - """Parse a Content Security Policy header. - - .. versionadded:: 1.0.0 - Support for Content Security Policy headers was added. - - :param value: a csp header to be parsed. - :param on_update: an optional callable that is called every time a value - on the object is changed. - :param cls: the class for the returned object. By default - :class:`~werkzeug.datastructures.ContentSecurityPolicy` is used. - :return: a `cls` object. - """ - - if cls is None: - cls = ContentSecurityPolicy - items = [] - for policy in value.split(";"): - policy = policy.strip() - # Ignore badly formatted policies (no space) - if " " in policy: - directive, value = policy.strip().split(" ", 1) - items.append((directive.strip(), value.strip())) - return cls(items, on_update) - - -def parse_set_header(value, on_update=None): - """Parse a set-like header and return a - :class:`~werkzeug.datastructures.HeaderSet` object: - - >>> hs = parse_set_header('token, "quoted value"') - - The return value is an object that treats the items case-insensitively - and keeps the order of the items: - - >>> 'TOKEN' in hs - True - >>> hs.index('quoted value') - 1 - >>> hs - HeaderSet(['token', 'quoted value']) - - To create a header from the :class:`HeaderSet` again, use the - :func:`dump_header` function. - - :param value: a set header to be parsed. 
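A sketch of the two most commonly used parsers above (1.0-era API; note that `Accept.best` comes from `werkzeug.datastructures`, not from this file):

```python
from werkzeug.http import parse_accept_header, parse_options_header

mimetype, options = parse_options_header('multipart/form-data; boundary="abc"')
print(mimetype, options)  # multipart/form-data {'boundary': 'abc'}

accept = parse_accept_header("text/html,application/json;q=0.5")
print(accept.best)        # text/html
```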
- :param on_update: an optional callable that is called every time a - value on the :class:`~werkzeug.datastructures.HeaderSet` - object is changed. - :return: a :class:`~werkzeug.datastructures.HeaderSet` - """ - if not value: - return HeaderSet(None, on_update) - return HeaderSet(parse_list_header(value), on_update) - - -def parse_authorization_header(value): - """Parse an HTTP basic/digest authorization header transmitted by the web - browser. The return value is either `None` if the header was invalid or - not given, otherwise an :class:`~werkzeug.datastructures.Authorization` - object. - - :param value: the authorization header to parse. - :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`. - """ - if not value: - return - value = wsgi_to_bytes(value) - try: - auth_type, auth_info = value.split(None, 1) - auth_type = auth_type.lower() - except ValueError: - return - if auth_type == b"basic": - try: - username, password = base64.b64decode(auth_info).split(b":", 1) - except Exception: - return - return Authorization( - "basic", - { - "username": to_unicode(username, _basic_auth_charset), - "password": to_unicode(password, _basic_auth_charset), - }, - ) - elif auth_type == b"digest": - auth_map = parse_dict_header(auth_info) - for key in "username", "realm", "nonce", "uri", "response": - if key not in auth_map: - return - if "qop" in auth_map: - if not auth_map.get("nc") or not auth_map.get("cnonce"): - return - return Authorization("digest", auth_map) - - -def parse_www_authenticate_header(value, on_update=None): - """Parse an HTTP WWW-Authenticate header into a - :class:`~werkzeug.datastructures.WWWAuthenticate` object. - - :param value: a WWW-Authenticate header to parse. - :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.WWWAuthenticate` - object is changed. - :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object. - """ - if not value: - return WWWAuthenticate(on_update=on_update) - try: - auth_type, auth_info = value.split(None, 1) - auth_type = auth_type.lower() - except (ValueError, AttributeError): - return WWWAuthenticate(value.strip().lower(), on_update=on_update) - return WWWAuthenticate(auth_type, parse_dict_header(auth_info), on_update) - - -def parse_if_range_header(value): - """Parses an if-range header which can be an etag or a date. Returns - a :class:`~werkzeug.datastructures.IfRange` object. - - .. versionadded:: 0.7 - """ - if not value: - return IfRange() - date = parse_date(value) - if date is not None: - return IfRange(date=date) - # drop weakness information - return IfRange(unquote_etag(value)[0]) - - -def parse_range_header(value, make_inclusive=True): - """Parses a range header into a :class:`~werkzeug.datastructures.Range` - object. If the header is missing or malformed `None` is returned. - `ranges` is a list of ``(start, stop)`` tuples where the ranges are - non-inclusive. - - .. 
versionadded:: 0.7 - """ - if not value or "=" not in value: - return None - - ranges = [] - last_end = 0 - units, rng = value.split("=", 1) - units = units.strip().lower() - - for item in rng.split(","): - item = item.strip() - if "-" not in item: - return None - if item.startswith("-"): - if last_end < 0: - return None - try: - begin = int(item) - except ValueError: - return None - end = None - last_end = -1 - elif "-" in item: - begin, end = item.split("-", 1) - begin = begin.strip() - end = end.strip() - if not begin.isdigit(): - return None - begin = int(begin) - if begin < last_end or last_end < 0: - return None - if end: - if not end.isdigit(): - return None - end = int(end) + 1 - if begin >= end: - return None - else: - end = None - last_end = end - ranges.append((begin, end)) - - return Range(units, ranges) - - -def parse_content_range_header(value, on_update=None): - """Parses a range header into a - :class:`~werkzeug.datastructures.ContentRange` object or `None` if - parsing is not possible. - - .. versionadded:: 0.7 - - :param value: a content range header to be parsed. - :param on_update: an optional callable that is called every time a value - on the :class:`~werkzeug.datastructures.ContentRange` - object is changed. - """ - if value is None: - return None - try: - units, rangedef = (value or "").strip().split(None, 1) - except ValueError: - return None - - if "/" not in rangedef: - return None - rng, length = rangedef.split("/", 1) - if length == "*": - length = None - elif length.isdigit(): - length = int(length) - else: - return None - - if rng == "*": - return ContentRange(units, None, None, length, on_update=on_update) - elif "-" not in rng: - return None - - start, stop = rng.split("-", 1) - try: - start = int(start) - stop = int(stop) + 1 - except ValueError: - return None - - if is_byte_range_valid(start, stop, length): - return ContentRange(units, start, stop, length, on_update=on_update) - - -def quote_etag(etag, weak=False): - """Quote an etag. - - :param etag: the etag to quote. - :param weak: set to `True` to tag it "weak". - """ - if '"' in etag: - raise ValueError("invalid etag") - etag = '"%s"' % etag - if weak: - etag = "W/" + etag - return etag - - -def unquote_etag(etag): - """Unquote a single etag: - - >>> unquote_etag('W/"bar"') - ('bar', True) - >>> unquote_etag('"bar"') - ('bar', False) - - :param etag: the etag identifier to unquote. - :return: a ``(etag, weak)`` tuple. - """ - if not etag: - return None, None - etag = etag.strip() - weak = False - if etag.startswith(("W/", "w/")): - weak = True - etag = etag[2:] - if etag[:1] == etag[-1:] == '"': - etag = etag[1:-1] - return etag, weak - - -def parse_etags(value): - """Parse an etag header. - - :param value: the tag header to parse - :return: an :class:`~werkzeug.datastructures.ETags` object. - """ - if not value: - return ETags() - strong = [] - weak = [] - end = len(value) - pos = 0 - while pos < end: - match = _etag_re.match(value, pos) - if match is None: - break - is_weak, quoted, raw = match.groups() - if raw == "*": - return ETags(star_tag=True) - elif quoted: - raw = quoted - if is_weak: - weak.append(raw) - else: - strong.append(raw) - pos = match.end() - return ETags(strong, weak) - - -def generate_etag(data): - """Generate an etag for some data.""" - return md5(data).hexdigest() - - -def parse_date(value): - """Parse one of the following date formats into a datetime object: - - .. 
sourcecode:: text - - Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123 - Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036 - Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format - - If parsing fails the return value is `None`. - - :param value: a string with a supported date format. - :return: a :class:`datetime.datetime` object. - """ - if value: - t = parsedate_tz(value.strip()) - if t is not None: - try: - year = t[0] - # unfortunately that function does not tell us if two digit - # years were part of the string, or if they were prefixed - # with two zeroes. So what we do is to assume that 69-99 - # refer to 1900, and everything below to 2000 - if year >= 0 and year <= 68: - year += 2000 - elif year >= 69 and year <= 99: - year += 1900 - return datetime(*((year,) + t[1:7])) - timedelta(seconds=t[-1] or 0) - except (ValueError, OverflowError): - return None - - -def _dump_date(d, delim): - """Used for `http_date` and `cookie_date`.""" - if d is None: - d = gmtime() - elif isinstance(d, datetime): - d = d.utctimetuple() - elif isinstance(d, (integer_types, float)): - d = gmtime(d) - return "%s, %02d%s%s%s%s %02d:%02d:%02d GMT" % ( - ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")[d.tm_wday], - d.tm_mday, - delim, - ( - "Jan", - "Feb", - "Mar", - "Apr", - "May", - "Jun", - "Jul", - "Aug", - "Sep", - "Oct", - "Nov", - "Dec", - )[d.tm_mon - 1], - delim, - str(d.tm_year), - d.tm_hour, - d.tm_min, - d.tm_sec, - ) - - -def cookie_date(expires=None): - """Formats the time to ensure compatibility with Netscape's cookie - standard. - - Accepts a floating point number expressed in seconds since the epoch in, a - datetime object or a timetuple. All times in UTC. The :func:`parse_date` - function can be used to parse such a date. - - Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``. - - :param expires: If provided that date is used, otherwise the current. - """ - return _dump_date(expires, "-") - - -def http_date(timestamp=None): - """Formats the time to match the RFC1123 date format. - - Accepts a floating point number expressed in seconds since the epoch in, a - datetime object or a timetuple. All times in UTC. The :func:`parse_date` - function can be used to parse such a date. - - Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``. - - :param timestamp: If provided that date is used, otherwise the current. - """ - return _dump_date(timestamp, " ") - - -def parse_age(value=None): - """Parses a base-10 integer count of seconds into a timedelta. - - If parsing fails, the return value is `None`. - - :param value: a string consisting of an integer represented in base-10 - :return: a :class:`datetime.timedelta` object or `None`. - """ - if not value: - return None - try: - seconds = int(value) - except ValueError: - return None - if seconds < 0: - return None - try: - return timedelta(seconds=seconds) - except OverflowError: - return None - - -def dump_age(age=None): - """Formats the duration as a base-10 integer. - - :param age: should be an integer number of seconds, - a :class:`datetime.timedelta` object, or, - if the age is unknown, `None` (default). 
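A few of the helpers above, round-tripped (1.0-era API; note that `parse_range_header` converts the inclusive HTTP range into exclusive end offsets):

```python
from werkzeug.http import http_date, parse_date, parse_range_header, quote_etag, unquote_etag

rng = parse_range_header("bytes=0-499")
print(rng.units, rng.ranges)  # bytes [(0, 500)] -- inclusive 0-499 becomes exclusive 500

print(quote_etag("abc", weak=True))  # W/"abc"
print(unquote_etag('W/"abc"'))       # ('abc', True)

print(parse_date(http_date(0)))      # 1970-01-01 00:00:00
```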
- """ - if age is None: - return - if isinstance(age, timedelta): - # do the equivalent of Python 2.7's timedelta.total_seconds(), - # but disregarding fractional seconds - age = age.seconds + (age.days * 24 * 3600) - - age = int(age) - if age < 0: - raise ValueError("age cannot be negative") - - return str(age) - - -def is_resource_modified( - environ, etag=None, data=None, last_modified=None, ignore_if_range=True -): - """Convenience method for conditional requests. - - :param environ: the WSGI environment of the request to be checked. - :param etag: the etag for the response for comparison. - :param data: or alternatively the data of the response to automatically - generate an etag using :func:`generate_etag`. - :param last_modified: an optional date of the last modification. - :param ignore_if_range: If `False`, `If-Range` header will be taken into - account. - :return: `True` if the resource was modified, otherwise `False`. - - .. versionchanged:: 1.0.0 - The check is run for methods other than ``GET`` and ``HEAD``. - """ - if etag is None and data is not None: - etag = generate_etag(data) - elif data is not None: - raise TypeError("both data and etag given") - - unmodified = False - if isinstance(last_modified, string_types): - last_modified = parse_date(last_modified) - - # ensure that microsecond is zero because the HTTP spec does not transmit - # that either and we might have some false positives. See issue #39 - if last_modified is not None: - last_modified = last_modified.replace(microsecond=0) - - if_range = None - if not ignore_if_range and "HTTP_RANGE" in environ: - # https://tools.ietf.org/html/rfc7233#section-3.2 - # A server MUST ignore an If-Range header field received in a request - # that does not contain a Range header field. - if_range = parse_if_range_header(environ.get("HTTP_IF_RANGE")) - - if if_range is not None and if_range.date is not None: - modified_since = if_range.date - else: - modified_since = parse_date(environ.get("HTTP_IF_MODIFIED_SINCE")) - - if modified_since and last_modified and last_modified <= modified_since: - unmodified = True - - if etag: - etag, _ = unquote_etag(etag) - if if_range is not None and if_range.etag is not None: - unmodified = parse_etags(if_range.etag).contains(etag) - else: - if_none_match = parse_etags(environ.get("HTTP_IF_NONE_MATCH")) - if if_none_match: - # https://tools.ietf.org/html/rfc7232#section-3.2 - # "A recipient MUST use the weak comparison function when comparing - # entity-tags for If-None-Match" - unmodified = if_none_match.contains_weak(etag) - - # https://tools.ietf.org/html/rfc7232#section-3.1 - # "Origin server MUST use the strong comparison function when - # comparing entity-tags for If-Match" - if_match = parse_etags(environ.get("HTTP_IF_MATCH")) - if if_match: - unmodified = not if_match.is_strong(etag) - - return not unmodified - - -def remove_entity_headers(headers, allowed=("expires", "content-location")): - """Remove all entity headers from a list or :class:`Headers` object. This - operation works in-place. `Expires` and `Content-Location` headers are - by default not removed. The reason for this is :rfc:`2616` section - 10.3.5 which specifies some entity headers that should be sent. - - .. versionchanged:: 0.5 - added `allowed` parameter. - - :param headers: a list or :class:`Headers` object. - :param allowed: a list of headers that should still be allowed even though - they are entity headers. 
- """ - allowed = set(x.lower() for x in allowed) - headers[:] = [ - (key, value) - for key, value in headers - if not is_entity_header(key) or key.lower() in allowed - ] - - -def remove_hop_by_hop_headers(headers): - """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or - :class:`Headers` object. This operation works in-place. - - .. versionadded:: 0.5 - - :param headers: a list or :class:`Headers` object. - """ - headers[:] = [ - (key, value) for key, value in headers if not is_hop_by_hop_header(key) - ] - - -def is_entity_header(header): - """Check if a header is an entity header. - - .. versionadded:: 0.5 - - :param header: the header to test. - :return: `True` if it's an entity header, `False` otherwise. - """ - return header.lower() in _entity_headers - - -def is_hop_by_hop_header(header): - """Check if a header is an HTTP/1.1 "Hop-by-Hop" header. - - .. versionadded:: 0.5 - - :param header: the header to test. - :return: `True` if it's an HTTP/1.1 "Hop-by-Hop" header, `False` otherwise. - """ - return header.lower() in _hop_by_hop_headers - - -def parse_cookie(header, charset="utf-8", errors="replace", cls=None): - """Parse a cookie from a string or WSGI environ. - - The same key can be provided multiple times, the values are stored - in-order. The default :class:`MultiDict` will have the first value - first, and all values can be retrieved with - :meth:`MultiDict.getlist`. - - :param header: The cookie header as a string, or a WSGI environ dict - with a ``HTTP_COOKIE`` key. - :param charset: The charset for the cookie values. - :param errors: The error behavior for the charset decoding. - :param cls: A dict-like class to store the parsed cookies in. - Defaults to :class:`MultiDict`. - - .. versionchanged:: 1.0.0 - Returns a :class:`MultiDict` instead of a - ``TypeConversionDict``. - - .. versionchanged:: 0.5 - Returns a :class:`TypeConversionDict` instead of a regular dict. - The ``cls`` parameter was added. - """ - if isinstance(header, dict): - header = header.get("HTTP_COOKIE", "") - elif header is None: - header = "" - - # On Python 3, PEP 3333 sends headers through the environ as latin1 - # decoded strings. Encode strings back to bytes for parsing. - if isinstance(header, text_type): - header = header.encode("latin1", "replace") - - if cls is None: - cls = MultiDict - - def _parse_pairs(): - for key, val in _cookie_parse_impl(header): - key = to_unicode(key, charset, errors, allow_none_charset=True) - if not key: - continue - val = to_unicode(val, charset, errors, allow_none_charset=True) - yield try_coerce_native(key), val - - return cls(_parse_pairs()) - - -def dump_cookie( - key, - value="", - max_age=None, - expires=None, - path="/", - domain=None, - secure=False, - httponly=False, - charset="utf-8", - sync_expires=True, - max_size=4093, - samesite=None, -): - """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix - The parameters are the same as in the cookie Morsel object in the - Python standard library but it accepts unicode data, too. - - On Python 3 the return value of this function will be a unicode - string, on Python 2 it will be a native string. In both cases the - return value is usually restricted to ascii as the vast majority of - values are properly escaped, but that is no guarantee. If a unicode - string is returned it's tunneled through latin1 as required by - PEP 3333. - - The return value is not ASCII safe if the key contains unicode - characters. This is technically against the specification but - happens in the wild. 
It's strongly recommended to not use
-    non-ASCII values for the keys.
-
-    :param max_age: should be a number of seconds, or `None` (default) if
-        the cookie should last only as long as the client's
-        browser session. Additionally `timedelta` objects
-        are accepted, too.
-    :param expires: should be a `datetime` object or unix timestamp.
-    :param path: limits the cookie to a given path, per default it will
-        span the whole domain.
-    :param domain: Use this if you want to set a cross-domain cookie. For
-        example, ``domain=".example.com"`` will set a cookie
-        that is readable by the domain ``www.example.com``,
-        ``foo.example.com`` etc. Otherwise, a cookie will only
-        be readable by the domain that set it.
-    :param secure: The cookie will only be available via HTTPS
-    :param httponly: disallow JavaScript to access the cookie. This is an
-        extension to the cookie standard and probably not
-        supported by all browsers.
-    :param charset: the encoding for unicode values.
-    :param sync_expires: automatically set expires if max_age is defined
-        but expires not.
-    :param max_size: Warn if the final header value exceeds this size. The
-        default, 4093, should be safely `supported by most browsers
-        <cookie_>`_. Set to 0 to disable this check.
-    :param samesite: Limits the scope of the cookie such that it will
-        only be attached to requests if those requests are same-site.
-
-    .. _`cookie`: http://browsercookielimits.squawky.net/
-
-    .. versionchanged:: 1.0.0
-        The string ``'None'`` is accepted for ``samesite``.
-    """
-    key = to_bytes(key, charset)
-    value = to_bytes(value, charset)
-
-    if path is not None:
-        from .urls import iri_to_uri
-
-        path = iri_to_uri(path, charset)
-    domain = _make_cookie_domain(domain)
-    if isinstance(max_age, timedelta):
-        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
-    if expires is not None:
-        if not isinstance(expires, string_types):
-            expires = cookie_date(expires)
-    elif max_age is not None and sync_expires:
-        expires = to_bytes(cookie_date(time() + max_age))
-
-    if samesite is not None:
-        samesite = samesite.title()
-
-        if samesite not in {"Strict", "Lax", "None"}:
-            raise ValueError("SameSite must be 'Strict', 'Lax', or 'None'.")
-
-    buf = [key + b"=" + _cookie_quote(value)]
-
-    # XXX: In theory all of these parameters that are not marked with `None`
-    # should be quoted. Because stdlib did not quote it before I did not
-    # want to introduce quoting there now.
-    for k, v, q in (
-        (b"Domain", domain, True),
-        (b"Expires", expires, False),
-        (b"Max-Age", max_age, False),
-        (b"Secure", secure, None),
-        (b"HttpOnly", httponly, None),
-        (b"Path", path, False),
-        (b"SameSite", samesite, False),
-    ):
-        if q is None:
-            if v:
-                buf.append(k)
-            continue
-
-        if v is None:
-            continue
-
-        tmp = bytearray(k)
-        if not isinstance(v, (bytes, bytearray)):
-            v = to_bytes(text_type(v), charset)
-        if q:
-            v = _cookie_quote(v)
-        tmp += b"=" + v
-        buf.append(bytes(tmp))
-
-    # The return value will be an incorrectly encoded latin1 header on
-    # Python 3 for consistency with the headers object and a bytestring
-    # on Python 2 because that's how the API makes more sense.
-    rv = b"; ".join(buf)
-    if not PY2:
-        rv = rv.decode("latin1")
-
-    # Warn if the final value of the cookie is larger than the limit. If the
-    # cookie is too large, then it may be silently ignored by the browser,
-    # which can be quite hard to debug.
- cookie_size = len(rv) - - if max_size and cookie_size > max_size: - value_size = len(value) - warnings.warn( - 'The "{key}" cookie is too large: the value was {value_size} bytes' - " but the header required {extra_size} extra bytes. The final size" - " was {cookie_size} bytes but the limit is {max_size} bytes." - " Browsers may silently ignore cookies larger than this.".format( - key=key, - value_size=value_size, - extra_size=cookie_size - value_size, - cookie_size=cookie_size, - max_size=max_size, - ), - stacklevel=2, - ) - - return rv - - -def is_byte_range_valid(start, stop, length): - """Checks if a given byte content range is valid for the given length. - - .. versionadded:: 0.7 - """ - if (start is None) != (stop is None): - return False - elif start is None: - return length is None or length >= 0 - elif length is None: - return 0 <= start < stop - elif start >= stop: - return False - return 0 <= start < length - - -# circular dependencies -from .datastructures import Accept -from .datastructures import Authorization -from .datastructures import ContentRange -from .datastructures import ContentSecurityPolicy -from .datastructures import ETags -from .datastructures import HeaderSet -from .datastructures import IfRange -from .datastructures import MultiDict -from .datastructures import Range -from .datastructures import RequestCacheControl -from .datastructures import WWWAuthenticate diff --git a/venv/lib/python3.7/site-packages/werkzeug/local.py b/venv/lib/python3.7/site-packages/werkzeug/local.py deleted file mode 100644 index 626b87b..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/local.py +++ /dev/null @@ -1,420 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.local - ~~~~~~~~~~~~~~ - - This module implements context-local objects. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import copy -from functools import update_wrapper - -from ._compat import implements_bool -from ._compat import PY2 -from .wsgi import ClosingIterator - -# since each thread has its own greenlet we can just use those as identifiers -# for the context. If greenlets are not available we fall back to the -# current thread ident depending on where it is. -try: - from greenlet import getcurrent as get_ident -except ImportError: - try: - from thread import get_ident - except ImportError: - from _thread import get_ident - - -def release_local(local): - """Releases the contents of the local for the current context. - This makes it possible to use locals without a manager. - - Example:: - - >>> loc = Local() - >>> loc.foo = 42 - >>> release_local(loc) - >>> hasattr(loc, 'foo') - False - - With this function one can release :class:`Local` objects as well - as :class:`LocalStack` objects. However it is not possible to - release data held by proxies that way, one always has to retain - a reference to the underlying local object in order to be able - to release it. - - .. 
versionadded:: 0.6.1 - """ - local.__release_local__() - - -class Local(object): - __slots__ = ("__storage__", "__ident_func__") - - def __init__(self): - object.__setattr__(self, "__storage__", {}) - object.__setattr__(self, "__ident_func__", get_ident) - - def __iter__(self): - return iter(self.__storage__.items()) - - def __call__(self, proxy): - """Create a proxy for a name.""" - return LocalProxy(self, proxy) - - def __release_local__(self): - self.__storage__.pop(self.__ident_func__(), None) - - def __getattr__(self, name): - try: - return self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - def __setattr__(self, name, value): - ident = self.__ident_func__() - storage = self.__storage__ - try: - storage[ident][name] = value - except KeyError: - storage[ident] = {name: value} - - def __delattr__(self, name): - try: - del self.__storage__[self.__ident_func__()][name] - except KeyError: - raise AttributeError(name) - - -class LocalStack(object): - """This class works similar to a :class:`Local` but keeps a stack - of objects instead. This is best explained with an example:: - - >>> ls = LocalStack() - >>> ls.push(42) - >>> ls.top - 42 - >>> ls.push(23) - >>> ls.top - 23 - >>> ls.pop() - 23 - >>> ls.top - 42 - - They can be force released by using a :class:`LocalManager` or with - the :func:`release_local` function but the correct way is to pop the - item from the stack after using. When the stack is empty it will - no longer be bound to the current context (and as such released). - - By calling the stack without arguments it returns a proxy that resolves to - the topmost item on the stack. - - .. versionadded:: 0.6.1 - """ - - def __init__(self): - self._local = Local() - - def __release_local__(self): - self._local.__release_local__() - - @property - def __ident_func__(self): - return self._local.__ident_func__ - - @__ident_func__.setter - def __ident_func__(self, value): - object.__setattr__(self._local, "__ident_func__", value) - - def __call__(self): - def _lookup(): - rv = self.top - if rv is None: - raise RuntimeError("object unbound") - return rv - - return LocalProxy(_lookup) - - def push(self, obj): - """Pushes a new item to the stack""" - rv = getattr(self._local, "stack", None) - if rv is None: - self._local.stack = rv = [] - rv.append(obj) - return rv - - def pop(self): - """Removes the topmost item from the stack, will return the - old value or `None` if the stack was already empty. - """ - stack = getattr(self._local, "stack", None) - if stack is None: - return None - elif len(stack) == 1: - release_local(self._local) - return stack[-1] - else: - return stack.pop() - - @property - def top(self): - """The topmost item on the stack. If the stack is empty, - `None` is returned. - """ - try: - return self._local.stack[-1] - except (AttributeError, IndexError): - return None - - -class LocalManager(object): - """Local objects cannot manage themselves. For that you need a local - manager. You can pass a local manager multiple locals or add them later - by appending them to `manager.locals`. Every time the manager cleans up, - it will clean up all the data left in the locals for this context. - - The `ident_func` parameter can be added to override the default ident - function for the wrapped locals. - - .. versionchanged:: 0.6.1 - Instead of a manager the :func:`release_local` function can be used - as well. - - .. versionchanged:: 0.7 - `ident_func` was added. 
- """ - - def __init__(self, locals=None, ident_func=None): - if locals is None: - self.locals = [] - elif isinstance(locals, Local): - self.locals = [locals] - else: - self.locals = list(locals) - if ident_func is not None: - self.ident_func = ident_func - for local in self.locals: - object.__setattr__(local, "__ident_func__", ident_func) - else: - self.ident_func = get_ident - - def get_ident(self): - """Return the context identifier the local objects use internally for - this context. You cannot override this method to change the behavior - but use it to link other context local objects (such as SQLAlchemy's - scoped sessions) to the Werkzeug locals. - - .. versionchanged:: 0.7 - You can pass a different ident function to the local manager that - will then be propagated to all the locals passed to the - constructor. - """ - return self.ident_func() - - def cleanup(self): - """Manually clean up the data in the locals for this context. Call - this at the end of the request or use `make_middleware()`. - """ - for local in self.locals: - release_local(local) - - def make_middleware(self, app): - """Wrap a WSGI application so that cleaning up happens after - request end. - """ - - def application(environ, start_response): - return ClosingIterator(app(environ, start_response), self.cleanup) - - return application - - def middleware(self, func): - """Like `make_middleware` but for decorating functions. - - Example usage:: - - @manager.middleware - def application(environ, start_response): - ... - - The difference to `make_middleware` is that the function passed - will have all the arguments copied from the inner application - (name, docstring, module). - """ - return update_wrapper(self.make_middleware(func), func) - - def __repr__(self): - return "<%s storages: %d>" % (self.__class__.__name__, len(self.locals)) - - -@implements_bool -class LocalProxy(object): - """Acts as a proxy for a werkzeug local. Forwards all operations to - a proxied object. The only operations not supported for forwarding - are right handed operands and any kind of assignment. - - Example usage:: - - from werkzeug.local import Local - l = Local() - - # these are proxies - request = l('request') - user = l('user') - - - from werkzeug.local import LocalStack - _response_local = LocalStack() - - # this is a proxy - response = _response_local() - - Whenever something is bound to l.user / l.request the proxy objects - will forward all operations. If no object is bound a :exc:`RuntimeError` - will be raised. - - To create proxies to :class:`Local` or :class:`LocalStack` objects, - call the object as shown above. If you want to have a proxy to an - object looked up by a function, you can (as of Werkzeug 0.6.1) pass - a function to the :class:`LocalProxy` constructor:: - - session = LocalProxy(lambda: get_current_request().session) - - .. versionchanged:: 0.6.1 - The class can be instantiated with a callable as well now. - """ - - __slots__ = ("__local", "__dict__", "__name__", "__wrapped__") - - def __init__(self, local, name=None): - object.__setattr__(self, "_LocalProxy__local", local) - object.__setattr__(self, "__name__", name) - if callable(local) and not hasattr(local, "__release_local__"): - # "local" is a callable that is not an instance of Local or - # LocalManager: mark it as a wrapped function. - object.__setattr__(self, "__wrapped__", local) - - def _get_current_object(self): - """Return the current object. 
This is useful if you want the real - object behind the proxy at a time for performance reasons or because - you want to pass the object into a different context. - """ - if not hasattr(self.__local, "__release_local__"): - return self.__local() - try: - return getattr(self.__local, self.__name__) - except AttributeError: - raise RuntimeError("no object bound to %s" % self.__name__) - - @property - def __dict__(self): - try: - return self._get_current_object().__dict__ - except RuntimeError: - raise AttributeError("__dict__") - - def __repr__(self): - try: - obj = self._get_current_object() - except RuntimeError: - return "<%s unbound>" % self.__class__.__name__ - return repr(obj) - - def __bool__(self): - try: - return bool(self._get_current_object()) - except RuntimeError: - return False - - def __unicode__(self): - try: - return unicode(self._get_current_object()) # noqa - except RuntimeError: - return repr(self) - - def __dir__(self): - try: - return dir(self._get_current_object()) - except RuntimeError: - return [] - - def __getattr__(self, name): - if name == "__members__": - return dir(self._get_current_object()) - return getattr(self._get_current_object(), name) - - def __setitem__(self, key, value): - self._get_current_object()[key] = value - - def __delitem__(self, key): - del self._get_current_object()[key] - - if PY2: - __getslice__ = lambda x, i, j: x._get_current_object()[i:j] - - def __setslice__(self, i, j, seq): - self._get_current_object()[i:j] = seq - - def __delslice__(self, i, j): - del self._get_current_object()[i:j] - - __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) - __delattr__ = lambda x, n: delattr(x._get_current_object(), n) - __str__ = lambda x: str(x._get_current_object()) - __lt__ = lambda x, o: x._get_current_object() < o - __le__ = lambda x, o: x._get_current_object() <= o - __eq__ = lambda x, o: x._get_current_object() == o - __ne__ = lambda x, o: x._get_current_object() != o - __gt__ = lambda x, o: x._get_current_object() > o - __ge__ = lambda x, o: x._get_current_object() >= o - __cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa - __hash__ = lambda x: hash(x._get_current_object()) - __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) - __len__ = lambda x: len(x._get_current_object()) - __getitem__ = lambda x, i: x._get_current_object()[i] - __iter__ = lambda x: iter(x._get_current_object()) - __contains__ = lambda x, i: i in x._get_current_object() - __add__ = lambda x, o: x._get_current_object() + o - __sub__ = lambda x, o: x._get_current_object() - o - __mul__ = lambda x, o: x._get_current_object() * o - __floordiv__ = lambda x, o: x._get_current_object() // o - __mod__ = lambda x, o: x._get_current_object() % o - __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) - __pow__ = lambda x, o: x._get_current_object() ** o - __lshift__ = lambda x, o: x._get_current_object() << o - __rshift__ = lambda x, o: x._get_current_object() >> o - __and__ = lambda x, o: x._get_current_object() & o - __xor__ = lambda x, o: x._get_current_object() ^ o - __or__ = lambda x, o: x._get_current_object() | o - __div__ = lambda x, o: x._get_current_object().__div__(o) - __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) - __neg__ = lambda x: -(x._get_current_object()) - __pos__ = lambda x: +(x._get_current_object()) - __abs__ = lambda x: abs(x._get_current_object()) - __invert__ = lambda x: ~(x._get_current_object()) - __complex__ = lambda x: complex(x._get_current_object()) - __int__ = lambda x: 
int(x._get_current_object())
-    __long__ = lambda x: long(x._get_current_object())  # noqa
-    __float__ = lambda x: float(x._get_current_object())
-    __oct__ = lambda x: oct(x._get_current_object())
-    __hex__ = lambda x: hex(x._get_current_object())
-    __index__ = lambda x: x._get_current_object().__index__()
-    __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o)
-    __enter__ = lambda x: x._get_current_object().__enter__()
-    __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw)
-    __radd__ = lambda x, o: o + x._get_current_object()
-    __rsub__ = lambda x, o: o - x._get_current_object()
-    __rmul__ = lambda x, o: o * x._get_current_object()
-    __rdiv__ = lambda x, o: o / x._get_current_object()
-    if PY2:
-        __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o)
-    else:
-        __rtruediv__ = __rdiv__
-    __rfloordiv__ = lambda x, o: o // x._get_current_object()
-    __rmod__ = lambda x, o: o % x._get_current_object()
-    __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
-    __copy__ = lambda x: copy.copy(x._get_current_object())
-    __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__init__.py b/venv/lib/python3.7/site-packages/werkzeug/middleware/__init__.py
deleted file mode 100644
index 5e049f5..0000000
--- a/venv/lib/python3.7/site-packages/werkzeug/middleware/__init__.py
+++ /dev/null
@@ -1,25 +0,0 @@
-"""
-Middleware
-==========
-
-A WSGI middleware is a WSGI application that wraps another application
-in order to observe or change its behavior. Werkzeug provides some
-middleware for common use cases.
-
-.. toctree::
-    :maxdepth: 1
-
-    proxy_fix
-    shared_data
-    dispatcher
-    http_proxy
-    lint
-    profiler
-
-The :doc:`interactive debugger </debug>` is also a middleware that can
-be applied manually, although it is typically used automatically with
-the :doc:`development server </serving>`.
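Since the vendored copies of these middlewares are being deleted, a quick reminder of how they compose: each middleware simply wraps the next WSGI callable. A development-time sketch, assuming Werkzeug from PyPI; `myapp` is a hypothetical module name:

```python
from werkzeug.middleware.lint import LintMiddleware
from werkzeug.middleware.proxy_fix import ProxyFix

from myapp import app  # hypothetical import; use the real application module

app = LintMiddleware(app)     # innermost: sanity-check WSGI behavior
app = ProxyFix(app, x_for=1)  # outermost: trust one proxy for X-Forwarded-For
```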
- -:copyright: 2007 Pallets -:license: BSD-3-Clause -""" diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index e996790..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc deleted file mode 100644 index 815c471..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/dispatcher.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc deleted file mode 100644 index fd9247b..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/http_proxy.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/lint.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/lint.cpython-37.pyc deleted file mode 100644 index c66674c..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/lint.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/profiler.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/profiler.cpython-37.pyc deleted file mode 100644 index 9eee8c1..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/profiler.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc deleted file mode 100644 index da07793..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/proxy_fix.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc deleted file mode 100644 index 9f07e5e..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/middleware/__pycache__/shared_data.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/dispatcher.py b/venv/lib/python3.7/site-packages/werkzeug/middleware/dispatcher.py deleted file mode 100644 index 2eb173e..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/middleware/dispatcher.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -Application Dispatcher -====================== - -This middleware creates a single WSGI application that dispatches to -multiple other WSGI applications mounted at different URL paths. - -A common example is writing a Single Page Application, where you have a -backend API and a frontend written in JavaScript that does the routing -in the browser rather than requesting different pages from the server. -The frontend is a single HTML and JS file that should be served for any -path besides "/api". 
- -This example dispatches to an API app under "/api", an admin app -under "/admin", and an app that serves frontend files for all other -requests:: - - app = DispatcherMiddleware(serve_frontend, { - '/api': api_app, - '/admin': admin_app, - }) - -In production, you might instead handle this at the HTTP server level, -serving files or proxying to application servers based on location. The -API and admin apps would each be deployed with a separate WSGI server, -and the static files would be served directly by the HTTP server. - -.. autoclass:: DispatcherMiddleware - -:copyright: 2007 Pallets -:license: BSD-3-Clause -""" - - -class DispatcherMiddleware(object): - """Combine multiple applications as a single WSGI application. - Requests are dispatched to an application based on the path it is - mounted under. - - :param app: The WSGI application to dispatch to if the request - doesn't match a mounted path. - :param mounts: Maps path prefixes to applications for dispatching. - """ - - def __init__(self, app, mounts=None): - self.app = app - self.mounts = mounts or {} - - def __call__(self, environ, start_response): - script = environ.get("PATH_INFO", "") - path_info = "" - - while "/" in script: - if script in self.mounts: - app = self.mounts[script] - break - - script, last_item = script.rsplit("/", 1) - path_info = "/%s%s" % (last_item, path_info) - else: - app = self.mounts.get(script, self.app) - - original_script_name = environ.get("SCRIPT_NAME", "") - environ["SCRIPT_NAME"] = original_script_name + script - environ["PATH_INFO"] = path_info - return app(environ, start_response) diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/http_proxy.py b/venv/lib/python3.7/site-packages/werkzeug/middleware/http_proxy.py deleted file mode 100644 index bfdc071..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/middleware/http_proxy.py +++ /dev/null @@ -1,219 +0,0 @@ -""" -Basic HTTP Proxy -================ - -.. autoclass:: ProxyMiddleware - -:copyright: 2007 Pallets -:license: BSD-3-Clause -""" -import socket - -from ..datastructures import EnvironHeaders -from ..http import is_hop_by_hop_header -from ..urls import url_parse -from ..urls import url_quote -from ..wsgi import get_input_stream - -try: - from http import client -except ImportError: - import httplib as client - - -class ProxyMiddleware(object): - """Proxy requests under a path to an external server, routing other - requests to the app. - - This middleware can only proxy HTTP requests, as that is the only - protocol handled by the WSGI server. Other protocols, such as - websocket requests, cannot be proxied at this layer. This should - only be used for development, in production a real proxying server - should be used. - - The middleware takes a dict that maps a path prefix to a dict - describing the host to be proxied to:: - - app = ProxyMiddleware(app, { - "/static/": { - "target": "http://127.0.0.1:5001/", - } - }) - - Each host has the following options: - - ``target``: - The target URL to dispatch to. This is required. - ``remove_prefix``: - Whether to remove the prefix from the URL before dispatching it - to the target. The default is ``False``. - ``host``: - ``""`` (default): - The host header is automatically rewritten to the URL of the - target. - ``None``: - The host header is unmodified from the client request. - Any other value: - The host header is overwritten with the value. - ``headers``: - A dictionary of headers to be sent with the request to the - target. The default is ``{}``. 
- ``ssl_context``: - A :class:`ssl.SSLContext` defining how to verify requests if the - target is HTTPS. The default is ``None``. - - In the example above, everything under ``"/static/"`` is proxied to - the server on port 5001. The host header is rewritten to the target, - and the ``"/static/"`` prefix is removed from the URLs. - - :param app: The WSGI application to wrap. - :param targets: Proxy target configurations. See description above. - :param chunk_size: Size of chunks to read from input stream and - write to target. - :param timeout: Seconds before an operation to a target fails. - - .. versionadded:: 0.14 - """ - - def __init__(self, app, targets, chunk_size=2 << 13, timeout=10): - def _set_defaults(opts): - opts.setdefault("remove_prefix", False) - opts.setdefault("host", "") - opts.setdefault("headers", {}) - opts.setdefault("ssl_context", None) - return opts - - self.app = app - self.targets = dict( - ("/%s/" % k.strip("/"), _set_defaults(v)) for k, v in targets.items() - ) - self.chunk_size = chunk_size - self.timeout = timeout - - def proxy_to(self, opts, path, prefix): - target = url_parse(opts["target"]) - - def application(environ, start_response): - headers = list(EnvironHeaders(environ).items()) - headers[:] = [ - (k, v) - for k, v in headers - if not is_hop_by_hop_header(k) - and k.lower() not in ("content-length", "host") - ] - headers.append(("Connection", "close")) - - if opts["host"] == "": - headers.append(("Host", target.ascii_host)) - elif opts["host"] is None: - headers.append(("Host", environ["HTTP_HOST"])) - else: - headers.append(("Host", opts["host"])) - - headers.extend(opts["headers"].items()) - remote_path = path - - if opts["remove_prefix"]: - remote_path = "%s/%s" % ( - target.path.rstrip("/"), - remote_path[len(prefix) :].lstrip("/"), - ) - - content_length = environ.get("CONTENT_LENGTH") - chunked = False - - if content_length not in ("", None): - headers.append(("Content-Length", content_length)) - elif content_length is not None: - headers.append(("Transfer-Encoding", "chunked")) - chunked = True - - try: - if target.scheme == "http": - con = client.HTTPConnection( - target.ascii_host, target.port or 80, timeout=self.timeout - ) - elif target.scheme == "https": - con = client.HTTPSConnection( - target.ascii_host, - target.port or 443, - timeout=self.timeout, - context=opts["ssl_context"], - ) - else: - raise RuntimeError( - "Target scheme must be 'http' or 'https', got '{}'.".format( - target.scheme - ) - ) - - con.connect() - remote_url = url_quote(remote_path) - querystring = environ["QUERY_STRING"] - - if querystring: - remote_url = remote_url + "?" 
+ querystring - - con.putrequest(environ["REQUEST_METHOD"], remote_url, skip_host=True) - - for k, v in headers: - if k.lower() == "connection": - v = "close" - - con.putheader(k, v) - - con.endheaders() - stream = get_input_stream(environ) - - while 1: - data = stream.read(self.chunk_size) - - if not data: - break - - if chunked: - con.send(b"%x\r\n%s\r\n" % (len(data), data)) - else: - con.send(data) - - resp = con.getresponse() - except socket.error: - from ..exceptions import BadGateway - - return BadGateway()(environ, start_response) - - start_response( - "%d %s" % (resp.status, resp.reason), - [ - (k.title(), v) - for k, v in resp.getheaders() - if not is_hop_by_hop_header(k) - ], - ) - - def read(): - while 1: - try: - data = resp.read(self.chunk_size) - except socket.error: - break - - if not data: - break - - yield data - - return read() - - return application - - def __call__(self, environ, start_response): - path = environ["PATH_INFO"] - app = self.app - - for prefix, opts in self.targets.items(): - if path.startswith(prefix): - app = self.proxy_to(opts, path, prefix) - break - - return app(environ, start_response) diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/lint.py b/venv/lib/python3.7/site-packages/werkzeug/middleware/lint.py deleted file mode 100644 index 98f9581..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/middleware/lint.py +++ /dev/null @@ -1,408 +0,0 @@ -""" -WSGI Protocol Linter -==================== - -This module provides a middleware that performs sanity checks on the -behavior of the WSGI server and application. It checks that the -:pep:`3333` WSGI spec is properly implemented. It also warns on some -common HTTP errors such as non-empty responses for 304 status codes. - -.. autoclass:: LintMiddleware - -:copyright: 2007 Pallets -:license: BSD-3-Clause -""" -from warnings import warn - -from .._compat import implements_iterator -from .._compat import PY2 -from .._compat import string_types -from ..datastructures import Headers -from ..http import is_entity_header -from ..wsgi import FileWrapper - -try: - from urllib.parse import urlparse -except ImportError: - from urlparse import urlparse - - -class WSGIWarning(Warning): - """Warning class for WSGI warnings.""" - - -class HTTPWarning(Warning): - """Warning class for HTTP warnings.""" - - -def check_string(context, obj, stacklevel=3): - if type(obj) is not str: - warn( - "'%s' requires strings, got '%s'" % (context, type(obj).__name__), - WSGIWarning, - ) - - -class InputStream(object): - def __init__(self, stream): - self._stream = stream - - def read(self, *args): - if len(args) == 0: - warn( - "WSGI does not guarantee an EOF marker on the input stream, thus making" - " calls to 'wsgi.input.read()' unsafe. Conforming servers may never" - " return from this call.", - WSGIWarning, - stacklevel=2, - ) - elif len(args) != 1: - warn( - "Too many parameters passed to 'wsgi.input.read()'.", - WSGIWarning, - stacklevel=2, - ) - return self._stream.read(*args) - - def readline(self, *args): - if len(args) == 0: - warn( - "Calls to 'wsgi.input.readline()' without arguments are unsafe. Use" - " 'wsgi.input.read()' instead.", - WSGIWarning, - stacklevel=2, - ) - elif len(args) == 1: - warn( - "'wsgi.input.readline()' was called with a size hint. 
WSGI does not"
-                " support this, although it's available on all major servers.",
-                WSGIWarning,
-                stacklevel=2,
-            )
-        else:
-            raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")
-        return self._stream.readline(*args)
-
-    def __iter__(self):
-        try:
-            return iter(self._stream)
-        except TypeError:
-            warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
-            return iter(())
-
-    def close(self):
-        warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
-        self._stream.close()
-
-
-class ErrorStream(object):
-    def __init__(self, stream):
-        self._stream = stream
-
-    def write(self, s):
-        check_string("wsgi.error.write()", s)
-        self._stream.write(s)
-
-    def flush(self):
-        self._stream.flush()
-
-    def writelines(self, seq):
-        for line in seq:
-            self.write(line)
-
-    def close(self):
-        warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
-        self._stream.close()
-
-
-class GuardedWrite(object):
-    def __init__(self, write, chunks):
-        self._write = write
-        self._chunks = chunks
-
-    def __call__(self, s):
-        check_string("write()", s)
-        # _write is the callable returned by start_response, not a stream
-        self._write(s)
-        self._chunks.append(len(s))
-
-
-@implements_iterator
-class GuardedIterator(object):
-    def __init__(self, iterator, headers_set, chunks):
-        self._iterator = iterator
-        if PY2:
-            self._next = iter(iterator).next
-        else:
-            self._next = iter(iterator).__next__
-        self.closed = False
-        self.headers_set = headers_set
-        self.chunks = chunks
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        if self.closed:
-            warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
-
-        rv = self._next()
-
-        if not self.headers_set:
-            warn(
-                "The application returned before it started the response.",
-                WSGIWarning,
-                stacklevel=2,
-            )
-
-        check_string("application iterator items", rv)
-        self.chunks.append(len(rv))
-        return rv
-
-    def close(self):
-        self.closed = True
-
-        if hasattr(self._iterator, "close"):
-            self._iterator.close()
-
-        if self.headers_set:
-            status_code, headers = self.headers_set
-            bytes_sent = sum(self.chunks)
-            content_length = headers.get("content-length", type=int)
-
-            if status_code == 304:
-                for key, _value in headers:
-                    key = key.lower()
-                    if key not in ("expires", "content-location") and is_entity_header(
-                        key
-                    ):
-                        warn(
-                            "Entity header %r found in 304 response." % key, HTTPWarning
-                        )
-                if bytes_sent:
-                    warn("304 responses must not have a body.", HTTPWarning)
-            elif 100 <= status_code < 200 or status_code == 204:
-                if content_length != 0:
-                    warn(
-                        "%r responses must have an empty content length." % status_code,
-                        HTTPWarning,
-                    )
-                if bytes_sent:
-                    warn(
-                        "%r responses must not have a body." % status_code, HTTPWarning
-                    )
-            elif content_length is not None and content_length != bytes_sent:
-                warn(
-                    "Content-Length and the number of bytes sent to the client do not"
-                    " match.",
-                    WSGIWarning,
-                )
-
-    def __del__(self):
-        if not self.closed:
-            try:
-                warn(
-                    "Iterator was garbage collected before it was closed.", WSGIWarning
-                )
-            except Exception:
-                pass
-
-
-class LintMiddleware(object):
-    """Warns about common errors in the WSGI and HTTP behavior of the
-    server and wrapped application. Some of the issues it checks are:
-
-    - invalid status codes
-    - non-bytestrings sent to the WSGI server
-    - strings returned from the WSGI application
-    - non-empty conditional responses
-    - unquoted etags
-    - relative URLs in the Location header
-    - unsafe calls to wsgi.input
-    - unclosed iterators
-
-    Error information is emitted using the :mod:`warnings` module.
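Because the linter reports through the standard `warnings` machinery, its findings can be promoted to hard errors while developing. A small sketch, assuming Werkzeug from PyPI; `demo_app` is illustrative:

```python
import warnings

from werkzeug.middleware.lint import LintMiddleware, WSGIWarning

# Fail fast instead of just printing lint findings to stderr.
warnings.simplefilter("error", WSGIWarning)


def demo_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"ok"]


app = LintMiddleware(demo_app)
```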
-
-    :param app: The WSGI application to wrap.
-
-    .. code-block:: python
-
-        from werkzeug.middleware.lint import LintMiddleware
-        app = LintMiddleware(app)
-    """
-
-    def __init__(self, app):
-        self.app = app
-
-    def check_environ(self, environ):
-        if type(environ) is not dict:
-            warn(
-                "WSGI environment is not a standard Python dict.",
-                WSGIWarning,
-                stacklevel=4,
-            )
-        for key in (
-            "REQUEST_METHOD",
-            "SERVER_NAME",
-            "SERVER_PORT",
-            "wsgi.version",
-            "wsgi.input",
-            "wsgi.errors",
-            "wsgi.multithread",
-            "wsgi.multiprocess",
-            "wsgi.run_once",
-        ):
-            if key not in environ:
-                warn(
-                    "Required environment key %r not found" % key,
-                    WSGIWarning,
-                    stacklevel=3,
-                )
-        if environ["wsgi.version"] != (1, 0):
-            warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)
-
-        script_name = environ.get("SCRIPT_NAME", "")
-        path_info = environ.get("PATH_INFO", "")
-
-        if script_name and script_name[0] != "/":
-            warn(
-                "'SCRIPT_NAME' does not start with a slash: %r" % script_name,
-                WSGIWarning,
-                stacklevel=3,
-            )
-
-        if path_info and path_info[0] != "/":
-            warn(
-                "'PATH_INFO' does not start with a slash: %r" % path_info,
-                WSGIWarning,
-                stacklevel=3,
-            )
-
-    def check_start_response(self, status, headers, exc_info):
-        check_string("status", status)
-        status_code = status.split(None, 1)[0]
-
-        if len(status_code) != 3 or not status_code.isdigit():
-            warn(WSGIWarning("Status code must be three digits"), stacklevel=3)
-
-        if len(status) < 4 or status[3] != " ":
-            warn(
-                WSGIWarning(
-                    "Invalid value for status %r. Valid "
-                    "status strings are three digits, a space "
-                    "and a status explanation"
-                ),
-                stacklevel=3,
-            )
-
-        status_code = int(status_code)
-
-        if status_code < 100:
-            warn(WSGIWarning("status code < 100 detected"), stacklevel=3)
-
-        if type(headers) is not list:
-            warn(WSGIWarning("header list is not a list"), stacklevel=3)
-
-        for item in headers:
-            if type(item) is not tuple or len(item) != 2:
-                warn(WSGIWarning("Headers must be 2-item tuples"), stacklevel=3)
-            name, value = item
-            if type(name) is not str or type(value) is not str:
-                warn(WSGIWarning("header items must be strings"), stacklevel=3)
-            if name.lower() == "status":
-                warn(
-                    WSGIWarning(
-                        "The status header is not supported due to "
-                        "conflicts with the CGI spec."
-                    ),
-                    stacklevel=3,
-                )
-
-        if exc_info is not None and not isinstance(exc_info, tuple):
-            warn(WSGIWarning("invalid value for exc_info"), stacklevel=3)
-
-        headers = Headers(headers)
-        self.check_headers(headers)
-
-        return status_code, headers
-
-    def check_headers(self, headers):
-        etag = headers.get("etag")
-
-        if etag is not None:
-            if etag.startswith(("W/", "w/")):
-                if etag.startswith("w/"):
-                    warn(
-                        HTTPWarning("weak etag indicator should be uppercase."),
-                        stacklevel=4,
-                    )
-
-                etag = etag[2:]
-
-            if not (etag[:1] == etag[-1:] == '"'):
-                warn(HTTPWarning("unquoted etag emitted."), stacklevel=4)
-
-        location = headers.get("location")
-
-        if location is not None:
-            if not urlparse(location).netloc:
-                warn(
-                    HTTPWarning("absolute URLs required for location header"),
-                    stacklevel=4,
-                )
-
-    def check_iterator(self, app_iter):
-        if isinstance(app_iter, string_types):
-            warn(
-                "The application returned a string. The response will send one character"
-                " at a time to the client, which will kill performance. 
Return a list" - " or iterable instead.", - WSGIWarning, - stacklevel=3, - ) - - def __call__(self, *args, **kwargs): - if len(args) != 2: - warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2) - - if kwargs: - warn( - "A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2 - ) - - environ, start_response = args - - self.check_environ(environ) - environ["wsgi.input"] = InputStream(environ["wsgi.input"]) - environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"]) - - # Hook our own file wrapper in so that applications will always - # iterate to the end and we can check the content length. - environ["wsgi.file_wrapper"] = FileWrapper - - headers_set = [] - chunks = [] - - def checking_start_response(*args, **kwargs): - if len(args) not in (2, 3): - warn( - "Invalid number of arguments: %s, expected 2 or 3." % len(args), - WSGIWarning, - stacklevel=2, - ) - - if kwargs: - warn("'start_response' does not take keyword arguments.", WSGIWarning) - - status, headers = args[:2] - - if len(args) == 3: - exc_info = args[2] - else: - exc_info = None - - headers_set[:] = self.check_start_response(status, headers, exc_info) - return GuardedWrite(start_response(status, headers, exc_info), chunks) - - app_iter = self.app(environ, checking_start_response) - self.check_iterator(app_iter) - return GuardedIterator(app_iter, headers_set, chunks) diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/profiler.py b/venv/lib/python3.7/site-packages/werkzeug/middleware/profiler.py deleted file mode 100644 index 32a14d9..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/middleware/profiler.py +++ /dev/null @@ -1,132 +0,0 @@ -""" -Application Profiler -==================== - -This module provides a middleware that profiles each request with the -:mod:`cProfile` module. This can help identify bottlenecks in your code -that may be slowing down your application. - -.. autoclass:: ProfilerMiddleware - -:copyright: 2007 Pallets -:license: BSD-3-Clause -""" -from __future__ import print_function - -import os.path -import sys -import time -from pstats import Stats - -try: - from cProfile import Profile -except ImportError: - from profile import Profile - - -class ProfilerMiddleware(object): - """Wrap a WSGI application and profile the execution of each - request. Responses are buffered so that timings are more exact. - - If ``stream`` is given, :class:`pstats.Stats` are written to it - after each request. If ``profile_dir`` is given, :mod:`cProfile` - data files are saved to that directory, one file per request. - - The filename can be customized by passing ``filename_format``. If - it is a string, it will be formatted using :meth:`str.format` with - the following fields available: - - - ``{method}`` - The request method; GET, POST, etc. - - ``{path}`` - The request path or 'root' should one not exist. - - ``{elapsed}`` - The elapsed time of the request. - - ``{time}`` - The time of the request. - - If it is a callable, it will be called with the WSGI ``environ`` - dict and should return a filename. - - :param app: The WSGI application to wrap. - :param stream: Write stats to this stream. Disable with ``None``. - :param sort_by: A tuple of columns to sort stats by. See - :meth:`pstats.Stats.sort_stats`. - :param restrictions: A tuple of restrictions to filter stats by. See - :meth:`pstats.Stats.print_stats`. - :param profile_dir: Save profile data files to this directory. - :param filename_format: Format string for profile data file names, - or a callable returning a name. 
See explanation above. - - .. code-block:: python - - from werkzeug.middleware.profiler import ProfilerMiddleware - app = ProfilerMiddleware(app) - - .. versionchanged:: 0.15 - Stats are written even if ``profile_dir`` is given, and can be - disable by passing ``stream=None``. - - .. versionadded:: 0.15 - Added ``filename_format``. - - .. versionadded:: 0.9 - Added ``restrictions`` and ``profile_dir``. - """ - - def __init__( - self, - app, - stream=sys.stdout, - sort_by=("time", "calls"), - restrictions=(), - profile_dir=None, - filename_format="{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof", - ): - self._app = app - self._stream = stream - self._sort_by = sort_by - self._restrictions = restrictions - self._profile_dir = profile_dir - self._filename_format = filename_format - - def __call__(self, environ, start_response): - response_body = [] - - def catching_start_response(status, headers, exc_info=None): - start_response(status, headers, exc_info) - return response_body.append - - def runapp(): - app_iter = self._app(environ, catching_start_response) - response_body.extend(app_iter) - - if hasattr(app_iter, "close"): - app_iter.close() - - profile = Profile() - start = time.time() - profile.runcall(runapp) - body = b"".join(response_body) - elapsed = time.time() - start - - if self._profile_dir is not None: - if callable(self._filename_format): - filename = self._filename_format(environ) - else: - filename = self._filename_format.format( - method=environ["REQUEST_METHOD"], - path=( - environ.get("PATH_INFO").strip("/").replace("/", ".") or "root" - ), - elapsed=elapsed * 1000.0, - time=time.time(), - ) - filename = os.path.join(self._profile_dir, filename) - profile.dump_stats(filename) - - if self._stream is not None: - stats = Stats(profile, stream=self._stream) - stats.sort_stats(*self._sort_by) - print("-" * 80, file=self._stream) - print("PATH: {!r}".format(environ.get("PATH_INFO", "")), file=self._stream) - stats.print_stats(*self._restrictions) - print("-" * 80 + "\n", file=self._stream) - - return [body] diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/proxy_fix.py b/venv/lib/python3.7/site-packages/werkzeug/middleware/proxy_fix.py deleted file mode 100644 index f393f61..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/middleware/proxy_fix.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -X-Forwarded-For Proxy Fix -========================= - -This module provides a middleware that adjusts the WSGI environ based on -``X-Forwarded-`` headers that proxies in front of an application may -set. - -When an application is running behind a proxy server, WSGI may see the -request as coming from that server rather than the real client. Proxies -set various headers to track where the request actually came from. - -This middleware should only be applied if the application is actually -behind such a proxy, and should be configured with the number of proxies -that are chained in front of it. Not all proxies set all the headers. -Since incoming headers can be faked, you must set how many proxies are -setting each header so the middleware knows what to trust. - -.. autoclass:: ProxyFix - -:copyright: 2007 Pallets -:license: BSD-3-Clause -""" -from werkzeug.http import parse_list_header - - -class ProxyFix(object): - """Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in - front of the application may set. - - - ``X-Forwarded-For`` sets ``REMOTE_ADDR``. - - ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``. 
- - ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and - ``SERVER_PORT``. - - ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``. - - ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``. - - You must tell the middleware how many proxies set each header so it - knows what values to trust. It is a security issue to trust values - that came from the client rather than a proxy. - - The original values of the headers are stored in the WSGI - environ as ``werkzeug.proxy_fix.orig``, a dict. - - :param app: The WSGI application to wrap. - :param x_for: Number of values to trust for ``X-Forwarded-For``. - :param x_proto: Number of values to trust for ``X-Forwarded-Proto``. - :param x_host: Number of values to trust for ``X-Forwarded-Host``. - :param x_port: Number of values to trust for ``X-Forwarded-Port``. - :param x_prefix: Number of values to trust for - ``X-Forwarded-Prefix``. - - .. code-block:: python - - from werkzeug.middleware.proxy_fix import ProxyFix - # App is behind one proxy that sets the -For and -Host headers. - app = ProxyFix(app, x_for=1, x_host=1) - - .. versionchanged:: 1.0 - Deprecated code has been removed: - - * The ``num_proxies`` argument and attribute. - * The ``get_remote_addr`` method. - * The environ keys ``orig_remote_addr``, - ``orig_wsgi_url_scheme``, and ``orig_http_host``. - - .. versionchanged:: 0.15 - All headers support multiple values. The ``num_proxies`` - argument is deprecated. Each header is configured with a - separate number of trusted proxies. - - .. versionchanged:: 0.15 - Original WSGI environ values are stored in the - ``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``, - ``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated - and will be removed in 1.0. - - .. versionchanged:: 0.15 - Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``. - - .. versionchanged:: 0.15 - ``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify - ``SERVER_NAME`` and ``SERVER_PORT``. - """ - - def __init__(self, app, x_for=1, x_proto=1, x_host=0, x_port=0, x_prefix=0): - self.app = app - self.x_for = x_for - self.x_proto = x_proto - self.x_host = x_host - self.x_port = x_port - self.x_prefix = x_prefix - - def _get_real_value(self, trusted, value): - """Get the real value from a list header based on the configured - number of trusted proxies. - - :param trusted: Number of values to trust in the header. - :param value: Comma separated list header value to parse. - :return: The real value, or ``None`` if there are fewer values - than the number of trusted proxies. - - .. versionchanged:: 1.0 - Renamed from ``_get_trusted_comma``. - - .. versionadded:: 0.15 - """ - if not (trusted and value): - return - values = parse_list_header(value) - if len(values) >= trusted: - return values[-trusted] - - def __call__(self, environ, start_response): - """Modify the WSGI environ based on the various ``Forwarded`` - headers before calling the wrapped application. Store the - original environ values in ``werkzeug.proxy_fix.orig_{key}``. 
-        """
-        environ_get = environ.get
-        orig_remote_addr = environ_get("REMOTE_ADDR")
-        orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
-        orig_http_host = environ_get("HTTP_HOST")
-        environ.update(
-            {
-                "werkzeug.proxy_fix.orig": {
-                    "REMOTE_ADDR": orig_remote_addr,
-                    "wsgi.url_scheme": orig_wsgi_url_scheme,
-                    "HTTP_HOST": orig_http_host,
-                    "SERVER_NAME": environ_get("SERVER_NAME"),
-                    "SERVER_PORT": environ_get("SERVER_PORT"),
-                    "SCRIPT_NAME": environ_get("SCRIPT_NAME"),
-                }
-            }
-        )
-
-        x_for = self._get_real_value(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
-        if x_for:
-            environ["REMOTE_ADDR"] = x_for
-
-        x_proto = self._get_real_value(
-            self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
-        )
-        if x_proto:
-            environ["wsgi.url_scheme"] = x_proto
-
-        x_host = self._get_real_value(self.x_host, environ_get("HTTP_X_FORWARDED_HOST"))
-        if x_host:
-            environ["HTTP_HOST"] = x_host
-            parts = x_host.split(":", 1)
-            environ["SERVER_NAME"] = parts[0]
-            if len(parts) == 2:
-                environ["SERVER_PORT"] = parts[1]
-
-        x_port = self._get_real_value(self.x_port, environ_get("HTTP_X_FORWARDED_PORT"))
-        if x_port:
-            host = environ.get("HTTP_HOST")
-            if host:
-                parts = host.split(":", 1)
-                host = parts[0] if len(parts) == 2 else host
-                environ["HTTP_HOST"] = "%s:%s" % (host, x_port)
-            environ["SERVER_PORT"] = x_port
-
-        x_prefix = self._get_real_value(
-            self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
-        )
-        if x_prefix:
-            environ["SCRIPT_NAME"] = x_prefix
-
-        return self.app(environ, start_response)
diff --git a/venv/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py b/venv/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py
deleted file mode 100644
index ab4ff0f..0000000
--- a/venv/lib/python3.7/site-packages/werkzeug/middleware/shared_data.py
+++ /dev/null
@@ -1,293 +0,0 @@
-"""
-Serve Shared Static Files
-=========================
-
-.. autoclass:: SharedDataMiddleware
-    :members: is_allowed
-
-:copyright: 2007 Pallets
-:license: BSD-3-Clause
-"""
-import mimetypes
-import os
-import pkgutil
-import posixpath
-from datetime import datetime
-from io import BytesIO
-from time import mktime
-from time import time
-from zlib import adler32
-
-from .._compat import PY2
-from .._compat import string_types
-from ..filesystem import get_filesystem_encoding
-from ..http import http_date
-from ..http import is_resource_modified
-from ..security import safe_join
-from ..utils import get_content_type
-from ..wsgi import get_path_info
-from ..wsgi import wrap_file
-
-
-class SharedDataMiddleware(object):
-
-    """A WSGI middleware that provides static content for development
-    environments or simple server setups. Usage is quite simple::
-
-        import os
-        from werkzeug.wsgi import SharedDataMiddleware
-
-        app = SharedDataMiddleware(app, {
-            '/static': os.path.join(os.path.dirname(__file__), 'static')
-        })
-
-    The contents of the folder ``./static`` will now be available on
-    ``http://example.com/static/``. This is pretty useful during development
-    because a standalone media server is not required. One can also mount
-    files on the root folder and still continue to use the application because
-    the shared data middleware forwards all unhandled requests to the
-    application, even if the requests are below one of the shared folders.
- - If `pkg_resources` is available you can also tell the middleware to serve - files from package data:: - - app = SharedDataMiddleware(app, { - '/static': ('myapplication', 'static') - }) - - This will then serve the ``static`` folder in the `myapplication` - Python package. - - The optional `disallow` parameter can be a list of :func:`~fnmatch.fnmatch` - rules for files that are not accessible from the web. If `cache` is set to - `False` no caching headers are sent. - - Currently the middleware does not support non ASCII filenames. If the - encoding on the file system happens to be the encoding of the URI it may - work but this could also be by accident. We strongly suggest using ASCII - only file names for static files. - - The middleware will guess the mimetype using the Python `mimetype` - module. If it's unable to figure out the charset it will fall back - to `fallback_mimetype`. - - :param app: the application to wrap. If you don't want to wrap an - application you can pass it :exc:`NotFound`. - :param exports: a list or dict of exported files and folders. - :param disallow: a list of :func:`~fnmatch.fnmatch` rules. - :param cache: enable or disable caching headers. - :param cache_timeout: the cache timeout in seconds for the headers. - :param fallback_mimetype: The fallback mimetype for unknown files. - - .. versionchanged:: 1.0 - The default ``fallback_mimetype`` is - ``application/octet-stream``. If a filename looks like a text - mimetype, the ``utf-8`` charset is added to it. - - .. versionadded:: 0.6 - Added ``fallback_mimetype``. - - .. versionchanged:: 0.5 - Added ``cache_timeout``. - """ - - def __init__( - self, - app, - exports, - disallow=None, - cache=True, - cache_timeout=60 * 60 * 12, - fallback_mimetype="application/octet-stream", - ): - self.app = app - self.exports = [] - self.cache = cache - self.cache_timeout = cache_timeout - - if hasattr(exports, "items"): - exports = exports.items() - - for key, value in exports: - if isinstance(value, tuple): - loader = self.get_package_loader(*value) - elif isinstance(value, string_types): - if os.path.isfile(value): - loader = self.get_file_loader(value) - else: - loader = self.get_directory_loader(value) - else: - raise TypeError("unknown def %r" % value) - - self.exports.append((key, loader)) - - if disallow is not None: - from fnmatch import fnmatch - - self.is_allowed = lambda x: not fnmatch(x, disallow) - - self.fallback_mimetype = fallback_mimetype - - def is_allowed(self, filename): - """Subclasses can override this method to disallow the access to - certain files. However by providing `disallow` in the constructor - this method is overwritten. 
- """ - return True - - def _opener(self, filename): - return lambda: ( - open(filename, "rb"), - datetime.utcfromtimestamp(os.path.getmtime(filename)), - int(os.path.getsize(filename)), - ) - - def get_file_loader(self, filename): - return lambda x: (os.path.basename(filename), self._opener(filename)) - - def get_package_loader(self, package, package_path): - loadtime = datetime.utcnow() - provider = pkgutil.get_loader(package) - - if hasattr(provider, "get_resource_reader"): - # Python 3 - reader = provider.get_resource_reader(package) - - def loader(path): - if path is None: - return None, None - - path = safe_join(package_path, path) - basename = posixpath.basename(path) - - try: - resource = reader.open_resource(path) - except IOError: - return None, None - - if isinstance(resource, BytesIO): - return ( - basename, - lambda: (resource, loadtime, len(resource.getvalue())), - ) - - return ( - basename, - lambda: ( - resource, - datetime.utcfromtimestamp(os.path.getmtime(resource.name)), - os.path.getsize(resource.name), - ), - ) - - else: - # Python 2 - package_filename = provider.get_filename(package) - is_filesystem = os.path.exists(package_filename) - root = os.path.join(os.path.dirname(package_filename), package_path) - - def loader(path): - if path is None: - return None, None - - path = safe_join(root, path) - basename = posixpath.basename(path) - - if is_filesystem: - if not os.path.isfile(path): - return None, None - - return basename, self._opener(path) - - try: - data = provider.get_data(path) - except IOError: - return None, None - - return basename, lambda: (BytesIO(data), loadtime, len(data)) - - return loader - - def get_directory_loader(self, directory): - def loader(path): - if path is not None: - path = safe_join(directory, path) - else: - path = directory - - if os.path.isfile(path): - return os.path.basename(path), self._opener(path) - - return None, None - - return loader - - def generate_etag(self, mtime, file_size, real_filename): - if not isinstance(real_filename, bytes): - real_filename = real_filename.encode(get_filesystem_encoding()) - - return "wzsdm-%d-%s-%s" % ( - mktime(mtime.timetuple()), - file_size, - adler32(real_filename) & 0xFFFFFFFF, - ) - - def __call__(self, environ, start_response): - path = get_path_info(environ) - - if PY2: - path = path.encode(get_filesystem_encoding()) - - file_loader = None - - for search_path, loader in self.exports: - if search_path == path: - real_filename, file_loader = loader(None) - - if file_loader is not None: - break - - if not search_path.endswith("/"): - search_path += "/" - - if path.startswith(search_path): - real_filename, file_loader = loader(path[len(search_path) :]) - - if file_loader is not None: - break - - if file_loader is None or not self.is_allowed(real_filename): - return self.app(environ, start_response) - - guessed_type = mimetypes.guess_type(real_filename) - mime_type = get_content_type(guessed_type[0] or self.fallback_mimetype, "utf-8") - f, mtime, file_size = file_loader() - - headers = [("Date", http_date())] - - if self.cache: - timeout = self.cache_timeout - etag = self.generate_etag(mtime, file_size, real_filename) - headers += [ - ("Etag", '"%s"' % etag), - ("Cache-Control", "max-age=%d, public" % timeout), - ] - - if not is_resource_modified(environ, etag, last_modified=mtime): - f.close() - start_response("304 Not Modified", headers) - return [] - - headers.append(("Expires", http_date(time() + timeout))) - else: - headers.append(("Cache-Control", "public")) - - headers.extend( - ( - 
("Content-Type", mime_type), - ("Content-Length", str(file_size)), - ("Last-Modified", http_date(mtime)), - ) - ) - start_response("200 OK", headers) - return wrap_file(environ, f) diff --git a/venv/lib/python3.7/site-packages/werkzeug/posixemulation.py b/venv/lib/python3.7/site-packages/werkzeug/posixemulation.py deleted file mode 100644 index 696b456..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/posixemulation.py +++ /dev/null @@ -1,117 +0,0 @@ -# -*- coding: utf-8 -*- -r""" - werkzeug.posixemulation - ~~~~~~~~~~~~~~~~~~~~~~~ - - Provides a POSIX emulation for some features that are relevant to - web applications. The main purpose is to simplify support for - systems such as Windows NT that are not 100% POSIX compatible. - - Currently this only implements a :func:`rename` function that - follows POSIX semantics. Eg: if the target file already exists it - will be replaced without asking. - - This module was introduced in 0.6.1 and is not a public interface. - It might become one in later versions of Werkzeug. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import errno -import os -import random -import sys -import time - -from ._compat import to_unicode -from .filesystem import get_filesystem_encoding - -can_rename_open_file = False - -if os.name == "nt": - try: - import ctypes - - _MOVEFILE_REPLACE_EXISTING = 0x1 - _MOVEFILE_WRITE_THROUGH = 0x8 - _MoveFileEx = ctypes.windll.kernel32.MoveFileExW - - def _rename(src, dst): - src = to_unicode(src, get_filesystem_encoding()) - dst = to_unicode(dst, get_filesystem_encoding()) - if _rename_atomic(src, dst): - return True - retry = 0 - rv = False - while not rv and retry < 100: - rv = _MoveFileEx( - src, dst, _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH - ) - if not rv: - time.sleep(0.001) - retry += 1 - return rv - - # new in Vista and Windows Server 2008 - _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction - _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction - _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW - _CloseHandle = ctypes.windll.kernel32.CloseHandle - can_rename_open_file = True - - def _rename_atomic(src, dst): - ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, "Werkzeug rename") - if ta == -1: - return False - try: - retry = 0 - rv = False - while not rv and retry < 100: - rv = _MoveFileTransacted( - src, - dst, - None, - None, - _MOVEFILE_REPLACE_EXISTING | _MOVEFILE_WRITE_THROUGH, - ta, - ) - if rv: - rv = _CommitTransaction(ta) - break - else: - time.sleep(0.001) - retry += 1 - return rv - finally: - _CloseHandle(ta) - - except Exception: - - def _rename(src, dst): - return False - - def _rename_atomic(src, dst): - return False - - def rename(src, dst): - # Try atomic or pseudo-atomic rename - if _rename(src, dst): - return - # Fall back to "move away and replace" - try: - os.rename(src, dst) - except OSError as e: - if e.errno != errno.EEXIST: - raise - old = "%s-%08x" % (dst, random.randint(0, sys.maxsize)) - os.rename(dst, old) - os.rename(src, dst) - try: - os.unlink(old) - except Exception: - pass - - -else: - rename = os.rename - can_rename_open_file = True diff --git a/venv/lib/python3.7/site-packages/werkzeug/routing.py b/venv/lib/python3.7/site-packages/werkzeug/routing.py deleted file mode 100644 index 8fa3c60..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/routing.py +++ /dev/null @@ -1,2210 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.routing - ~~~~~~~~~~~~~~~~ - - When it comes to combining multiple controller or view functions (however - 
you want to call them) you need a dispatcher. A simple way would be - applying regular expression tests on the ``PATH_INFO`` and calling - registered callback functions that return the value then. - - This module implements a much more powerful system than simple regular - expression matching because it can also convert values in the URLs and - build URLs. - - Here a simple example that creates an URL map for an application with - two subdomains (www and kb) and some URL rules: - - >>> m = Map([ - ... # Static URLs - ... Rule('/', endpoint='static/index'), - ... Rule('/about', endpoint='static/about'), - ... Rule('/help', endpoint='static/help'), - ... # Knowledge Base - ... Subdomain('kb', [ - ... Rule('/', endpoint='kb/index'), - ... Rule('/browse/', endpoint='kb/browse'), - ... Rule('/browse/<int:id>/', endpoint='kb/browse'), - ... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse') - ... ]) - ... ], default_subdomain='www') - - If the application doesn't use subdomains it's perfectly fine to not set - the default subdomain and not use the `Subdomain` rule factory. The endpoint - in the rules can be anything, for example import paths or unique - identifiers. The WSGI application can use those endpoints to get the - handler for that URL. It doesn't have to be a string at all but it's - recommended. - - Now it's possible to create a URL adapter for one of the subdomains and - build URLs: - - >>> c = m.bind('example.com') - >>> c.build("kb/browse", dict(id=42)) - 'http://kb.example.com/browse/42/' - >>> c.build("kb/browse", dict()) - 'http://kb.example.com/browse/' - >>> c.build("kb/browse", dict(id=42, page=3)) - 'http://kb.example.com/browse/42/3' - >>> c.build("static/about") - '/about' - >>> c.build("static/index", force_external=True) - 'http://www.example.com/' - - >>> c = m.bind('example.com', subdomain='kb') - >>> c.build("static/about") - 'http://www.example.com/about' - - The first argument to bind is the server name *without* the subdomain. - Per default it will assume that the script is mounted on the root, but - often that's not the case so you can provide the real mount point as - second argument: - - >>> c = m.bind('example.com', '/applications/example') - - The third argument can be the subdomain, if not given the default - subdomain is used. For more details about binding have a look at the - documentation of the `MapAdapter`. - - And here is how you can match URLs: - - >>> c = m.bind('example.com') - >>> c.match("/") - ('static/index', {}) - >>> c.match("/about") - ('static/about', {}) - >>> c = m.bind('example.com', '/', 'kb') - >>> c.match("/") - ('kb/index', {}) - >>> c.match("/browse/42/23") - ('kb/browse', {'id': 42, 'page': 23}) - - If matching fails you get a `NotFound` exception, if the rule thinks - it's a good idea to redirect (for example because the URL was defined - to have a slash at the end but the request was missing that slash) it - will raise a `RequestRedirect` exception. Both are subclasses of the - `HTTPException` so you can use those errors as responses in the - application. - - If matching succeeded but the URL rule was incompatible to the given - method (for example there were only rules for `GET` and `HEAD` and - routing system tried to match a `POST` request) a `MethodNotAllowed` - exception is raised.
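(A short runnable sketch of the two failure modes just described, using illustrative rules rather than anything from this repository:)

```
from werkzeug.routing import Map, Rule
from werkzeug.exceptions import MethodNotAllowed, NotFound

m = Map([Rule("/submit", endpoint="submit", methods=["GET"])])
urls = m.bind("example.com")

try:
    urls.match("/submit", method="POST")
except MethodNotAllowed as e:
    # HEAD is implied whenever GET is allowed
    print(sorted(e.valid_methods))  # ['GET', 'HEAD']

try:
    urls.match("/missing")
except NotFound:
    print("no rule matched")
```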
- - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import ast -import difflib -import posixpath -import re -import uuid -import warnings -from pprint import pformat -from threading import Lock - -from ._compat import implements_to_string -from ._compat import iteritems -from ._compat import itervalues -from ._compat import native_string_result -from ._compat import string_types -from ._compat import text_type -from ._compat import to_bytes -from ._compat import to_unicode -from ._compat import wsgi_decoding_dance -from ._internal import _encode_idna -from ._internal import _get_environ -from .datastructures import ImmutableDict -from .datastructures import MultiDict -from .exceptions import BadHost -from .exceptions import BadRequest -from .exceptions import HTTPException -from .exceptions import MethodNotAllowed -from .exceptions import NotFound -from .urls import _fast_url_quote -from .urls import url_encode -from .urls import url_join -from .urls import url_quote -from .utils import cached_property -from .utils import format_string -from .utils import redirect -from .wsgi import get_host - -_rule_re = re.compile( - r""" - (?P<static>[^<]*) # static rule data - < - (?: - (?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name - (?:\((?P<args>.*?)\))? # converter arguments - \: # variable delimiter - )? - (?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name - > - """, - re.VERBOSE, -) -_simple_rule_re = re.compile(r"<([^>]+)>") -_converter_args_re = re.compile( - r""" - ((?P<name>\w+)\s*=\s*)? - (?P<value> - True|False| - \d+.\d+| - \d+.| - \d+| - [\w\d_.]+| - [urUR]?(?P<stringval>"[^"]*?"|'[^']*') - )\s*, - """, - re.VERBOSE | re.UNICODE, -) - - -_PYTHON_CONSTANTS = {"None": None, "True": True, "False": False} - - -def _pythonize(value): - if value in _PYTHON_CONSTANTS: - return _PYTHON_CONSTANTS[value] - for convert in int, float: - try: - return convert(value) - except ValueError: - pass - if value[:1] == value[-1:] and value[0] in "\"'": - value = value[1:-1] - return text_type(value) - - -def parse_converter_args(argstr): - argstr += "," - args = [] - kwargs = {} - - for item in _converter_args_re.finditer(argstr): - value = item.group("stringval") - if value is None: - value = item.group("value") - value = _pythonize(value) - if not item.group("name"): - args.append(value) - else: - name = item.group("name") - kwargs[name] = value - - return tuple(args), kwargs - - -def parse_rule(rule): - """Parse a rule and return it as generator. Each iteration yields tuples - in the form ``(converter, arguments, variable)``. If the converter is - `None` it's a static url part, otherwise it's a dynamic one. - - :internal: - """ - pos = 0 - end = len(rule) - do_match = _rule_re.match - used_names = set() - while pos < end: - m = do_match(rule, pos) - if m is None: - break - data = m.groupdict() - if data["static"]: - yield None, None, data["static"] - variable = data["variable"] - converter = data["converter"] or "default" - if variable in used_names: - raise ValueError("variable name %r used twice." % variable) - used_names.add(variable) - yield converter, data["args"] or None, variable - pos = m.end() - if pos < end: - remaining = rule[pos:] - if ">" in remaining or "<" in remaining: - raise ValueError("malformed url rule: %r" % rule) - yield None, None, remaining - - -class RoutingException(Exception): - """Special exceptions that require the application to redirect, notifying - about missing urls, etc. - - :internal: - """ - - -class RequestRedirect(HTTPException, RoutingException): - """Raise if the map requests a redirect.
This is for example the case if - `strict_slashes` are activated and an url that requires a trailing slash. - - The attribute `new_url` contains the absolute destination url. - """ - - code = 308 - - def __init__(self, new_url): - RoutingException.__init__(self, new_url) - self.new_url = new_url - - def get_response(self, environ): - return redirect(self.new_url, self.code) - - -class RequestPath(RoutingException): - """Internal exception.""" - - __slots__ = ("path_info",) - - def __init__(self, path_info): - self.path_info = path_info - - -class RequestAliasRedirect(RoutingException): # noqa: B903 - """This rule is an alias and wants to redirect to the canonical URL.""" - - def __init__(self, matched_values): - self.matched_values = matched_values - - -@implements_to_string -class BuildError(RoutingException, LookupError): - """Raised if the build system cannot find a URL for an endpoint with the - values provided. - """ - - def __init__(self, endpoint, values, method, adapter=None): - LookupError.__init__(self, endpoint, values, method) - self.endpoint = endpoint - self.values = values - self.method = method - self.adapter = adapter - - @cached_property - def suggested(self): - return self.closest_rule(self.adapter) - - def closest_rule(self, adapter): - def _score_rule(rule): - return sum( - [ - 0.98 - * difflib.SequenceMatcher( - None, rule.endpoint, self.endpoint - ).ratio(), - 0.01 * bool(set(self.values or ()).issubset(rule.arguments)), - 0.01 * bool(rule.methods and self.method in rule.methods), - ] - ) - - if adapter and adapter.map._rules: - return max(adapter.map._rules, key=_score_rule) - - def __str__(self): - message = [] - message.append("Could not build url for endpoint %r" % self.endpoint) - if self.method: - message.append(" (%r)" % self.method) - if self.values: - message.append(" with values %r" % sorted(self.values.keys())) - message.append(".") - if self.suggested: - if self.endpoint == self.suggested.endpoint: - if self.method and self.method not in self.suggested.methods: - message.append( - " Did you mean to use methods %r?" - % sorted(self.suggested.methods) - ) - missing_values = self.suggested.arguments.union( - set(self.suggested.defaults or ()) - ) - set(self.values.keys()) - if missing_values: - message.append( - " Did you forget to specify values %r?" % sorted(missing_values) - ) - else: - message.append(" Did you mean %r instead?" % self.suggested.endpoint) - return u"".join(message) - - -class WebsocketMismatch(BadRequest): - """The only matched rule is either a WebSocket and the request is - HTTP, or the rule is HTTP and the request is a WebSocket. - """ - - -class ValidationError(ValueError): - """Validation error. If a rule converter raises this exception the rule - does not match the current URL and the next URL is tried. - """ - - -class RuleFactory(object): - """As soon as you have more complex URL setups it's a good idea to use rule - factories to avoid repetitive tasks. Some of them are builtin, others can - be added by subclassing `RuleFactory` and overriding `get_rules`. - """ - - def get_rules(self, map): - """Subclasses of `RuleFactory` have to override this method and return - an iterable of rules.""" - raise NotImplementedError() - - -class Subdomain(RuleFactory): - """All URLs provided by this factory have the subdomain set to a - specific domain. 
For example if you want to use the subdomain for - the current language this can be a good setup:: - - url_map = Map([ - Rule('/', endpoint='#select_language'), - Subdomain('<string(length=2):lang_code>', [ - Rule('/', endpoint='index'), - Rule('/about', endpoint='about'), - Rule('/help', endpoint='help') - ]) - ]) - - All the rules except for the ``'#select_language'`` endpoint will now - listen on a two letter long subdomain that holds the language code - for the current request. - """ - - def __init__(self, subdomain, rules): - self.subdomain = subdomain - self.rules = rules - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - rule = rule.empty() - rule.subdomain = self.subdomain - yield rule - - -class Submount(RuleFactory): - """Like `Subdomain` but prefixes the URL rule with a given string:: - - url_map = Map([ - Rule('/', endpoint='index'), - Submount('/blog', [ - Rule('/', endpoint='blog/index'), - Rule('/entry/<entry_slug>', endpoint='blog/show') - ]) - ]) - - Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``. - """ - - def __init__(self, path, rules): - self.path = path.rstrip("/") - self.rules = rules - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - rule = rule.empty() - rule.rule = self.path + rule.rule - yield rule - - -class EndpointPrefix(RuleFactory): - """Prefixes all endpoints (which must be strings for this factory) with - another string. This can be useful for sub applications:: - - url_map = Map([ - Rule('/', endpoint='index'), - EndpointPrefix('blog/', [Submount('/blog', [ - Rule('/', endpoint='index'), - Rule('/entry/<entry_slug>', endpoint='show') - ])]) - ]) - """ - - def __init__(self, prefix, rules): - self.prefix = prefix - self.rules = rules - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - rule = rule.empty() - rule.endpoint = self.prefix + rule.endpoint - yield rule - - -class RuleTemplate(object): - """Returns copies of the rules wrapped and expands string templates in - the endpoint, rule, defaults or subdomain sections. - - Here a small example for such a rule template:: - - from werkzeug.routing import Map, Rule, RuleTemplate - - resource = RuleTemplate([ - Rule('/$name/', endpoint='$name.list'), - Rule('/$name/<id>', endpoint='$name.show') - ]) - - url_map = Map([resource(name='user'), resource(name='page')]) - - When a rule template is called the keyword arguments are used to - replace the placeholders in all the string parameters. - """ - - def __init__(self, rules): - self.rules = list(rules) - - def __call__(self, *args, **kwargs): - return RuleTemplateFactory(self.rules, dict(*args, **kwargs)) - - -class RuleTemplateFactory(RuleFactory): - """A factory that fills in template variables into rules. Used by - `RuleTemplate` internally.
- - :internal: - """ - - def __init__(self, rules, context): - self.rules = rules - self.context = context - - def get_rules(self, map): - for rulefactory in self.rules: - for rule in rulefactory.get_rules(map): - new_defaults = subdomain = None - if rule.defaults: - new_defaults = {} - for key, value in iteritems(rule.defaults): - if isinstance(value, string_types): - value = format_string(value, self.context) - new_defaults[key] = value - if rule.subdomain is not None: - subdomain = format_string(rule.subdomain, self.context) - new_endpoint = rule.endpoint - if isinstance(new_endpoint, string_types): - new_endpoint = format_string(new_endpoint, self.context) - yield Rule( - format_string(rule.rule, self.context), - new_defaults, - subdomain, - rule.methods, - rule.build_only, - new_endpoint, - rule.strict_slashes, - ) - - -def _prefix_names(src): - """ast parse and prefix names with `.` to avoid collision with user vars""" - tree = ast.parse(src).body[0] - if isinstance(tree, ast.Expr): - tree = tree.value - for node in ast.walk(tree): - if isinstance(node, ast.Name): - node.id = "." + node.id - return tree - - -_CALL_CONVERTER_CODE_FMT = "self._converters[{elem!r}].to_url()" -_IF_KWARGS_URL_ENCODE_CODE = """\ -if kwargs: - q = '?' - params = self._encode_query_vars(kwargs) -else: - q = params = '' -""" -_IF_KWARGS_URL_ENCODE_AST = _prefix_names(_IF_KWARGS_URL_ENCODE_CODE) -_URL_ENCODE_AST_NAMES = (_prefix_names("q"), _prefix_names("params")) - - -@implements_to_string -class Rule(RuleFactory): - """A Rule represents one URL pattern. There are some options for `Rule` - that change the way it behaves and are passed to the `Rule` constructor. - Note that besides the rule-string all arguments *must* be keyword arguments - in order to not break the application on Werkzeug upgrades. - - `string` - Rule strings basically are just normal URL paths with placeholders in - the format ``<converter(arguments):name>`` where the converter and the - arguments are optional. If no converter is defined the `default` - converter is used which means `string` in the normal configuration. - - URL rules that end with a slash are branch URLs, others are leaves. - If you have `strict_slashes` enabled (which is the default), all - branch URLs that are matched without a trailing slash will trigger a - redirect to the same URL with the missing slash appended. - - The converters are defined on the `Map`. - - `endpoint` - The endpoint for this rule. This can be anything. A reference to a - function, a string, a number etc. The preferred way is using a string - because the endpoint is used for URL generation. - - `defaults` - An optional dict with defaults for other rules with the same endpoint. - This is a bit tricky but useful if you want to have unique URLs:: - - url_map = Map([ - Rule('/all/', defaults={'page': 1}, endpoint='all_entries'), - Rule('/all/page/<int:page>', endpoint='all_entries') - ]) - - If a user now visits ``http://example.com/all/page/1`` he will be - redirected to ``http://example.com/all/``. If `redirect_defaults` is - disabled on the `Map` instance this will only affect the URL - generation. - - `subdomain` - The subdomain rule string for this rule. If not specified the rule - only matches for the `default_subdomain` of the map. If the map is - not bound to a subdomain this feature is disabled.
- - Can be useful if you want to have user profiles on different subdomains - and all subdomains are forwarded to your application:: - - url_map = Map([ - Rule('/', subdomain='<username>', endpoint='user/homepage'), - Rule('/stats', subdomain='<username>', endpoint='user/stats') - ]) - - `methods` - A sequence of http methods this rule applies to. If not specified, all - methods are allowed. For example this can be useful if you want different - endpoints for `POST` and `GET`. If methods are defined and the path - matches but the method matched against is not in this list or in the - list of another rule for that path the error raised is of the type - `MethodNotAllowed` rather than `NotFound`. If `GET` is present in the - list of methods and `HEAD` is not, `HEAD` is added automatically. - - `strict_slashes` - Override the `Map` setting for `strict_slashes` only for this rule. If - not specified the `Map` setting is used. - - `merge_slashes` - Override :attr:`Map.merge_slashes` for this rule. - - `build_only` - Set this to True and the rule will never match but will create a URL - that can be build. This is useful if you have resources on a subdomain - or folder that are not handled by the WSGI application (like static data) - - `redirect_to` - If given this must be either a string or callable. In case of a - callable it's called with the url adapter that triggered the match and - the values of the URL as keyword arguments and has to return the target - for the redirect, otherwise it has to be a string with placeholders in - rule syntax:: - - def foo_with_slug(adapter, id): - # ask the database for the slug for the old id. this of - # course has nothing to do with werkzeug. - return 'foo/' + Foo.get_slug_for_id(id) - - url_map = Map([ - Rule('/foo/<slug>', endpoint='foo'), - Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'), - Rule('/other/old/url/<id>', redirect_to=foo_with_slug) - ]) - - When the rule is matched the routing system will raise a - `RequestRedirect` exception with the target for the redirect. - - Keep in mind that the URL will be joined against the URL root of the - script so don't use a leading slash on the target URL unless you - really mean root of that domain. - - `alias` - If enabled this rule serves as an alias for another rule with the same - endpoint and arguments. - - `host` - If provided and the URL map has host matching enabled this can be - used to provide a match rule for the whole host. This also means - that the subdomain feature is disabled. - - `websocket` - If ``True``, this rule is only matches for WebSocket (``ws://``, - ``wss://``) requests. By default, rules will only match for HTTP - requests. - - .. versionadded:: 1.0 - Added ``websocket``. - - .. versionadded:: 1.0 - Added ``merge_slashes``. - - .. versionadded:: 0.7 - Added ``alias`` and ``host``. - - .. versionchanged:: 0.6.1 - ``HEAD`` is added to ``methods`` if ``GET`` is present.
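(A compact sketch of the `redirect_to` behaviour described above; endpoint and rule names are made up for illustration:)

```
from werkzeug.routing import Map, Rule, RequestRedirect

url_map = Map([
    Rule("/foo/<slug>", endpoint="foo"),
    Rule("/some/old/url/<slug>", redirect_to="foo/<slug>"),
])
urls = url_map.bind("example.com")

try:
    urls.match("/some/old/url/hello")
except RequestRedirect as e:
    # the placeholder is filled in and joined against the URL root
    print(e.new_url)  # http://example.com/foo/hello
```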
- """ - - def __init__( - self, - string, - defaults=None, - subdomain=None, - methods=None, - build_only=False, - endpoint=None, - strict_slashes=None, - merge_slashes=None, - redirect_to=None, - alias=False, - host=None, - websocket=False, - ): - if not string.startswith("/"): - raise ValueError("urls must start with a leading slash") - self.rule = string - self.is_leaf = not string.endswith("/") - - self.map = None - self.strict_slashes = strict_slashes - self.merge_slashes = merge_slashes - self.subdomain = subdomain - self.host = host - self.defaults = defaults - self.build_only = build_only - self.alias = alias - self.websocket = websocket - - if methods is not None: - if isinstance(methods, str): - raise TypeError("'methods' should be a list of strings.") - - methods = {x.upper() for x in methods} - - if "HEAD" not in methods and "GET" in methods: - methods.add("HEAD") - - if websocket and methods - {"GET", "HEAD", "OPTIONS"}: - raise ValueError( - "WebSocket rules can only use 'GET', 'HEAD', and 'OPTIONS' methods." - ) - - self.methods = methods - self.endpoint = endpoint - self.redirect_to = redirect_to - - if defaults: - self.arguments = set(map(str, defaults)) - else: - self.arguments = set() - self._trace = self._converters = self._regex = self._argument_weights = None - - def empty(self): - """ - Return an unbound copy of this rule. - - This can be useful if want to reuse an already bound URL for another - map. See ``get_empty_kwargs`` to override what keyword arguments are - provided to the new copy. - """ - return type(self)(self.rule, **self.get_empty_kwargs()) - - def get_empty_kwargs(self): - """ - Provides kwargs for instantiating empty copy with empty() - - Use this method to provide custom keyword arguments to the subclass of - ``Rule`` when calling ``some_rule.empty()``. Helpful when the subclass - has custom keyword arguments that are needed at instantiation. - - Must return a ``dict`` that will be provided as kwargs to the new - instance of ``Rule``, following the initial ``self.rule`` value which - is always provided as the first, required positional argument. - """ - defaults = None - if self.defaults: - defaults = dict(self.defaults) - return dict( - defaults=defaults, - subdomain=self.subdomain, - methods=self.methods, - build_only=self.build_only, - endpoint=self.endpoint, - strict_slashes=self.strict_slashes, - redirect_to=self.redirect_to, - alias=self.alias, - host=self.host, - ) - - def get_rules(self, map): - yield self - - def refresh(self): - """Rebinds and refreshes the URL. Call this if you modified the - rule in place. - - :internal: - """ - self.bind(self.map, rebind=True) - - def bind(self, map, rebind=False): - """Bind the url to a map and create a regular expression based on - the information from the rule itself and the defaults from the map. - - :internal: - """ - if self.map is not None and not rebind: - raise RuntimeError("url rule %r already bound to map %r" % (self, self.map)) - self.map = map - if self.strict_slashes is None: - self.strict_slashes = map.strict_slashes - if self.merge_slashes is None: - self.merge_slashes = map.merge_slashes - if self.subdomain is None: - self.subdomain = map.default_subdomain - self.compile() - - def get_converter(self, variable_name, converter_name, args, kwargs): - """Looks up the converter for the given parameter. - - .. 
versionadded:: 0.9 - """ - if converter_name not in self.map.converters: - raise LookupError("the converter %r does not exist" % converter_name) - return self.map.converters[converter_name](self.map, *args, **kwargs) - - def _encode_query_vars(self, query_vars): - return url_encode( - query_vars, - charset=self.map.charset, - sort=self.map.sort_parameters, - key=self.map.sort_key, - ) - - def compile(self): - """Compiles the regular expression and stores it.""" - assert self.map is not None, "rule not bound" - - if self.map.host_matching: - domain_rule = self.host or "" - else: - domain_rule = self.subdomain or "" - - self._trace = [] - self._converters = {} - self._static_weights = [] - self._argument_weights = [] - regex_parts = [] - - def _build_regex(rule): - index = 0 - for converter, arguments, variable in parse_rule(rule): - if converter is None: - for match in re.finditer(r"/+|[^/]+", variable): - part = match.group(0) - if part.startswith("/"): - if self.merge_slashes: - regex_parts.append(r"/+?") - self._trace.append((False, "/")) - else: - regex_parts.append(part) - self._trace.append((False, part)) - continue - self._trace.append((False, part)) - regex_parts.append(re.escape(part)) - if part: - self._static_weights.append((index, -len(part))) - else: - if arguments: - c_args, c_kwargs = parse_converter_args(arguments) - else: - c_args = () - c_kwargs = {} - convobj = self.get_converter(variable, converter, c_args, c_kwargs) - regex_parts.append("(?P<%s>%s)" % (variable, convobj.regex)) - self._converters[variable] = convobj - self._trace.append((True, variable)) - self._argument_weights.append(convobj.weight) - self.arguments.add(str(variable)) - index = index + 1 - - _build_regex(domain_rule) - regex_parts.append("\\|") - self._trace.append((False, "|")) - _build_regex(self.rule if self.is_leaf else self.rule.rstrip("/")) - if not self.is_leaf: - self._trace.append((False, "/")) - - self._build = self._compile_builder(False).__get__(self, None) - self._build_unknown = self._compile_builder(True).__get__(self, None) - - if self.build_only: - return - - if not (self.is_leaf and self.strict_slashes): - reps = u"*" if self.merge_slashes else u"?" - tail = u"(?<!/)(?P<__suffix__>/%s)" % reps - else: - tail = u"" - - regex = u"^%s%s$" % (u"".join(regex_parts), tail) - self._regex = re.compile(regex, re.UNICODE) - - def match(self, path, method=None): - """Check if the rule matches a given path. Path is a string in the - form ``"subdomain|/path"`` and is assembled by the map. If - the map is doing host matching the subdomain part will be the host - instead. - - If the rule matches a dict with the converted values is returned, - otherwise the return value is `None`. - - :internal: - """ - if not self.build_only: - require_redirect = False - - m = self._regex.search(path) - if m is not None: - groups = m.groupdict() - # we have a folder like part of the url without a trailing - # slash and strict slashes enabled.
raise an exception that - # tells the map to redirect to the same url but with a - # trailing slash - if ( - self.strict_slashes - and not self.is_leaf - and not groups.pop("__suffix__") - and ( - method is None or self.methods is None or method in self.methods - ) - ): - path += "/" - require_redirect = True - # if we are not in strict slashes mode we have to remove - # a __suffix__ - elif not self.strict_slashes: - del groups["__suffix__"] - - result = {} - for name, value in iteritems(groups): - try: - value = self._converters[name].to_python(value) - except ValidationError: - return - result[str(name)] = value - if self.defaults: - result.update(self.defaults) - - if self.merge_slashes: - new_path = "|".join(self.build(result, False)) - if path.endswith("/") and not new_path.endswith("/"): - new_path += "/" - if new_path.count("/") < path.count("/"): - path = new_path - require_redirect = True - - if require_redirect: - path = path.split("|", 1)[1] - raise RequestPath(path) - - if self.alias and self.map.redirect_defaults: - raise RequestAliasRedirect(result) - - return result - - @staticmethod - def _get_func_code(code, name): - globs, locs = {}, {} - exec(code, globs, locs) - return locs[name] - - def _compile_builder(self, append_unknown=True): - defaults = self.defaults or {} - dom_ops = [] - url_ops = [] - - opl = dom_ops - for is_dynamic, data in self._trace: - if data == "|" and opl is dom_ops: - opl = url_ops - continue - # this seems like a silly case to ever come up but: - # if a default is given for a value that appears in the rule, - # resolve it to a constant ahead of time - if is_dynamic and data in defaults: - data = self._converters[data].to_url(defaults[data]) - opl.append((False, data)) - elif not is_dynamic: - opl.append( - (False, url_quote(to_bytes(data, self.map.charset), safe="/:|+")) - ) - else: - opl.append((True, data)) - - def _convert(elem): - ret = _prefix_names(_CALL_CONVERTER_CODE_FMT.format(elem=elem)) - ret.args = [ast.Name(str(elem), ast.Load())] # str for py2 - return ret - - def _parts(ops): - parts = [ - _convert(elem) if is_dynamic else ast.Str(s=elem) - for is_dynamic, elem in ops - ] - parts = parts or [ast.Str("")] - # constant fold - ret = [parts[0]] - for p in parts[1:]: - if isinstance(p, ast.Str) and isinstance(ret[-1], ast.Str): - ret[-1] = ast.Str(ret[-1].s + p.s) - else: - ret.append(p) - return ret - - dom_parts = _parts(dom_ops) - url_parts = _parts(url_ops) - if not append_unknown: - body = [] - else: - body = [_IF_KWARGS_URL_ENCODE_AST] - url_parts.extend(_URL_ENCODE_AST_NAMES) - - def _join(parts): - if len(parts) == 1: # shortcut - return parts[0] - elif hasattr(ast, "JoinedStr"): # py36+ - return ast.JoinedStr(parts) - else: - call = _prefix_names('"".join()') - call.args = [ast.Tuple(parts, ast.Load())] - return call - - body.append( - ast.Return(ast.Tuple([_join(dom_parts), _join(url_parts)], ast.Load())) - ) - - # str is necessary for python2 - pargs = [ - str(elem) - for is_dynamic, elem in dom_ops + url_ops - if is_dynamic and elem not in defaults - ] - kargs = [str(k) for k in defaults] - - func_ast = _prefix_names("def _(): pass") - func_ast.name = "".format(self.rule) - if hasattr(ast, "arg"): # py3 - func_ast.args.args.append(ast.arg(".self", None)) - for arg in pargs + kargs: - func_ast.args.args.append(ast.arg(arg, None)) - func_ast.args.kwarg = ast.arg(".kwargs", None) - else: - func_ast.args.args.append(ast.Name(".self", ast.Param())) - for arg in pargs + kargs: - func_ast.args.args.append(ast.Name(arg, ast.Param())) - 
func_ast.args.kwarg = ".kwargs" - for _ in kargs: - func_ast.args.defaults.append(ast.Str("")) - func_ast.body = body - - # use `ast.parse` instead of `ast.Module` for better portability - # python3.8 changes the signature of `ast.Module` - module = ast.parse("") - module.body = [func_ast] - - # mark everything as on line 1, offset 0 - # less error-prone than `ast.fix_missing_locations` - # bad line numbers cause an assert to fail in debug builds - for node in ast.walk(module): - if "lineno" in node._attributes: - node.lineno = 1 - if "col_offset" in node._attributes: - node.col_offset = 0 - - code = compile(module, "", "exec") - return self._get_func_code(code, func_ast.name) - - def build(self, values, append_unknown=True): - """Assembles the relative url for that rule and the subdomain. - If building doesn't work for some reasons `None` is returned. - - :internal: - """ - try: - if append_unknown: - return self._build_unknown(**values) - else: - return self._build(**values) - except ValidationError: - return None - - def provides_defaults_for(self, rule): - """Check if this rule has defaults for a given rule. - - :internal: - """ - return ( - not self.build_only - and self.defaults - and self.endpoint == rule.endpoint - and self != rule - and self.arguments == rule.arguments - ) - - def suitable_for(self, values, method=None): - """Check if the dict of values has enough data for url generation. - - :internal: - """ - # if a method was given explicitly and that method is not supported - # by this rule, this rule is not suitable. - if ( - method is not None - and self.methods is not None - and method not in self.methods - ): - return False - - defaults = self.defaults or () - - # all arguments required must be either in the defaults dict or - # the value dictionary otherwise it's not suitable - for key in self.arguments: - if key not in defaults and key not in values: - return False - - # in case defaults are given we ensure that either the value was - # skipped or the value is the same as the default value. - if defaults: - for key, value in iteritems(defaults): - if key in values and value != values[key]: - return False - - return True - - def match_compare_key(self): - """The match compare key for sorting. - - Current implementation: - - 1. rules without any arguments come first for performance - reasons only as we expect them to match faster and some - common ones usually don't have any arguments (index pages etc.) - 2. rules with more static parts come first so the second argument - is the negative length of the number of the static weights. - 3. we order by static weights, which is a combination of index - and length - 4. The more complex rules come first so the next argument is the - negative length of the number of argument weights. - 5. lastly we order by the actual argument weights. - - :internal: - """ - return ( - bool(self.arguments), - -len(self._static_weights), - self._static_weights, - -len(self._argument_weights), - self._argument_weights, - ) - - def build_compare_key(self): - """The build compare key for sorting. 
- - :internal: - """ - return 1 if self.alias else 0, -len(self.arguments), -len(self.defaults or ()) - - def __eq__(self, other): - return self.__class__ is other.__class__ and self._trace == other._trace - - __hash__ = None - - def __ne__(self, other): - return not self.__eq__(other) - - def __str__(self): - return self.rule - - @native_string_result - def __repr__(self): - if self.map is None: - return u"<%s (unbound)>" % self.__class__.__name__ - tmp = [] - for is_dynamic, data in self._trace: - if is_dynamic: - tmp.append(u"<%s>" % data) - else: - tmp.append(data) - return u"<%s %s%s -> %s>" % ( - self.__class__.__name__, - repr((u"".join(tmp)).lstrip(u"|")).lstrip(u"u"), - self.methods is not None and u" (%s)" % u", ".join(self.methods) or u"", - self.endpoint, - ) - - -class BaseConverter(object): - """Base class for all converters.""" - - regex = "[^/]+" - weight = 100 - - def __init__(self, map): - self.map = map - - def to_python(self, value): - return value - - def to_url(self, value): - if isinstance(value, (bytes, bytearray)): - return _fast_url_quote(value) - return _fast_url_quote(text_type(value).encode(self.map.charset)) - - -class UnicodeConverter(BaseConverter): - """This converter is the default converter and accepts any string but - only one path segment. Thus the string can not include a slash. - - This is the default validator. - - Example:: - - Rule('/pages/'), - Rule('/') - - :param map: the :class:`Map`. - :param minlength: the minimum length of the string. Must be greater - or equal 1. - :param maxlength: the maximum length of the string. - :param length: the exact length of the string. - """ - - def __init__(self, map, minlength=1, maxlength=None, length=None): - BaseConverter.__init__(self, map) - if length is not None: - length = "{%d}" % int(length) - else: - if maxlength is None: - maxlength = "" - else: - maxlength = int(maxlength) - length = "{%s,%s}" % (int(minlength), maxlength) - self.regex = "[^/]" + length - - -class AnyConverter(BaseConverter): - """Matches one of the items provided. Items can either be Python - identifiers or strings:: - - Rule('/') - - :param map: the :class:`Map`. - :param items: this function accepts the possible items as positional - arguments. - """ - - def __init__(self, map, *items): - BaseConverter.__init__(self, map) - self.regex = "(?:%s)" % "|".join([re.escape(x) for x in items]) - - -class PathConverter(BaseConverter): - """Like the default :class:`UnicodeConverter`, but it also matches - slashes. This is useful for wikis and similar applications:: - - Rule('/') - Rule('//edit') - - :param map: the :class:`Map`. - """ - - regex = "[^/].*?" - weight = 200 - - -class NumberConverter(BaseConverter): - """Baseclass for `IntegerConverter` and `FloatConverter`. 
- - :internal: - """ - - weight = 50 - - def __init__(self, map, fixed_digits=0, min=None, max=None, signed=False): - if signed: - self.regex = self.signed_regex - BaseConverter.__init__(self, map) - self.fixed_digits = fixed_digits - self.min = min - self.max = max - self.signed = signed - - def to_python(self, value): - if self.fixed_digits and len(value) != self.fixed_digits: - raise ValidationError() - value = self.num_convert(value) - if (self.min is not None and value < self.min) or ( - self.max is not None and value > self.max - ): - raise ValidationError() - return value - - def to_url(self, value): - value = self.num_convert(value) - if self.fixed_digits: - value = ("%%0%sd" % self.fixed_digits) % value - return str(value) - - @property - def signed_regex(self): - return r"-?" + self.regex - - -class IntegerConverter(NumberConverter): - """This converter only accepts integer values:: - - Rule("/page/") - - By default it only accepts unsigned, positive values. The ``signed`` - parameter will enable signed, negative values. :: - - Rule("/page/") - - :param map: The :class:`Map`. - :param fixed_digits: The number of fixed digits in the URL. If you - set this to ``4`` for example, the rule will only match if the - URL looks like ``/0001/``. The default is variable length. - :param min: The minimal value. - :param max: The maximal value. - :param signed: Allow signed (negative) values. - - .. versionadded:: 0.15 - The ``signed`` parameter. - """ - - regex = r"\d+" - num_convert = int - - -class FloatConverter(NumberConverter): - """This converter only accepts floating point values:: - - Rule("/probability/") - - By default it only accepts unsigned, positive values. The ``signed`` - parameter will enable signed, negative values. :: - - Rule("/offset/") - - :param map: The :class:`Map`. - :param min: The minimal value. - :param max: The maximal value. - :param signed: Allow signed (negative) values. - - .. versionadded:: 0.15 - The ``signed`` parameter. - """ - - regex = r"\d+\.\d+" - num_convert = float - - def __init__(self, map, min=None, max=None, signed=False): - NumberConverter.__init__(self, map, min=min, max=max, signed=signed) - - -class UUIDConverter(BaseConverter): - """This converter only accepts UUID strings:: - - Rule('/object/') - - .. versionadded:: 0.10 - - :param map: the :class:`Map`. - """ - - regex = ( - r"[A-Fa-f0-9]{8}-[A-Fa-f0-9]{4}-" - r"[A-Fa-f0-9]{4}-[A-Fa-f0-9]{4}-[A-Fa-f0-9]{12}" - ) - - def to_python(self, value): - return uuid.UUID(value) - - def to_url(self, value): - return str(value) - - -#: the default converter mapping for the map. -DEFAULT_CONVERTERS = { - "default": UnicodeConverter, - "string": UnicodeConverter, - "any": AnyConverter, - "path": PathConverter, - "int": IntegerConverter, - "float": FloatConverter, - "uuid": UUIDConverter, -} - - -class Map(object): - """The map class stores all the URL rules and some configuration - parameters. Some of the configuration values are only stored on the - `Map` instance since those affect all rules, others are just defaults - and can be overridden for each rule. Note that you have to specify all - arguments besides the `rules` as keyword arguments! - - :param rules: sequence of url rules for this map. - :param default_subdomain: The default subdomain for rules without a - subdomain defined. - :param charset: charset of the url. defaults to ``"utf-8"`` - :param strict_slashes: If a rule ends with a slash but the matched - URL does not, redirect to the URL with a trailing slash. 
- :param merge_slashes: Merge consecutive slashes when matching or - building URLs. Matches will redirect to the normalized URL. - Slashes in variable parts are not merged. - :param redirect_defaults: This will redirect to the default rule if it - wasn't visited that way. This helps creating - unique URLs. - :param converters: A dict of converters that adds additional converters - to the list of converters. If you redefine one - converter this will override the original one. - :param sort_parameters: If set to `True` the url parameters are sorted. - See `url_encode` for more details. - :param sort_key: The sort key function for `url_encode`. - :param encoding_errors: the error method to use for decoding - :param host_matching: if set to `True` it enables the host matching - feature and disables the subdomain one. If - enabled the `host` parameter to rules is used - instead of the `subdomain` one. - - .. versionchanged:: 1.0 - If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules - will match. - - .. versionchanged:: 1.0 - Added ``merge_slashes``. - - .. versionchanged:: 0.7 - Added ``encoding_errors`` and ``host_matching``. - - .. versionchanged:: 0.5 - Added ``sort_parameters`` and ``sort_key``. - """ - - #: A dict of default converters to be used. - default_converters = ImmutableDict(DEFAULT_CONVERTERS) - - #: The type of lock to use when updating. - #: - #: .. versionadded:: 1.0 - lock_class = Lock - - def __init__( - self, - rules=None, - default_subdomain="", - charset="utf-8", - strict_slashes=True, - merge_slashes=True, - redirect_defaults=True, - converters=None, - sort_parameters=False, - sort_key=None, - encoding_errors="replace", - host_matching=False, - ): - self._rules = [] - self._rules_by_endpoint = {} - self._remap = True - self._remap_lock = self.lock_class() - - self.default_subdomain = default_subdomain - self.charset = charset - self.encoding_errors = encoding_errors - self.strict_slashes = strict_slashes - self.merge_slashes = merge_slashes - self.redirect_defaults = redirect_defaults - self.host_matching = host_matching - - self.converters = self.default_converters.copy() - if converters: - self.converters.update(converters) - - self.sort_parameters = sort_parameters - self.sort_key = sort_key - - for rulefactory in rules or (): - self.add(rulefactory) - - def is_endpoint_expecting(self, endpoint, *arguments): - """Iterate over all rules and check if the endpoint expects - the arguments provided. This is for example useful if you have - some URLs that expect a language code and others that do not and - you want to wrap the builder a bit so that the current language - code is automatically added if not provided but endpoints expect - it. - - :param endpoint: the endpoint to check. - :param arguments: this function accepts one or more arguments - as positional arguments. Each one of them is - checked. - """ - self.update() - arguments = set(arguments) - for rule in self._rules_by_endpoint[endpoint]: - if arguments.issubset(rule.arguments): - return True - return False - - def iter_rules(self, endpoint=None): - """Iterate over all rules or the rules of an endpoint. - - :param endpoint: if provided only the rules for that endpoint - are returned. - :return: an iterator - """ - self.update() - if endpoint is not None: - return iter(self._rules_by_endpoint[endpoint]) - return iter(self._rules) - - def add(self, rulefactory): - """Add a new rule or factory to the map and bind it. Requires that the - rule is not bound to another map. 
- - :param rulefactory: a :class:`Rule` or :class:`RuleFactory` - """ - for rule in rulefactory.get_rules(self): - rule.bind(self) - self._rules.append(rule) - self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule) - self._remap = True - - def bind( - self, - server_name, - script_name=None, - subdomain=None, - url_scheme="http", - default_method="GET", - path_info=None, - query_args=None, - ): - """Return a new :class:`MapAdapter` with the details specified to the - call. Note that `script_name` will default to ``'/'`` if not further - specified or `None`. The `server_name` at least is a requirement - because the HTTP RFC requires absolute URLs for redirects and so all - redirect exceptions raised by Werkzeug will contain the full canonical - URL. - - If no path_info is passed to :meth:`match` it will use the default path - info passed to bind. While this doesn't really make sense for - manual bind calls, it's useful if you bind a map to a WSGI - environment which already contains the path info. - - `subdomain` will default to the `default_subdomain` for this map if - no defined. If there is no `default_subdomain` you cannot use the - subdomain feature. - - .. versionchanged:: 1.0 - If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules - will match. - - .. versionchanged:: 0.15 - ``path_info`` defaults to ``'/'`` if ``None``. - - .. versionchanged:: 0.8 - ``query_args`` can be a string. - - .. versionchanged:: 0.7 - Added ``query_args``. - """ - server_name = server_name.lower() - if self.host_matching: - if subdomain is not None: - raise RuntimeError("host matching enabled and a subdomain was provided") - elif subdomain is None: - subdomain = self.default_subdomain - if script_name is None: - script_name = "/" - if path_info is None: - path_info = "/" - try: - server_name = _encode_idna(server_name) - except UnicodeError: - raise BadHost() - return MapAdapter( - self, - server_name, - script_name, - subdomain, - url_scheme, - path_info, - default_method, - query_args, - ) - - def bind_to_environ(self, environ, server_name=None, subdomain=None): - """Like :meth:`bind` but you can pass it an WSGI environment and it - will fetch the information from that dictionary. Note that because of - limitations in the protocol there is no way to get the current - subdomain and real `server_name` from the environment. If you don't - provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or - `HTTP_HOST` if provided) as used `server_name` with disabled subdomain - feature. - - If `subdomain` is `None` but an environment and a server name is - provided it will calculate the current subdomain automatically. - Example: `server_name` is ``'example.com'`` and the `SERVER_NAME` - in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated - subdomain will be ``'staging.dev'``. - - If the object passed as environ has an environ attribute, the value of - this attribute is used instead. This allows you to pass request - objects. Additionally `PATH_INFO` added as a default of the - :class:`MapAdapter` so that you don't have to pass the path info to - the match method. - - .. versionchanged:: 1.0.0 - If the passed server name specifies port 443, it will match - if the incoming scheme is ``https`` without a port. - - .. versionchanged:: 1.0.0 - A warning is shown when the passed server name does not - match the incoming WSGI server name. - - .. versionchanged:: 0.8 - This will no longer raise a ValueError when an unexpected server - name was passed. - - .. 
versionchanged:: 0.5 - previously this method accepted a bogus `calculate_subdomain` - parameter that did not have any effect. It was removed because - of that. - - :param environ: a WSGI environment. - :param server_name: an optional server name hint (see above). - :param subdomain: optionally the current subdomain (see above). - """ - environ = _get_environ(environ) - wsgi_server_name = get_host(environ).lower() - scheme = environ["wsgi.url_scheme"] - - if server_name is None: - server_name = wsgi_server_name - else: - server_name = server_name.lower() - - # strip standard port to match get_host() - if scheme == "http" and server_name.endswith(":80"): - server_name = server_name[:-3] - elif scheme == "https" and server_name.endswith(":443"): - server_name = server_name[:-4] - - if subdomain is None and not self.host_matching: - cur_server_name = wsgi_server_name.split(".") - real_server_name = server_name.split(".") - offset = -len(real_server_name) - - if cur_server_name[offset:] != real_server_name: - # This can happen even with valid configs if the server was - # accessed directly by IP address under some situations. - # Instead of raising an exception like in Werkzeug 0.7 or - # earlier we go by an invalid subdomain which will result - # in a 404 error on matching. - warnings.warn( - "Current server name '{}' doesn't match configured" - " server name '{}'".format(wsgi_server_name, server_name), - stacklevel=2, - ) - subdomain = "" - else: - subdomain = ".".join(filter(None, cur_server_name[:offset])) - - def _get_wsgi_string(name): - val = environ.get(name) - if val is not None: - return wsgi_decoding_dance(val, self.charset) - - script_name = _get_wsgi_string("SCRIPT_NAME") - path_info = _get_wsgi_string("PATH_INFO") - query_args = _get_wsgi_string("QUERY_STRING") - return Map.bind( - self, - server_name, - script_name, - subdomain, - scheme, - environ["REQUEST_METHOD"], - path_info, - query_args=query_args, - ) - - def update(self): - """Called before matching and building to keep the compiled rules - in the correct order after things changed. - """ - if not self._remap: - return - - with self._remap_lock: - if not self._remap: - return - - self._rules.sort(key=lambda x: x.match_compare_key()) - for rules in itervalues(self._rules_by_endpoint): - rules.sort(key=lambda x: x.build_compare_key()) - self._remap = False - - def __repr__(self): - rules = self.iter_rules() - return "%s(%s)" % (self.__class__.__name__, pformat(list(rules))) - - -class MapAdapter(object): - - """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does - the URL matching and building based on runtime information. - """ - - def __init__( - self, - map, - server_name, - script_name, - subdomain, - url_scheme, - path_info, - default_method, - query_args=None, - ): - self.map = map - self.server_name = to_unicode(server_name) - script_name = to_unicode(script_name) - if not script_name.endswith(u"/"): - script_name += u"/" - self.script_name = script_name - self.subdomain = to_unicode(subdomain) - self.url_scheme = to_unicode(url_scheme) - self.path_info = to_unicode(path_info) - self.default_method = to_unicode(default_method) - self.query_args = query_args - self.websocket = self.url_scheme in {"ws", "wss"} - - def dispatch( - self, view_func, path_info=None, method=None, catch_http_exceptions=False - ): - """Does the complete dispatching process. `view_func` is called with - the endpoint and a dict with the values for the view. 
It should - look up the view function, call it, and return a response object - or WSGI application. http exceptions are not caught by default - so that applications can display nicer error messages by just - catching them by hand. If you want to stick with the default - error messages you can pass it ``catch_http_exceptions=True`` and - it will catch the http exceptions. - - Here a small example for the dispatch usage:: - - from werkzeug.wrappers import Request, Response - from werkzeug.wsgi import responder - from werkzeug.routing import Map, Rule - - def on_index(request): - return Response('Hello from the index') - - url_map = Map([Rule('/', endpoint='index')]) - views = {'index': on_index} - - @responder - def application(environ, start_response): - request = Request(environ) - urls = url_map.bind_to_environ(environ) - return urls.dispatch(lambda e, v: views[e](request, **v), - catch_http_exceptions=True) - - Keep in mind that this method might return exception objects, too, so - use :class:`Response.force_type` to get a response object. - - :param view_func: a function that is called with the endpoint as - first argument and the value dict as second. Has - to dispatch to the actual view function with this - information. (see above) - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - :param catch_http_exceptions: set to `True` to catch any of the - werkzeug :class:`HTTPException`\\s. - """ - try: - try: - endpoint, args = self.match(path_info, method) - except RequestRedirect as e: - return e - return view_func(endpoint, args) - except HTTPException as e: - if catch_http_exceptions: - return e - raise - - def match( - self, - path_info=None, - method=None, - return_rule=False, - query_args=None, - websocket=None, - ): - """The usage is simple: you just pass the match method the current - path info as well as the method (which defaults to `GET`). The - following things can then happen: - - - you receive a `NotFound` exception that indicates that no URL is - matching. A `NotFound` exception is also a WSGI application you - can call to get a default page not found page (happens to be the - same object as `werkzeug.exceptions.NotFound`) - - - you receive a `MethodNotAllowed` exception that indicates that there - is a match for this URL but not for the current request method. - This is useful for RESTful applications. - - - you receive a `RequestRedirect` exception with a `new_url` - attribute. This exception is used to notify you about a request - Werkzeug requests from your WSGI application. This is for example the - case if you request ``/foo`` although the correct URL is ``/foo/`` - You can use the `RequestRedirect` instance as response-like object - similar to all other subclasses of `HTTPException`. - - - you receive a ``WebsocketMismatch`` exception if the only - match is a WebSocket rule but the bind is an HTTP request, or - if the match is an HTTP rule but the bind is a WebSocket - request. - - - you get a tuple in the form ``(endpoint, arguments)`` if there is - a match (unless `return_rule` is True, in which case you get a tuple - in the form ``(rule, arguments)``) - - If the path info is not passed to the match method the default path - info of the map is used (defaults to the root URL if not defined - explicitly). - - All of the exceptions raised are subclasses of `HTTPException` so they - can be used as WSGI responses. 
They will all render generic error or - redirect pages. - - Here is a small example for matching: - - >>> m = Map([ - ... Rule('/', endpoint='index'), - ... Rule('/downloads/', endpoint='downloads/index'), - ... Rule('/downloads/', endpoint='downloads/show') - ... ]) - >>> urls = m.bind("example.com", "/") - >>> urls.match("/", "GET") - ('index', {}) - >>> urls.match("/downloads/42") - ('downloads/show', {'id': 42}) - - And here is what happens on redirect and missing URLs: - - >>> urls.match("/downloads") - Traceback (most recent call last): - ... - RequestRedirect: http://example.com/downloads/ - >>> urls.match("/missing") - Traceback (most recent call last): - ... - NotFound: 404 Not Found - - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - :param return_rule: return the rule that matched instead of just the - endpoint (defaults to `False`). - :param query_args: optional query arguments that are used for - automatic redirects as string or dictionary. It's - currently not possible to use the query arguments - for URL matching. - :param websocket: Match WebSocket instead of HTTP requests. A - websocket request has a ``ws`` or ``wss`` - :attr:`url_scheme`. This overrides that detection. - - .. versionadded:: 1.0 - Added ``websocket``. - - .. versionchanged:: 0.8 - ``query_args`` can be a string. - - .. versionadded:: 0.7 - Added ``query_args``. - - .. versionadded:: 0.6 - Added ``return_rule``. - """ - self.map.update() - if path_info is None: - path_info = self.path_info - else: - path_info = to_unicode(path_info, self.map.charset) - if query_args is None: - query_args = self.query_args - method = (method or self.default_method).upper() - - if websocket is None: - websocket = self.websocket - - require_redirect = False - - path = u"%s|%s" % ( - self.map.host_matching and self.server_name or self.subdomain, - path_info and "/%s" % path_info.lstrip("/"), - ) - - have_match_for = set() - websocket_mismatch = False - - for rule in self.map._rules: - try: - rv = rule.match(path, method) - except RequestPath as e: - raise RequestRedirect( - self.make_redirect_url( - url_quote(e.path_info, self.map.charset, safe="/:|+"), - query_args, - ) - ) - except RequestAliasRedirect as e: - raise RequestRedirect( - self.make_alias_redirect_url( - path, rule.endpoint, e.matched_values, method, query_args - ) - ) - if rv is None: - continue - if rule.methods is not None and method not in rule.methods: - have_match_for.update(rule.methods) - continue - - if rule.websocket != websocket: - websocket_mismatch = True - continue - - if self.map.redirect_defaults: - redirect_url = self.get_default_redirect(rule, method, rv, query_args) - if redirect_url is not None: - raise RequestRedirect(redirect_url) - - if rule.redirect_to is not None: - if isinstance(rule.redirect_to, string_types): - - def _handle_match(match): - value = rv[match.group(1)] - return rule._converters[match.group(1)].to_url(value) - - redirect_url = _simple_rule_re.sub(_handle_match, rule.redirect_to) - else: - redirect_url = rule.redirect_to(self, **rv) - raise RequestRedirect( - str( - url_join( - "%s://%s%s%s" - % ( - self.url_scheme or "http", - self.subdomain + "." 
if self.subdomain else "", - self.server_name, - self.script_name, - ), - redirect_url, - ) - ) - ) - - if require_redirect: - raise RequestRedirect( - self.make_redirect_url( - url_quote(path_info, self.map.charset, safe="/:|+"), query_args - ) - ) - - if return_rule: - return rule, rv - else: - return rule.endpoint, rv - - if have_match_for: - raise MethodNotAllowed(valid_methods=list(have_match_for)) - - if websocket_mismatch: - raise WebsocketMismatch() - - raise NotFound() - - def test(self, path_info=None, method=None): - """Test if a rule would match. Works like `match` but returns `True` - if the URL matches, or `False` if it does not exist. - - :param path_info: the path info to use for matching. Overrides the - path info specified on binding. - :param method: the HTTP method used for matching. Overrides the - method specified on binding. - """ - try: - self.match(path_info, method) - except RequestRedirect: - pass - except HTTPException: - return False - return True - - def allowed_methods(self, path_info=None): - """Returns the valid methods that match for a given path. - - .. versionadded:: 0.7 - """ - try: - self.match(path_info, method="--") - except MethodNotAllowed as e: - return e.valid_methods - except HTTPException: - pass - return [] - - def get_host(self, domain_part): - """Figures out the full host name for the given domain part. The - domain part is a subdomain in case host matching is disabled or - a full host name. - """ - if self.map.host_matching: - if domain_part is None: - return self.server_name - return to_unicode(domain_part, "ascii") - subdomain = domain_part - if subdomain is None: - subdomain = self.subdomain - else: - subdomain = to_unicode(subdomain, "ascii") - return (subdomain + u"." if subdomain else u"") + self.server_name - - def get_default_redirect(self, rule, method, values, query_args): - """A helper that returns the URL to redirect to if it finds one. - This is used for default redirecting only. - - :internal: - """ - assert self.map.redirect_defaults - for r in self.map._rules_by_endpoint[rule.endpoint]: - # every rule that comes after this one, including ourself - # has a lower priority for the defaults. We order the ones - # with the highest priority up for building. - if r is rule: - break - if r.provides_defaults_for(rule) and r.suitable_for(values, method): - values.update(r.defaults) - domain_part, path = r.build(values) - return self.make_redirect_url(path, query_args, domain_part=domain_part) - - def encode_query_args(self, query_args): - if not isinstance(query_args, string_types): - query_args = url_encode(query_args, self.map.charset) - return query_args - - def make_redirect_url(self, path_info, query_args=None, domain_part=None): - """Creates a redirect URL. - - :internal: - """ - suffix = "" - if query_args: - suffix = "?" + self.encode_query_args(query_args) - return str( - "%s://%s/%s%s" - % ( - self.url_scheme or "http", - self.get_host(domain_part), - posixpath.join( - self.script_name[:-1].lstrip("/"), path_info.lstrip("/") - ), - suffix, - ) - ) - - def make_alias_redirect_url(self, path, endpoint, values, method, query_args): - """Internally called to make an alias redirect URL.""" - url = self.build( - endpoint, values, method, append_unknown=False, force_external=True - ) - if query_args: - url += "?" + self.encode_query_args(query_args) - assert url != path, "detected invalid alias setting. 
No canonical URL found" - return url - - def _partial_build(self, endpoint, values, method, append_unknown): - """Helper for :meth:`build`. Returns subdomain and path for the - rule that accepts this endpoint, values and method. - - :internal: - """ - # in case the method is none, try with the default method first - if method is None: - rv = self._partial_build( - endpoint, values, self.default_method, append_unknown - ) - if rv is not None: - return rv - - # Default method did not match or a specific method is passed. - # Check all for first match with matching host. If no matching - # host is found, go with first result. - first_match = None - - for rule in self.map._rules_by_endpoint.get(endpoint, ()): - if rule.suitable_for(values, method): - rv = rule.build(values, append_unknown) - - if rv is not None: - rv = (rv[0], rv[1], rule.websocket) - if self.map.host_matching: - if rv[0] == self.server_name: - return rv - elif first_match is None: - first_match = rv - else: - return rv - - return first_match - - def build( - self, - endpoint, - values=None, - method=None, - force_external=False, - append_unknown=True, - ): - """Building URLs works pretty much the other way round. Instead of - `match` you call `build` and pass it the endpoint and a dict of - arguments for the placeholders. - - The `build` function also accepts an argument called `force_external` - which, if you set it to `True` will force external URLs. Per default - external URLs (include the server name) will only be used if the - target URL is on a different subdomain. - - >>> m = Map([ - ... Rule('/', endpoint='index'), - ... Rule('/downloads/', endpoint='downloads/index'), - ... Rule('/downloads/', endpoint='downloads/show') - ... ]) - >>> urls = m.bind("example.com", "/") - >>> urls.build("index", {}) - '/' - >>> urls.build("downloads/show", {'id': 42}) - '/downloads/42' - >>> urls.build("downloads/show", {'id': 42}, force_external=True) - 'http://example.com/downloads/42' - - Because URLs cannot contain non ASCII data you will always get - bytestrings back. Non ASCII characters are urlencoded with the - charset defined on the map instance. - - Additional values are converted to unicode and appended to the URL as - URL querystring parameters: - - >>> urls.build("index", {'q': 'My Searchstring'}) - '/?q=My+Searchstring' - - When processing those additional values, lists are furthermore - interpreted as multiple values (as per - :py:class:`werkzeug.datastructures.MultiDict`): - - >>> urls.build("index", {'q': ['a', 'b', 'c']}) - '/?q=a&q=b&q=c' - - Passing a ``MultiDict`` will also add multiple values: - - >>> urls.build("index", MultiDict((('p', 'z'), ('q', 'a'), ('q', 'b')))) - '/?p=z&q=a&q=b' - - If a rule does not exist when building a `BuildError` exception is - raised. - - The build method accepts an argument called `method` which allows you - to specify the method you want to have an URL built for if you have - different methods for the same endpoint specified. - - .. versionadded:: 0.6 - the `append_unknown` parameter was added. - - :param endpoint: the endpoint of the URL to build. - :param values: the values for the URL to build. Unhandled values are - appended to the URL as query parameters. - :param method: the HTTP method for the rule if there are different - URLs for different methods on the same endpoint. - :param force_external: enforce full canonical external URLs. If the URL - scheme is not provided, this will generate - a protocol-relative URL. 
- :param append_unknown: unknown parameters are appended to the generated - URL as query string argument. Disable this - if you want the builder to ignore those. - """ - self.map.update() - - if values: - if isinstance(values, MultiDict): - temp_values = {} - # iteritems(dict, values) is like `values.lists()` - # without the call or `list()` coercion overhead. - for key, value in iteritems(dict, values): - if not value: - continue - if len(value) == 1: # flatten single item lists - value = value[0] - if value is None: # drop None - continue - temp_values[key] = value - values = temp_values - else: - # drop None - values = dict(i for i in iteritems(values) if i[1] is not None) - else: - values = {} - - rv = self._partial_build(endpoint, values, method, append_unknown) - if rv is None: - raise BuildError(endpoint, values, method, self) - - domain_part, path, websocket = rv - host = self.get_host(domain_part) - - # Always build WebSocket routes with the scheme (browsers - # require full URLs). If bound to a WebSocket, ensure that HTTP - # routes are built with an HTTP scheme. - url_scheme = self.url_scheme - secure = url_scheme in {"https", "wss"} - - if websocket: - force_external = True - url_scheme = "wss" if secure else "ws" - elif url_scheme: - url_scheme = "https" if secure else "http" - - # shortcut this. - if not force_external and ( - (self.map.host_matching and host == self.server_name) - or (not self.map.host_matching and domain_part == self.subdomain) - ): - return "%s/%s" % (self.script_name.rstrip("/"), path.lstrip("/")) - return str( - "%s//%s%s/%s" - % ( - url_scheme + ":" if url_scheme else "", - host, - self.script_name[:-1], - path.lstrip("/"), - ) - ) diff --git a/venv/lib/python3.7/site-packages/werkzeug/security.py b/venv/lib/python3.7/site-packages/werkzeug/security.py deleted file mode 100644 index 2308040..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/security.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.security - ~~~~~~~~~~~~~~~~~ - - Security related helpers such as secure password hashing tools. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import codecs -import hashlib -import hmac -import os -import posixpath -from random import SystemRandom -from struct import Struct - -from ._compat import izip -from ._compat import PY2 -from ._compat import range_type -from ._compat import text_type -from ._compat import to_bytes -from ._compat import to_native - -SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" -DEFAULT_PBKDF2_ITERATIONS = 150000 - -_pack_int = Struct(">I").pack -_builtin_safe_str_cmp = getattr(hmac, "compare_digest", None) -_sys_rng = SystemRandom() -_os_alt_seps = list( - sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, "/") -) - - -def pbkdf2_hex( - data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None -): - """Like :func:`pbkdf2_bin`, but returns a hex-encoded string. - - .. versionadded:: 0.9 - - :param data: the data to derive. - :param salt: the salt for the derivation. - :param iterations: the number of iterations. - :param keylen: the length of the resulting key. If not provided, - the digest size will be used. - :param hashfunc: the hash function to use. This can either be the - string name of a known hash function, or a function - from the hashlib module. Defaults to sha256. 
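For a quick sense of what this helper returns, a minimal usage sketch against this vendored copy (the password, salt, and iteration count below are illustrative placeholders only):

```
from werkzeug.security import pbkdf2_hex

# With keylen omitted the digest size is used, so sha256 yields
# 32 bytes == 64 hex characters.
key = pbkdf2_hex("secret-password", "some-salt", iterations=150000)
assert len(key) == 64
```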
- """ - rv = pbkdf2_bin(data, salt, iterations, keylen, hashfunc) - return to_native(codecs.encode(rv, "hex_codec")) - - -def pbkdf2_bin( - data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS, keylen=None, hashfunc=None -): - """Returns a binary digest for the PBKDF2 hash algorithm of `data` - with the given `salt`. It iterates `iterations` times and produces a - key of `keylen` bytes. By default, SHA-256 is used as hash function; - a different hashlib `hashfunc` can be provided. - - .. versionadded:: 0.9 - - :param data: the data to derive. - :param salt: the salt for the derivation. - :param iterations: the number of iterations. - :param keylen: the length of the resulting key. If not provided - the digest size will be used. - :param hashfunc: the hash function to use. This can either be the - string name of a known hash function or a function - from the hashlib module. Defaults to sha256. - """ - if not hashfunc: - hashfunc = "sha256" - - data = to_bytes(data) - salt = to_bytes(salt) - - if callable(hashfunc): - _test_hash = hashfunc() - hash_name = getattr(_test_hash, "name", None) - else: - hash_name = hashfunc - return hashlib.pbkdf2_hmac(hash_name, data, salt, iterations, keylen) - - -def safe_str_cmp(a, b): - """This function compares strings in somewhat constant time. This - requires that the length of at least one string is known in advance. - - Returns `True` if the two strings are equal, or `False` if they are not. - - .. versionadded:: 0.7 - """ - if isinstance(a, text_type): - a = a.encode("utf-8") - if isinstance(b, text_type): - b = b.encode("utf-8") - - if _builtin_safe_str_cmp is not None: - return _builtin_safe_str_cmp(a, b) - - if len(a) != len(b): - return False - - rv = 0 - if PY2: - for x, y in izip(a, b): - rv |= ord(x) ^ ord(y) - else: - for x, y in izip(a, b): - rv |= x ^ y - - return rv == 0 - - -def gen_salt(length): - """Generate a random string of SALT_CHARS with specified ``length``.""" - if length <= 0: - raise ValueError("Salt length must be positive") - return "".join(_sys_rng.choice(SALT_CHARS) for _ in range_type(length)) - - -def _hash_internal(method, salt, password): - """Internal password hash helper. Supports plaintext without salt, - unsalted and salted passwords. In case salted passwords are used - hmac is used. 
- """ - if method == "plain": - return password, method - - if isinstance(password, text_type): - password = password.encode("utf-8") - - if method.startswith("pbkdf2:"): - args = method[7:].split(":") - if len(args) not in (1, 2): - raise ValueError("Invalid number of arguments for PBKDF2") - method = args.pop(0) - iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS - is_pbkdf2 = True - actual_method = "pbkdf2:%s:%d" % (method, iterations) - else: - is_pbkdf2 = False - actual_method = method - - if is_pbkdf2: - if not salt: - raise ValueError("Salt is required for PBKDF2") - rv = pbkdf2_hex(password, salt, iterations, hashfunc=method) - elif salt: - if isinstance(salt, text_type): - salt = salt.encode("utf-8") - mac = _create_mac(salt, password, method) - rv = mac.hexdigest() - else: - rv = hashlib.new(method, password).hexdigest() - return rv, actual_method - - -def _create_mac(key, msg, method): - if callable(method): - return hmac.HMAC(key, msg, method) - - def hashfunc(d=b""): - return hashlib.new(method, d) - - # Python 2.7 used ``hasattr(digestmod, '__call__')`` - # to detect if hashfunc is callable - hashfunc.__call__ = hashfunc - return hmac.HMAC(key, msg, hashfunc) - - -def generate_password_hash(password, method="pbkdf2:sha256", salt_length=8): - """Hash a password with the given method and salt with a string of - the given length. The format of the string returned includes the method - that was used so that :func:`check_password_hash` can check the hash. - - The format for the hashed string looks like this:: - - method$salt$hash - - This method can **not** generate unsalted passwords but it is possible - to set param method='plain' in order to enforce plaintext passwords. - If a salt is used, hmac is used internally to salt the password. - - If PBKDF2 is wanted it can be enabled by setting the method to - ``pbkdf2:method:iterations`` where iterations is optional:: - - pbkdf2:sha256:80000$salt$hash - pbkdf2:sha256$salt$hash - - :param password: the password to hash. - :param method: the hash method to use (one that hashlib supports). Can - optionally be in the format ``pbkdf2:[:iterations]`` - to enable PBKDF2. - :param salt_length: the length of the salt in letters. - """ - salt = gen_salt(salt_length) if method != "plain" else "" - h, actual_method = _hash_internal(method, salt, password) - return "%s$%s$%s" % (actual_method, salt, h) - - -def check_password_hash(pwhash, password): - """check a password against a given salted and hashed password value. - In order to support unsalted legacy passwords this method supports - plain text passwords, md5 and sha1 hashes (both salted and unsalted). - - Returns `True` if the password matched, `False` otherwise. - - :param pwhash: a hashed string like returned by - :func:`generate_password_hash`. - :param password: the plaintext password to compare against the hash. - """ - if pwhash.count("$") < 2: - return False - method, salt, hashval = pwhash.split("$", 2) - return safe_str_cmp(_hash_internal(method, salt, password)[0], hashval) - - -def safe_join(directory, *pathnames): - """Safely join zero or more untrusted path components to a base - directory to avoid escaping the base directory. - - :param directory: The trusted base directory. - :param pathnames: The untrusted path components relative to the - base directory. - :return: A safe path, otherwise ``None``. 
- """ - parts = [directory] - - for filename in pathnames: - if filename != "": - filename = posixpath.normpath(filename) - - if ( - any(sep in filename for sep in _os_alt_seps) - or os.path.isabs(filename) - or filename == ".." - or filename.startswith("../") - ): - return None - - parts.append(filename) - - return posixpath.join(*parts) diff --git a/venv/lib/python3.7/site-packages/werkzeug/serving.py b/venv/lib/python3.7/site-packages/werkzeug/serving.py deleted file mode 100644 index f2a0dc9..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/serving.py +++ /dev/null @@ -1,1117 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.serving - ~~~~~~~~~~~~~~~~ - - There are many ways to serve a WSGI application. While you're developing - it you usually don't want a full blown webserver like Apache but a simple - standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in - the standard library. If you're using older versions of Python you can - download the package from the cheeseshop. - - However there are some caveats. Sourcecode won't reload itself when - changed and each time you kill the server using ``^C`` you get an - `KeyboardInterrupt` error. While the latter is easy to solve the first - one can be a pain in the ass in some situations. - - The easiest way is creating a small ``start-myproject.py`` that runs the - application:: - - #!/usr/bin/env python - # -*- coding: utf-8 -*- - from myproject import make_app - from werkzeug.serving import run_simple - - app = make_app(...) - run_simple('localhost', 8080, app, use_reloader=True) - - You can also pass it a `extra_files` keyword argument with a list of - additional files (like configuration files) you want to observe. - - For bigger applications you should consider using `click` - (http://click.pocoo.org) instead of a simple start file. - - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import io -import os -import signal -import socket -import sys -from datetime import datetime as dt -from datetime import timedelta - -from ._compat import PY2 -from ._compat import reraise -from ._compat import WIN -from ._compat import wsgi_encoding_dance -from ._internal import _log -from .exceptions import InternalServerError -from .urls import uri_to_iri -from .urls import url_parse -from .urls import url_unquote - -try: - import socketserver - from http.server import BaseHTTPRequestHandler - from http.server import HTTPServer -except ImportError: - import SocketServer as socketserver - from BaseHTTPServer import HTTPServer - from BaseHTTPServer import BaseHTTPRequestHandler - -try: - import ssl -except ImportError: - - class _SslDummy(object): - def __getattr__(self, name): - raise RuntimeError("SSL support unavailable") - - ssl = _SslDummy() - -try: - import click -except ImportError: - click = None - - -ThreadingMixIn = socketserver.ThreadingMixIn -can_fork = hasattr(os, "fork") - -if can_fork: - ForkingMixIn = socketserver.ForkingMixIn -else: - - class ForkingMixIn(object): - pass - - -try: - af_unix = socket.AF_UNIX -except AttributeError: - af_unix = None - - -LISTEN_QUEUE = 128 -can_open_by_fd = not WIN and hasattr(socket, "fromfd") - -# On Python 3, ConnectionError represents the same errnos as -# socket.error from Python 2, while socket.error is an alias for the -# more generic OSError. 
-if PY2: - _ConnectionError = socket.error -else: - _ConnectionError = ConnectionError - - -class DechunkedInput(io.RawIOBase): - """An input stream that handles Transfer-Encoding 'chunked'""" - - def __init__(self, rfile): - self._rfile = rfile - self._done = False - self._len = 0 - - def readable(self): - return True - - def read_chunk_len(self): - try: - line = self._rfile.readline().decode("latin1") - _len = int(line.strip(), 16) - except ValueError: - raise IOError("Invalid chunk header") - if _len < 0: - raise IOError("Negative chunk length not allowed") - return _len - - def readinto(self, buf): - read = 0 - while not self._done and read < len(buf): - if self._len == 0: - # This is the first chunk or we fully consumed the previous - # one. Read the next length of the next chunk - self._len = self.read_chunk_len() - - if self._len == 0: - # Found the final chunk of size 0. The stream is now exhausted, - # but there is still a final newline that should be consumed - self._done = True - - if self._len > 0: - # There is data (left) in this chunk, so append it to the - # buffer. If this operation fully consumes the chunk, this will - # reset self._len to 0. - n = min(len(buf), self._len) - buf[read : read + n] = self._rfile.read(n) - self._len -= n - read += n - - if self._len == 0: - # Skip the terminating newline of a chunk that has been fully - # consumed. This also applies to the 0-sized final chunk - terminator = self._rfile.readline() - if terminator not in (b"\n", b"\r\n", b"\r"): - raise IOError("Missing chunk terminating newline") - - return read - - -class WSGIRequestHandler(BaseHTTPRequestHandler, object): - - """A request handler that implements WSGI dispatching.""" - - @property - def server_version(self): - from . import __version__ - - return "Werkzeug/" + __version__ - - def make_environ(self): - request_url = url_parse(self.path) - - def shutdown_server(): - self.server.shutdown_signal = True - - url_scheme = "http" if self.server.ssl_context is None else "https" - if not self.client_address: - self.client_address = "" - if isinstance(self.client_address, str): - self.client_address = (self.client_address, 0) - else: - pass - - # If there was no scheme but the path started with two slashes, - # the first segment may have been incorrectly parsed as the - # netloc, prepend it to the path again. 
- if not request_url.scheme and request_url.netloc: - path_info = "/%s%s" % (request_url.netloc, request_url.path) - else: - path_info = request_url.path - - path_info = url_unquote(path_info) - - environ = { - "wsgi.version": (1, 0), - "wsgi.url_scheme": url_scheme, - "wsgi.input": self.rfile, - "wsgi.errors": sys.stderr, - "wsgi.multithread": self.server.multithread, - "wsgi.multiprocess": self.server.multiprocess, - "wsgi.run_once": False, - "werkzeug.server.shutdown": shutdown_server, - "SERVER_SOFTWARE": self.server_version, - "REQUEST_METHOD": self.command, - "SCRIPT_NAME": "", - "PATH_INFO": wsgi_encoding_dance(path_info), - "QUERY_STRING": wsgi_encoding_dance(request_url.query), - # Non-standard, added by mod_wsgi, uWSGI - "REQUEST_URI": wsgi_encoding_dance(self.path), - # Non-standard, added by gunicorn - "RAW_URI": wsgi_encoding_dance(self.path), - "REMOTE_ADDR": self.address_string(), - "REMOTE_PORT": self.port_integer(), - "SERVER_NAME": self.server.server_address[0], - "SERVER_PORT": str(self.server.server_address[1]), - "SERVER_PROTOCOL": self.request_version, - } - - for key, value in self.get_header_items(): - key = key.upper().replace("-", "_") - value = value.replace("\r\n", "") - if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"): - key = "HTTP_" + key - if key in environ: - value = "{},{}".format(environ[key], value) - environ[key] = value - - if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked": - environ["wsgi.input_terminated"] = True - environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"]) - - # Per RFC 2616, if the URL is absolute, use that as the host. - # We're using "has a scheme" to indicate an absolute URL. - if request_url.scheme and request_url.netloc: - environ["HTTP_HOST"] = request_url.netloc - - try: - # binary_form=False gives nicer information, but wouldn't be compatible with - # what Nginx or Apache could return. - peer_cert = self.connection.getpeercert(binary_form=True) - if peer_cert is not None: - # Nginx and Apache use PEM format. - environ["SSL_CLIENT_CERT"] = ssl.DER_cert_to_PEM_cert(peer_cert) - except ValueError: - # SSL handshake hasn't finished. - self.server.log("error", "Cannot fetch SSL peer certificate info") - except AttributeError: - # Not using TLS, the socket will not have getpeercert(). 
- pass - - return environ - - def run_wsgi(self): - if self.headers.get("Expect", "").lower().strip() == "100-continue": - self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n") - - self.environ = environ = self.make_environ() - headers_set = [] - headers_sent = [] - - def write(data): - assert headers_set, "write() before start_response" - if not headers_sent: - status, response_headers = headers_sent[:] = headers_set - try: - code, msg = status.split(None, 1) - except ValueError: - code, msg = status, "" - code = int(code) - self.send_response(code, msg) - header_keys = set() - for key, value in response_headers: - self.send_header(key, value) - key = key.lower() - header_keys.add(key) - if not ( - "content-length" in header_keys - or environ["REQUEST_METHOD"] == "HEAD" - or code < 200 - or code in (204, 304) - ): - self.close_connection = True - self.send_header("Connection", "close") - if "server" not in header_keys: - self.send_header("Server", self.version_string()) - if "date" not in header_keys: - self.send_header("Date", self.date_time_string()) - self.end_headers() - - assert isinstance(data, bytes), "applications must write bytes" - if data: - # Only write data if there is any to avoid Python 3.5 SSL bug - self.wfile.write(data) - self.wfile.flush() - - def start_response(status, response_headers, exc_info=None): - if exc_info: - try: - if headers_sent: - reraise(*exc_info) - finally: - exc_info = None - elif headers_set: - raise AssertionError("Headers already set") - headers_set[:] = [status, response_headers] - return write - - def execute(app): - application_iter = app(environ, start_response) - try: - for data in application_iter: - write(data) - if not headers_sent: - write(b"") - finally: - if hasattr(application_iter, "close"): - application_iter.close() - - try: - execute(self.server.app) - except (_ConnectionError, socket.timeout) as e: - self.connection_dropped(e, environ) - except Exception: - if self.server.passthrough_errors: - raise - from .debug.tbtools import get_current_traceback - - traceback = get_current_traceback(ignore_system_exceptions=True) - try: - # if we haven't yet sent the headers but they are set - # we roll back to be able to set them again. - if not headers_sent: - del headers_set[:] - execute(InternalServerError()) - except Exception: - pass - self.server.log("error", "Error on request:\n%s", traceback.plaintext) - - def handle(self): - """Handles a request ignoring dropped connections.""" - try: - BaseHTTPRequestHandler.handle(self) - except (_ConnectionError, socket.timeout) as e: - self.connection_dropped(e) - except Exception as e: - if self.server.ssl_context is None or not is_ssl_error(e): - raise - if self.server.shutdown_signal: - self.initiate_shutdown() - - def initiate_shutdown(self): - """A horrible, horrible way to kill the server for Python 2.6 and - later. It's the best we can do. - """ - # Windows does not provide SIGKILL, go with SIGTERM then. - sig = getattr(signal, "SIGKILL", signal.SIGTERM) - # reloader active - if is_running_from_reloader(): - os.kill(os.getpid(), sig) - # python 2.7 - self.server._BaseServer__shutdown_request = True - # python 2.6 - self.server._BaseServer__serving = False - - def connection_dropped(self, error, environ=None): - """Called if the connection was closed by the client. By default - nothing happens. 
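Since the default is a no-op, surfacing dropped connections takes a small subclass; a sketch under that assumption (``app`` stands in for any WSGI callable):

```
from werkzeug.serving import WSGIRequestHandler, run_simple

class VerboseHandler(WSGIRequestHandler):
    # Log instead of silently ignoring clients that disconnect early.
    def connection_dropped(self, error, environ=None):
        self.log("info", "connection dropped: %s", error)

# run_simple("127.0.0.1", 5000, app, request_handler=VerboseHandler)
```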
- """ - - def handle_one_request(self): - """Handle a single HTTP request.""" - self.raw_requestline = self.rfile.readline() - if not self.raw_requestline: - self.close_connection = 1 - elif self.parse_request(): - return self.run_wsgi() - - def send_response(self, code, message=None): - """Send the response header and log the response code.""" - self.log_request(code) - if message is None: - message = code in self.responses and self.responses[code][0] or "" - if self.request_version != "HTTP/0.9": - hdr = "%s %d %s\r\n" % (self.protocol_version, code, message) - self.wfile.write(hdr.encode("ascii")) - - def version_string(self): - return BaseHTTPRequestHandler.version_string(self).strip() - - def address_string(self): - if getattr(self, "environ", None): - return self.environ["REMOTE_ADDR"] - elif not self.client_address: - return "" - elif isinstance(self.client_address, str): - return self.client_address - else: - return self.client_address[0] - - def port_integer(self): - return self.client_address[1] - - def log_request(self, code="-", size="-"): - try: - path = uri_to_iri(self.path) - msg = "%s %s %s" % (self.command, path, self.request_version) - except AttributeError: - # path isn't set if the requestline was bad - msg = self.requestline - - code = str(code) - - if click: - color = click.style - - if code[0] == "1": # 1xx - Informational - msg = color(msg, bold=True) - elif code[0] == "2": # 2xx - Success - msg = color(msg, fg="white") - elif code == "304": # 304 - Resource Not Modified - msg = color(msg, fg="cyan") - elif code[0] == "3": # 3xx - Redirection - msg = color(msg, fg="green") - elif code == "404": # 404 - Resource Not Found - msg = color(msg, fg="yellow") - elif code[0] == "4": # 4xx - Client Error - msg = color(msg, fg="red", bold=True) - else: # 5xx, or any other response - msg = color(msg, fg="magenta", bold=True) - - self.log("info", '"%s" %s %s', msg, code, size) - - def log_error(self, *args): - self.log("error", *args) - - def log_message(self, format, *args): - self.log("info", format, *args) - - def log(self, type, message, *args): - _log( - type, - "%s - - [%s] %s\n" - % (self.address_string(), self.log_date_time_string(), message % args), - ) - - def get_header_items(self): - """ - Get an iterable list of key/value pairs representing headers. - - This function provides Python 2/3 compatibility as related to the - parsing of request headers. Python 2.7 is not compliant with - RFC 3875 Section 4.1.18 which requires multiple values for headers - to be provided or RFC 2616 which allows for folding of multi-line - headers. This function will return a matching list regardless - of Python version. It can be removed once Python 2.7 support - is dropped. - - :return: List of tuples containing header hey/value pairs - """ - if PY2: - # For Python 2, process the headers manually according to - # W3C RFC 2616 Section 4.2. - items = [] - for header in self.headers.headers: - # Remove "\r\n" from the header and split on ":" to get - # the field name and value. - try: - key, value = header[0:-2].split(":", 1) - except ValueError: - # If header could not be slit with : but starts with white - # space and it follows an existing header, it's a folded - # header. 
- if header[0] in ("\t", " ") and items: - # Pop off the last header - key, value = items.pop() - # Append the current header to the value of the last - # header which will be placed back on the end of the - # list - value = value + header - # Otherwise it's just a bad header and should error - else: - # Re-raise the value error - raise - - # Add the key and the value once stripped of leading - # white space. The specification allows for stripping - # trailing white space but the Python 3 code does not - # strip trailing white space. Therefore, trailing space - # will be left as is to match the Python 3 behavior. - items.append((key, value.lstrip())) - else: - items = self.headers.items() - - return items - - -#: backwards compatible name if someone is subclassing it -BaseRequestHandler = WSGIRequestHandler - - -def generate_adhoc_ssl_pair(cn=None): - try: - from cryptography import x509 - from cryptography.x509.oid import NameOID - from cryptography.hazmat.backends import default_backend - from cryptography.hazmat.primitives import hashes - from cryptography.hazmat.primitives.asymmetric import rsa - except ImportError: - raise TypeError("Using ad-hoc certificates requires the cryptography library.") - pkey = rsa.generate_private_key( - public_exponent=65537, key_size=2048, backend=default_backend() - ) - - # pretty damn sure that this is not actually accepted by anyone - if cn is None: - cn = u"*" - - subject = x509.Name( - [ - x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Dummy Certificate"), - x509.NameAttribute(NameOID.COMMON_NAME, cn), - ] - ) - - cert = ( - x509.CertificateBuilder() - .subject_name(subject) - .issuer_name(subject) - .public_key(pkey.public_key()) - .serial_number(x509.random_serial_number()) - .not_valid_before(dt.utcnow()) - .not_valid_after(dt.utcnow() + timedelta(days=365)) - .add_extension(x509.ExtendedKeyUsage([x509.OID_SERVER_AUTH]), critical=False) - .add_extension( - x509.SubjectAlternativeName([x509.DNSName(u"*")]), critical=False - ) - .sign(pkey, hashes.SHA256(), default_backend()) - ) - return cert, pkey - - -def make_ssl_devcert(base_path, host=None, cn=None): - """Creates an SSL key for development. This should be used instead of - the ``'adhoc'`` key which generates a new cert on each server start. - It accepts a path for where it should store the key and cert and - either a host or CN. If a host is given it will use the CN - ``*.host/CN=host``. - - For more information see :func:`run_simple`. - - .. versionadded:: 0.9 - - :param base_path: the path to the certificate and key. The extension - ``.crt`` is added for the certificate, ``.key`` is - added for the key. - :param host: the name of the host. This can be used as an alternative - for the `cn`. - :param cn: the `CN` to use. 
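Assuming the ``cryptography`` package is available, usage might look like this (the paths and host are placeholders, and ``app`` stands in for any WSGI callable):

```
from werkzeug.serving import make_ssl_devcert, run_simple

# Writes ./dev.crt and ./dev.key once, so the cert survives restarts
# instead of being regenerated like the 'adhoc' option.
cert_file, pkey_file = make_ssl_devcert("./dev", host="localhost")
# run_simple("localhost", 5000, app, ssl_context=(cert_file, pkey_file))
```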
- """ - - if host is not None: - cn = u"*.%s/CN=%s" % (host, host) - cert, pkey = generate_adhoc_ssl_pair(cn=cn) - - from cryptography.hazmat.primitives import serialization - - cert_file = base_path + ".crt" - pkey_file = base_path + ".key" - - with open(cert_file, "wb") as f: - f.write(cert.public_bytes(serialization.Encoding.PEM)) - with open(pkey_file, "wb") as f: - f.write( - pkey.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption(), - ) - ) - - return cert_file, pkey_file - - -def generate_adhoc_ssl_context(): - """Generates an adhoc SSL context for the development server.""" - import tempfile - import atexit - - cert, pkey = generate_adhoc_ssl_pair() - - from cryptography.hazmat.primitives import serialization - - cert_handle, cert_file = tempfile.mkstemp() - pkey_handle, pkey_file = tempfile.mkstemp() - atexit.register(os.remove, pkey_file) - atexit.register(os.remove, cert_file) - - os.write(cert_handle, cert.public_bytes(serialization.Encoding.PEM)) - os.write( - pkey_handle, - pkey.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=serialization.NoEncryption(), - ), - ) - - os.close(cert_handle) - os.close(pkey_handle) - ctx = load_ssl_context(cert_file, pkey_file) - return ctx - - -def load_ssl_context(cert_file, pkey_file=None, protocol=None): - """Loads SSL context from cert/private key files and optional protocol. - Many parameters are directly taken from the API of - :py:class:`ssl.SSLContext`. - - :param cert_file: Path of the certificate to use. - :param pkey_file: Path of the private key to use. If not given, the key - will be obtained from the certificate file. - :param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl`` - module. Defaults to ``PROTOCOL_SSLv23``. - """ - if protocol is None: - try: - protocol = ssl.PROTOCOL_TLS_SERVER - except AttributeError: - # Python <= 3.5 compat - protocol = ssl.PROTOCOL_SSLv23 - ctx = _SSLContext(protocol) - ctx.load_cert_chain(cert_file, pkey_file) - return ctx - - -class _SSLContext(object): - - """A dummy class with a small subset of Python3's ``ssl.SSLContext``, only - intended to be used with and by Werkzeug.""" - - def __init__(self, protocol): - self._protocol = protocol - self._certfile = None - self._keyfile = None - self._password = None - - def load_cert_chain(self, certfile, keyfile=None, password=None): - self._certfile = certfile - self._keyfile = keyfile or certfile - self._password = password - - def wrap_socket(self, sock, **kwargs): - return ssl.wrap_socket( - sock, - keyfile=self._keyfile, - certfile=self._certfile, - ssl_version=self._protocol, - **kwargs - ) - - -def is_ssl_error(error=None): - """Checks if the given error (or the current one) is an SSL error.""" - if error is None: - error = sys.exc_info()[1] - return isinstance(error, ssl.SSLError) - - -def select_address_family(host, port): - """Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on - the host and port.""" - # disabled due to problems with current ipv6 implementations - # and various operating systems. Probably this code also is - # not supposed to work, but I can't come up with any other - # ways to implement this. 
- # try: - # info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, - # socket.SOCK_STREAM, 0, - # socket.AI_PASSIVE) - # if info: - # return info[0][0] - # except socket.gaierror: - # pass - if host.startswith("unix://"): - return socket.AF_UNIX - elif ":" in host and hasattr(socket, "AF_INET6"): - return socket.AF_INET6 - return socket.AF_INET - - -def get_sockaddr(host, port, family): - """Return a fully qualified socket address that can be passed to - :func:`socket.bind`.""" - if family == af_unix: - return host.split("://", 1)[1] - try: - res = socket.getaddrinfo( - host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP - ) - except socket.gaierror: - return host, port - return res[0][4] - - -class BaseWSGIServer(HTTPServer, object): - - """Simple single-threaded, single-process WSGI server.""" - - multithread = False - multiprocess = False - request_queue_size = LISTEN_QUEUE - - def __init__( - self, - host, - port, - app, - handler=None, - passthrough_errors=False, - ssl_context=None, - fd=None, - ): - if handler is None: - handler = WSGIRequestHandler - - self.address_family = select_address_family(host, port) - - if fd is not None: - real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM) - port = 0 - - server_address = get_sockaddr(host, int(port), self.address_family) - - # remove socket file if it already exists - if self.address_family == af_unix and os.path.exists(server_address): - os.unlink(server_address) - HTTPServer.__init__(self, server_address, handler) - - self.app = app - self.passthrough_errors = passthrough_errors - self.shutdown_signal = False - self.host = host - self.port = self.socket.getsockname()[1] - - # Patch in the original socket. - if fd is not None: - self.socket.close() - self.socket = real_sock - self.server_address = self.socket.getsockname() - - if ssl_context is not None: - if isinstance(ssl_context, tuple): - ssl_context = load_ssl_context(*ssl_context) - if ssl_context == "adhoc": - ssl_context = generate_adhoc_ssl_context() - - # If we are on Python 2 the return value from socket.fromfd - # is an internal socket object but what we need for ssl wrap - # is the wrapper around it :( - sock = self.socket - if PY2 and not isinstance(sock, socket.socket): - sock = socket.socket(sock.family, sock.type, sock.proto, sock) - self.socket = ssl_context.wrap_socket(sock, server_side=True) - self.ssl_context = ssl_context - else: - self.ssl_context = None - - def log(self, type, message, *args): - _log(type, message, *args) - - def serve_forever(self): - self.shutdown_signal = False - try: - HTTPServer.serve_forever(self) - except KeyboardInterrupt: - pass - finally: - self.server_close() - - def handle_error(self, request, client_address): - if self.passthrough_errors: - raise - # Python 2 still causes a socket.error after the earlier - # handling, so silence it here. 
- if isinstance(sys.exc_info()[1], _ConnectionError): - return - return HTTPServer.handle_error(self, request, client_address) - - def get_request(self): - con, info = self.socket.accept() - return con, info - - -class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer): - - """A WSGI server that does threading.""" - - multithread = True - daemon_threads = True - - -class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer): - - """A WSGI server that does forking.""" - - multiprocess = True - - def __init__( - self, - host, - port, - app, - processes=40, - handler=None, - passthrough_errors=False, - ssl_context=None, - fd=None, - ): - if not can_fork: - raise ValueError("Your platform does not support forking.") - BaseWSGIServer.__init__( - self, host, port, app, handler, passthrough_errors, ssl_context, fd - ) - self.max_children = processes - - -def make_server( - host=None, - port=None, - app=None, - threaded=False, - processes=1, - request_handler=None, - passthrough_errors=False, - ssl_context=None, - fd=None, -): - """Create a new server instance that is either threaded, or forks - or just processes one request after another. - """ - if threaded and processes > 1: - raise ValueError("cannot have a multithreaded and multi process server.") - elif threaded: - return ThreadedWSGIServer( - host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd - ) - elif processes > 1: - return ForkingWSGIServer( - host, - port, - app, - processes, - request_handler, - passthrough_errors, - ssl_context, - fd=fd, - ) - else: - return BaseWSGIServer( - host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd - ) - - -def is_running_from_reloader(): - """Checks if the application is running from within the Werkzeug - reloader subprocess. - - .. versionadded:: 0.10 - """ - return os.environ.get("WERKZEUG_RUN_MAIN") == "true" - - -def run_simple( - hostname, - port, - application, - use_reloader=False, - use_debugger=False, - use_evalex=True, - extra_files=None, - reloader_interval=1, - reloader_type="auto", - threaded=False, - processes=1, - request_handler=None, - static_files=None, - passthrough_errors=False, - ssl_context=None, -): - """Start a WSGI application. Optional features include a reloader, - multithreading and fork support. - - This function has a command-line interface too:: - - python -m werkzeug.serving --help - - .. versionadded:: 0.5 - `static_files` was added to simplify serving of static files as well - as `passthrough_errors`. - - .. versionadded:: 0.6 - support for SSL was added. - - .. versionadded:: 0.8 - Added support for automatically loading a SSL context from certificate - file and private key. - - .. versionadded:: 0.9 - Added command-line interface. - - .. versionadded:: 0.10 - Improved the reloader and added support for changing the backend - through the `reloader_type` parameter. See :ref:`reloader` - for more information. - - .. versionchanged:: 0.15 - Bind to a Unix socket by passing a path that starts with - ``unix://`` as the ``hostname``. - - :param hostname: The host to bind to, for example ``'localhost'``. - If the value is a path that starts with ``unix://`` it will bind - to a Unix socket instead of a TCP socket.. - :param port: The port for the server. eg: ``8080`` - :param application: the WSGI application to execute - :param use_reloader: should the server automatically restart the python - process if modules were changed? - :param use_debugger: should the werkzeug debugging system be used? 
- :param use_evalex: should the exception evaluation feature be enabled? - :param extra_files: a list of files the reloader should watch - additionally to the modules. For example configuration - files. - :param reloader_interval: the interval for the reloader in seconds. - :param reloader_type: the type of reloader to use. The default is - auto detection. Valid values are ``'stat'`` and - ``'watchdog'``. See :ref:`reloader` for more - information. - :param threaded: should the process handle each request in a separate - thread? - :param processes: if greater than 1 then handle each request in a new process - up to this maximum number of concurrent processes. - :param request_handler: optional parameter that can be used to replace - the default one. You can use this to replace it - with a different - :class:`~BaseHTTPServer.BaseHTTPRequestHandler` - subclass. - :param static_files: a list or dict of paths for static files. This works - exactly like :class:`SharedDataMiddleware`, it's actually - just wrapping the application in that middleware before - serving. - :param passthrough_errors: set this to `True` to disable the error catching. - This means that the server will die on errors but - it can be useful to hook debuggers in (pdb etc.) - :param ssl_context: an SSL context for the connection. Either an - :class:`ssl.SSLContext`, a tuple in the form - ``(cert_file, pkey_file)``, the string ``'adhoc'`` if - the server should automatically create one, or ``None`` - to disable SSL (which is the default). - """ - if not isinstance(port, int): - raise TypeError("port must be an integer") - if use_debugger: - from .debug import DebuggedApplication - - application = DebuggedApplication(application, use_evalex) - if static_files: - from .middleware.shared_data import SharedDataMiddleware - - application = SharedDataMiddleware(application, static_files) - - def log_startup(sock): - display_hostname = hostname if hostname not in ("", "*") else "localhost" - quit_msg = "(Press CTRL+C to quit)" - if sock.family == af_unix: - _log("info", " * Running on %s %s", display_hostname, quit_msg) - else: - if ":" in display_hostname: - display_hostname = "[%s]" % display_hostname - port = sock.getsockname()[1] - _log( - "info", - " * Running on %s://%s:%d/ %s", - "http" if ssl_context is None else "https", - display_hostname, - port, - quit_msg, - ) - - def inner(): - try: - fd = int(os.environ["WERKZEUG_SERVER_FD"]) - except (LookupError, ValueError): - fd = None - srv = make_server( - hostname, - port, - application, - threaded, - processes, - request_handler, - passthrough_errors, - ssl_context, - fd=fd, - ) - if fd is None: - log_startup(srv.socket) - srv.serve_forever() - - if use_reloader: - # If we're not running already in the subprocess that is the - # reloader we want to open up a socket early to make sure the - # port is actually available. - if not is_running_from_reloader(): - if port == 0 and not can_open_by_fd: - raise ValueError( - "Cannot bind to a random port with enabled " - "reloader if the Python interpreter does " - "not support socket opening by fd." - ) - - # Create and destroy a socket so that any exceptions are - # raised before we spawn a separate Python interpreter and - # lose this ability. 
- address_family = select_address_family(hostname, port) - server_address = get_sockaddr(hostname, port, address_family) - s = socket.socket(address_family, socket.SOCK_STREAM) - s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - s.bind(server_address) - if hasattr(s, "set_inheritable"): - s.set_inheritable(True) - - # If we can open the socket by file descriptor, then we can just - # reuse this one and our socket will survive the restarts. - if can_open_by_fd: - os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno()) - s.listen(LISTEN_QUEUE) - log_startup(s) - else: - s.close() - if address_family == af_unix: - _log("info", "Unlinking %s" % server_address) - os.unlink(server_address) - - # Do not use relative imports, otherwise "python -m werkzeug.serving" - # breaks. - from ._reloader import run_with_reloader - - run_with_reloader(inner, extra_files, reloader_interval, reloader_type) - else: - inner() - - -def run_with_reloader(*args, **kwargs): - # People keep using undocumented APIs. Do not use this function - # please, we do not guarantee that it continues working. - from ._reloader import run_with_reloader - - return run_with_reloader(*args, **kwargs) - - -def main(): - """A simple command-line interface for :py:func:`run_simple`.""" - - # in contrast to argparse, this works at least under Python < 2.7 - import optparse - from .utils import import_string - - parser = optparse.OptionParser(usage="Usage: %prog [options] app_module:app_object") - parser.add_option( - "-b", - "--bind", - dest="address", - help="The hostname:port the app should listen on.", - ) - parser.add_option( - "-d", - "--debug", - dest="use_debugger", - action="store_true", - default=False, - help="Use Werkzeug's debugger.", - ) - parser.add_option( - "-r", - "--reload", - dest="use_reloader", - action="store_true", - default=False, - help="Reload Python process if modules change.", - ) - options, args = parser.parse_args() - - hostname, port = None, None - if options.address: - address = options.address.split(":") - hostname = address[0] - if len(address) > 1: - port = address[1] - - if len(args) != 1: - sys.stdout.write("No application supplied, or too much. See --help\n") - sys.exit(1) - app = import_string(args[0]) - - run_simple( - hostname=(hostname or "127.0.0.1"), - port=int(port or 5000), - application=app, - use_reloader=options.use_reloader, - use_debugger=options.use_debugger, - ) - - -if __name__ == "__main__": - main() diff --git a/venv/lib/python3.7/site-packages/werkzeug/test.py b/venv/lib/python3.7/site-packages/werkzeug/test.py deleted file mode 100644 index c5ce50a..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/test.py +++ /dev/null @@ -1,1123 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.test - ~~~~~~~~~~~~~ - - This module implements a client to WSGI applications for testing. 
- - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import mimetypes -import sys -from io import BytesIO -from itertools import chain -from random import random -from tempfile import TemporaryFile -from time import time - -from ._compat import iteritems -from ._compat import iterlists -from ._compat import itervalues -from ._compat import make_literal_wrapper -from ._compat import reraise -from ._compat import string_types -from ._compat import text_type -from ._compat import to_bytes -from ._compat import wsgi_encoding_dance -from ._internal import _get_environ -from .datastructures import CallbackDict -from .datastructures import CombinedMultiDict -from .datastructures import EnvironHeaders -from .datastructures import FileMultiDict -from .datastructures import Headers -from .datastructures import MultiDict -from .http import dump_cookie -from .http import dump_options_header -from .http import parse_options_header -from .urls import iri_to_uri -from .urls import url_encode -from .urls import url_fix -from .urls import url_parse -from .urls import url_unparse -from .urls import url_unquote -from .utils import get_content_type -from .wrappers import BaseRequest -from .wsgi import ClosingIterator -from .wsgi import get_current_url - -try: - from urllib.request import Request as U2Request -except ImportError: - from urllib2 import Request as U2Request - -try: - from http.cookiejar import CookieJar -except ImportError: - from cookielib import CookieJar - - -def stream_encode_multipart( - values, use_tempfile=True, threshold=1024 * 500, boundary=None, charset="utf-8" -): - """Encode a dict of values (either strings or file descriptors or - :class:`FileStorage` objects.) into a multipart encoded string stored - in a file descriptor. - """ - if boundary is None: - boundary = "---------------WerkzeugFormPart_%s%s" % (time(), random()) - _closure = [BytesIO(), 0, False] - - if use_tempfile: - - def write_binary(string): - stream, total_length, on_disk = _closure - if on_disk: - stream.write(string) - else: - length = len(string) - if length + _closure[1] <= threshold: - stream.write(string) - else: - new_stream = TemporaryFile("wb+") - new_stream.write(stream.getvalue()) - new_stream.write(string) - _closure[0] = new_stream - _closure[2] = True - _closure[1] = total_length + length - - else: - write_binary = _closure[0].write - - def write(string): - write_binary(string.encode(charset)) - - if not isinstance(values, MultiDict): - values = MultiDict(values) - - for key, values in iterlists(values): - for value in values: - write('--%s\r\nContent-Disposition: form-data; name="%s"' % (boundary, key)) - reader = getattr(value, "read", None) - if reader is not None: - filename = getattr(value, "filename", getattr(value, "name", None)) - content_type = getattr(value, "content_type", None) - if content_type is None: - content_type = ( - filename - and mimetypes.guess_type(filename)[0] - or "application/octet-stream" - ) - if filename is not None: - write('; filename="%s"\r\n' % filename) - else: - write("\r\n") - write("Content-Type: %s\r\n\r\n" % content_type) - while 1: - chunk = reader(16384) - if not chunk: - break - write_binary(chunk) - else: - if not isinstance(value, string_types): - value = str(value) - - value = to_bytes(value, charset) - write("\r\n\r\n") - write_binary(value) - write("\r\n") - write("--%s--\r\n" % boundary) - - length = int(_closure[0].tell()) - _closure[0].seek(0) - return _closure[0], length, boundary - - -def encode_multipart(values, boundary=None, 
charset="utf-8"): - """Like `stream_encode_multipart` but returns a tuple in the form - (``boundary``, ``data``) where data is a bytestring. - """ - stream, length, boundary = stream_encode_multipart( - values, use_tempfile=False, boundary=boundary, charset=charset - ) - return boundary, stream.read() - - -class _TestCookieHeaders(object): - - """A headers adapter for cookielib - """ - - def __init__(self, headers): - self.headers = headers - - def getheaders(self, name): - headers = [] - name = name.lower() - for k, v in self.headers: - if k.lower() == name: - headers.append(v) - return headers - - def get_all(self, name, default=None): - rv = [] - for k, v in self.headers: - if k.lower() == name.lower(): - rv.append(v) - return rv or default or [] - - -class _TestCookieResponse(object): - - """Something that looks like a httplib.HTTPResponse, but is actually just an - adapter for our test responses to make them available for cookielib. - """ - - def __init__(self, headers): - self.headers = _TestCookieHeaders(headers) - - def info(self): - return self.headers - - -class _TestCookieJar(CookieJar): - - """A cookielib.CookieJar modified to inject and read cookie headers from - and to wsgi environments, and wsgi application responses. - """ - - def inject_wsgi(self, environ): - """Inject the cookies as client headers into the server's wsgi - environment. - """ - cvals = ["%s=%s" % (c.name, c.value) for c in self] - - if cvals: - environ["HTTP_COOKIE"] = "; ".join(cvals) - else: - environ.pop("HTTP_COOKIE", None) - - def extract_wsgi(self, environ, headers): - """Extract the server's set-cookie headers as cookies into the - cookie jar. - """ - self.extract_cookies( - _TestCookieResponse(headers), U2Request(get_current_url(environ)) - ) - - -def _iter_data(data): - """Iterates over a `dict` or :class:`MultiDict` yielding all keys and - values. - This is used to iterate over the data passed to the - :class:`EnvironBuilder`. - """ - if isinstance(data, MultiDict): - for key, values in iterlists(data): - for value in values: - yield key, value - else: - for key, values in iteritems(data): - if isinstance(values, list): - for value in values: - yield key, value - else: - yield key, values - - -class EnvironBuilder(object): - """This class can be used to conveniently create a WSGI environment - for testing purposes. It can be used to quickly create WSGI environments - or request objects from arbitrary data. - - The signature of this class is also used in some other places as of - Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`, - :meth:`Client.open`). Because of this most of the functionality is - available through the constructor alone. - - Files and regular form data can be manipulated independently of each - other with the :attr:`form` and :attr:`files` attributes, but are - passed with the same argument to the constructor: `data`. - - `data` can be any of these values: - - - a `str` or `bytes` object: The object is converted into an - :attr:`input_stream`, the :attr:`content_length` is set and you have to - provide a :attr:`content_type`. - - a `dict` or :class:`MultiDict`: The keys have to be strings. The values - have to be either any of the following objects, or a list of any of the - following objects: - - - a :class:`file`-like object: These are converted into - :class:`FileStorage` objects automatically. - - a `tuple`: The :meth:`~FileMultiDict.add_file` method is called - with the key and the unpacked `tuple` items as positional - arguments. 
- - a `str`: The string is set as form data for the associated key. - - a file-like object: The object content is loaded in memory and then - handled like a regular `str` or a `bytes`. - - :param path: the path of the request. In the WSGI environment this will - end up as `PATH_INFO`. If the `query_string` is not defined - and there is a question mark in the `path` everything after - it is used as query string. - :param base_url: the base URL is a URL that is used to extract the WSGI - URL scheme, host (server name + server port) and the - script root (`SCRIPT_NAME`). - :param query_string: an optional string or dict with URL parameters. - :param method: the HTTP method to use, defaults to `GET`. - :param input_stream: an optional input stream. Do not specify this and - `data`. As soon as an input stream is set you can't - modify :attr:`args` and :attr:`files` unless you - set the :attr:`input_stream` to `None` again. - :param content_type: The content type for the request. As of 0.5 you - don't have to provide this when specifying files - and form data via `data`. - :param content_length: The content length for the request. You don't - have to specify this when providing data via - `data`. - :param errors_stream: an optional error stream that is used for - `wsgi.errors`. Defaults to :data:`stderr`. - :param multithread: controls `wsgi.multithread`. Defaults to `False`. - :param multiprocess: controls `wsgi.multiprocess`. Defaults to `False`. - :param run_once: controls `wsgi.run_once`. Defaults to `False`. - :param headers: an optional list or :class:`Headers` object of headers. - :param data: a string or dict of form data or a file-object. - See explanation above. - :param json: An object to be serialized and assigned to ``data``. - Defaults the content type to ``"application/json"``. - Serialized with the function assigned to :attr:`json_dumps`. - :param environ_base: an optional dict of environment defaults. - :param environ_overrides: an optional dict of environment overrides. - :param charset: the charset used to encode unicode data. - - .. versionadded:: 0.15 - The ``json`` param and :meth:`json_dumps` method. - - .. versionadded:: 0.15 - The environ has keys ``REQUEST_URI`` and ``RAW_URI`` containing - the path before percent-decoding. This is not part of the WSGI - PEP, but many WSGI servers include it. - - .. versionchanged:: 0.6 - ``path`` and ``base_url`` can now be unicode strings that are - encoded with :func:`iri_to_uri`. - """ - - #: the server protocol to use. defaults to HTTP/1.1 - server_protocol = "HTTP/1.1" - - #: the wsgi version to use. defaults to (1, 0) - wsgi_version = (1, 0) - - #: the default request class for :meth:`get_request` - request_class = BaseRequest - - import json - - #: The serialization function used when ``json`` is passed. 
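-    # (the ``import json`` above exists only so the next line can grab a
-    # reference to json.dumps; staticmethod() keeps it from being bound
-    # as an instance method, and the ``del json`` below removes the
-    # module from the class namespace once the attribute is set)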
- json_dumps = staticmethod(json.dumps) - del json - - def __init__( - self, - path="/", - base_url=None, - query_string=None, - method="GET", - input_stream=None, - content_type=None, - content_length=None, - errors_stream=None, - multithread=False, - multiprocess=False, - run_once=False, - headers=None, - data=None, - environ_base=None, - environ_overrides=None, - charset="utf-8", - mimetype=None, - json=None, - ): - path_s = make_literal_wrapper(path) - if query_string is not None and path_s("?") in path: - raise ValueError("Query string is defined in the path and as an argument") - if query_string is None and path_s("?") in path: - path, query_string = path.split(path_s("?"), 1) - self.charset = charset - self.path = iri_to_uri(path) - if base_url is not None: - base_url = url_fix(iri_to_uri(base_url, charset), charset) - self.base_url = base_url - if isinstance(query_string, (bytes, text_type)): - self.query_string = query_string - else: - if query_string is None: - query_string = MultiDict() - elif not isinstance(query_string, MultiDict): - query_string = MultiDict(query_string) - self.args = query_string - self.method = method - if headers is None: - headers = Headers() - elif not isinstance(headers, Headers): - headers = Headers(headers) - self.headers = headers - if content_type is not None: - self.content_type = content_type - if errors_stream is None: - errors_stream = sys.stderr - self.errors_stream = errors_stream - self.multithread = multithread - self.multiprocess = multiprocess - self.run_once = run_once - self.environ_base = environ_base - self.environ_overrides = environ_overrides - self.input_stream = input_stream - self.content_length = content_length - self.closed = False - - if json is not None: - if data is not None: - raise TypeError("can't provide both json and data") - - data = self.json_dumps(json) - - if self.content_type is None: - self.content_type = "application/json" - - if data: - if input_stream is not None: - raise TypeError("can't provide input stream and data") - if hasattr(data, "read"): - data = data.read() - if isinstance(data, text_type): - data = data.encode(self.charset) - if isinstance(data, bytes): - self.input_stream = BytesIO(data) - if self.content_length is None: - self.content_length = len(data) - else: - for key, value in _iter_data(data): - if isinstance(value, (tuple, dict)) or hasattr(value, "read"): - self._add_file_from_data(key, value) - else: - self.form.setlistdefault(key).append(value) - - if mimetype is not None: - self.mimetype = mimetype - - @classmethod - def from_environ(cls, environ, **kwargs): - """Turn an environ dict back into a builder. Any extra kwargs - override the args extracted from the environ. - - .. 
versionadded:: 0.15 - """ - headers = Headers(EnvironHeaders(environ)) - out = { - "path": environ["PATH_INFO"], - "base_url": cls._make_base_url( - environ["wsgi.url_scheme"], headers.pop("Host"), environ["SCRIPT_NAME"] - ), - "query_string": environ["QUERY_STRING"], - "method": environ["REQUEST_METHOD"], - "input_stream": environ["wsgi.input"], - "content_type": headers.pop("Content-Type", None), - "content_length": headers.pop("Content-Length", None), - "errors_stream": environ["wsgi.errors"], - "multithread": environ["wsgi.multithread"], - "multiprocess": environ["wsgi.multiprocess"], - "run_once": environ["wsgi.run_once"], - "headers": headers, - } - out.update(kwargs) - return cls(**out) - - def _add_file_from_data(self, key, value): - """Called in the EnvironBuilder to add files from the data dict.""" - if isinstance(value, tuple): - self.files.add_file(key, *value) - else: - self.files.add_file(key, value) - - @staticmethod - def _make_base_url(scheme, host, script_root): - return url_unparse((scheme, host, script_root, "", "")).rstrip("/") + "/" - - @property - def base_url(self): - """The base URL is used to extract the URL scheme, host name, - port, and root path. - """ - return self._make_base_url(self.url_scheme, self.host, self.script_root) - - @base_url.setter - def base_url(self, value): - if value is None: - scheme = "http" - netloc = "localhost" - script_root = "" - else: - scheme, netloc, script_root, qs, anchor = url_parse(value) - if qs or anchor: - raise ValueError("base url must not contain a query string or fragment") - self.script_root = script_root.rstrip("/") - self.host = netloc - self.url_scheme = scheme - - @property - def content_type(self): - """The content type for the request. Reflected from and to - the :attr:`headers`. Do not set if you set :attr:`files` or - :attr:`form` for auto detection. - """ - ct = self.headers.get("Content-Type") - if ct is None and not self._input_stream: - if self._files: - return "multipart/form-data" - if self._form: - return "application/x-www-form-urlencoded" - return None - return ct - - @content_type.setter - def content_type(self, value): - if value is None: - self.headers.pop("Content-Type", None) - else: - self.headers["Content-Type"] = value - - @property - def mimetype(self): - """The mimetype (content type without charset etc.) - - .. versionadded:: 0.14 - """ - ct = self.content_type - return ct.split(";")[0].strip() if ct else None - - @mimetype.setter - def mimetype(self, value): - self.content_type = get_content_type(value, self.charset) - - @property - def mimetype_params(self): - """ The mimetype parameters as dict. For example if the - content type is ``text/html; charset=utf-8`` the params would be - ``{'charset': 'utf-8'}``. - - .. versionadded:: 0.14 - """ - - def on_update(d): - self.headers["Content-Type"] = dump_options_header(self.mimetype, d) - - d = parse_options_header(self.headers.get("content-type", ""))[1] - return CallbackDict(d, on_update) - - @property - def content_length(self): - """The content length as integer. Reflected from and to the - :attr:`headers`. Do not set if you set :attr:`files` or - :attr:`form` for auto detection. - """ - return self.headers.get("Content-Length", type=int) - - @content_length.setter - def content_length(self, value): - if value is None: - self.headers.pop("Content-Length", None) - else: - self.headers["Content-Length"] = str(value) - - def _get_form(self, name, storage): - """Common behavior for getting the :attr:`form` and - :attr:`files` properties. 
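-        The storage object is created lazily on first access; as the code
-        below shows, it is unavailable once an input stream has been set.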
- - :param name: Name of the internal cached attribute. - :param storage: Storage class used for the data. - """ - if self.input_stream is not None: - raise AttributeError("an input stream is defined") - - rv = getattr(self, name) - - if rv is None: - rv = storage() - setattr(self, name, rv) - - return rv - - def _set_form(self, name, value): - """Common behavior for setting the :attr:`form` and - :attr:`files` properties. - - :param name: Name of the internal cached attribute. - :param value: Value to assign to the attribute. - """ - self._input_stream = None - setattr(self, name, value) - - @property - def form(self): - """A :class:`MultiDict` of form values.""" - return self._get_form("_form", MultiDict) - - @form.setter - def form(self, value): - self._set_form("_form", value) - - @property - def files(self): - """A :class:`FileMultiDict` of uploaded files. Use - :meth:`~FileMultiDict.add_file` to add new files. - """ - return self._get_form("_files", FileMultiDict) - - @files.setter - def files(self, value): - self._set_form("_files", value) - - @property - def input_stream(self): - """An optional input stream. If you set this it will clear - :attr:`form` and :attr:`files`. - """ - return self._input_stream - - @input_stream.setter - def input_stream(self, value): - self._input_stream = value - self._form = None - self._files = None - - @property - def query_string(self): - """The query string. If you set this to a string - :attr:`args` will no longer be available. - """ - if self._query_string is None: - if self._args is not None: - return url_encode(self._args, charset=self.charset) - return "" - return self._query_string - - @query_string.setter - def query_string(self, value): - self._query_string = value - self._args = None - - @property - def args(self): - """The URL arguments as :class:`MultiDict`.""" - if self._query_string is not None: - raise AttributeError("a query string is defined") - if self._args is None: - self._args = MultiDict() - return self._args - - @args.setter - def args(self, value): - self._query_string = None - self._args = value - - @property - def server_name(self): - """The server name (read-only, use :attr:`host` to set)""" - return self.host.split(":", 1)[0] - - @property - def server_port(self): - """The server port as integer (read-only, use :attr:`host` to set)""" - pieces = self.host.split(":", 1) - if len(pieces) == 2 and pieces[1].isdigit(): - return int(pieces[1]) - if self.url_scheme == "https": - return 443 - return 80 - - def __del__(self): - try: - self.close() - except Exception: - pass - - def close(self): - """Closes all files. If you put real :class:`file` objects into the - :attr:`files` dict you can call this method to automatically close - them all in one go. - """ - if self.closed: - return - try: - files = itervalues(self.files) - except AttributeError: - files = () - for f in files: - try: - f.close() - except Exception: - pass - self.closed = True - - def get_environ(self): - """Return the built environ. - - .. versionchanged:: 0.15 - The content type and length headers are set based on - input stream detection. Previously this only set the WSGI - keys. 
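-
-        A minimal usage sketch (the names and values are illustrative,
-        not part of the API)::
-
-            builder = EnvironBuilder(path="/hello", method="POST",
-                                     data={"name": "value"})
-            environ = builder.get_environ()
-            assert environ["REQUEST_METHOD"] == "POST"
-            assert environ["PATH_INFO"] == "/hello"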
- """ - input_stream = self.input_stream - content_length = self.content_length - - mimetype = self.mimetype - content_type = self.content_type - - if input_stream is not None: - start_pos = input_stream.tell() - input_stream.seek(0, 2) - end_pos = input_stream.tell() - input_stream.seek(start_pos) - content_length = end_pos - start_pos - elif mimetype == "multipart/form-data": - values = CombinedMultiDict([self.form, self.files]) - input_stream, content_length, boundary = stream_encode_multipart( - values, charset=self.charset - ) - content_type = mimetype + '; boundary="%s"' % boundary - elif mimetype == "application/x-www-form-urlencoded": - # XXX: py2v3 review - values = url_encode(self.form, charset=self.charset) - values = values.encode("ascii") - content_length = len(values) - input_stream = BytesIO(values) - else: - input_stream = BytesIO() - - result = {} - if self.environ_base: - result.update(self.environ_base) - - def _path_encode(x): - return wsgi_encoding_dance(url_unquote(x, self.charset), self.charset) - - qs = wsgi_encoding_dance(self.query_string) - - result.update( - { - "REQUEST_METHOD": self.method, - "SCRIPT_NAME": _path_encode(self.script_root), - "PATH_INFO": _path_encode(self.path), - "QUERY_STRING": qs, - # Non-standard, added by mod_wsgi, uWSGI - "REQUEST_URI": wsgi_encoding_dance(self.path), - # Non-standard, added by gunicorn - "RAW_URI": wsgi_encoding_dance(self.path), - "SERVER_NAME": self.server_name, - "SERVER_PORT": str(self.server_port), - "HTTP_HOST": self.host, - "SERVER_PROTOCOL": self.server_protocol, - "wsgi.version": self.wsgi_version, - "wsgi.url_scheme": self.url_scheme, - "wsgi.input": input_stream, - "wsgi.errors": self.errors_stream, - "wsgi.multithread": self.multithread, - "wsgi.multiprocess": self.multiprocess, - "wsgi.run_once": self.run_once, - } - ) - - headers = self.headers.copy() - - if content_type is not None: - result["CONTENT_TYPE"] = content_type - headers.set("Content-Type", content_type) - - if content_length is not None: - result["CONTENT_LENGTH"] = str(content_length) - headers.set("Content-Length", content_length) - - for key, value in headers.to_wsgi_list(): - result["HTTP_%s" % key.upper().replace("-", "_")] = value - - if self.environ_overrides: - result.update(self.environ_overrides) - - return result - - def get_request(self, cls=None): - """Returns a request with the data. If the request class is not - specified :attr:`request_class` is used. - - :param cls: The request wrapper to use. - """ - if cls is None: - cls = self.request_class - return cls(self.get_environ()) - - -class ClientRedirectError(Exception): - """If a redirect loop is detected when using follow_redirects=True with - the :cls:`Client`, then this exception is raised. - """ - - -class Client(object): - """This class allows you to send requests to a wrapped application. - - The response wrapper can be a class or factory function that takes - three arguments: app_iter, status and headers. The default response - wrapper just returns a tuple. - - Example:: - - class ClientResponse(BaseResponse): - ... - - client = Client(MyApplication(), response_wrapper=ClientResponse) - - The use_cookies parameter indicates whether cookies should be stored and - sent for subsequent requests. This is True by default, but passing False - will disable this behaviour. - - If you want to request some subdomain of your application you may set - `allow_subdomain_redirects` to `True` as if not no external redirects - are allowed. - - .. 
versionadded:: 0.5 - `use_cookies` is new in this version. Older versions did not provide - builtin cookie support. - - .. versionadded:: 0.14 - The `mimetype` parameter was added. - - .. versionadded:: 0.15 - The ``json`` parameter. - """ - - def __init__( - self, - application, - response_wrapper=None, - use_cookies=True, - allow_subdomain_redirects=False, - ): - self.application = application - self.response_wrapper = response_wrapper - if use_cookies: - self.cookie_jar = _TestCookieJar() - else: - self.cookie_jar = None - self.allow_subdomain_redirects = allow_subdomain_redirects - - def set_cookie( - self, - server_name, - key, - value="", - max_age=None, - expires=None, - path="/", - domain=None, - secure=None, - httponly=False, - samesite=None, - charset="utf-8", - ): - """Sets a cookie in the client's cookie jar. The server name - is required and has to match the one that is also passed to - the open call. - """ - assert self.cookie_jar is not None, "cookies disabled" - header = dump_cookie( - key, - value, - max_age, - expires, - path, - domain, - secure, - httponly, - charset, - samesite=samesite, - ) - environ = create_environ(path, base_url="http://" + server_name) - headers = [("Set-Cookie", header)] - self.cookie_jar.extract_wsgi(environ, headers) - - def delete_cookie(self, server_name, key, path="/", domain=None): - """Deletes a cookie in the test client.""" - self.set_cookie( - server_name, key, expires=0, max_age=0, path=path, domain=domain - ) - - def run_wsgi_app(self, environ, buffered=False): - """Runs the wrapped WSGI app with the given environment.""" - if self.cookie_jar is not None: - self.cookie_jar.inject_wsgi(environ) - rv = run_wsgi_app(self.application, environ, buffered=buffered) - if self.cookie_jar is not None: - self.cookie_jar.extract_wsgi(environ, rv[2]) - return rv - - def resolve_redirect(self, response, new_location, environ, buffered=False): - """Perform a new request to the location given by the redirect - response to the previous request. - """ - scheme, netloc, path, qs, anchor = url_parse(new_location) - builder = EnvironBuilder.from_environ(environ, query_string=qs) - - to_name_parts = netloc.split(":", 1)[0].split(".") - from_name_parts = builder.server_name.split(".") - - if to_name_parts != [""]: - # The new location has a host, use it for the base URL. - builder.url_scheme = scheme - builder.host = netloc - else: - # A local redirect with autocorrect_location_header=False - # doesn't have a host, so use the request's host. - to_name_parts = from_name_parts - - # Explain why a redirect to a different server name won't be followed. - if to_name_parts != from_name_parts: - if to_name_parts[-len(from_name_parts) :] == from_name_parts: - if not self.allow_subdomain_redirects: - raise RuntimeError("Following subdomain redirects is not enabled.") - else: - raise RuntimeError("Following external redirects is not supported.") - - path_parts = path.split("/") - root_parts = builder.script_root.split("/") - - if path_parts[: len(root_parts)] == root_parts: - # Strip the script root from the path. - builder.path = path[len(builder.script_root) :] - else: - # The new location is not under the script root, so use the - # whole path and clear the previous root. - builder.path = path - builder.script_root = "" - - status_code = int(response[1].split(None, 1)[0]) - - # Only 307 and 308 preserve all of the original request. - if status_code not in {307, 308}: - # HEAD is preserved, everything else becomes GET. 
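-                # (301/302/303 are conventionally replayed by clients as
-                # a body-less GET, with HEAD staying HEAD; 307/308 were
-                # added to the spec precisely to forbid that rewrite)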
- if builder.method != "HEAD": - builder.method = "GET" - - # Clear the body and the headers that describe it. - builder.input_stream = None - builder.content_type = None - builder.content_length = None - builder.headers.pop("Transfer-Encoding", None) - - # Disable the response wrapper while handling redirects. Not - # thread safe, but the client should not be shared anyway. - old_response_wrapper = self.response_wrapper - self.response_wrapper = None - - try: - return self.open(builder, as_tuple=True, buffered=buffered) - finally: - self.response_wrapper = old_response_wrapper - - def open(self, *args, **kwargs): - """Takes the same arguments as the :class:`EnvironBuilder` class with - some additions: You can provide a :class:`EnvironBuilder` or a WSGI - environment as only argument instead of the :class:`EnvironBuilder` - arguments and two optional keyword arguments (`as_tuple`, `buffered`) - that change the type of the return value or the way the application is - executed. - - .. versionchanged:: 0.5 - If a dict is provided as file in the dict for the `data` parameter - the content type has to be called `content_type` now instead of - `mimetype`. This change was made for consistency with - :class:`werkzeug.FileWrapper`. - - The `follow_redirects` parameter was added to :func:`open`. - - Additional parameters: - - :param as_tuple: Returns a tuple in the form ``(environ, result)`` - :param buffered: Set this to True to buffer the application run. - This will automatically close the application for - you as well. - :param follow_redirects: Set this to True if the `Client` should - follow HTTP redirects. - """ - as_tuple = kwargs.pop("as_tuple", False) - buffered = kwargs.pop("buffered", False) - follow_redirects = kwargs.pop("follow_redirects", False) - environ = None - if not kwargs and len(args) == 1: - if isinstance(args[0], EnvironBuilder): - environ = args[0].get_environ() - elif isinstance(args[0], dict): - environ = args[0] - if environ is None: - builder = EnvironBuilder(*args, **kwargs) - try: - environ = builder.get_environ() - finally: - builder.close() - - response = self.run_wsgi_app(environ.copy(), buffered=buffered) - - # handle redirects - redirect_chain = [] - while 1: - status_code = int(response[1].split(None, 1)[0]) - if ( - status_code not in {301, 302, 303, 305, 307, 308} - or not follow_redirects - ): - break - - # Exhaust intermediate response bodies to ensure middleware - # that returns an iterator runs any cleanup code. 
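-            # (an app or middleware may hand back a generator whose
-            # try/finally releases resources; draining it here is what
-            # lets that cleanup run before the follow-up request)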
- if not buffered: - for _ in response[0]: - pass - - new_location = response[2]["location"] - new_redirect_entry = (new_location, status_code) - if new_redirect_entry in redirect_chain: - raise ClientRedirectError("loop detected") - redirect_chain.append(new_redirect_entry) - environ, response = self.resolve_redirect( - response, new_location, environ, buffered=buffered - ) - - if self.response_wrapper is not None: - response = self.response_wrapper(*response) - if as_tuple: - return environ, response - return response - - def get(self, *args, **kw): - """Like open but method is enforced to GET.""" - kw["method"] = "GET" - return self.open(*args, **kw) - - def patch(self, *args, **kw): - """Like open but method is enforced to PATCH.""" - kw["method"] = "PATCH" - return self.open(*args, **kw) - - def post(self, *args, **kw): - """Like open but method is enforced to POST.""" - kw["method"] = "POST" - return self.open(*args, **kw) - - def head(self, *args, **kw): - """Like open but method is enforced to HEAD.""" - kw["method"] = "HEAD" - return self.open(*args, **kw) - - def put(self, *args, **kw): - """Like open but method is enforced to PUT.""" - kw["method"] = "PUT" - return self.open(*args, **kw) - - def delete(self, *args, **kw): - """Like open but method is enforced to DELETE.""" - kw["method"] = "DELETE" - return self.open(*args, **kw) - - def options(self, *args, **kw): - """Like open but method is enforced to OPTIONS.""" - kw["method"] = "OPTIONS" - return self.open(*args, **kw) - - def trace(self, *args, **kw): - """Like open but method is enforced to TRACE.""" - kw["method"] = "TRACE" - return self.open(*args, **kw) - - def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, self.application) - - -def create_environ(*args, **kwargs): - """Create a new WSGI environ dict based on the values passed. The first - parameter should be the path of the request which defaults to '/'. The - second one can either be an absolute path (in that case the host is - localhost:80) or a full path to the request with scheme, netloc port and - the path to the script. - - This accepts the same arguments as the :class:`EnvironBuilder` - constructor. - - .. versionchanged:: 0.5 - This function is now a thin wrapper over :class:`EnvironBuilder` which - was added in 0.5. The `headers`, `environ_base`, `environ_overrides` - and `charset` parameters were added. - """ - builder = EnvironBuilder(*args, **kwargs) - try: - return builder.get_environ() - finally: - builder.close() - - -def run_wsgi_app(app, environ, buffered=False): - """Return a tuple in the form (app_iter, status, headers) of the - application output. This works best if you pass it an application that - returns an iterator all the time. - - Sometimes applications may use the `write()` callable returned - by the `start_response` function. This tries to resolve such edge - cases automatically. But if you don't get the expected output you - should set `buffered` to `True` which enforces buffering. - - If passed an invalid WSGI application the behavior of this function is - undefined. Never pass non-conforming WSGI applications to this function. - - :param app: the application to execute. - :param buffered: set to `True` to enforce buffering. 
- :return: tuple in the form ``(app_iter, status, headers)`` - """ - environ = _get_environ(environ) - response = [] - buffer = [] - - def start_response(status, headers, exc_info=None): - if exc_info is not None: - reraise(*exc_info) - response[:] = [status, headers] - return buffer.append - - app_rv = app(environ, start_response) - close_func = getattr(app_rv, "close", None) - app_iter = iter(app_rv) - - # when buffering we emit the close call early and convert the - # application iterator into a regular list - if buffered: - try: - app_iter = list(app_iter) - finally: - if close_func is not None: - close_func() - - # otherwise we iterate the application iter until we have a response, chain - # the already received data with the already collected data and wrap it in - # a new `ClosingIterator` if we need to restore a `close` callable from the - # original return value. - else: - for item in app_iter: - buffer.append(item) - if response: - break - if buffer: - app_iter = chain(buffer, app_iter) - if close_func is not None and app_iter is not app_rv: - app_iter = ClosingIterator(app_iter, close_func) - - return app_iter, response[0], Headers(response[1]) diff --git a/venv/lib/python3.7/site-packages/werkzeug/testapp.py b/venv/lib/python3.7/site-packages/werkzeug/testapp.py deleted file mode 100644 index 5ea8549..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/testapp.py +++ /dev/null @@ -1,241 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.testapp - ~~~~~~~~~~~~~~~~ - - Provide a small test application that can be used to test a WSGI server - and check it for WSGI compliance. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import base64 -import os -import sys -from textwrap import wrap - -from . import __version__ as _werkzeug_version -from .utils import escape -from .wrappers import BaseRequest as Request -from .wrappers import BaseResponse as Response - -logo = Response( - base64.b64decode( - """ -R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP///////// -//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv -nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25 -7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq -ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX -m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G -p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo -SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf -78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA -ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA -tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx -w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx -lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45 -Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB -yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd -dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r -idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh -EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8 -ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64 -gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C 
-JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y -Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9 -YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX -c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb -qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL -cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG -cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2 -KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe -EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb -UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB -Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z -aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn -kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs -=""" - ), - mimetype="image/png", -) - - -TEMPLATE = u"""\ - -WSGI Information - -
-<div class="box">
-  <img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
-  <h1>WSGI Information</h1>
-  <div class="note">
-    This page displays all available information about the WSGI server and
-    the underlying Python interpreter.
-  </div>
-  <h2 id="python-interpreter">Python Interpreter</h2>
-  <table>
-    <tr>
-      <th>Python Version
-      <td>%(python_version)s
-    <tr>
-      <th>Platform
-      <td>%(platform)s [%(os)s]
-    <tr>
-      <th>API Version
-      <td>%(api_version)s
-    <tr>
-      <th>Byteorder
-      <td>%(byteorder)s
-    <tr>
-      <th>Werkzeug Version
-      <td>%(werkzeug_version)s
-  </table>
-  <h2 id="wsgi-environment">WSGI Environment</h2>
-  <table>
-    %(wsgi_env)s
-  </table>
-  <h2 id="installed-eggs">Installed Eggs</h2>
-  <div class="note">
-    The following python packages were installed on the system as
-    Python eggs:
-  </div>
-  <ul>
-    %(python_eggs)s
-  </ul>
-  <h2 id="sys-path">System Path</h2>
-  <div class="note">
-    The following paths are the current contents of the load path. The
-    following entries are looked up for Python packages. Note that not
-    all items in this path are folders. Gray and underlined items are
-    entries pointing to invalid resources or used by custom import hooks
-    such as the zip importer.
-  </div>
-  <div class="note">
-    Items with a bright background were expanded for display from a relative
-    path. If you encounter such paths in the output you might want to check
-    your setup as relative paths are usually problematic in multithreaded
-    environments.
-  </div>
-  <ul class="path">
-    %(sys_path)s
-  </ul>
-</div>
    -""" - - -def iter_sys_path(): - if os.name == "posix": - - def strip(x): - prefix = os.path.expanduser("~") - if x.startswith(prefix): - x = "~" + x[len(prefix) :] - return x - - else: - - def strip(x): - return x - - cwd = os.path.abspath(os.getcwd()) - for item in sys.path: - path = os.path.join(cwd, item or os.path.curdir) - yield strip(os.path.normpath(path)), not os.path.isdir(path), path != item - - -def render_testapp(req): - try: - import pkg_resources - except ImportError: - eggs = () - else: - eggs = sorted(pkg_resources.working_set, key=lambda x: x.project_name.lower()) - python_eggs = [] - for egg in eggs: - try: - version = egg.version - except (ValueError, AttributeError): - version = "unknown" - python_eggs.append( - "
  • %s [%s]" % (escape(egg.project_name), escape(version)) - ) - - wsgi_env = [] - sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower()) - for key, value in sorted_environ: - wsgi_env.append( - "%s%s" - % (escape(str(key)), " ".join(wrap(escape(repr(value))))) - ) - - sys_path = [] - for item, virtual, expanded in iter_sys_path(): - class_ = [] - if virtual: - class_.append("virtual") - if expanded: - class_.append("exp") - sys_path.append( - "%s" - % (' class="%s"' % " ".join(class_) if class_ else "", escape(item)) - ) - - return ( - TEMPLATE - % { - "python_version": "
    ".join(escape(sys.version).splitlines()), - "platform": escape(sys.platform), - "os": escape(os.name), - "api_version": sys.api_version, - "byteorder": sys.byteorder, - "werkzeug_version": _werkzeug_version, - "python_eggs": "\n".join(python_eggs), - "wsgi_env": "\n".join(wsgi_env), - "sys_path": "\n".join(sys_path), - } - ).encode("utf-8") - - -def test_app(environ, start_response): - """Simple test application that dumps the environment. You can use - it to check if Werkzeug is working properly: - - .. sourcecode:: pycon - - >>> from werkzeug.serving import run_simple - >>> from werkzeug.testapp import test_app - >>> run_simple('localhost', 3000, test_app) - * Running on http://localhost:3000/ - - The application displays important information from the WSGI environment, - the Python interpreter and the installed libraries. - """ - req = Request(environ, populate_request=False) - if req.args.get("resource") == "logo": - response = logo - else: - response = Response(render_testapp(req), mimetype="text/html") - return response(environ, start_response) - - -if __name__ == "__main__": - from .serving import run_simple - - run_simple("localhost", 5000, test_app, use_reloader=True) diff --git a/venv/lib/python3.7/site-packages/werkzeug/urls.py b/venv/lib/python3.7/site-packages/werkzeug/urls.py deleted file mode 100644 index d5e487b..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/urls.py +++ /dev/null @@ -1,1138 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.urls - ~~~~~~~~~~~~~ - - ``werkzeug.urls`` used to provide several wrapper functions for Python 2 - urlparse, whose main purpose were to work around the behavior of the Py2 - stdlib and its lack of unicode support. While this was already a somewhat - inconvenient situation, it got even more complicated because Python 3's - ``urllib.parse`` actually does handle unicode properly. In other words, - this module would wrap two libraries with completely different behavior. So - now this module contains a 2-and-3-compatible backport of Python 3's - ``urllib.parse``, which is mostly API-compatible. - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import codecs -import os -import re -from collections import namedtuple - -from ._compat import fix_tuple_repr -from ._compat import implements_to_string -from ._compat import make_literal_wrapper -from ._compat import normalize_string_tuple -from ._compat import PY2 -from ._compat import text_type -from ._compat import to_native -from ._compat import to_unicode -from ._compat import try_coerce_native -from ._internal import _decode_idna -from ._internal import _encode_idna - -# A regular expression for what a valid schema looks like -_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$") - -# Characters that are safe in any part of an URL. 
-_always_safe = frozenset( - bytearray( - b"abcdefghijklmnopqrstuvwxyz" - b"ABCDEFGHIJKLMNOPQRSTUVWXYZ" - b"0123456789" - b"-._~" - ) - ) - - -_hexdigits = "0123456789ABCDEFabcdef" -_hextobyte = dict( - ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits - ) -_bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)] - - -_URLTuple = fix_tuple_repr( - namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"]) - ) - - -class BaseURL(_URLTuple): - """Superclass of :py:class:`URL` and :py:class:`BytesURL`.""" - - __slots__ = () - - def replace(self, **kwargs): - """Return an URL with the same values, except for those parameters - given new values by whichever keyword arguments are specified.""" - return self._replace(**kwargs) - - @property - def host(self): - """The host part of the URL if available, otherwise `None`. The - host is either the hostname or the IP address mentioned in the - URL. It will not contain the port. - """ - return self._split_host()[0] - - @property - def ascii_host(self): - """Works exactly like :attr:`host` but will return a result that - is restricted to ASCII. If it finds a netloc that is not ASCII - it will attempt to idna decode it. This is useful for socket - operations when the URL might include internationalized characters. - """ - rv = self.host - if rv is not None and isinstance(rv, text_type): - try: - rv = _encode_idna(rv) - except UnicodeError: - rv = rv.encode("ascii", "ignore") - return to_native(rv, "ascii", "ignore") - - @property - def port(self): - """The port in the URL as an integer if it was present, `None` - otherwise. This does not fill in default ports. - """ - try: - rv = int(to_native(self._split_host()[1])) - if 0 <= rv <= 65535: - return rv - except (ValueError, TypeError): - pass - - @property - def auth(self): - """The authentication part in the URL if available, `None` - otherwise. - """ - return self._split_netloc()[0] - - @property - def username(self): - """The username if it was part of the URL, `None` otherwise. - This undergoes URL decoding and will always be a unicode string. - """ - rv = self._split_auth()[0] - if rv is not None: - return _url_unquote_legacy(rv) - - @property - def raw_username(self): - """The username if it was part of the URL, `None` otherwise. - Unlike :attr:`username` this one is not being decoded. - """ - return self._split_auth()[0] - - @property - def password(self): - """The password if it was part of the URL, `None` otherwise. - This undergoes URL decoding and will always be a unicode string. - """ - rv = self._split_auth()[1] - if rv is not None: - return _url_unquote_legacy(rv) - - @property - def raw_password(self): - """The password if it was part of the URL, `None` otherwise. - Unlike :attr:`password` this one is not being decoded. - """ - return self._split_auth()[1] - - def decode_query(self, *args, **kwargs): - """Decodes the query part of the URL. This is a shortcut for - calling :func:`url_decode` on the query argument. The arguments and - keyword arguments are forwarded to :func:`url_decode` unchanged. - """ - return url_decode(self.query, *args, **kwargs) - - def join(self, *args, **kwargs): - """Joins this URL with another one. This is just a convenience - function for calling into :meth:`url_join` and then parsing the - return value again. - """ - return url_parse(url_join(self, *args, **kwargs)) - - def to_url(self): - """Returns a URL string or bytes depending on the type of the - information stored. 
This is just a convenience function - for calling :meth:`url_unparse` for this URL. - """ - return url_unparse(self) - - def decode_netloc(self): - """Decodes the netloc part into a string.""" - rv = _decode_idna(self.host or "") - - if ":" in rv: - rv = "[%s]" % rv - port = self.port - if port is not None: - rv = "%s:%d" % (rv, port) - auth = ":".join( - filter( - None, - [ - _url_unquote_legacy(self.raw_username or "", "/:%@"), - _url_unquote_legacy(self.raw_password or "", "/:%@"), - ], - ) - ) - if auth: - rv = "%s@%s" % (auth, rv) - return rv - - def to_uri_tuple(self): - """Returns a :class:`BytesURL` tuple that holds a URI. This will - encode all the information in the URL properly to ASCII using the - rules a web browser would follow. - - It's usually more interesting to directly call :meth:`iri_to_uri` which - will return a string. - """ - return url_parse(iri_to_uri(self).encode("ascii")) - - def to_iri_tuple(self): - """Returns a :class:`URL` tuple that holds a IRI. This will try - to decode as much information as possible in the URL without - losing information similar to how a web browser does it for the - URL bar. - - It's usually more interesting to directly call :meth:`uri_to_iri` which - will return a string. - """ - return url_parse(uri_to_iri(self)) - - def get_file_location(self, pathformat=None): - """Returns a tuple with the location of the file in the form - ``(server, location)``. If the netloc is empty in the URL or - points to localhost, it's represented as ``None``. - - The `pathformat` by default is autodetection but needs to be set - when working with URLs of a specific system. The supported values - are ``'windows'`` when working with Windows or DOS paths and - ``'posix'`` when working with posix paths. - - If the URL does not point to a local file, the server and location - are both represented as ``None``. - - :param pathformat: The expected format of the path component. - Currently ``'windows'`` and ``'posix'`` are - supported. Defaults to ``None`` which is - autodetect. - """ - if self.scheme != "file": - return None, None - - path = url_unquote(self.path) - host = self.netloc or None - - if pathformat is None: - if os.name == "nt": - pathformat = "windows" - else: - pathformat = "posix" - - if pathformat == "windows": - if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:": - path = path[1:2] + ":" + path[3:] - windows_share = path[:3] in ("\\" * 3, "/" * 3) - import ntpath - - path = ntpath.normpath(path) - # Windows shared drives are represented as ``\\host\\directory``. - # That results in a URL like ``file://///host/directory``, and a - # path like ``///host/directory``. We need to special-case this - # because the path contains the hostname. 
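-            # (after normpath the path looks like "\\host\directory";
-            # stripping the leading backslashes and splitting once on
-            # "\\", as below, recovers the server name and share path)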
- if windows_share and host is None: - parts = path.lstrip("\\").split("\\", 1) - if len(parts) == 2: - host, path = parts - else: - host = parts[0] - path = "" - elif pathformat == "posix": - import posixpath - - path = posixpath.normpath(path) - else: - raise TypeError("Invalid path format %s" % repr(pathformat)) - - if host in ("127.0.0.1", "::1", "localhost"): - host = None - - return host, path - - def _split_netloc(self): - if self._at in self.netloc: - return self.netloc.split(self._at, 1) - return None, self.netloc - - def _split_auth(self): - auth = self._split_netloc()[0] - if not auth: - return None, None - if self._colon not in auth: - return auth, None - return auth.split(self._colon, 1) - - def _split_host(self): - rv = self._split_netloc()[1] - if not rv: - return None, None - - if not rv.startswith(self._lbracket): - if self._colon in rv: - return rv.split(self._colon, 1) - return rv, None - - idx = rv.find(self._rbracket) - if idx < 0: - return rv, None - - host = rv[1:idx] - rest = rv[idx + 1 :] - if rest.startswith(self._colon): - return host, rest[1:] - return host, None - - -@implements_to_string -class URL(BaseURL): - """Represents a parsed URL. This behaves like a regular tuple but - also has some extra attributes that give further insight into the - URL. - """ - - __slots__ = () - _at = "@" - _colon = ":" - _lbracket = "[" - _rbracket = "]" - - def __str__(self): - return self.to_url() - - def encode_netloc(self): - """Encodes the netloc part to an ASCII safe URL as bytes.""" - rv = self.ascii_host or "" - if ":" in rv: - rv = "[%s]" % rv - port = self.port - if port is not None: - rv = "%s:%d" % (rv, port) - auth = ":".join( - filter( - None, - [ - url_quote(self.raw_username or "", "utf-8", "strict", "/:%"), - url_quote(self.raw_password or "", "utf-8", "strict", "/:%"), - ], - ) - ) - if auth: - rv = "%s@%s" % (auth, rv) - return to_native(rv) - - def encode(self, charset="utf-8", errors="replace"): - """Encodes the URL to a tuple made out of bytes. The charset is - only being used for the path, query and fragment. - """ - return BytesURL( - self.scheme.encode("ascii"), - self.encode_netloc(), - self.path.encode(charset, errors), - self.query.encode(charset, errors), - self.fragment.encode(charset, errors), - ) - - -class BytesURL(BaseURL): - """Represents a parsed URL in bytes.""" - - __slots__ = () - _at = b"@" - _colon = b":" - _lbracket = b"[" - _rbracket = b"]" - - def __str__(self): - return self.to_url().decode("utf-8", "replace") - - def encode_netloc(self): - """Returns the netloc unchanged as bytes.""" - return self.netloc - - def decode(self, charset="utf-8", errors="replace"): - """Decodes the URL to a tuple made out of strings. The charset is - only being used for the path, query and fragment. 
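-
-        For example (a sketch; only the bytes-to-text decoding happens
-        here, percent-escapes are left untouched)::
-
-            url_parse(b"http://example.com/p%C3%A5th").decode().path
-            # -> '/p%C3%A5th'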
- """ - return URL( - self.scheme.decode("ascii"), - self.decode_netloc(), - self.path.decode(charset, errors), - self.query.decode(charset, errors), - self.fragment.decode(charset, errors), - ) - - -_unquote_maps = {frozenset(): _hextobyte} - - -def _unquote_to_bytes(string, unsafe=""): - if isinstance(string, text_type): - string = string.encode("utf-8") - - if isinstance(unsafe, text_type): - unsafe = unsafe.encode("utf-8") - - unsafe = frozenset(bytearray(unsafe)) - groups = iter(string.split(b"%")) - result = bytearray(next(groups, b"")) - - try: - hex_to_byte = _unquote_maps[unsafe] - except KeyError: - hex_to_byte = _unquote_maps[unsafe] = { - h: b for h, b in _hextobyte.items() if b not in unsafe - } - - for group in groups: - code = group[:2] - - if code in hex_to_byte: - result.append(hex_to_byte[code]) - result.extend(group[2:]) - else: - result.append(37) # % - result.extend(group) - - return bytes(result) - - -def _url_encode_impl(obj, charset, encode_keys, sort, key): - from .datastructures import iter_multi_items - - iterable = iter_multi_items(obj) - if sort: - iterable = sorted(iterable, key=key) - for key, value in iterable: - if value is None: - continue - if not isinstance(key, bytes): - key = text_type(key).encode(charset) - if not isinstance(value, bytes): - value = text_type(value).encode(charset) - yield _fast_url_quote_plus(key) + "=" + _fast_url_quote_plus(value) - - -def _url_unquote_legacy(value, unsafe=""): - try: - return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe) - except UnicodeError: - return url_unquote(value, charset="latin1", unsafe=unsafe) - - -def url_parse(url, scheme=None, allow_fragments=True): - """Parses a URL from a string into a :class:`URL` tuple. If the URL - is lacking a scheme it can be provided as second argument. Otherwise, - it is ignored. Optionally fragments can be stripped from the URL - by setting `allow_fragments` to `False`. - - The inverse of this function is :func:`url_unparse`. - - :param url: the URL to parse. - :param scheme: the default schema to use if the URL is schemaless. - :param allow_fragments: if set to `False` a fragment will be removed - from the URL. - """ - s = make_literal_wrapper(url) - is_text_based = isinstance(url, text_type) - - if scheme is None: - scheme = s("") - netloc = query = fragment = s("") - i = url.find(s(":")) - if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")): - # make sure "iri" is not actually a port number (in which case - # "scheme" is really part of the path) - rest = url[i + 1 :] - if not rest or any(c not in s("0123456789") for c in rest): - # not a port number - scheme, url = url[:i].lower(), rest - - if url[:2] == s("//"): - delim = len(url) - for c in s("/?#"): - wdelim = url.find(c, 2) - if wdelim >= 0: - delim = min(delim, wdelim) - netloc, url = url[2:delim], url[delim:] - if (s("[") in netloc and s("]") not in netloc) or ( - s("]") in netloc and s("[") not in netloc - ): - raise ValueError("Invalid IPv6 URL") - - if allow_fragments and s("#") in url: - url, fragment = url.split(s("#"), 1) - if s("?") in url: - url, query = url.split(s("?"), 1) - - result_type = URL if is_text_based else BytesURL - return result_type(scheme, netloc, url, query, fragment) - - -def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""): - """Precompile the translation table for a URL encoding function. - - Unlike :func:`url_quote`, the generated function only takes the - string to quote. 
- - :param charset: The charset to encode the result with. - :param errors: How to handle encoding errors. - :param safe: An optional sequence of safe characters to never encode. - :param unsafe: An optional sequence of unsafe characters to always encode. - """ - if isinstance(safe, text_type): - safe = safe.encode(charset, errors) - - if isinstance(unsafe, text_type): - unsafe = unsafe.encode(charset, errors) - - safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe)) - table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)] - - if not PY2: - - def quote(string): - return "".join([table[c] for c in string]) - - else: - - def quote(string): - return "".join([table[c] for c in bytearray(string)]) - - return quote - - -_fast_url_quote = _make_fast_url_quote() -_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+") - - -def _fast_url_quote_plus(string): - return _fast_quote_plus(string).replace(" ", "+") - - -def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""): - """URL encode a single string with a given encoding. - - :param s: the string to quote. - :param charset: the charset to be used. - :param safe: an optional sequence of safe characters. - :param unsafe: an optional sequence of unsafe characters. - - .. versionadded:: 0.9.2 - The `unsafe` parameter was added. - """ - if not isinstance(string, (text_type, bytes, bytearray)): - string = text_type(string) - if isinstance(string, text_type): - string = string.encode(charset, errors) - if isinstance(safe, text_type): - safe = safe.encode(charset, errors) - if isinstance(unsafe, text_type): - unsafe = unsafe.encode(charset, errors) - safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe)) - rv = bytearray() - for char in bytearray(string): - if char in safe: - rv.append(char) - else: - rv.extend(_bytetohex[char]) - return to_native(bytes(rv)) - - -def url_quote_plus(string, charset="utf-8", errors="strict", safe=""): - """URL encode a single string with the given encoding and convert - whitespace to "+". - - :param s: The string to quote. - :param charset: The charset to be used. - :param safe: An optional sequence of safe characters. - """ - return url_quote(string, charset, errors, safe + " ", "+").replace(" ", "+") - - -def url_unparse(components): - """The reverse operation to :meth:`url_parse`. This accepts arbitrary - as well as :class:`URL` tuples and returns a URL as a string. - - :param components: the parsed URL as tuple which should be converted - into a URL string. - """ - scheme, netloc, path, query, fragment = normalize_string_tuple(components) - s = make_literal_wrapper(scheme) - url = s("") - - # We generally treat file:///x and file:/x the same which is also - # what browsers seem to do. This also allows us to ignore a schema - # register for netloc utilization or having to differentiate between - # empty and missing netloc. - if netloc or (scheme and path.startswith(s("/"))): - if path and path[:1] != s("/"): - path = s("/") + path - url = s("//") + (netloc or s("")) + path - elif path: - url += path - if scheme: - url = scheme + s(":") + url - if query: - url = url + s("?") + query - if fragment: - url = url + s("#") + fragment - return url - - -def url_unquote(string, charset="utf-8", errors="replace", unsafe=""): - """URL decode a single string with a given encoding. If the charset - is set to `None` no unicode decoding is performed and raw bytes - are returned. - - :param s: the string to unquote. 
- :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. - :param errors: the error handling for the charset decoding. - """ - rv = _unquote_to_bytes(string, unsafe) - if charset is not None: - rv = rv.decode(charset, errors) - return rv - - -def url_unquote_plus(s, charset="utf-8", errors="replace"): - """URL decode a single string with the given `charset` and decode "+" to - whitespace. - - Per default encoding errors are ignored. If you want a different behavior - you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a - :exc:`HTTPUnicodeError` is raised. - - :param s: The string to unquote. - :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. - :param errors: The error handling for the `charset` decoding. - """ - if isinstance(s, text_type): - s = s.replace(u"+", u" ") - else: - s = s.replace(b"+", b" ") - return url_unquote(s, charset, errors) - - -def url_fix(s, charset="utf-8"): - r"""Sometimes you get an URL by a user that just isn't a real URL because - it contains unsafe characters like ' ' and so on. This function can fix - some of the problems in a similar way browsers handle data entered by the - user: - - >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)') - 'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)' - - :param s: the string with the URL to fix. - :param charset: The target charset for the URL if the url was given as - unicode string. - """ - # First step is to switch to unicode processing and to convert - # backslashes (which are invalid in URLs anyways) to slashes. This is - # consistent with what Chrome does. - s = to_unicode(s, charset, "replace").replace("\\", "/") - - # For the specific case that we look like a malformed windows URL - # we want to fix this up manually: - if s.startswith("file://") and s[7:8].isalpha() and s[8:10] in (":/", "|/"): - s = "file:///" + s[7:] - - url = url_parse(s) - path = url_quote(url.path, charset, safe="/%+$!*'(),") - qs = url_quote_plus(url.query, charset, safe=":&%=+$!*'(),") - anchor = url_quote_plus(url.fragment, charset, safe=":&%=+$!*'(),") - return to_native(url_unparse((url.scheme, url.encode_netloc(), path, qs, anchor))) - - -# not-unreserved characters remain quoted when unquoting to IRI -_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe]) - - -def _codec_error_url_quote(e): - """Used in :func:`uri_to_iri` after unquoting to re-quote any - invalid bytes. - """ - out = _fast_url_quote(e.object[e.start : e.end]) - - if PY2: - out = out.decode("utf-8") - - return out, e.end - - -codecs.register_error("werkzeug.url_quote", _codec_error_url_quote) - - -def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"): - """Convert a URI to an IRI. All valid UTF-8 characters are unquoted, - leaving all reserved and invalid characters quoted. If the URL has - a domain, it is decoded from Punycode. - - >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF") - 'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF' - - :param uri: The URI to convert. - :param charset: The encoding to encode unquoted bytes with. - :param errors: Error handler to use during ``bytes.encode``. By - default, invalid bytes are left quoted. - - .. versionchanged:: 0.15 - All reserved and invalid characters remain quoted. Previously, - only some reserved characters were preserved, and invalid bytes - were replaced instead of left quoted. - - .. 
versionadded:: 0.6 - """ - if isinstance(uri, tuple): - uri = url_unparse(uri) - - uri = url_parse(to_unicode(uri, charset)) - path = url_unquote(uri.path, charset, errors, _to_iri_unsafe) - query = url_unquote(uri.query, charset, errors, _to_iri_unsafe) - fragment = url_unquote(uri.fragment, charset, errors, _to_iri_unsafe) - return url_unparse((uri.scheme, uri.decode_netloc(), path, query, fragment)) - - -# reserved characters remain unquoted when quoting to URI -_to_uri_safe = ":/?#[]@!$&'()*+,;=%" - - -def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False): - """Convert an IRI to a URI. All non-ASCII and unsafe characters are - quoted. If the URL has a domain, it is encoded to Punycode. - - >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF') - 'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF' - - :param iri: The IRI to convert. - :param charset: The encoding of the IRI. - :param errors: Error handler to use during ``bytes.encode``. - :param safe_conversion: Return the URL unchanged if it only contains - ASCII characters and no whitespace. See the explanation below. - - There is a general problem with IRI conversion with some protocols - that are in violation of the URI specification. Consider the - following two IRIs:: - - magnet:?xt=uri:whatever - itms-services://?action=download-manifest - - After parsing, we don't know if the scheme requires the ``//``, - which is dropped if empty, but conveys different meanings in the - final URL if it's present or not. In this case, you can use - ``safe_conversion``, which will return the URL unchanged if it only - contains ASCII characters and no whitespace. This can result in a - URI with unquoted characters if it was not already quoted correctly, - but preserves the URL's semantics. Werkzeug uses this for the - ``Location`` header for redirects. - - .. versionchanged:: 0.15 - All reserved characters remain unquoted. Previously, only some - reserved characters were left unquoted. - - .. versionchanged:: 0.9.6 - The ``safe_conversion`` parameter was added. - - .. versionadded:: 0.6 - """ - if isinstance(iri, tuple): - iri = url_unparse(iri) - - if safe_conversion: - # If we're not sure if it's safe to convert the URL, and it only - # contains ASCII characters, return it unconverted. - try: - native_iri = to_native(iri) - ascii_iri = native_iri.encode("ascii") - - # Only return if it doesn't have whitespace. (Why?) - if len(ascii_iri.split()) == 1: - return native_iri - except UnicodeError: - pass - - iri = url_parse(to_unicode(iri, charset, errors)) - path = url_quote(iri.path, charset, errors, _to_uri_safe) - query = url_quote(iri.query, charset, errors, _to_uri_safe) - fragment = url_quote(iri.fragment, charset, errors, _to_uri_safe) - return to_native( - url_unparse((iri.scheme, iri.encode_netloc(), path, query, fragment)) - ) - - -def url_decode( - s, - charset="utf-8", - decode_keys=False, - include_empty=True, - errors="replace", - separator="&", - cls=None, -): - """ - Parse a querystring and return it as :class:`MultiDict`. There is a - difference in key decoding on different Python versions. On Python 3 - keys will always be fully decoded whereas on Python 2, keys will - remain bytestrings if they fit into ASCII. On 2.x keys can be forced - to be unicode by setting `decode_keys` to `True`. - - If the charset is set to `None` no unicode decoding will happen and - raw bytes will be returned. - - Per default a missing value for a key will default to an empty key. 
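- (For example, decoding ``a=1&b`` yields ``b`` mapped to an empty
- string.)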
If - you don't want that behavior you can set `include_empty` to `False`. - - Per default encoding errors are ignored. If you want a different behavior - you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a - `HTTPUnicodeError` is raised. - - .. versionchanged:: 0.5 - In previous versions ";" and "&" could be used for url decoding. - This changed in 0.5 where only "&" is supported. If you want to - use ";" instead a different `separator` can be provided. - - The `cls` parameter was added. - - :param s: a string with the query string to decode. - :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. - :param decode_keys: Used on Python 2.x to control whether keys should - be forced to be unicode objects. If set to `True` - then keys will be unicode in all cases. Otherwise, - they remain `str` if they fit into ASCII. - :param include_empty: Set to `False` if you don't want empty values to - appear in the dict. - :param errors: the decoding error behavior. - :param separator: the pair separator to be used, defaults to ``&`` - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - """ - if cls is None: - from .datastructures import MultiDict - - cls = MultiDict - if isinstance(s, text_type) and not isinstance(separator, text_type): - separator = separator.decode(charset or "ascii") - elif isinstance(s, bytes) and not isinstance(separator, bytes): - separator = separator.encode(charset or "ascii") - return cls( - _url_decode_impl( - s.split(separator), charset, decode_keys, include_empty, errors - ) - ) - - -def url_decode_stream( - stream, - charset="utf-8", - decode_keys=False, - include_empty=True, - errors="replace", - separator="&", - cls=None, - limit=None, - return_iterator=False, -): - """Works like :func:`url_decode` but decodes a stream. The behavior - of stream and limit follows functions like - :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is - directly fed to the `cls` so you can consume the data while it's - parsed. - - .. versionadded:: 0.8 - - :param stream: a stream with the encoded querystring - :param charset: the charset of the query string. If set to `None` - no unicode decoding will take place. - :param decode_keys: Used on Python 2.x to control whether keys should - be forced to be unicode objects. If set to `True`, - keys will be unicode in all cases. Otherwise, they - remain `str` if they fit into ASCII. - :param include_empty: Set to `False` if you don't want empty values to - appear in the dict. - :param errors: the decoding error behavior. - :param separator: the pair separator to be used, defaults to ``&`` - :param cls: an optional dict class to use. If this is not specified - or `None` the default :class:`MultiDict` is used. - :param limit: the content length of the URL data. Not necessary if - a limited stream is provided. 
- :param return_iterator: if set to `True` the `cls` argument is ignored - and an iterator over all decoded pairs is - returned - """ - from .wsgi import make_chunk_iter - - pair_iter = make_chunk_iter(stream, separator, limit) - decoder = _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors) - - if return_iterator: - return decoder - - if cls is None: - from .datastructures import MultiDict - - cls = MultiDict - - return cls(decoder) - - -def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors): - for pair in pair_iter: - if not pair: - continue - s = make_literal_wrapper(pair) - equal = s("=") - if equal in pair: - key, value = pair.split(equal, 1) - else: - if not include_empty: - continue - key = pair - value = s("") - key = url_unquote_plus(key, charset, errors) - if charset is not None and PY2 and not decode_keys: - key = try_coerce_native(key) - yield key, url_unquote_plus(value, charset, errors) - - -def url_encode( - obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&" -): - """URL encode a dict/`MultiDict`. If a value is `None` it will not appear - in the result string. Per default only values are encoded into the target - charset strings. If `encode_keys` is set to ``True`` unicode keys are - supported too. - - If `sort` is set to `True` the items are sorted by `key` or the default - sorting algorithm. - - .. versionadded:: 0.5 - `sort`, `key`, and `separator` were added. - - :param obj: the object to encode into a query string. - :param charset: the charset of the query string. - :param encode_keys: set to `True` if you have unicode keys. (Ignored on - Python 3.x) - :param sort: set to `True` if you want parameters to be sorted by `key`. - :param separator: the separator to be used for the pairs. - :param key: an optional function to be used for sorting. For more details - check out the :func:`sorted` documentation. - """ - separator = to_native(separator, "ascii") - return separator.join(_url_encode_impl(obj, charset, encode_keys, sort, key)) - - -def url_encode_stream( - obj, - stream=None, - charset="utf-8", - encode_keys=False, - sort=False, - key=None, - separator=b"&", -): - """Like :meth:`url_encode` but writes the results to a stream - object. If the stream is `None` a generator over all encoded - pairs is returned. - - .. versionadded:: 0.8 - - :param obj: the object to encode into a query string. - :param stream: a stream to write the encoded object into or `None` if - an iterator over the encoded pairs should be returned. In - that case the separator argument is ignored. - :param charset: the charset of the query string. - :param encode_keys: set to `True` if you have unicode keys. (Ignored on - Python 3.x) - :param sort: set to `True` if you want parameters to be sorted by `key`. - :param separator: the separator to be used for the pairs. - :param key: an optional function to be used for sorting. For more details - check out the :func:`sorted` documentation. - """ - separator = to_native(separator, "ascii") - gen = _url_encode_impl(obj, charset, encode_keys, sort, key) - if stream is None: - return gen - for idx, chunk in enumerate(gen): - if idx: - stream.write(separator) - stream.write(chunk) - - -def url_join(base, url, allow_fragments=True): - """Join a base URL and a possibly relative URL to form an absolute - interpretation of the latter. - - :param base: the base URL for the join operation. - :param url: the URL to join. - :param allow_fragments: indicates whether fragments should be allowed. 
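
Before url_join's implementation below, a sketch of the query-string pair documented above; for simple mappings url_encode and url_decode are inverses:

```
from werkzeug.urls import url_decode, url_encode

qs = url_encode({"name": u"J\xfcrgen", "page": 2}, sort=True)
print(qs)                # name=J%C3%BCrgen&page=2

args = url_decode(qs)    # returns a MultiDict
print(args["name"])      # Jürgen
print(args.get("page"))  # '2' (values always come back as strings)
```
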
- """ - if isinstance(base, tuple): - base = url_unparse(base) - if isinstance(url, tuple): - url = url_unparse(url) - - base, url = normalize_string_tuple((base, url)) - s = make_literal_wrapper(base) - - if not base: - return url - if not url: - return base - - bscheme, bnetloc, bpath, bquery, bfragment = url_parse( - base, allow_fragments=allow_fragments - ) - scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments) - if scheme != bscheme: - return url - if netloc: - return url_unparse((scheme, netloc, path, query, fragment)) - netloc = bnetloc - - if path[:1] == s("/"): - segments = path.split(s("/")) - elif not path: - segments = bpath.split(s("/")) - if not query: - query = bquery - else: - segments = bpath.split(s("/"))[:-1] + path.split(s("/")) - - # If the rightmost part is "./" we want to keep the slash but - # remove the dot. - if segments[-1] == s("."): - segments[-1] = s("") - - # Resolve ".." and "." - segments = [segment for segment in segments if segment != s(".")] - while 1: - i = 1 - n = len(segments) - 1 - while i < n: - if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")): - del segments[i - 1 : i + 1] - break - i += 1 - else: - break - - # Remove trailing ".." if the URL is absolute - unwanted_marker = [s(""), s("..")] - while segments[:2] == unwanted_marker: - del segments[1] - - path = s("/").join(segments) - return url_unparse((scheme, netloc, path, query, fragment)) - - -class Href(object): - """Implements a callable that constructs URLs with the given base. The - function can be called with any number of positional and keyword - arguments which than are used to assemble the URL. Works with URLs - and posix paths. - - Positional arguments are appended as individual segments to - the path of the URL: - - >>> href = Href('/foo') - >>> href('bar', 23) - '/foo/bar/23' - >>> href('foo', bar=23) - '/foo/foo?bar=23' - - If any of the arguments (positional or keyword) evaluates to `None` it - will be skipped. If no keyword arguments are given the last argument - can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass), - otherwise the keyword arguments are used for the query parameters, cutting - off the first trailing underscore of the parameter name: - - >>> href(is_=42) - '/foo?is=42' - >>> href({'foo': 'bar'}) - '/foo?foo=bar' - - Combining of both methods is not allowed: - - >>> href({'foo': 'bar'}, bar=42) - Traceback (most recent call last): - ... - TypeError: keyword arguments and query-dicts can't be combined - - Accessing attributes on the href object creates a new href object with - the attribute name as prefix: - - >>> bar_href = href.bar - >>> bar_href("blub") - '/foo/bar/blub' - - If `sort` is set to `True` the items are sorted by `key` or the default - sorting algorithm: - - >>> href = Href("/", sort=True) - >>> href(a=1, b=2, c=3) - '/?a=1&b=2&c=3' - - .. versionadded:: 0.5 - `sort` and `key` were added. 
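
Href, whose doctests appear above, assembles each URL with url_join; a quick sketch of url_join itself, which resolves a possibly relative URL against a base much like urllib.parse.urljoin:

```
from werkzeug.urls import url_join

print(url_join("http://example.com/a/b", "c"))      # http://example.com/a/c
print(url_join("http://example.com/a/b/", "../x"))  # http://example.com/a/x
print(url_join("http://example.com/a/b", "/abs"))   # http://example.com/abs
```
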
- """ - - def __init__(self, base="./", charset="utf-8", sort=False, key=None): - if not base: - base = "./" - self.base = base - self.charset = charset - self.sort = sort - self.key = key - - def __getattr__(self, name): - if name[:2] == "__": - raise AttributeError(name) - base = self.base - if base[-1:] != "/": - base += "/" - return Href(url_join(base, name), self.charset, self.sort, self.key) - - def __call__(self, *path, **query): - if path and isinstance(path[-1], dict): - if query: - raise TypeError("keyword arguments and query-dicts can't be combined") - query, path = path[-1], path[:-1] - elif query: - query = dict( - [(k.endswith("_") and k[:-1] or k, v) for k, v in query.items()] - ) - path = "/".join( - [ - to_unicode(url_quote(x, self.charset), "ascii") - for x in path - if x is not None - ] - ).lstrip("/") - rv = self.base - if path: - if not rv.endswith("/"): - rv += "/" - rv = url_join(rv, "./" + path) - if query: - rv += "?" + to_unicode( - url_encode(query, self.charset, sort=self.sort, key=self.key), "ascii" - ) - return to_native(rv) diff --git a/venv/lib/python3.7/site-packages/werkzeug/useragents.py b/venv/lib/python3.7/site-packages/werkzeug/useragents.py deleted file mode 100644 index 74f2fa4..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/useragents.py +++ /dev/null @@ -1,202 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.useragents - ~~~~~~~~~~~~~~~~~~~ - - This module provides a helper to inspect user agent strings. This module - is far from complete but should work for most of the currently available - browsers. - - - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import re - - -class UserAgentParser(object): - """A simple user agent parser. Used by the `UserAgent`.""" - - platforms = ( - (" cros ", "chromeos"), - ("iphone|ios", "iphone"), - ("ipad", "ipad"), - (r"darwin|mac|os\s*x", "macos"), - ("win", "windows"), - (r"android", "android"), - ("netbsd", "netbsd"), - ("openbsd", "openbsd"), - ("freebsd", "freebsd"), - ("dragonfly", "dragonflybsd"), - ("(sun|i86)os", "solaris"), - (r"x11|lin(\b|ux)?", "linux"), - (r"nintendo\s+wii", "wii"), - ("irix", "irix"), - ("hp-?ux", "hpux"), - ("aix", "aix"), - ("sco|unix_sv", "sco"), - ("bsd", "bsd"), - ("amiga", "amiga"), - ("blackberry|playbook", "blackberry"), - ("symbian", "symbian"), - ) - browsers = ( - ("googlebot", "google"), - ("msnbot", "msn"), - ("yahoo", "yahoo"), - ("ask jeeves", "ask"), - (r"aol|america\s+online\s+browser", "aol"), - (r"opera|opr", "opera"), - ("edge", "edge"), - ("chrome|crios", "chrome"), - ("seamonkey", "seamonkey"), - ("firefox|firebird|phoenix|iceweasel", "firefox"), - ("galeon", "galeon"), - ("safari|version", "safari"), - ("webkit", "webkit"), - ("camino", "camino"), - ("konqueror", "konqueror"), - ("k-meleon", "kmeleon"), - ("netscape", "netscape"), - (r"msie|microsoft\s+internet\s+explorer|trident/.+? rv:", "msie"), - ("lynx", "lynx"), - ("links", "links"), - ("Baiduspider", "baidu"), - ("bingbot", "bing"), - ("mozilla", "mozilla"), - ) - - _browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?" 
- _language_re = re.compile( - r"(?:;\s*|\s+)(\b\w{2}\b(?:-\b\w{2}\b)?)\s*;|" - r"(?:\(|\[|;)\s*(\b\w{2}\b(?:-\b\w{2}\b)?)\s*(?:\]|\)|;)" - ) - - def __init__(self): - self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms] - self.browsers = [ - (b, re.compile(self._browser_version_re % a, re.I)) - for a, b in self.browsers - ] - - def __call__(self, user_agent): - for platform, regex in self.platforms: # noqa: B007 - match = regex.search(user_agent) - if match is not None: - break - else: - platform = None - for browser, regex in self.browsers: # noqa: B007 - match = regex.search(user_agent) - if match is not None: - version = match.group(1) - break - else: - browser = version = None - match = self._language_re.search(user_agent) - if match is not None: - language = match.group(1) or match.group(2) - else: - language = None - return platform, browser, version, language - - -class UserAgent(object): - """Represents a user agent. Pass it a WSGI environment or a user agent - string and you can inspect some of the details from the user agent - string via the attributes. The following attributes exist: - - .. attribute:: string - - the raw user agent string - - .. attribute:: platform - - the browser platform. The following platforms are currently - recognized: - - - `aix` - - `amiga` - - `android` - - `blackberry` - - `bsd` - - `chromeos` - - `dragonflybsd` - - `freebsd` - - `hpux` - - `ipad` - - `iphone` - - `irix` - - `linux` - - `macos` - - `netbsd` - - `openbsd` - - `sco` - - `solaris` - - `symbian` - - `wii` - - `windows` - - .. attribute:: browser - - the name of the browser. The following browsers are currently - recognized: - - - `aol` * - - `ask` * - - `baidu` * - - `bing` * - - `camino` - - `chrome` - - `edge` - - `firefox` - - `galeon` - - `google` * - - `kmeleon` - - `konqueror` - - `links` - - `lynx` - - `mozilla` - - `msie` - - `msn` - - `netscape` - - `opera` - - `safari` - - `seamonkey` - - `webkit` - - `yahoo` * - - (Browsers marked with a star (``*``) are crawlers.) - - .. attribute:: version - - the version of the browser - - .. attribute:: language - - the language of the browser - """ - - _parser = UserAgentParser() - - def __init__(self, environ_or_string): - if isinstance(environ_or_string, dict): - environ_or_string = environ_or_string.get("HTTP_USER_AGENT", "") - self.string = environ_or_string - self.platform, self.browser, self.version, self.language = self._parser( - environ_or_string - ) - - def to_header(self): - return self.string - - def __str__(self): - return self.string - - def __nonzero__(self): - return bool(self.browser) - - __bool__ = __nonzero__ - - def __repr__(self): - return "<%s %r/%s>" % (self.__class__.__name__, self.browser, self.version) diff --git a/venv/lib/python3.7/site-packages/werkzeug/utils.py b/venv/lib/python3.7/site-packages/werkzeug/utils.py deleted file mode 100644 index 59c6f27..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/utils.py +++ /dev/null @@ -1,778 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.utils - ~~~~~~~~~~~~~~ - - This module implements various utilities for WSGI applications. Most of - them are used by the request and response wrappers but especially for - middleware development it makes sense to use them without the wrappers. 
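
Before the utils module continues below, a usage sketch for the UserAgent class whose parser tables were just deleted (werkzeug.useragents in pre-2.x releases):

```
from werkzeug.useragents import UserAgent

ua = UserAgent(
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36"
)
print(ua.platform)  # linux
print(ua.browser)   # chrome
print(ua.version)   # 79.0.3945.130
```

It also accepts a WSGI environ dict, in which case HTTP_USER_AGENT is read from it.
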
- - :copyright: 2007 Pallets - :license: BSD-3-Clause -""" -import codecs -import os -import pkgutil -import re -import sys - -from ._compat import iteritems -from ._compat import PY2 -from ._compat import reraise -from ._compat import string_types -from ._compat import text_type -from ._compat import unichr -from ._internal import _DictAccessorProperty -from ._internal import _missing -from ._internal import _parse_signature - -try: - from html.entities import name2codepoint -except ImportError: - from htmlentitydefs import name2codepoint - - -_format_re = re.compile(r"\$(?:(%s)|\{(%s)\})" % (("[a-zA-Z_][a-zA-Z0-9_]*",) * 2)) -_entity_re = re.compile(r"&([^;]+);") -_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]") -_windows_device_files = ( - "CON", - "AUX", - "COM1", - "COM2", - "COM3", - "COM4", - "LPT1", - "LPT2", - "LPT3", - "PRN", - "NUL", -) - - -class cached_property(property): - """A decorator that converts a function into a lazy property. The - function wrapped is called the first time to retrieve the result - and then that calculated result is used the next time you access - the value:: - - class Foo(object): - - @cached_property - def foo(self): - # calculate something important here - return 42 - - The class has to have a `__dict__` in order for this property to - work. - """ - - # implementation detail: A subclass of python's builtin property - # decorator, we override __get__ to check for a cached value. If one - # chooses to invoke __get__ by hand the property will still work as - # expected because the lookup logic is replicated in __get__ for - # manual invocation. - - def __init__(self, func, name=None, doc=None): - self.__name__ = name or func.__name__ - self.__module__ = func.__module__ - self.__doc__ = doc or func.__doc__ - self.func = func - - def __set__(self, obj, value): - obj.__dict__[self.__name__] = value - - def __get__(self, obj, type=None): - if obj is None: - return self - value = obj.__dict__.get(self.__name__, _missing) - if value is _missing: - value = self.func(obj) - obj.__dict__[self.__name__] = value - return value - - -def invalidate_cached_property(obj, name): - """Invalidates the cache for a :class:`cached_property`: - - >>> class Test(object): - ... @cached_property - ... def magic_number(self): - ... print("recalculating...") - ... return 42 - ... - >>> var = Test() - >>> var.magic_number - recalculating... - 42 - >>> var.magic_number - 42 - >>> invalidate_cached_property(var, "magic_number") - >>> var.magic_number - recalculating... - 42 - - You must pass the name of the cached property as the second argument. - """ - if not isinstance(getattr(obj.__class__, name, None), cached_property): - raise TypeError( - "Attribute {} of object {} is not a cached_property, " - "cannot be invalidated".format(name, obj) - ) - obj.__dict__[name] = _missing - - -class environ_property(_DictAccessorProperty): - """Maps request attributes to environment variables. This works not only - for the Werzeug request object, but also any other class with an - environ attribute: - - >>> class Test(object): - ... environ = {'key': 'value'} - ... test = environ_property('key') - >>> var = Test() - >>> var.test - 'value' - - If you pass it a second value it's used as default if the key does not - exist, the third one can be a converter that takes a value and converts - it. If it raises :exc:`ValueError` or :exc:`TypeError` the default value - is used. If no default value is provided `None` is used. - - Per default the property is read only. 
You have to explicitly enable it - by passing ``read_only=False`` to the constructor. - """ - - read_only = True - - def lookup(self, obj): - return obj.environ - - -class header_property(_DictAccessorProperty): - """Like `environ_property` but for headers.""" - - def lookup(self, obj): - return obj.headers - - -class HTMLBuilder(object): - """Helper object for HTML generation. - - Per default there are two instances of that class. The `html` one, and - the `xhtml` one for those two dialects. The class uses keyword parameters - and positional parameters to generate small snippets of HTML. - - Keyword parameters are converted to XML/SGML attributes, positional - arguments are used as children. Because Python accepts positional - arguments before keyword arguments it's a good idea to use a list with the - star-syntax for some children: - - >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ', - ... html.a('bar', href='bar.html')]) - u'
<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>
    ' - - This class works around some browser limitations and can not be used for - arbitrary SGML/XML generation. For that purpose lxml and similar - libraries exist. - - Calling the builder escapes the string passed: - - >>> html.p(html("")) - u'
<p>&lt;foo&gt;</p>
    ' - """ - - _entity_re = re.compile(r"&([^;]+);") - _entities = name2codepoint.copy() - _entities["apos"] = 39 - _empty_elements = { - "area", - "base", - "basefont", - "br", - "col", - "command", - "embed", - "frame", - "hr", - "img", - "input", - "keygen", - "isindex", - "link", - "meta", - "param", - "source", - "wbr", - } - _boolean_attributes = { - "selected", - "checked", - "compact", - "declare", - "defer", - "disabled", - "ismap", - "multiple", - "nohref", - "noresize", - "noshade", - "nowrap", - } - _plaintext_elements = {"textarea"} - _c_like_cdata = {"script", "style"} - - def __init__(self, dialect): - self._dialect = dialect - - def __call__(self, s): - return escape(s) - - def __getattr__(self, tag): - if tag[:2] == "__": - raise AttributeError(tag) - - def proxy(*children, **arguments): - buffer = "<" + tag - for key, value in iteritems(arguments): - if value is None: - continue - if key[-1] == "_": - key = key[:-1] - if key in self._boolean_attributes: - if not value: - continue - if self._dialect == "xhtml": - value = '="' + key + '"' - else: - value = "" - else: - value = '="' + escape(value) + '"' - buffer += " " + key + value - if not children and tag in self._empty_elements: - if self._dialect == "xhtml": - buffer += " />" - else: - buffer += ">" - return buffer - buffer += ">" - - children_as_string = "".join( - [text_type(x) for x in children if x is not None] - ) - - if children_as_string: - if tag in self._plaintext_elements: - children_as_string = escape(children_as_string) - elif tag in self._c_like_cdata and self._dialect == "xhtml": - children_as_string = ( - "/**/" - ) - buffer += children_as_string + "" - return buffer - - return proxy - - def __repr__(self): - return "<%s for %r>" % (self.__class__.__name__, self._dialect) - - -html = HTMLBuilder("html") -xhtml = HTMLBuilder("xhtml") - -# https://cgit.freedesktop.org/xdg/shared-mime-info/tree/freedesktop.org.xml.in -# https://www.iana.org/assignments/media-types/media-types.xhtml -# Types listed in the XDG mime info that have a charset in the IANA registration. -_charset_mimetypes = { - "application/ecmascript", - "application/javascript", - "application/sql", - "application/xml", - "application/xml-dtd", - "application/xml-external-parsed-entity", -} - - -def get_content_type(mimetype, charset): - """Returns the full content type string with charset for a mimetype. - - If the mimetype represents text, the charset parameter will be - appended, otherwise the mimetype is returned unchanged. - - :param mimetype: The mimetype to be used as content type. - :param charset: The charset to be appended for text mimetypes. - :return: The content type. - - .. versionchanged:: 0.15 - Any type that ends with ``+xml`` gets a charset, not just those - that start with ``application/``. Known text types such as - ``application/javascript`` are also given charsets. - """ - if ( - mimetype.startswith("text/") - or mimetype in _charset_mimetypes - or mimetype.endswith("+xml") - ): - mimetype += "; charset=" + charset - - return mimetype - - -def detect_utf_encoding(data): - """Detect which UTF encoding was used to encode the given bytes. - - The latest JSON standard (:rfc:`8259`) suggests that only UTF-8 is - accepted. Older documents allowed 8, 16, or 32. 16 and 32 can be big - or little endian. Some editors or libraries may prepend a BOM. - - :internal: - - :param data: Bytes in unknown UTF encoding. - :return: UTF encoding name - - .. 
versionadded:: 0.15 - """ - head = data[:4] - - if head[:3] == codecs.BOM_UTF8: - return "utf-8-sig" - - if b"\x00" not in head: - return "utf-8" - - if head in (codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE): - return "utf-32" - - if head[:2] in (codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE): - return "utf-16" - - if len(head) == 4: - if head[:3] == b"\x00\x00\x00": - return "utf-32-be" - - if head[::2] == b"\x00\x00": - return "utf-16-be" - - if head[1:] == b"\x00\x00\x00": - return "utf-32-le" - - if head[1::2] == b"\x00\x00": - return "utf-16-le" - - if len(head) == 2: - return "utf-16-be" if head.startswith(b"\x00") else "utf-16-le" - - return "utf-8" - - -def format_string(string, context): - """String-template format a string: - - >>> format_string('$foo and ${foo}s', dict(foo=42)) - '42 and 42s' - - This does not do any attribute lookup etc. For more advanced string - formattings have a look at the `werkzeug.template` module. - - :param string: the format string. - :param context: a dict with the variables to insert. - """ - - def lookup_arg(match): - x = context[match.group(1) or match.group(2)] - if not isinstance(x, string_types): - x = type(string)(x) - return x - - return _format_re.sub(lookup_arg, string) - - -def secure_filename(filename): - r"""Pass it a filename and it will return a secure version of it. This - filename can then safely be stored on a regular file system and passed - to :func:`os.path.join`. The filename returned is an ASCII only string - for maximum portability. - - On windows systems the function also makes sure that the file is not - named after one of the special device files. - - >>> secure_filename("My cool movie.mov") - 'My_cool_movie.mov' - >>> secure_filename("../../../etc/passwd") - 'etc_passwd' - >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt') - 'i_contain_cool_umlauts.txt' - - The function might return an empty filename. It's your responsibility - to ensure that the filename is unique and that you abort or - generate a random filename if the function returned an empty one. - - .. versionadded:: 0.5 - - :param filename: the filename to secure - """ - if isinstance(filename, text_type): - from unicodedata import normalize - - filename = normalize("NFKD", filename).encode("ascii", "ignore") - if not PY2: - filename = filename.decode("ascii") - for sep in os.path.sep, os.path.altsep: - if sep: - filename = filename.replace(sep, " ") - filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip( - "._" - ) - - # on nt a couple of special files are present in each folder. We - # have to ensure that the target file is not such a filename. In - # this case we prepend an underline - if ( - os.name == "nt" - and filename - and filename.split(".")[0].upper() in _windows_device_files - ): - filename = "_" + filename - - return filename - - -def escape(s): - """Replace special characters "&", "<", ">" and (") to HTML-safe sequences. - - There is a special handling for `None` which escapes to an empty string. - - .. versionchanged:: 0.9 - `quote` is now implicitly on. - - :param s: the string to escape. - :param quote: ignored. - """ - if s is None: - return "" - elif hasattr(s, "__html__"): - return text_type(s.__html__()) - - if not isinstance(s, string_types): - s = text_type(s) - - return ( - s.replace("&", "&") - .replace("<", "<") - .replace(">", ">") - .replace('"', """) - ) - - -def unescape(s): - """The reverse function of `escape`. This unescapes all the HTML - entities, not only the XML entities inserted by `escape`. 
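
A sketch of the escape/unescape pair described here, together with secure_filename from just above (all importable from werkzeug.utils in pre-2.x releases):

```
from werkzeug.utils import escape, secure_filename, unescape

s = escape(u'<em>"quoted" & escaped</em>')
print(s)            # &lt;em&gt;&quot;quoted&quot; &amp; escaped&lt;/em&gt;
print(unescape(s))  # <em>"quoted" & escaped</em>

# secure_filename strips path tricks before a name ever reaches the disk:
print(secure_filename("../../../etc/passwd"))  # etc_passwd
```
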
- - :param s: the string to unescape. - """ - - def handle_match(m): - name = m.group(1) - if name in HTMLBuilder._entities: - return unichr(HTMLBuilder._entities[name]) - try: - if name[:2] in ("#x", "#X"): - return unichr(int(name[2:], 16)) - elif name.startswith("#"): - return unichr(int(name[1:])) - except ValueError: - pass - return u"" - - return _entity_re.sub(handle_match, s) - - -def redirect(location, code=302, Response=None): - """Returns a response object (a WSGI application) that, if called, - redirects the client to the target location. Supported codes are - 301, 302, 303, 305, 307, and 308. 300 is not supported because - it's not a real redirect and 304 because it's the answer for a - request with a request with defined If-Modified-Since headers. - - .. versionadded:: 0.6 - The location can now be a unicode string that is encoded using - the :func:`iri_to_uri` function. - - .. versionadded:: 0.10 - The class used for the Response object can now be passed in. - - :param location: the location the response should redirect to. - :param code: the redirect status code. defaults to 302. - :param class Response: a Response class to use when instantiating a - response. The default is :class:`werkzeug.wrappers.Response` if - unspecified. - """ - if Response is None: - from .wrappers import Response - - display_location = escape(location) - if isinstance(location, text_type): - # Safe conversion is necessary here as we might redirect - # to a broken URI scheme (for instance itms-services). - from .urls import iri_to_uri - - location = iri_to_uri(location, safe_conversion=True) - response = Response( - '\n' - "Redirecting...\n" - "
<h1>Redirecting...</h1>\n" - "<p>
    You should be redirected automatically to target URL: " - '%s. If not click the link.' - % (escape(location), display_location), - code, - mimetype="text/html", - ) - response.headers["Location"] = location - return response - - -def append_slash_redirect(environ, code=301): - """Redirects to the same URL but with a slash appended. The behavior - of this function is undefined if the path ends with a slash already. - - :param environ: the WSGI environment for the request that triggers - the redirect. - :param code: the status code for the redirect. - """ - new_path = environ["PATH_INFO"].strip("/") + "/" - query_string = environ.get("QUERY_STRING") - if query_string: - new_path += "?" + query_string - return redirect(new_path, code) - - -def import_string(import_name, silent=False): - """Imports an object based on a string. This is useful if you want to - use import paths as endpoints or something similar. An import path can - be specified either in dotted notation (``xml.sax.saxutils.escape``) - or with a colon as object delimiter (``xml.sax.saxutils:escape``). - - If `silent` is True the return value will be `None` if the import fails. - - :param import_name: the dotted name for the object to import. - :param silent: if set to `True` import errors are ignored and - `None` is returned instead. - :return: imported object - """ - # force the import name to automatically convert to strings - # __import__ is not able to handle unicode strings in the fromlist - # if the module is a package - import_name = str(import_name).replace(":", ".") - try: - try: - __import__(import_name) - except ImportError: - if "." not in import_name: - raise - else: - return sys.modules[import_name] - - module_name, obj_name = import_name.rsplit(".", 1) - module = __import__(module_name, globals(), locals(), [obj_name]) - try: - return getattr(module, obj_name) - except AttributeError as e: - raise ImportError(e) - - except ImportError as e: - if not silent: - reraise( - ImportStringError, ImportStringError(import_name, e), sys.exc_info()[2] - ) - - -def find_modules(import_path, include_packages=False, recursive=False): - """Finds all the modules below a package. This can be useful to - automatically import all views / controllers so that their metaclasses / - function decorators have a chance to register themselves on the - application. - - Packages are not returned unless `include_packages` is `True`. This can - also recursively list modules but in that case it will import all the - packages to get the correct load path of that module. - - :param import_path: the dotted name for the package to find child modules. - :param include_packages: set to `True` if packages should be returned, too. - :param recursive: set to `True` if recursion should happen. - :return: generator - """ - module = import_string(import_path) - path = getattr(module, "__path__", None) - if path is None: - raise ValueError("%r is not a package" % import_path) - basename = module.__name__ + "." - for _importer, modname, ispkg in pkgutil.iter_modules(path): - modname = basename + modname - if ispkg: - if include_packages: - yield modname - if recursive: - for item in find_modules(modname, include_packages, True): - yield item - else: - yield modname - - -def validate_arguments(func, args, kwargs, drop_extra=True): - """Checks if the function accepts the arguments and keyword arguments. 
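
import_string and find_modules, documented above, resolve dotted import paths at runtime; a small sketch using stdlib targets:

```
from werkzeug.utils import find_modules, import_string

# Dotted and colon-delimited notations are equivalent.
cls = import_string("collections:OrderedDict")
print(cls is import_string("collections.OrderedDict"))  # True

# silent=True turns a failed import into a None return value.
print(import_string("no.such.module", silent=True))     # None

# List the children of a package, e.g. to auto-import view modules:
print(sorted(find_modules("json")))
# ['json.decoder', 'json.encoder', 'json.scanner', 'json.tool']
```
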
- Returns a new ``(args, kwargs)`` tuple that can safely be passed to - the function without causing a `TypeError` because the function signature - is incompatible. If `drop_extra` is set to `True` (which is the default) - any extra positional or keyword arguments are dropped automatically. - - The exception raised provides three attributes: - - `missing` - A set of argument names that the function expected but where - missing. - - `extra` - A dict of keyword arguments that the function can not handle but - where provided. - - `extra_positional` - A list of values that where given by positional argument but the - function cannot accept. - - This can be useful for decorators that forward user submitted data to - a view function:: - - from werkzeug.utils import ArgumentValidationError, validate_arguments - - def sanitize(f): - def proxy(request): - data = request.values.to_dict() - try: - args, kwargs = validate_arguments(f, (request,), data) - except ArgumentValidationError: - raise BadRequest('The browser failed to transmit all ' - 'the data expected.') - return f(*args, **kwargs) - return proxy - - :param func: the function the validation is performed against. - :param args: a tuple of positional arguments. - :param kwargs: a dict of keyword arguments. - :param drop_extra: set to `False` if you don't want extra arguments - to be silently dropped. - :return: tuple in the form ``(args, kwargs)``. - """ - parser = _parse_signature(func) - args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5] - if missing: - raise ArgumentValidationError(tuple(missing)) - elif (extra or extra_positional) and not drop_extra: - raise ArgumentValidationError(None, extra, extra_positional) - return tuple(args), kwargs - - -def bind_arguments(func, args, kwargs): - """Bind the arguments provided into a dict. When passed a function, - a tuple of arguments and a dict of keyword arguments `bind_arguments` - returns a dict of names as the function would see it. This can be useful - to implement a cache decorator that uses the function arguments to build - the cache key based on the values of the arguments. - - :param func: the function the arguments should be bound for. - :param args: tuple of positional arguments. - :param kwargs: a dict of keyword arguments. - :return: a :class:`dict` of bound keyword arguments. - """ - ( - args, - kwargs, - missing, - extra, - extra_positional, - arg_spec, - vararg_var, - kwarg_var, - ) = _parse_signature(func)(args, kwargs) - values = {} - for (name, _has_default, _default), value in zip(arg_spec, args): - values[name] = value - if vararg_var is not None: - values[vararg_var] = tuple(extra_positional) - elif extra_positional: - raise TypeError("too many positional arguments") - if kwarg_var is not None: - multikw = set(extra) & set([x[0] for x in arg_spec]) - if multikw: - raise TypeError( - "got multiple values for keyword argument " + repr(next(iter(multikw))) - ) - values[kwarg_var] = extra - elif extra: - raise TypeError("got unexpected keyword argument " + repr(next(iter(extra)))) - return values - - -class ArgumentValidationError(ValueError): - - """Raised if :func:`validate_arguments` fails to validate""" - - def __init__(self, missing=None, extra=None, extra_positional=None): - self.missing = set(missing or ()) - self.extra = extra or {} - self.extra_positional = extra_positional or [] - ValueError.__init__( - self, - "function arguments invalid. 
(%d missing, %d additional)" - % (len(self.missing), len(self.extra) + len(self.extra_positional)), - ) - - -class ImportStringError(ImportError): - """Provides information about a failed :func:`import_string` attempt.""" - - #: String in dotted notation that failed to be imported. - import_name = None - #: Wrapped exception. - exception = None - - def __init__(self, import_name, exception): - self.import_name = import_name - self.exception = exception - - msg = ( - "import_string() failed for %r. Possible reasons are:\n\n" - "- missing __init__.py in a package;\n" - "- package or module path not included in sys.path;\n" - "- duplicated package or module name taking precedence in " - "sys.path;\n" - "- missing module, class, function or variable;\n\n" - "Debugged import:\n\n%s\n\n" - "Original exception:\n\n%s: %s" - ) - - name = "" - tracked = [] - for part in import_name.replace(":", ".").split("."): - name += (name and ".") + part - imported = import_string(name, silent=True) - if imported: - tracked.append((name, getattr(imported, "__file__", None))) - else: - track = ["- %r found in %r." % (n, i) for n, i in tracked] - track.append("- %r not found." % name) - msg = msg % ( - import_name, - "\n".join(track), - exception.__class__.__name__, - str(exception), - ) - break - - ImportError.__init__(self, msg) - - def __repr__(self): - return "<%s(%r, %r)>" % ( - self.__class__.__name__, - self.import_name, - self.exception, - ) diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__init__.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__init__.py deleted file mode 100644 index 56c764a..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__init__.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -werkzeug.wrappers -~~~~~~~~~~~~~~~~~ - -The wrappers are simple request and response objects which you can -subclass to do whatever you want them to do. The request object contains -the information transmitted by the client (webbrowser) and the response -object contains all the information sent back to the browser. - -An important detail is that the request object is created with the WSGI -environ and will act as high-level proxy whereas the response object is an -actual WSGI application. - -Like everything else in Werkzeug these objects will work correctly with -unicode data. Incoming form data parsed by the response object will be -decoded into an unicode object if possible and if it makes sense. 
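
The request/response split described above fits in one tiny WSGI app; create_environ is Werkzeug's test helper (not among the deleted files shown here, but shipped in the same package):

```
from werkzeug.test import create_environ
from werkzeug.wrappers import Request, Response

def app(environ, start_response):
    request = Request(environ)               # high-level proxy over the environ
    name = request.args.get("name", "World")
    response = Response("Hello %s!" % name)  # itself a WSGI application
    return response(environ, start_response)

# Exercise it without a server:
environ = create_environ("/greet?name=Ada", "http://localhost")
print(b"".join(app(environ, lambda status, headers: None)))  # b'Hello Ada!'
```
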
- -:copyright: 2007 Pallets -:license: BSD-3-Clause -""" -from .accept import AcceptMixin -from .auth import AuthorizationMixin -from .auth import WWWAuthenticateMixin -from .base_request import BaseRequest -from .base_response import BaseResponse -from .common_descriptors import CommonRequestDescriptorsMixin -from .common_descriptors import CommonResponseDescriptorsMixin -from .etag import ETagRequestMixin -from .etag import ETagResponseMixin -from .request import PlainRequest -from .request import Request -from .request import StreamOnlyMixin -from .response import Response -from .response import ResponseStream -from .response import ResponseStreamMixin -from .user_agent import UserAgentMixin diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index b061b45..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/accept.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/accept.cpython-37.pyc deleted file mode 100644 index 35fe4f1..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/accept.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/auth.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/auth.cpython-37.pyc deleted file mode 100644 index c31ed73..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/auth.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc deleted file mode 100644 index bb17e84..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_request.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc deleted file mode 100644 index 3c373c8..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/base_response.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc deleted file mode 100644 index 15ec49e..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/common_descriptors.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/cors.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/cors.cpython-37.pyc deleted file mode 100644 index 99e1ff8..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/cors.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/etag.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/etag.cpython-37.pyc deleted file mode 100644 index 3743acd..0000000 Binary files 
a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/etag.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/json.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/json.cpython-37.pyc deleted file mode 100644 index e37eb40..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/json.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/request.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/request.cpython-37.pyc deleted file mode 100644 index 8ecfc21..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/request.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/response.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/response.cpython-37.pyc deleted file mode 100644 index d0f9636..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/response.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc b/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc deleted file mode 100644 index d8f73ff..0000000 Binary files a/venv/lib/python3.7/site-packages/werkzeug/wrappers/__pycache__/user_agent.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/accept.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/accept.py deleted file mode 100644 index d0620a0..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/accept.py +++ /dev/null @@ -1,50 +0,0 @@ -from ..datastructures import CharsetAccept -from ..datastructures import LanguageAccept -from ..datastructures import MIMEAccept -from ..http import parse_accept_header -from ..utils import cached_property - - -class AcceptMixin(object): - """A mixin for classes with an :attr:`~BaseResponse.environ` attribute - to get all the HTTP accept headers as - :class:`~werkzeug.datastructures.Accept` objects (or subclasses - thereof). - """ - - @cached_property - def accept_mimetypes(self): - """List of mimetypes this client supports as - :class:`~werkzeug.datastructures.MIMEAccept` object. - """ - return parse_accept_header(self.environ.get("HTTP_ACCEPT"), MIMEAccept) - - @cached_property - def accept_charsets(self): - """List of charsets this client supports as - :class:`~werkzeug.datastructures.CharsetAccept` object. - """ - return parse_accept_header( - self.environ.get("HTTP_ACCEPT_CHARSET"), CharsetAccept - ) - - @cached_property - def accept_encodings(self): - """List of encodings this client accepts. Encodings in a HTTP term - are compression encodings such as gzip. For charsets have a look at - :attr:`accept_charset`. - """ - return parse_accept_header(self.environ.get("HTTP_ACCEPT_ENCODING")) - - @cached_property - def accept_languages(self): - """List of languages this client accepts as - :class:`~werkzeug.datastructures.LanguageAccept` object. - - .. versionchanged 0.5 - In previous versions this was a regular - :class:`~werkzeug.datastructures.Accept` object. 
- """ - return parse_accept_header( - self.environ.get("HTTP_ACCEPT_LANGUAGE"), LanguageAccept - ) diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/auth.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/auth.py deleted file mode 100644 index 714f755..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/auth.py +++ /dev/null @@ -1,33 +0,0 @@ -from ..http import parse_authorization_header -from ..http import parse_www_authenticate_header -from ..utils import cached_property - - -class AuthorizationMixin(object): - """Adds an :attr:`authorization` property that represents the parsed - value of the `Authorization` header as - :class:`~werkzeug.datastructures.Authorization` object. - """ - - @cached_property - def authorization(self): - """The `Authorization` object in parsed form.""" - header = self.environ.get("HTTP_AUTHORIZATION") - return parse_authorization_header(header) - - -class WWWAuthenticateMixin(object): - """Adds a :attr:`www_authenticate` property to a response object.""" - - @property - def www_authenticate(self): - """The `WWW-Authenticate` header in a parsed form.""" - - def on_update(www_auth): - if not www_auth and "www-authenticate" in self.headers: - del self.headers["www-authenticate"] - elif www_auth: - self.headers["WWW-Authenticate"] = www_auth.to_header() - - header = self.headers.get("www-authenticate") - return parse_www_authenticate_header(header, on_update) diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/base_request.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/base_request.py deleted file mode 100644 index 1f21db2..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/base_request.py +++ /dev/null @@ -1,673 +0,0 @@ -from functools import update_wrapper -from io import BytesIO - -from .._compat import to_native -from .._compat import to_unicode -from .._compat import wsgi_decoding_dance -from .._compat import wsgi_get_bytes -from ..datastructures import CombinedMultiDict -from ..datastructures import EnvironHeaders -from ..datastructures import ImmutableList -from ..datastructures import ImmutableMultiDict -from ..datastructures import iter_multi_items -from ..datastructures import MultiDict -from ..formparser import default_stream_factory -from ..formparser import FormDataParser -from ..http import parse_cookie -from ..http import parse_list_header -from ..http import parse_options_header -from ..urls import url_decode -from ..utils import cached_property -from ..utils import environ_property -from ..wsgi import get_content_length -from ..wsgi import get_current_url -from ..wsgi import get_host -from ..wsgi import get_input_stream - - -class BaseRequest(object): - """Very basic request object. This does not implement advanced stuff like - entity tag parsing or cache controls. The request object is created with - the WSGI environment as first argument and will add itself to the WSGI - environment as ``'werkzeug.request'`` unless it's created with - `populate_request` set to False. - - There are a couple of mixins available that add additional functionality - to the request object, there is also a class called `Request` which - subclasses `BaseRequest` and all the important mixins. - - It's a good idea to create a custom subclass of the :class:`BaseRequest` - and add missing functionality either via mixins or direct implementation. 
- Here an example for such subclasses:: - - from werkzeug.wrappers import BaseRequest, ETagRequestMixin - - class Request(BaseRequest, ETagRequestMixin): - pass - - Request objects are **read only**. As of 0.5 modifications are not - allowed in any place. Unlike the lower level parsing functions the - request object will use immutable objects everywhere possible. - - Per default the request object will assume all the text data is `utf-8` - encoded. Please refer to :doc:`the unicode chapter ` for more - details about customizing the behavior. - - Per default the request object will be added to the WSGI - environment as `werkzeug.request` to support the debugging system. - If you don't want that, set `populate_request` to `False`. - - If `shallow` is `True` the environment is initialized as shallow - object around the environ. Every operation that would modify the - environ in any way (such as consuming form data) raises an exception - unless the `shallow` attribute is explicitly set to `False`. This - is useful for middlewares where you don't want to consume the form - data by accident. A shallow request is not populated to the WSGI - environment. - - .. versionchanged:: 0.5 - read-only mode was enforced by using immutables classes for all - data. - """ - - #: the charset for the request, defaults to utf-8 - charset = "utf-8" - - #: the error handling procedure for errors, defaults to 'replace' - encoding_errors = "replace" - - #: the maximum content length. This is forwarded to the form data - #: parsing function (:func:`parse_form_data`). When set and the - #: :attr:`form` or :attr:`files` attribute is accessed and the - #: parsing fails because more than the specified value is transmitted - #: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised. - #: - #: Have a look at :ref:`dealing-with-request-data` for more details. - #: - #: .. versionadded:: 0.5 - max_content_length = None - - #: the maximum form field size. This is forwarded to the form data - #: parsing function (:func:`parse_form_data`). When set and the - #: :attr:`form` or :attr:`files` attribute is accessed and the - #: data in memory for post data is longer than the specified value a - #: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised. - #: - #: Have a look at :ref:`dealing-with-request-data` for more details. - #: - #: .. versionadded:: 0.5 - max_form_memory_size = None - - #: the class to use for `args` and `form`. The default is an - #: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports - #: multiple values per key. alternatively it makes sense to use an - #: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which - #: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict` - #: which is the fastest but only remembers the last key. It is also - #: possible to use mutable structures, but this is not recommended. - #: - #: .. versionadded:: 0.6 - parameter_storage_class = ImmutableMultiDict - - #: the type to be used for list values from the incoming WSGI environment. - #: By default an :class:`~werkzeug.datastructures.ImmutableList` is used - #: (for example for :attr:`access_list`). - #: - #: .. versionadded:: 0.6 - list_storage_class = ImmutableList - - #: The type to be used for dict values from the incoming WSGI - #: environment. (For example for :attr:`cookies`.) By default an - #: :class:`~werkzeug.datastructures.ImmutableMultiDict` is used. - #: - #: .. 
versionchanged:: 1.0.0 - #: Changed to ``ImmutableMultiDict`` to support multiple values. - #: - #: .. versionadded:: 0.6 - dict_storage_class = ImmutableMultiDict - - #: The form data parser that shoud be used. Can be replaced to customize - #: the form date parsing. - form_data_parser_class = FormDataParser - - #: Optionally a list of hosts that is trusted by this request. By default - #: all hosts are trusted which means that whatever the client sends the - #: host is will be accepted. - #: - #: Because `Host` and `X-Forwarded-Host` headers can be set to any value by - #: a malicious client, it is recommended to either set this property or - #: implement similar validation in the proxy (if application is being run - #: behind one). - #: - #: .. versionadded:: 0.9 - trusted_hosts = None - - #: Indicates whether the data descriptor should be allowed to read and - #: buffer up the input stream. By default it's enabled. - #: - #: .. versionadded:: 0.9 - disable_data_descriptor = False - - def __init__(self, environ, populate_request=True, shallow=False): - self.environ = environ - if populate_request and not shallow: - self.environ["werkzeug.request"] = self - self.shallow = shallow - - def __repr__(self): - # make sure the __repr__ even works if the request was created - # from an invalid WSGI environment. If we display the request - # in a debug session we don't want the repr to blow up. - args = [] - try: - args.append("'%s'" % to_native(self.url, self.url_charset)) - args.append("[%s]" % self.method) - except Exception: - args.append("(invalid WSGI environ)") - - return "<%s %s>" % (self.__class__.__name__, " ".join(args)) - - @property - def url_charset(self): - """The charset that is assumed for URLs. Defaults to the value - of :attr:`charset`. - - .. versionadded:: 0.6 - """ - return self.charset - - @classmethod - def from_values(cls, *args, **kwargs): - """Create a new request object based on the values provided. If - environ is given missing values are filled from there. This method is - useful for small scripts when you need to simulate a request from an URL. - Do not use this method for unittesting, there is a full featured client - object (:class:`Client`) that allows to create multipart requests, - support for cookies etc. - - This accepts the same options as the - :class:`~werkzeug.test.EnvironBuilder`. - - .. versionchanged:: 0.5 - This method now accepts the same arguments as - :class:`~werkzeug.test.EnvironBuilder`. Because of this the - `environ` parameter is now called `environ_overrides`. - - :return: request object - """ - from ..test import EnvironBuilder - - charset = kwargs.pop("charset", cls.charset) - kwargs["charset"] = charset - builder = EnvironBuilder(*args, **kwargs) - try: - return builder.get_request(cls) - finally: - builder.close() - - @classmethod - def application(cls, f): - """Decorate a function as responder that accepts the request as - the last argument. This works like the :func:`responder` - decorator but the function is passed the request object as the - last argument and the request object will be closed - automatically:: - - @Request.application - def my_wsgi_app(request): - return Response('Hello World!') - - As of Werkzeug 0.14 HTTP exceptions are automatically caught and - converted to responses instead of failing. 
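
from_values, described just above, is handy for quick REPL experiments (the docstring's own advice stands: use the full test Client for unit tests). A sketch:

```
from werkzeug.wrappers import Request

request = Request.from_values(
    "/submit?page=2",
    method="POST",
    data={"title": "hello"},
)
print(request.method)         # POST
print(request.args["page"])   # '2'
print(request.form["title"])  # 'hello'
```
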
- - :param f: the WSGI callable to decorate - :return: a new WSGI callable - """ - #: return a callable that wraps the -2nd argument with the request - #: and calls the function with all the arguments up to that one and - #: the request. The return value is then called with the latest - #: two arguments. This makes it possible to use this decorator for - #: both standalone WSGI functions as well as bound methods and - #: partially applied functions. - from ..exceptions import HTTPException - - def application(*args): - request = cls(args[-2]) - with request: - try: - resp = f(*args[:-2] + (request,)) - except HTTPException as e: - resp = e.get_response(args[-2]) - return resp(*args[-2:]) - - return update_wrapper(application, f) - - def _get_file_stream( - self, total_content_length, content_type, filename=None, content_length=None - ): - """Called to get a stream for the file upload. - - This must provide a file-like class with `read()`, `readline()` - and `seek()` methods that is both writeable and readable. - - The default implementation returns a temporary file if the total - content length is higher than 500KB. Because many browsers do not - provide a content length for the files only the total content - length matters. - - :param total_content_length: the total content length of all the - data in the request combined. This value - is guaranteed to be there. - :param content_type: the mimetype of the uploaded file. - :param filename: the filename of the uploaded file. May be `None`. - :param content_length: the length of this file. This value is usually - not provided because webbrowsers do not provide - this value. - """ - return default_stream_factory( - total_content_length=total_content_length, - filename=filename, - content_type=content_type, - content_length=content_length, - ) - - @property - def want_form_data_parsed(self): - """Returns True if the request method carries content. As of - Werkzeug 0.9 this will be the case if a content type is transmitted. - - .. versionadded:: 0.8 - """ - return bool(self.environ.get("CONTENT_TYPE")) - - def make_form_data_parser(self): - """Creates the form data parser. Instantiates the - :attr:`form_data_parser_class` with some parameters. - - .. versionadded:: 0.8 - """ - return self.form_data_parser_class( - self._get_file_stream, - self.charset, - self.encoding_errors, - self.max_form_memory_size, - self.max_content_length, - self.parameter_storage_class, - ) - - def _load_form_data(self): - """Method used internally to retrieve submitted data. After calling - this sets `form` and `files` on the request object to multi dicts - filled with the incoming form data. As a matter of fact the input - stream will be empty afterwards. You can also call this method to - force the parsing of the form data. - - .. versionadded:: 0.8 - """ - # abort early if we have already consumed the stream - if "form" in self.__dict__: - return - - _assert_not_shallow(self) - - if self.want_form_data_parsed: - content_type = self.environ.get("CONTENT_TYPE", "") - content_length = get_content_length(self.environ) - mimetype, options = parse_options_header(content_type) - parser = self.make_form_data_parser() - data = parser.parse( - self._get_stream_for_parsing(), mimetype, content_length, options - ) - else: - data = ( - self.stream, - self.parameter_storage_class(), - self.parameter_storage_class(), - ) - - # inject the values into the instance dict so that we bypass - # our cached_property non-data descriptor. 
- d = self.__dict__ - d["stream"], d["form"], d["files"] = data - - def _get_stream_for_parsing(self): - """This is the same as accessing :attr:`stream` with the difference - that if it finds cached data from calling :meth:`get_data` first it - will create a new stream out of the cached data. - - .. versionadded:: 0.9.3 - """ - cached_data = getattr(self, "_cached_data", None) - if cached_data is not None: - return BytesIO(cached_data) - return self.stream - - def close(self): - """Closes associated resources of this request object. This - closes all file handles explicitly. You can also use the request - object in a with statement which will automatically close it. - - .. versionadded:: 0.9 - """ - files = self.__dict__.get("files") - for _key, value in iter_multi_items(files or ()): - value.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close() - - @cached_property - def stream(self): - """ - If the incoming form data was not encoded with a known mimetype - the data is stored unmodified in this stream for consumption. Most - of the time it is a better idea to use :attr:`data` which will give - you that data as a string. The stream only returns the data once. - - Unlike :attr:`input_stream` this stream is properly guarded that you - can't accidentally read past the length of the input. Werkzeug will - internally always refer to this stream to read data which makes it - possible to wrap this object with a stream that does filtering. - - .. versionchanged:: 0.9 - This stream is now always available but might be consumed by the - form parser later on. Previously the stream was only set if no - parsing happened. - """ - _assert_not_shallow(self) - return get_input_stream(self.environ) - - input_stream = environ_property( - "wsgi.input", - """The WSGI input stream. - - In general it's a bad idea to use this one because you can - easily read past the boundary. Use the :attr:`stream` - instead.""", - ) - - @cached_property - def args(self): - """The parsed URL parameters (the part in the URL after the question - mark). - - By default an - :class:`~werkzeug.datastructures.ImmutableMultiDict` - is returned from this function. This can be changed by setting - :attr:`parameter_storage_class` to a different type. This might - be necessary if the order of the form data is important. - """ - return url_decode( - wsgi_get_bytes(self.environ.get("QUERY_STRING", "")), - self.url_charset, - errors=self.encoding_errors, - cls=self.parameter_storage_class, - ) - - @cached_property - def data(self): - """ - Contains the incoming request data as string in case it came with - a mimetype Werkzeug does not handle. - """ - - if self.disable_data_descriptor: - raise AttributeError("data descriptor is disabled") - # XXX: this should eventually be deprecated. - - # We trigger form data parsing first which means that the descriptor - # will not cache the data that would otherwise be .form or .files - # data. This restores the behavior that was there in Werkzeug - # before 0.9. New code should use :meth:`get_data` explicitly as - # this will make behavior explicit. - return self.get_data(parse_form_data=True) - - def get_data(self, cache=True, as_text=False, parse_form_data=False): - """This reads the buffered incoming data from the client into one - bytestring. By default this is cached but that behavior can be - changed by setting `cache` to `False`. 
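(Aside: a sketch of the caching behavior `get_data` describes, using `EnvironBuilder` from werkzeug's test helpers; the payload is illustrative.)

```python
# The body stream can only be read once, but get_data() caches the
# bytes so repeated calls keep working.
from werkzeug.test import EnvironBuilder

builder = EnvironBuilder(method="POST", data=b"payload")
req = builder.get_request()
assert req.get_data() == b"payload"  # read and cached
assert req.get_data() == b"payload"  # served from _cached_data
builder.close()
```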
- - Usually it's a bad idea to call this method without checking the - content length first as a client could send dozens of megabytes or more - to cause memory problems on the server. - - Note that if the form data was already parsed this method will not - return anything, as form data parsing does not cache the data like - this method does. To implicitly invoke the form data parsing function, - set `parse_form_data` to `True`. When this is done the return value - of this method will be an empty string if the form parser handles - the data. This generally is not necessary as, if the whole data is - cached (which is the default), the form parser will use the cached - data to parse the form data. In any case, be aware of checking - the content length first before calling this method - to avoid exhausting server memory. - - If `as_text` is set to `True` the return value will be a decoded - unicode string. - - .. versionadded:: 0.9 - """ - rv = getattr(self, "_cached_data", None) - if rv is None: - if parse_form_data: - self._load_form_data() - rv = self.stream.read() - if cache: - self._cached_data = rv - if as_text: - rv = rv.decode(self.charset, self.encoding_errors) - return rv - - @cached_property - def form(self): - """The form parameters. By default an - :class:`~werkzeug.datastructures.ImmutableMultiDict` - is returned from this function. This can be changed by setting - :attr:`parameter_storage_class` to a different type. This might - be necessary if the order of the form data is important. - - Please keep in mind that file uploads will not end up here, but instead - in the :attr:`files` attribute. - - .. versionchanged:: 0.9 - - Prior to Werkzeug 0.9 this would only contain form data for POST - and PUT requests. - """ - self._load_form_data() - return self.form - - @cached_property - def values(self): - """A :class:`werkzeug.datastructures.CombinedMultiDict` that combines - :attr:`args` and :attr:`form`.""" - args = [] - for d in self.args, self.form: - if not isinstance(d, MultiDict): - d = MultiDict(d) - args.append(d) - return CombinedMultiDict(args) - - @cached_property - def files(self): - """:class:`~werkzeug.datastructures.MultiDict` object containing - all uploaded files. Each key in :attr:`files` is the name from the - ``<input type="file" name="">``. Each value in :attr:`files` is a - Werkzeug :class:`~werkzeug.datastructures.FileStorage` object. - - It basically behaves like a standard file object you know from Python, - with the difference that it also has a - :meth:`~werkzeug.datastructures.FileStorage.save` function that can - store the file on the filesystem. - - Note that :attr:`files` will only contain data if the request method was - POST, PUT or PATCH and the ``<form>`` that posted to the request had - ``enctype="multipart/form-data"``. It will be empty otherwise. - - See the :class:`~werkzeug.datastructures.MultiDict` / - :class:`~werkzeug.datastructures.FileStorage` documentation for - more details about the data structures used. - """ - self._load_form_data() - return self.files - - @cached_property - def cookies(self): - """A :class:`dict` with the contents of all cookies transmitted with - the request.""" - return parse_cookie( - self.environ, - self.charset, - self.encoding_errors, - cls=self.dict_storage_class, - ) - - @cached_property - def headers(self): - """The headers from the WSGI environ as immutable - :class:`~werkzeug.datastructures.EnvironHeaders`. - """ - return EnvironHeaders(self.environ) - - @cached_property - def path(self): - """Requested path as unicode. This works a bit like the regular path - info in the WSGI environment but will always include a leading slash, - even if the URL root is accessed. - """ - raw_path = wsgi_decoding_dance( - self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors - ) - return "/" + raw_path.lstrip("/") - - @cached_property - def full_path(self): - """Requested path as unicode, including the query string.""" - return self.path + u"?" + to_unicode(self.query_string, self.url_charset) - - @cached_property - def script_root(self): - """The root path of the script without the trailing slash.""" - raw_path = wsgi_decoding_dance( - self.environ.get("SCRIPT_NAME") or "", self.charset, self.encoding_errors - ) - return raw_path.rstrip("/") - - @cached_property - def url(self): - """The reconstructed current URL as IRI. - See also: :attr:`trusted_hosts`. - """ - return get_current_url(self.environ, trusted_hosts=self.trusted_hosts) - - @cached_property - def base_url(self): - """Like :attr:`url` but without the query string. - See also: :attr:`trusted_hosts`. - """ - return get_current_url( - self.environ, strip_querystring=True, trusted_hosts=self.trusted_hosts - ) - - @cached_property - def url_root(self): - """The full URL root (with hostname), this is the application - root as IRI. - See also: :attr:`trusted_hosts`. - """ - return get_current_url(self.environ, True, trusted_hosts=self.trusted_hosts) - - @cached_property - def host_url(self): - """Just the host with scheme as IRI. - See also: :attr:`trusted_hosts`. - """ - return get_current_url( - self.environ, host_only=True, trusted_hosts=self.trusted_hosts - ) - - @cached_property - def host(self): - """Just the host including the port if available. - See also: :attr:`trusted_hosts`. - """ - return get_host(self.environ, trusted_hosts=self.trusted_hosts) - - query_string = environ_property( - "QUERY_STRING", - "", - read_only=True, - load_func=wsgi_get_bytes, - doc="The URL parameters as raw bytestring.", - ) - method = environ_property( - "REQUEST_METHOD", - "GET", - read_only=True, - load_func=lambda x: x.upper(), - doc="The request method. (For example ``'GET'`` or ``'POST'``).", - ) - - @cached_property - def access_route(self): - """If a forwarded header exists this is a list of all IP addresses - from the client IP to the last proxy server.
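(Aside: a sketch of the URL properties above on a simulated request; `example.com` and the paths are illustrative.)

```python
# path/script_root/url are rebuilt from PATH_INFO, SCRIPT_NAME and
# the host information in the environ.
from werkzeug.wrappers import Request

req = Request.from_values("/page?x=1", base_url="https://example.com/app")
assert req.path == "/page"
assert req.script_root == "/app"
assert req.url == "https://example.com/app/page?x=1"
```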
- """ - if "HTTP_X_FORWARDED_FOR" in self.environ: - return self.list_storage_class( - parse_list_header(self.environ["HTTP_X_FORWARDED_FOR"]) - ) - elif "REMOTE_ADDR" in self.environ: - return self.list_storage_class([self.environ["REMOTE_ADDR"]]) - return self.list_storage_class() - - @property - def remote_addr(self): - """The remote address of the client.""" - return self.environ.get("REMOTE_ADDR") - - remote_user = environ_property( - "REMOTE_USER", - doc="""If the server supports user authentication, and the - script is protected, this attribute contains the username the - user has authenticated as.""", - ) - scheme = environ_property( - "wsgi.url_scheme", - doc=""" - URL scheme (http or https). - - .. versionadded:: 0.7""", - ) - is_secure = property( - lambda self: self.environ["wsgi.url_scheme"] == "https", - doc="`True` if the request is secure.", - ) - is_multithread = environ_property( - "wsgi.multithread", - doc="""boolean that is `True` if the application is served by a - multithreaded WSGI server.""", - ) - is_multiprocess = environ_property( - "wsgi.multiprocess", - doc="""boolean that is `True` if the application is served by a - WSGI server that spawns multiple processes.""", - ) - is_run_once = environ_property( - "wsgi.run_once", - doc="""boolean that is `True` if the application will be - executed only once in a process lifetime. This is the case for - CGI for example, but it's not guaranteed that the execution only - happens one time.""", - ) - - -def _assert_not_shallow(request): - if request.shallow: - raise RuntimeError( - "A shallow request tried to consume form data. If you really" - " want to do that, set `shallow` to False." - ) diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/base_response.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/base_response.py deleted file mode 100644 index 00b9640..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/base_response.py +++ /dev/null @@ -1,700 +0,0 @@ -import warnings - -from .._compat import integer_types -from .._compat import string_types -from .._compat import text_type -from .._compat import to_bytes -from .._compat import to_native -from ..datastructures import Headers -from ..http import dump_cookie -from ..http import HTTP_STATUS_CODES -from ..http import remove_entity_headers -from ..urls import iri_to_uri -from ..urls import url_join -from ..utils import get_content_type -from ..wsgi import ClosingIterator -from ..wsgi import get_current_url - - -def _run_wsgi_app(*args): - """This function replaces itself to ensure that the test module is not - imported unless required. DO NOT USE! - """ - global _run_wsgi_app - from ..test import run_wsgi_app as _run_wsgi_app - - return _run_wsgi_app(*args) - - -def _warn_if_string(iterable): - """Helper for the response objects to check if the iterable returned - to the WSGI server is not a string. - """ - if isinstance(iterable, string_types): - warnings.warn( - "Response iterable was set to a string. This will appear to" - " work but means that the server will send the data to the" - " client one character at a time. 
This is almost never" - " intended behavior, use 'response.data' to assign strings" - " to the response object.", - stacklevel=2, - ) - - def _iter_encoded(iterable, charset): - for item in iterable: - if isinstance(item, text_type): - yield item.encode(charset) - else: - yield item - - def _clean_accept_ranges(accept_ranges): - if accept_ranges is True: - return "bytes" - elif accept_ranges is False: - return "none" - elif isinstance(accept_ranges, text_type): - return to_native(accept_ranges) - raise ValueError("Invalid accept_ranges value") - - class BaseResponse(object): - """Base response class. The most important fact about a response object - is that it's a regular WSGI application. It's initialized with a couple - of response parameters (headers, body, status code etc.) and will start a - valid WSGI response when called with the environ and start response - callable. - - Because it's a WSGI application itself, processing usually ends before the - actual response is sent to the server. This helps debugging systems - because they can catch all the exceptions before responses are started. - - Here is a small example WSGI application that takes advantage of the - response objects:: - - from werkzeug.wrappers import BaseResponse as Response - - def index(): - return Response('Index page') - - def application(environ, start_response): - path = environ.get('PATH_INFO') or '/' - if path == '/': - response = index() - else: - response = Response('Not Found', status=404) - return response(environ, start_response) - - Like :class:`BaseRequest`, this object is lacking a lot of functionality, - which is implemented in mixins. This gives you better control over the - actual API of your response objects, so you can create subclasses and add - custom functionality. A full-featured response object is available as - :class:`Response` which implements a couple of useful mixins. - - To enforce a new type on already existing responses you can use the - :meth:`force_type` method. This is useful if you're working with different - subclasses of response objects and you want to post-process them with a - known interface. - - By default the response object will assume all the text data is `utf-8` - encoded. Please refer to :doc:`the unicode chapter </unicode>` for more - details about customizing the behavior. - - Response can be any kind of iterable or string. If it's a string it's - considered an iterable with one item, which is the string passed. - Headers can be a list of tuples or a - :class:`~werkzeug.datastructures.Headers` object. - - Special note for `mimetype` and `content_type`: For most mime types - `mimetype` and `content_type` work the same, the difference affects - only 'text' mimetypes. If the mimetype passed with `mimetype` is a - mimetype starting with `text/`, the charset parameter of the response - object is appended to it. In contrast the `content_type` parameter is - always added as a header unmodified. - - .. versionchanged:: 0.5 - the `direct_passthrough` parameter was added. - - :param response: a string or response iterable. - :param status: a string with a status or an integer with the status code. - :param headers: a list of headers or a - :class:`~werkzeug.datastructures.Headers` object. - :param mimetype: the mimetype for the response. See notice above. - :param content_type: the content type for the response. See notice above.
- :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not - called before iteration which makes it - possible to pass special iterators through - unchanged (see :func:`wrap_file` for more - details.) - """ - - #: the charset of the response. - charset = "utf-8" - - #: the default status if none is provided. - default_status = 200 - - #: the default mimetype if none is provided. - default_mimetype = "text/plain" - - #: if set to `False` accessing properties on the response object will - #: not try to consume the response iterator and convert it into a list. - #: - #: .. versionadded:: 0.6.2 - #: - #: That attribute was previously called `implicit_seqence_conversion`. - #: (Notice the typo). If you did use this feature, you have to adapt - #: your code to the name change. - implicit_sequence_conversion = True - - #: Should this response object correct the location header to be RFC - #: conformant? This is true by default. - #: - #: .. versionadded:: 0.8 - autocorrect_location_header = True - - #: Should this response object automatically set the content-length - #: header if possible? This is true by default. - #: - #: .. versionadded:: 0.8 - automatically_set_content_length = True - - #: Warn if a cookie header exceeds this size. The default, 4093, should be - #: safely `supported by most browsers <cookie_>`_. A cookie larger than - #: this size will still be sent, but it may be ignored or handled - #: incorrectly by some browsers. Set to 0 to disable this check. - #: - #: .. versionadded:: 0.13 - #: - #: .. _`cookie`: http://browsercookielimits.squawky.net/ - max_cookie_size = 4093 - - def __init__( - self, - response=None, - status=None, - headers=None, - mimetype=None, - content_type=None, - direct_passthrough=False, - ): - if isinstance(headers, Headers): - self.headers = headers - elif not headers: - self.headers = Headers() - else: - self.headers = Headers(headers) - - if content_type is None: - if mimetype is None and "content-type" not in self.headers: - mimetype = self.default_mimetype - if mimetype is not None: - mimetype = get_content_type(mimetype, self.charset) - content_type = mimetype - if content_type is not None: - self.headers["Content-Type"] = content_type - if status is None: - status = self.default_status - if isinstance(status, integer_types): - self.status_code = status - else: - self.status = status - - self.direct_passthrough = direct_passthrough - self._on_close = [] - - # we set the response after the headers so that if a class changes - # the charset attribute, the data is set in the correct charset. - if response is None: - self.response = [] - elif isinstance(response, (text_type, bytes, bytearray)): - self.set_data(response) - else: - self.response = response - - def call_on_close(self, func): - """Adds a function to the internal list of functions that should - be called as part of closing down the response. Since 0.7 this - function also returns the function that was passed so that this - can be used as a decorator. - - .. versionadded:: 0.6 - """ - self._on_close.append(func) - return func - - def __repr__(self): - if self.is_sequence: - body_info = "%d bytes" % sum(map(len, self.iter_encoded())) - else: - body_info = "streamed" if self.is_streamed else "likely-streamed" - return "<%s %s [%s]>" % (self.__class__.__name__, body_info, self.status) - - @classmethod - def force_type(cls, response, environ=None): - """Enforce that the WSGI response is a response object of the current - type.
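(Aside: a sketch of the status handling in `__init__` above; both the int and string forms stay in sync.)

```python
# Status can be given as int or string; the other form is derived
# from HTTP_STATUS_CODES.
from werkzeug.wrappers import Response

resp = Response("created", status=201, mimetype="text/plain")
assert resp.status == "201 CREATED"
resp.status_code = 404
assert resp.status == "404 NOT FOUND"
```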
Werkzeug will use the :class:`BaseResponse` internally in many - situations like the exceptions. If you call :meth:`get_response` on an - exception you will get back a regular :class:`BaseResponse` object, even - if you are using a custom subclass. - - This method can enforce a given response type, and it will also - convert arbitrary WSGI callables into response objects if an environ - is provided:: - - # convert a Werkzeug response object into an instance of the - # MyResponseClass subclass. - response = MyResponseClass.force_type(response) - - # convert any WSGI application into a response object - response = MyResponseClass.force_type(response, environ) - - This is especially useful if you want to post-process responses in - the main dispatcher and use functionality provided by your subclass. - - Keep in mind that this will modify response objects in place if - possible! - - :param response: a response object or wsgi application. - :param environ: a WSGI environment object. - :return: a response object. - """ - if not isinstance(response, BaseResponse): - if environ is None: - raise TypeError( - "cannot convert WSGI application into response" - " objects without an environ" - ) - response = BaseResponse(*_run_wsgi_app(response, environ)) - response.__class__ = cls - return response - - @classmethod - def from_app(cls, app, environ, buffered=False): - """Create a new response object from an application output. This - works best if you pass it an application that returns a generator all - the time. Sometimes applications may use the `write()` callable - returned by the `start_response` function. This tries to resolve such - edge cases automatically. But if you don't get the expected output - you should set `buffered` to `True` which enforces buffering. - - :param app: the WSGI application to execute. - :param environ: the WSGI environment to execute against. - :param buffered: set to `True` to enforce buffering. - :return: a response object. - """ - return cls(*_run_wsgi_app(app, environ, buffered)) - - @property - def status_code(self): - """The HTTP status code as a number.""" - return self._status_code - - @status_code.setter - def status_code(self, code): - self._status_code = code - try: - self._status = "%d %s" % (code, HTTP_STATUS_CODES[code].upper()) - except KeyError: - self._status = "%d UNKNOWN" % code - - @property - def status(self): - """The HTTP status code as a string.""" - return self._status - - @status.setter - def status(self, value): - try: - self._status = to_native(value) - except AttributeError: - raise TypeError("Invalid status argument") - - try: - self._status_code = int(self._status.split(None, 1)[0]) - except ValueError: - self._status_code = 0 - self._status = "0 %s" % self._status - except IndexError: - raise ValueError("Empty status argument") - - def get_data(self, as_text=False): - """The string representation of the request body. Whenever you call - this property the request iterable is encoded and flattened. This - can lead to unwanted behavior if you stream big data. - - This behavior can be disabled by setting - :attr:`implicit_sequence_conversion` to `False`. - - If `as_text` is set to `True` the return value will be a decoded - unicode string. - - .. versionadded:: 0.9 - """ - self._ensure_sequence() - rv = b"".join(self.iter_encoded()) - if as_text: - rv = rv.decode(self.charset) - return rv - - def set_data(self, value): - """Sets a new string as response. The value set must be either a - unicode or bytestring. 
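(Aside: a sketch of the `set_data`/`get_data` round trip, including the automatic `Content-Length`.)

```python
# set_data stores encoded bytes and fixes up Content-Length.
from werkzeug.wrappers import Response

resp = Response()
resp.set_data(u"Hällo Wörld")
body = u"Hällo Wörld".encode("utf-8")
assert resp.headers["Content-Length"] == str(len(body))
assert resp.get_data() == body
assert resp.get_data(as_text=True) == u"Hällo Wörld"
```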
If a unicode string is set, it's encoded - automatically to the charset of the response (utf-8 by default). - - .. versionadded:: 0.9 - """ - # if a unicode string is set, it's encoded directly so that we - # can set the content length - if isinstance(value, text_type): - value = value.encode(self.charset) - else: - value = bytes(value) - self.response = [value] - if self.automatically_set_content_length: - self.headers["Content-Length"] = str(len(value)) - - data = property( - get_data, - set_data, - doc="A descriptor that calls :meth:`get_data` and :meth:`set_data`.", - ) - - def calculate_content_length(self): - """Returns the content length if available or `None` otherwise.""" - try: - self._ensure_sequence() - except RuntimeError: - return None - return sum(len(x) for x in self.iter_encoded()) - - def _ensure_sequence(self, mutable=False): - """This method can be called by methods that need a sequence. If - `mutable` is true, it will also ensure that the response sequence - is a standard Python list. - - .. versionadded:: 0.6 - """ - if self.is_sequence: - # if we need a mutable object, we ensure it's a list. - if mutable and not isinstance(self.response, list): - self.response = list(self.response) - return - if self.direct_passthrough: - raise RuntimeError( - "Attempted implicit sequence conversion but the" - " response object is in direct passthrough mode." - ) - if not self.implicit_sequence_conversion: - raise RuntimeError( - "The response object required the iterable to be a" - " sequence, but the implicit conversion was disabled." - " Call make_sequence() yourself." - ) - self.make_sequence() - - def make_sequence(self): - """Converts the response iterator into a list. By default this happens - automatically if required. If `implicit_sequence_conversion` is - disabled, this method is not automatically called and some properties - might raise exceptions. This also encodes all the items. - - .. versionadded:: 0.6 - """ - if not self.is_sequence: - # if we consume an iterable we have to ensure that the close - # method of the iterable is called if available when we tear - # down the response - close = getattr(self.response, "close", None) - self.response = list(self.iter_encoded()) - if close is not None: - self.call_on_close(close) - - def iter_encoded(self): - """Iterate over the response encoded with the encoding of the response. - If the response object is invoked as a WSGI application, the return - value of this method is used as the application iterator unless - :attr:`direct_passthrough` was activated. - """ - if __debug__: - _warn_if_string(self.response) - # Encode in a separate function so that self.response is fetched - # early. This allows us to wrap the response with the return - # value from get_app_iter or iter_encoded. - return _iter_encoded(self.response, self.charset) - - def set_cookie( - self, - key, - value="", - max_age=None, - expires=None, - path="/", - domain=None, - secure=False, - httponly=False, - samesite=None, - ): - """Sets a cookie. The parameters are the same as in the cookie `Morsel` - object in the Python standard library but it accepts unicode data, too. - - A warning is raised if the size of the cookie header exceeds - :attr:`max_cookie_size`, but the header will still be set. - - :param key: the key (name) of the cookie to be set. - :param value: the value of the cookie. - :param max_age: should be a number of seconds, or `None` (default) if - the cookie should last only as long as the client's - browser session.
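(Aside: a sketch of `set_cookie` as documented above; the cookie name and value are illustrative.)

```python
# set_cookie serializes through dump_cookie and warns past
# max_cookie_size (4093 bytes by default).
from werkzeug.wrappers import Response

resp = Response("ok")
resp.set_cookie("session", "abc123", max_age=3600, httponly=True,
                samesite="Lax")
value = resp.headers["Set-Cookie"]
assert "session=abc123" in value
assert "HttpOnly" in value
```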
- :param expires: should be a `datetime` object or UNIX timestamp. - :param path: limits the cookie to a given path, per default it will - span the whole domain. - :param domain: if you want to set a cross-domain cookie. For example, - ``domain=".example.com"`` will set a cookie that is - readable by the domain ``www.example.com``, - ``foo.example.com`` etc. Otherwise, a cookie will only - be readable by the domain that set it. - :param secure: If `True`, the cookie will only be available via HTTPS - :param httponly: disallow JavaScript to access the cookie. This is an - extension to the cookie standard and probably not - supported by all browsers. - :param samesite: Limits the scope of the cookie such that it will only - be attached to requests if those requests are - "same-site". - """ - self.headers.add( - "Set-Cookie", - dump_cookie( - key, - value=value, - max_age=max_age, - expires=expires, - path=path, - domain=domain, - secure=secure, - httponly=httponly, - charset=self.charset, - max_size=self.max_cookie_size, - samesite=samesite, - ), - ) - - def delete_cookie(self, key, path="/", domain=None): - """Delete a cookie. Fails silently if key doesn't exist. - - :param key: the key (name) of the cookie to be deleted. - :param path: if the cookie that should be deleted was limited to a - path, the path has to be defined here. - :param domain: if the cookie that should be deleted was limited to a - domain, that domain has to be defined here. - """ - self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain) - - @property - def is_streamed(self): - """If the response is streamed (the response is not an iterable with - a length information) this property is `True`. In this case streamed - means that there is no information about the number of iterations. - This is usually `True` if a generator is passed to the response object. - - This is useful for checking before applying some sort of post - filtering that should not take place for streamed responses. - """ - try: - len(self.response) - except (TypeError, AttributeError): - return True - return False - - @property - def is_sequence(self): - """If the iterator is buffered, this property will be `True`. A - response object will consider an iterator to be buffered if the - response attribute is a list or tuple. - - .. versionadded:: 0.6 - """ - return isinstance(self.response, (tuple, list)) - - def close(self): - """Close the wrapped response if possible. You can also use the object - in a with statement which will automatically close it. - - .. versionadded:: 0.9 - Can now be used in a with statement. - """ - if hasattr(self.response, "close"): - self.response.close() - for func in self._on_close: - func() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close() - - def freeze(self): - """Call this method if you want to make your response object ready for - being pickled. This buffers the generator if there is one. It will - also set the `Content-Length` header to the length of the body. - - .. versionchanged:: 0.6 - The `Content-Length` header is now set. - """ - # we explicitly set the length to a list of the *encoded* response - # iterator. Even if the implicit sequence conversion is disabled. - self.response = list(self.iter_encoded()) - self.headers["Content-Length"] = str(sum(map(len, self.response))) - - def get_wsgi_headers(self, environ): - """This is automatically called right before the response is started - and returns headers modified for the given environment. 
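(Aside: a sketch of `is_streamed`/`freeze` on a generator body; note that on the full `Response` class `freeze` also adds an ETag via the mixin further down this diff.)

```python
# A generator body is "streamed"; freeze() buffers and measures it.
from werkzeug.wrappers import Response

resp = Response(chunk for chunk in ["a", "b"])
assert resp.is_streamed
resp.freeze()
assert resp.is_sequence
assert resp.headers["Content-Length"] == "2"
```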
It returns a - copy of the headers from the response with some modifications applied - if necessary. - - For example the location header (if present) is joined with the root - URL of the environment. Also the content length is automatically set - to zero here for certain status codes. - - .. versionchanged:: 0.6 - Previously that function was called `fix_headers` and modified - the response object in place. Also since 0.6, IRIs in location - and content-location headers are handled properly. - - Also starting with 0.6, Werkzeug will attempt to set the content - length if it is able to figure it out on its own. This is the - case if all the strings in the response iterable are already - encoded and the iterable is buffered. - - :param environ: the WSGI environment of the request. - :return: returns a new :class:`~werkzeug.datastructures.Headers` - object. - """ - headers = Headers(self.headers) - location = None - content_location = None - content_length = None - status = self.status_code - - # iterate over the headers to find all values in one go. Because - # get_wsgi_headers is used each response that gives us a tiny - # speedup. - for key, value in headers: - ikey = key.lower() - if ikey == u"location": - location = value - elif ikey == u"content-location": - content_location = value - elif ikey == u"content-length": - content_length = value - - # make sure the location header is an absolute URL - if location is not None: - old_location = location - if isinstance(location, text_type): - # Safe conversion is necessary here as we might redirect - # to a broken URI scheme (for instance itms-services). - location = iri_to_uri(location, safe_conversion=True) - - if self.autocorrect_location_header: - current_url = get_current_url(environ, strip_querystring=True) - if isinstance(current_url, text_type): - current_url = iri_to_uri(current_url) - location = url_join(current_url, location) - if location != old_location: - headers["Location"] = location - - # make sure the content location is a URL - if content_location is not None and isinstance(content_location, text_type): - headers["Content-Location"] = iri_to_uri(content_location) - - if 100 <= status < 200 or status == 204: - # Per section 3.3.2 of RFC 7230, "a server MUST NOT send a - # Content-Length header field in any response with a status - # code of 1xx (Informational) or 204 (No Content)." - headers.remove("Content-Length") - elif status == 304: - remove_entity_headers(headers) - - # if we can determine the content length automatically, we - # should try to do that. But only if this does not involve - # flattening the iterator or encoding of unicode strings in - # the response. We however should not do that if we have a 304 - # response. - if ( - self.automatically_set_content_length - and self.is_sequence - and content_length is None - and status not in (204, 304) - and not (100 <= status < 200) - ): - try: - content_length = sum(len(to_bytes(x, "ascii")) for x in self.response) - except UnicodeError: - # aha, something non-bytestringy in there, too bad, we - # can't safely figure out the length of the response. - pass - else: - headers["Content-Length"] = str(content_length) - - return headers - - def get_app_iter(self, environ): - """Returns the application iterator for the given environ. Depending - on the request method and the current status code the return value - might be an empty response rather than the one from the response. 
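(Aside: a sketch of the `Location` autocorrection `get_wsgi_headers` performs; the URLs are illustrative.)

```python
# A relative Location header is made absolute against the request URL.
from werkzeug.test import create_environ
from werkzeug.wrappers import Response

resp = Response(status=302, headers={"Location": "/login"})
environ = create_environ("/dash", "https://example.com/")
headers = resp.get_wsgi_headers(environ)
assert headers["Location"] == "https://example.com/login"
```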
- - If the request method is `HEAD` or the status code is in a range - where the HTTP specification requires an empty response, an empty - iterable is returned. - - .. versionadded:: 0.6 - - :param environ: the WSGI environment of the request. - :return: a response iterable. - """ - status = self.status_code - if ( - environ["REQUEST_METHOD"] == "HEAD" - or 100 <= status < 200 - or status in (204, 304) - ): - iterable = () - elif self.direct_passthrough: - if __debug__: - _warn_if_string(self.response) - return self.response - else: - iterable = self.iter_encoded() - return ClosingIterator(iterable, self.close) - - def get_wsgi_response(self, environ): - """Returns the final WSGI response as tuple. The first item in - the tuple is the application iterator, the second the status and - the third the list of headers. The response returned is created - specially for the given environment. For example if the request - method in the WSGI environment is ``'HEAD'`` the response will - be empty and only the headers and status code will be present. - - .. versionadded:: 0.6 - - :param environ: the WSGI environment of the request. - :return: an ``(app_iter, status, headers)`` tuple. - """ - headers = self.get_wsgi_headers(environ) - app_iter = self.get_app_iter(environ) - return app_iter, self.status, headers.to_wsgi_list() - - def __call__(self, environ, start_response): - """Process this response as WSGI application. - - :param environ: the WSGI environment. - :param start_response: the response callable provided by the WSGI - server. - :return: an application iterator - """ - app_iter, status, headers = self.get_wsgi_response(environ) - start_response(status, headers) - return app_iter diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/common_descriptors.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/common_descriptors.py deleted file mode 100644 index f169959..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/common_descriptors.py +++ /dev/null @@ -1,341 +0,0 @@ -from datetime import datetime -from datetime import timedelta - -from .._compat import string_types -from ..datastructures import CallbackDict -from ..http import dump_age -from ..http import dump_csp_header -from ..http import dump_header -from ..http import dump_options_header -from ..http import http_date -from ..http import parse_age -from ..http import parse_csp_header -from ..http import parse_date -from ..http import parse_options_header -from ..http import parse_set_header -from ..utils import cached_property -from ..utils import environ_property -from ..utils import get_content_type -from ..utils import header_property -from ..wsgi import get_content_length - - -class CommonRequestDescriptorsMixin(object): - """A mixin for :class:`BaseRequest` subclasses. Request objects that - mix this class in will automatically get descriptors for a couple of - HTTP headers with automatic type conversion. - - .. versionadded:: 0.5 - """ - - content_type = environ_property( - "CONTENT_TYPE", - doc="""The Content-Type entity-header field indicates the media - type of the entity-body sent to the recipient or, in the case of - the HEAD method, the media type that would have been sent had - the request been a GET.""", - ) - - @cached_property - def content_length(self): - """The Content-Length entity-header field indicates the size of the - entity-body in bytes or, in the case of the HEAD method, the size of - the entity-body that would have been sent had the request been a - GET. 
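(Aside: a sketch of `get_app_iter`'s HEAD handling via `get_wsgi_response`.)

```python
# HEAD responses keep headers but send no body.
from werkzeug.test import create_environ
from werkzeug.wrappers import Response

resp = Response("payload")
environ = create_environ("/", method="HEAD")
app_iter, status, headers = resp.get_wsgi_response(environ)
assert status == "200 OK"
assert list(app_iter) == []
```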
- """ - return get_content_length(self.environ) - - content_encoding = environ_property( - "HTTP_CONTENT_ENCODING", - doc="""The Content-Encoding entity-header field is used as a - modifier to the media-type. When present, its value indicates - what additional content codings have been applied to the - entity-body, and thus what decoding mechanisms must be applied - in order to obtain the media-type referenced by the Content-Type - header field. - - .. versionadded:: 0.9""", - ) - content_md5 = environ_property( - "HTTP_CONTENT_MD5", - doc="""The Content-MD5 entity-header field, as defined in - RFC 1864, is an MD5 digest of the entity-body for the purpose of - providing an end-to-end message integrity check (MIC) of the - entity-body. (Note: a MIC is good for detecting accidental - modification of the entity-body in transit, but is not proof - against malicious attacks.) - - .. versionadded:: 0.9""", - ) - referrer = environ_property( - "HTTP_REFERER", - doc="""The Referer[sic] request-header field allows the client - to specify, for the server's benefit, the address (URI) of the - resource from which the Request-URI was obtained (the - "referrer", although the header field is misspelled).""", - ) - date = environ_property( - "HTTP_DATE", - None, - parse_date, - doc="""The Date general-header field represents the date and - time at which the message was originated, having the same - semantics as orig-date in RFC 822.""", - ) - max_forwards = environ_property( - "HTTP_MAX_FORWARDS", - None, - int, - doc="""The Max-Forwards request-header field provides a - mechanism with the TRACE and OPTIONS methods to limit the number - of proxies or gateways that can forward the request to the next - inbound server.""", - ) - - def _parse_content_type(self): - if not hasattr(self, "_parsed_content_type"): - self._parsed_content_type = parse_options_header( - self.environ.get("CONTENT_TYPE", "") - ) - - @property - def mimetype(self): - """Like :attr:`content_type`, but without parameters (eg, without - charset, type etc.) and always lowercase. For example if the content - type is ``text/HTML; charset=utf-8`` the mimetype would be - ``'text/html'``. - """ - self._parse_content_type() - return self._parsed_content_type[0].lower() - - @property - def mimetype_params(self): - """The mimetype parameters as dict. For example if the content - type is ``text/html; charset=utf-8`` the params would be - ``{'charset': 'utf-8'}``. - """ - self._parse_content_type() - return self._parsed_content_type[1] - - @cached_property - def pragma(self): - """The Pragma general-header field is used to include - implementation-specific directives that might apply to any recipient - along the request/response chain. All pragma directives specify - optional behavior from the viewpoint of the protocol; however, some - systems MAY require that behavior be consistent with the directives. - """ - return parse_set_header(self.environ.get("HTTP_PRAGMA", "")) - - -class CommonResponseDescriptorsMixin(object): - """A mixin for :class:`BaseResponse` subclasses. Response objects that - mix this class in will automatically get descriptors for a couple of - HTTP headers with automatic type conversion. 
- """ - - @property - def mimetype(self): - """The mimetype (content type without charset etc.)""" - ct = self.headers.get("content-type") - if ct: - return ct.split(";")[0].strip() - - @mimetype.setter - def mimetype(self, value): - self.headers["Content-Type"] = get_content_type(value, self.charset) - - @property - def mimetype_params(self): - """The mimetype parameters as dict. For example if the - content type is ``text/html; charset=utf-8`` the params would be - ``{'charset': 'utf-8'}``. - - .. versionadded:: 0.5 - """ - - def on_update(d): - self.headers["Content-Type"] = dump_options_header(self.mimetype, d) - - d = parse_options_header(self.headers.get("content-type", ""))[1] - return CallbackDict(d, on_update) - - location = header_property( - "Location", - doc="""The Location response-header field is used to redirect - the recipient to a location other than the Request-URI for - completion of the request or identification of a new - resource.""", - ) - age = header_property( - "Age", - None, - parse_age, - dump_age, - doc="""The Age response-header field conveys the sender's - estimate of the amount of time since the response (or its - revalidation) was generated at the origin server. - - Age values are non-negative decimal integers, representing time - in seconds.""", - ) - content_type = header_property( - "Content-Type", - doc="""The Content-Type entity-header field indicates the media - type of the entity-body sent to the recipient or, in the case of - the HEAD method, the media type that would have been sent had - the request been a GET.""", - ) - content_length = header_property( - "Content-Length", - None, - int, - str, - doc="""The Content-Length entity-header field indicates the size - of the entity-body, in decimal number of OCTETs, sent to the - recipient or, in the case of the HEAD method, the size of the - entity-body that would have been sent had the request been a - GET.""", - ) - content_location = header_property( - "Content-Location", - doc="""The Content-Location entity-header field MAY be used to - supply the resource location for the entity enclosed in the - message when that entity is accessible from a location separate - from the requested resource's URI.""", - ) - content_encoding = header_property( - "Content-Encoding", - doc="""The Content-Encoding entity-header field is used as a - modifier to the media-type. When present, its value indicates - what additional content codings have been applied to the - entity-body, and thus what decoding mechanisms must be applied - in order to obtain the media-type referenced by the Content-Type - header field.""", - ) - content_md5 = header_property( - "Content-MD5", - doc="""The Content-MD5 entity-header field, as defined in - RFC 1864, is an MD5 digest of the entity-body for the purpose of - providing an end-to-end message integrity check (MIC) of the - entity-body. 
(Note: a MIC is good for detecting accidental - modification of the entity-body in transit, but is not proof - against malicious attacks.)""", - ) - content_security_policy = header_property( - "Content-Security-Policy", - None, - parse_csp_header, - dump_csp_header, - doc="""The Content-Security-Policy header adds an additional layer of - security to help detect and mitigate certain types of attacks.""", - ) - content_security_policy_report_only = header_property( - "Content-Security-Policy-Report-Only", - None, - parse_csp_header, - dump_csp_header, - doc="""The Content-Security-Policy-Report-Only header adds a csp policy - that is not enforced but is reported thereby helping detect - certain types of attacks.""", - ) - date = header_property( - "Date", - None, - parse_date, - http_date, - doc="""The Date general-header field represents the date and - time at which the message was originated, having the same - semantics as orig-date in RFC 822.""", - ) - expires = header_property( - "Expires", - None, - parse_date, - http_date, - doc="""The Expires entity-header field gives the date/time after - which the response is considered stale. A stale cache entry may - not normally be returned by a cache.""", - ) - last_modified = header_property( - "Last-Modified", - None, - parse_date, - http_date, - doc="""The Last-Modified entity-header field indicates the date - and time at which the origin server believes the variant was - last modified.""", - ) - - @property - def retry_after(self): - """The Retry-After response-header field can be used with a - 503 (Service Unavailable) response to indicate how long the - service is expected to be unavailable to the requesting client. - - Time in seconds until expiration or date. - """ - value = self.headers.get("retry-after") - if value is None: - return - elif value.isdigit(): - return datetime.utcnow() + timedelta(seconds=int(value)) - return parse_date(value) - - @retry_after.setter - def retry_after(self, value): - if value is None: - if "retry-after" in self.headers: - del self.headers["retry-after"] - return - elif isinstance(value, datetime): - value = http_date(value) - else: - value = str(value) - self.headers["Retry-After"] = value - - def _set_property(name, doc=None): # noqa: B902 - def fget(self): - def on_update(header_set): - if not header_set and name in self.headers: - del self.headers[name] - elif header_set: - self.headers[name] = header_set.to_header() - - return parse_set_header(self.headers.get(name), on_update) - - def fset(self, value): - if not value: - del self.headers[name] - elif isinstance(value, string_types): - self.headers[name] = value - else: - self.headers[name] = dump_header(value) - - return property(fget, fset, doc=doc) - - vary = _set_property( - "Vary", - doc="""The Vary field value indicates the set of request-header - fields that fully determines, while the response is fresh, - whether a cache is permitted to use the response to reply to a - subsequent request without revalidation.""", - ) - content_language = _set_property( - "Content-Language", - doc="""The Content-Language entity-header field describes the - natural language(s) of the intended audience for the enclosed - entity. Note that this might not be equivalent to all the - languages used within the entity-body.""", - ) - allow = _set_property( - "Allow", - doc="""The Allow entity-header field lists the set of methods - supported by the resource identified by the Request-URI. 
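(Aside: a sketch of the `retry_after` setter above; the 120 seconds are illustrative.)

```python
# retry_after accepts a number of seconds (stored verbatim) or a
# datetime (rendered as an HTTP date).
from werkzeug.wrappers import Response

resp = Response(status=503)
resp.retry_after = 120
assert resp.headers["Retry-After"] == "120"
```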
The - purpose of this field is strictly to inform the recipient of - valid methods associated with the resource. An Allow header - field MUST be present in a 405 (Method Not Allowed) - response.""", - ) - - del _set_property diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/cors.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/cors.py deleted file mode 100644 index 790e50e..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/cors.py +++ /dev/null @@ -1,102 +0,0 @@ -from ..http import dump_header -from ..http import parse_set_header -from ..utils import environ_property -from ..utils import header_property - - -class CORSRequestMixin(object): - """A mixin for :class:`~werkzeug.wrappers.BaseRequest` subclasses - that adds descriptors for Cross Origin Resource Sharing (CORS) - headers. - - .. versionadded:: 1.0 - """ - - origin = environ_property( - "HTTP_ORIGIN", - doc=( - "The host that the request originated from. Set" - " :attr:`~CORSResponseMixin.access_control_allow_origin` on" - " the response to indicate which origins are allowed." - ), - ) - - access_control_request_headers = environ_property( - "HTTP_ACCESS_CONTROL_REQUEST_HEADERS", - load_func=parse_set_header, - doc=( - "Sent with a preflight request to indicate which headers" - " will be sent with the cross origin request. Set" - " :attr:`~CORSResponseMixin.access_control_allow_headers`" - " on the response to indicate which headers are allowed." - ), - ) - - access_control_request_method = environ_property( - "HTTP_ACCESS_CONTROL_REQUEST_METHOD", - doc=( - "Sent with a preflight request to indicate which method" - " will be used for the cross origin request. Set" - " :attr:`~CORSResponseMixin.access_control_allow_methods`" - " on the response to indicate which methods are allowed." - ), - ) - - -class CORSResponseMixin(object): - """A mixin for :class:`~werkzeug.wrappers.BaseResponse` subclasses - that adds descriptors for Cross Origin Resource Sharing (CORS) - headers. - - .. versionadded:: 1.0 - """ - - @property - def access_control_allow_credentials(self): - """Whether credentials can be shared by the browser to - JavaScript code. As part of the preflight request it indicates - whether credentials can be used on the cross origin request. 
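(Aside: a sketch of the CORS descriptors, assuming this werkzeug version mixes `CORSResponseMixin` into the default `Response`; the origin is illustrative.)

```python
# The CORS descriptors read and write Access-Control-* headers.
from werkzeug.wrappers import Response

resp = Response("ok")
resp.headers["Access-Control-Allow-Origin"] = "https://example.com"
resp.access_control_allow_credentials = True
assert "https://example.com" in resp.access_control_allow_origin
assert resp.headers["Access-Control-Allow-Credentials"] == "true"
```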
- """ - return "Access-Control-Allow-Credentials" in self.headers - - @access_control_allow_credentials.setter - def access_control_allow_credentials(self, value): - if value is True: - self.headers["Access-Control-Allow-Credentials"] = "true" - else: - self.headers.pop("Access-Control-Allow-Credentials", None) - - access_control_allow_headers = header_property( - "Access-Control-Allow-Headers", - load_func=parse_set_header, - dump_func=dump_header, - doc="Which headers can be sent with the cross origin request.", - ) - - access_control_allow_methods = header_property( - "Access-Control-Allow-Methods", - load_func=parse_set_header, - dump_func=dump_header, - doc="Which methods can be used for the cross origin request.", - ) - - access_control_allow_origin = header_property( - "Access-Control-Allow-Origin", - load_func=parse_set_header, - dump_func=dump_header, - doc="The origins that may make cross origin requests.", - ) - - access_control_expose_headers = header_property( - "Access-Control-Expose-Headers", - load_func=parse_set_header, - dump_func=dump_header, - doc="Which headers can be shared by the browser to JavaScript code.", - ) - - access_control_max_age = header_property( - "Access-Control-Max-Age", - load_func=int, - dump_func=str, - doc="The maximum age in seconds the access control settings can be cached for.", - ) diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/etag.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/etag.py deleted file mode 100644 index 460629b..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/etag.py +++ /dev/null @@ -1,304 +0,0 @@ -from .._compat import string_types -from .._internal import _get_environ -from ..datastructures import ContentRange -from ..datastructures import RequestCacheControl -from ..datastructures import ResponseCacheControl -from ..http import generate_etag -from ..http import http_date -from ..http import is_resource_modified -from ..http import parse_cache_control_header -from ..http import parse_content_range_header -from ..http import parse_date -from ..http import parse_etags -from ..http import parse_if_range_header -from ..http import parse_range_header -from ..http import quote_etag -from ..http import unquote_etag -from ..utils import cached_property -from ..utils import header_property -from ..wrappers.base_response import _clean_accept_ranges -from ..wsgi import _RangeWrapper - - -class ETagRequestMixin(object): - """Add entity tag and cache descriptors to a request object or object with - a WSGI environment available as :attr:`~BaseRequest.environ`. This not - only provides access to etags but also to the cache control header. - """ - - @cached_property - def cache_control(self): - """A :class:`~werkzeug.datastructures.RequestCacheControl` object - for the incoming cache control headers. - """ - cache_control = self.environ.get("HTTP_CACHE_CONTROL") - return parse_cache_control_header(cache_control, None, RequestCacheControl) - - @cached_property - def if_match(self): - """An object containing all the etags in the `If-Match` header. - - :rtype: :class:`~werkzeug.datastructures.ETags` - """ - return parse_etags(self.environ.get("HTTP_IF_MATCH")) - - @cached_property - def if_none_match(self): - """An object containing all the etags in the `If-None-Match` header. 
- - :rtype: :class:`~werkzeug.datastructures.ETags` - """ - return parse_etags(self.environ.get("HTTP_IF_NONE_MATCH")) - - @cached_property - def if_modified_since(self): - """The parsed `If-Modified-Since` header as datetime object.""" - return parse_date(self.environ.get("HTTP_IF_MODIFIED_SINCE")) - - @cached_property - def if_unmodified_since(self): - """The parsed `If-Unmodified-Since` header as datetime object.""" - return parse_date(self.environ.get("HTTP_IF_UNMODIFIED_SINCE")) - - @cached_property - def if_range(self): - """The parsed `If-Range` header. - - .. versionadded:: 0.7 - - :rtype: :class:`~werkzeug.datastructures.IfRange` - """ - return parse_if_range_header(self.environ.get("HTTP_IF_RANGE")) - - @cached_property - def range(self): - """The parsed `Range` header. - - .. versionadded:: 0.7 - - :rtype: :class:`~werkzeug.datastructures.Range` - """ - return parse_range_header(self.environ.get("HTTP_RANGE")) - - -class ETagResponseMixin(object): - """Adds extra functionality to a response object for etag and cache - handling. This mixin requires an object with at least a `headers` - object that implements a dict like interface similar to - :class:`~werkzeug.datastructures.Headers`. - - If you want the :meth:`freeze` method to automatically add an etag, you - have to mixin this method before the response base class. The default - response class does not do that. - """ - - @property - def cache_control(self): - """The Cache-Control general-header field is used to specify - directives that MUST be obeyed by all caching mechanisms along the - request/response chain. - """ - - def on_update(cache_control): - if not cache_control and "cache-control" in self.headers: - del self.headers["cache-control"] - elif cache_control: - self.headers["Cache-Control"] = cache_control.to_header() - - return parse_cache_control_header( - self.headers.get("cache-control"), on_update, ResponseCacheControl - ) - - def _wrap_response(self, start, length): - """Wrap existing Response in case of Range Request context.""" - if self.status_code == 206: - self.response = _RangeWrapper(self.response, start, length) - - def _is_range_request_processable(self, environ): - """Return ``True`` if `Range` header is present and if underlying - resource is considered unchanged when compared with `If-Range` header. - """ - return ( - "HTTP_IF_RANGE" not in environ - or not is_resource_modified( - environ, - self.headers.get("etag"), - None, - self.headers.get("last-modified"), - ignore_if_range=False, - ) - ) and "HTTP_RANGE" in environ - - def _process_range_request(self, environ, complete_length=None, accept_ranges=None): - """Handle Range Request related headers (RFC7233). If `Accept-Ranges` - header is valid, and Range Request is processable, we set the headers - as described by the RFC, and wrap the underlying response in a - RangeWrapper. - - Returns ``True`` if Range Request can be fulfilled, ``False`` otherwise. - - :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable` - if `Range` header could not be parsed or satisfied. 
- """ - from ..exceptions import RequestedRangeNotSatisfiable - - if ( - accept_ranges is None - or complete_length is None - or not self._is_range_request_processable(environ) - ): - return False - - parsed_range = parse_range_header(environ.get("HTTP_RANGE")) - - if parsed_range is None: - raise RequestedRangeNotSatisfiable(complete_length) - - range_tuple = parsed_range.range_for_length(complete_length) - content_range_header = parsed_range.to_content_range_header(complete_length) - - if range_tuple is None or content_range_header is None: - raise RequestedRangeNotSatisfiable(complete_length) - - content_length = range_tuple[1] - range_tuple[0] - self.headers["Content-Length"] = content_length - self.headers["Accept-Ranges"] = accept_ranges - self.content_range = content_range_header - self.status_code = 206 - self._wrap_response(range_tuple[0], content_length) - return True - - def make_conditional( - self, request_or_environ, accept_ranges=False, complete_length=None - ): - """Make the response conditional to the request. This method works - best if an etag was defined for the response already. The `add_etag` - method can be used to do that. If called without etag just the date - header is set. - - This does nothing if the request method in the request or environ is - anything but GET or HEAD. - - For optimal performance when handling range requests, it's recommended - that your response data object implements `seekable`, `seek` and `tell` - methods as described by :py:class:`io.IOBase`. Objects returned by - :meth:`~werkzeug.wsgi.wrap_file` automatically implement those methods. - - It does not remove the body of the response because that's something - the :meth:`__call__` function does for us automatically. - - Returns self so that you can do ``return resp.make_conditional(req)`` - but modifies the object in-place. - - :param request_or_environ: a request object or WSGI environment to be - used to make the response conditional - against. - :param accept_ranges: This parameter dictates the value of - `Accept-Ranges` header. If ``False`` (default), - the header is not set. If ``True``, it will be set - to ``"bytes"``. If ``None``, it will be set to - ``"none"``. If it's a string, it will use this - value. - :param complete_length: Will be used only in valid Range Requests. - It will set `Content-Range` complete length - value and compute `Content-Length` real value. - This parameter is mandatory for successful - Range Requests completion. - :raises: :class:`~werkzeug.exceptions.RequestedRangeNotSatisfiable` - if `Range` header could not be parsed or satisfied. - """ - environ = _get_environ(request_or_environ) - if environ["REQUEST_METHOD"] in ("GET", "HEAD"): - # if the date is not in the headers, add it now. We however - # will not override an already existing header. Unfortunately - # this header will be overriden by many WSGI servers including - # wsgiref. 
- if "date" not in self.headers: - self.headers["Date"] = http_date() - accept_ranges = _clean_accept_ranges(accept_ranges) - is206 = self._process_range_request(environ, complete_length, accept_ranges) - if not is206 and not is_resource_modified( - environ, - self.headers.get("etag"), - None, - self.headers.get("last-modified"), - ): - if parse_etags(environ.get("HTTP_IF_MATCH")): - self.status_code = 412 - else: - self.status_code = 304 - if ( - self.automatically_set_content_length - and "content-length" not in self.headers - ): - length = self.calculate_content_length() - if length is not None: - self.headers["Content-Length"] = length - return self - - def add_etag(self, overwrite=False, weak=False): - """Add an etag for the current response if there is none yet.""" - if overwrite or "etag" not in self.headers: - self.set_etag(generate_etag(self.get_data()), weak) - - def set_etag(self, etag, weak=False): - """Set the etag, and override the old one if there was one.""" - self.headers["ETag"] = quote_etag(etag, weak) - - def get_etag(self): - """Return a tuple in the form ``(etag, is_weak)``. If there is no - ETag the return value is ``(None, None)``. - """ - return unquote_etag(self.headers.get("ETag")) - - def freeze(self, no_etag=False): - """Call this method if you want to make your response object ready for - pickeling. This buffers the generator if there is one. This also - sets the etag unless `no_etag` is set to `True`. - """ - if not no_etag: - self.add_etag() - super(ETagResponseMixin, self).freeze() - - accept_ranges = header_property( - "Accept-Ranges", - doc="""The `Accept-Ranges` header. Even though the name would - indicate that multiple values are supported, it must be one - string token only. - - The values ``'bytes'`` and ``'none'`` are common. - - .. versionadded:: 0.7""", - ) - - @property - def content_range(self): - """The ``Content-Range`` header as a - :class:`~werkzeug.datastructures.ContentRange` object. Available - even if the header is not set. - - .. versionadded:: 0.7 - """ - - def on_update(rng): - if not rng: - del self.headers["content-range"] - else: - self.headers["Content-Range"] = rng.to_header() - - rv = parse_content_range_header(self.headers.get("content-range"), on_update) - # always provide a content range object to make the descriptor - # more user friendly. It provides an unset() method that can be - # used to remove the header quickly. 
- if rv is None: - rv = ContentRange(None, None, None, on_update=on_update) - return rv - - @content_range.setter - def content_range(self, value): - if not value: - del self.headers["content-range"] - elif isinstance(value, string_types): - self.headers["Content-Range"] = value - else: - self.headers["Content-Range"] = value.to_header() diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/json.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/json.py deleted file mode 100644 index 6d5dc33..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/json.py +++ /dev/null @@ -1,145 +0,0 @@ -from __future__ import absolute_import - -import datetime -import uuid - -from .._compat import text_type -from ..exceptions import BadRequest -from ..utils import detect_utf_encoding - -try: - import simplejson as _json -except ImportError: - import json as _json - - -class _JSONModule(object): - @staticmethod - def _default(o): - if isinstance(o, datetime.date): - return o.isoformat() - - if isinstance(o, uuid.UUID): - return str(o) - - if hasattr(o, "__html__"): - return text_type(o.__html__()) - - raise TypeError() - - @classmethod - def dumps(cls, obj, **kw): - kw.setdefault("separators", (",", ":")) - kw.setdefault("default", cls._default) - kw.setdefault("sort_keys", True) - return _json.dumps(obj, **kw) - - @staticmethod - def loads(s, **kw): - if isinstance(s, bytes): - # Needed for Python < 3.6 - encoding = detect_utf_encoding(s) - s = s.decode(encoding) - - return _json.loads(s, **kw) - - -class JSONMixin(object): - """Mixin to parse :attr:`data` as JSON. Can be mixed in for both - :class:`~werkzeug.wrappers.Request` and - :class:`~werkzeug.wrappers.Response` classes. - - If `simplejson`_ is installed it is preferred over Python's built-in - :mod:`json` module. - - .. _simplejson: https://simplejson.readthedocs.io/en/latest/ - """ - - #: A module or other object that has ``dumps`` and ``loads`` - #: functions that match the API of the built-in :mod:`json` module. - json_module = _JSONModule - - @property - def json(self): - """The parsed JSON data if :attr:`mimetype` indicates JSON - (:mimetype:`application/json`, see :meth:`is_json`). - - Calls :meth:`get_json` with default arguments. - """ - return self.get_json() - - @property - def is_json(self): - """Check if the mimetype indicates JSON data, either - :mimetype:`application/json` or :mimetype:`application/*+json`. - """ - mt = self.mimetype - return ( - mt == "application/json" - or mt.startswith("application/") - and mt.endswith("+json") - ) - - def _get_data_for_json(self, cache): - try: - return self.get_data(cache=cache) - except TypeError: - # Response doesn't have cache param. - return self.get_data() - - # Cached values for ``(silent=False, silent=True)``. Initialized - # with sentinel values. - _cached_json = (Ellipsis, Ellipsis) - - def get_json(self, force=False, silent=False, cache=True): - """Parse :attr:`data` as JSON. - - If the mimetype does not indicate JSON - (:mimetype:`application/json`, see :meth:`is_json`), this - returns ``None``. - - If parsing fails, :meth:`on_json_loading_failed` is called and - its return value is used as the return value. - - :param force: Ignore the mimetype and always try to parse JSON. - :param silent: Silence parsing errors and return ``None`` - instead. - :param cache: Store the parsed JSON to return for subsequent - calls. 
- """ - if cache and self._cached_json[silent] is not Ellipsis: - return self._cached_json[silent] - - if not (force or self.is_json): - return None - - data = self._get_data_for_json(cache=cache) - - try: - rv = self.json_module.loads(data) - except ValueError as e: - if silent: - rv = None - - if cache: - normal_rv, _ = self._cached_json - self._cached_json = (normal_rv, rv) - else: - rv = self.on_json_loading_failed(e) - - if cache: - _, silent_rv = self._cached_json - self._cached_json = (rv, silent_rv) - else: - if cache: - self._cached_json = (rv, rv) - - return rv - - def on_json_loading_failed(self, e): - """Called if :meth:`get_json` parsing fails and isn't silenced. - If this method returns a value, it is used as the return value - for :meth:`get_json`. The default implementation raises - :exc:`~werkzeug.exceptions.BadRequest`. - """ - raise BadRequest("Failed to decode JSON object: {0}".format(e)) diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/request.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/request.py deleted file mode 100644 index 5c2fe10..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/request.py +++ /dev/null @@ -1,49 +0,0 @@ -from .accept import AcceptMixin -from .auth import AuthorizationMixin -from .base_request import BaseRequest -from .common_descriptors import CommonRequestDescriptorsMixin -from .cors import CORSRequestMixin -from .etag import ETagRequestMixin -from .user_agent import UserAgentMixin - - -class Request( - BaseRequest, - AcceptMixin, - ETagRequestMixin, - UserAgentMixin, - AuthorizationMixin, - CORSRequestMixin, - CommonRequestDescriptorsMixin, -): - """Full featured request object implementing the following mixins: - - - :class:`AcceptMixin` for accept header parsing - - :class:`ETagRequestMixin` for etag and cache control handling - - :class:`UserAgentMixin` for user agent introspection - - :class:`AuthorizationMixin` for http auth handling - - :class:`~werkzeug.wrappers.cors.CORSRequestMixin` for Cross - Origin Resource Sharing headers - - :class:`CommonRequestDescriptorsMixin` for common headers - - """ - - -class StreamOnlyMixin(object): - """If mixed in before the request object this will change the behavior - of it to disable handling of form parsing. This disables the - :attr:`files`, :attr:`form` attributes and will just provide a - :attr:`stream` attribute that however is always available. - - .. versionadded:: 0.9 - """ - - disable_data_descriptor = True - want_form_data_parsed = False - - -class PlainRequest(StreamOnlyMixin, Request): - """A request object without special form parsing capabilities. - - .. versionadded:: 0.9 - """ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/response.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/response.py deleted file mode 100644 index 8f190f7..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/response.py +++ /dev/null @@ -1,84 +0,0 @@ -from ..utils import cached_property -from .auth import WWWAuthenticateMixin -from .base_response import BaseResponse -from .common_descriptors import CommonResponseDescriptorsMixin -from .cors import CORSResponseMixin -from .etag import ETagResponseMixin - - -class ResponseStream(object): - """A file descriptor like object used by the :class:`ResponseStreamMixin` to - represent the body of the stream. It directly pushes into the response - iterable of the response object. 
- """ - - mode = "wb+" - - def __init__(self, response): - self.response = response - self.closed = False - - def write(self, value): - if self.closed: - raise ValueError("I/O operation on closed file") - self.response._ensure_sequence(mutable=True) - self.response.response.append(value) - self.response.headers.pop("Content-Length", None) - return len(value) - - def writelines(self, seq): - for item in seq: - self.write(item) - - def close(self): - self.closed = True - - def flush(self): - if self.closed: - raise ValueError("I/O operation on closed file") - - def isatty(self): - if self.closed: - raise ValueError("I/O operation on closed file") - return False - - def tell(self): - self.response._ensure_sequence() - return sum(map(len, self.response.response)) - - @property - def encoding(self): - return self.response.charset - - -class ResponseStreamMixin(object): - """Mixin for :class:`BaseResponse` subclasses. Classes that inherit from - this mixin will automatically get a :attr:`stream` property that provides - a write-only interface to the response iterable. - """ - - @cached_property - def stream(self): - """The response iterable as write-only stream.""" - return ResponseStream(self) - - -class Response( - BaseResponse, - ETagResponseMixin, - WWWAuthenticateMixin, - CORSResponseMixin, - ResponseStreamMixin, - CommonResponseDescriptorsMixin, -): - """Full featured response object implementing the following mixins: - - - :class:`ETagResponseMixin` for etag and cache control handling - - :class:`WWWAuthenticateMixin` for HTTP authentication support - - :class:`~werkzeug.wrappers.cors.CORSResponseMixin` for Cross - Origin Resource Sharing headers - - :class:`ResponseStreamMixin` to add support for the ``stream`` - property - - :class:`CommonResponseDescriptorsMixin` for various HTTP - descriptors - """ diff --git a/venv/lib/python3.7/site-packages/werkzeug/wrappers/user_agent.py b/venv/lib/python3.7/site-packages/werkzeug/wrappers/user_agent.py deleted file mode 100644 index a32d8ac..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wrappers/user_agent.py +++ /dev/null @@ -1,14 +0,0 @@ -from ..useragents import UserAgent -from ..utils import cached_property - - -class UserAgentMixin(object): - """Adds a `user_agent` attribute to the request object which - contains the parsed user agent of the browser that triggered the - request as a :class:`~werkzeug.useragents.UserAgent` object. - """ - - @cached_property - def user_agent(self): - """The current user agent.""" - return UserAgent(self.environ) diff --git a/venv/lib/python3.7/site-packages/werkzeug/wsgi.py b/venv/lib/python3.7/site-packages/werkzeug/wsgi.py deleted file mode 100644 index aa4e713..0000000 --- a/venv/lib/python3.7/site-packages/werkzeug/wsgi.py +++ /dev/null @@ -1,1000 +0,0 @@ -# -*- coding: utf-8 -*- -""" - werkzeug.wsgi - ~~~~~~~~~~~~~ - - This module implements WSGI related helpers. 
- - :copyright: 2007 Pallets - :license: BSD-3-Clause - """ - import io - import re - from functools import partial - from functools import update_wrapper - from itertools import chain - - from ._compat import BytesIO - from ._compat import implements_iterator - from ._compat import make_literal_wrapper - from ._compat import string_types - from ._compat import text_type - from ._compat import to_bytes - from ._compat import to_unicode - from ._compat import try_coerce_native - from ._compat import wsgi_get_bytes - from ._internal import _encode_idna - from .urls import uri_to_iri - from .urls import url_join - from .urls import url_parse - from .urls import url_quote - - - def responder(f): - """Marks a function as responder. Decorate a function with it and it - will automatically call the return value as WSGI application. - - Example:: - - @responder - def application(environ, start_response): - return Response('Hello World!') - """ - return update_wrapper(lambda *a: f(*a)(*a[-2:]), f) - - - def get_current_url( - environ, - root_only=False, - strip_querystring=False, - host_only=False, - trusted_hosts=None, - ): - """A handy helper function that recreates the full URL as IRI for the - current request or parts of it. Here's an example: - - >>> from werkzeug.test import create_environ - >>> env = create_environ("/?param=foo", "http://localhost/script") - >>> get_current_url(env) - 'http://localhost/script/?param=foo' - >>> get_current_url(env, root_only=True) - 'http://localhost/script/' - >>> get_current_url(env, host_only=True) - 'http://localhost/' - >>> get_current_url(env, strip_querystring=True) - 'http://localhost/script/' - - This optionally verifies that the host is in a list of trusted hosts. - If the host is not in there it will raise a - :exc:`~werkzeug.exceptions.SecurityError`. - - Note that the string returned might contain unicode characters as the - representation is an IRI, not a URI. If you need an ASCII-only - representation you can use the :func:`~werkzeug.urls.iri_to_uri` - function: - - >>> from werkzeug.urls import iri_to_uri - >>> iri_to_uri(get_current_url(env)) - 'http://localhost/script/?param=foo' - - :param environ: the WSGI environment to get the current URL from. - :param root_only: set to `True` if you only want the root URL. - :param strip_querystring: set to `True` if you don't want the querystring. - :param host_only: set to `True` if the host URL should be returned. - :param trusted_hosts: a list of trusted hosts, see :func:`host_is_trusted` - for more information. - """ - tmp = [environ["wsgi.url_scheme"], "://", get_host(environ, trusted_hosts)] - cat = tmp.append - if host_only: - return uri_to_iri("".join(tmp) + "/") - cat(url_quote(wsgi_get_bytes(environ.get("SCRIPT_NAME", ""))).rstrip("/")) - cat("/") - if not root_only: - cat(url_quote(wsgi_get_bytes(environ.get("PATH_INFO", "")).lstrip(b"/"))) - if not strip_querystring: - qs = get_query_string(environ) - if qs: - cat("?" + qs) - return uri_to_iri("".join(tmp)) - - - def host_is_trusted(hostname, trusted_list): - """Checks if a host is trusted against a list. This also takes care - of port normalization. - - .. versionadded:: 0.9 - - :param hostname: the hostname to check - :param trusted_list: a list of hostnames to check against. If a - hostname starts with a dot it will match against - all subdomains as well.
- """ - if not hostname: - return False - - if isinstance(trusted_list, string_types): - trusted_list = [trusted_list] - - def _normalize(hostname): - if ":" in hostname: - hostname = hostname.rsplit(":", 1)[0] - return _encode_idna(hostname) - - try: - hostname = _normalize(hostname) - except UnicodeError: - return False - for ref in trusted_list: - if ref.startswith("."): - ref = ref[1:] - suffix_match = True - else: - suffix_match = False - try: - ref = _normalize(ref) - except UnicodeError: - return False - if ref == hostname: - return True - if suffix_match and hostname.endswith(b"." + ref): - return True - return False - - -def get_host(environ, trusted_hosts=None): - """Return the host for the given WSGI environment. This first checks - the ``Host`` header. If it's not present, then ``SERVER_NAME`` and - ``SERVER_PORT`` are used. The host will only contain the port if it - is different than the standard port for the protocol. - - Optionally, verify that the host is trusted using - :func:`host_is_trusted` and raise a - :exc:`~werkzeug.exceptions.SecurityError` if it is not. - - :param environ: The WSGI environment to get the host from. - :param trusted_hosts: A list of trusted hosts. - :return: Host, with port if necessary. - :raise ~werkzeug.exceptions.SecurityError: If the host is not - trusted. - """ - if "HTTP_HOST" in environ: - rv = environ["HTTP_HOST"] - if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"): - rv = rv[:-3] - elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"): - rv = rv[:-4] - else: - rv = environ["SERVER_NAME"] - if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in ( - ("https", "443"), - ("http", "80"), - ): - rv += ":" + environ["SERVER_PORT"] - if trusted_hosts is not None: - if not host_is_trusted(rv, trusted_hosts): - from .exceptions import SecurityError - - raise SecurityError('Host "%s" is not trusted' % rv) - return rv - - -def get_content_length(environ): - """Returns the content length from the WSGI environment as - integer. If it's not available or chunked transfer encoding is used, - ``None`` is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environ to fetch the content length from. - """ - if environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked": - return None - - content_length = environ.get("CONTENT_LENGTH") - if content_length is not None: - try: - return max(0, int(content_length)) - except (ValueError, TypeError): - pass - - -def get_input_stream(environ, safe_fallback=True): - """Returns the input stream from the WSGI environment and wraps it - in the most sensible way possible. The stream returned is not the - raw WSGI stream in most cases but one that is safe to read from - without taking into account the content length. - - If content length is not set, the stream will be empty for safety reasons. - If the WSGI server supports chunked or infinite streams, it should set - the ``wsgi.input_terminated`` value in the WSGI environ to indicate that. - - .. versionadded:: 0.9 - - :param environ: the WSGI environ to fetch the stream from. - :param safe_fallback: use an empty stream as a safe fallback when the - content length is not set. Disabling this allows infinite streams, - which can be a denial-of-service risk. - """ - stream = environ["wsgi.input"] - content_length = get_content_length(environ) - - # A wsgi extension that tells us if the input is terminated. In - # that case we return the stream unchanged as we know we can safely - # read it until the end. 
- if environ.get("wsgi.input_terminated"): - return stream - - # If the request doesn't specify a content length, returning the stream is - # potentially dangerous because it could be infinite, malicious or not. If - # safe_fallback is true, return an empty stream instead for safety. - if content_length is None: - return BytesIO() if safe_fallback else stream - - # Otherwise limit the stream to the content length - return LimitedStream(stream, content_length) - - -def get_query_string(environ): - """Returns the `QUERY_STRING` from the WSGI environment. This also takes - care about the WSGI decoding dance on Python 3 environments as a - native string. The string returned will be restricted to ASCII - characters. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the query string from. - """ - qs = wsgi_get_bytes(environ.get("QUERY_STRING", "")) - # QUERY_STRING really should be ascii safe but some browsers - # will send us some unicode stuff (I am looking at you IE). - # In that case we want to urllib quote it badly. - return try_coerce_native(url_quote(qs, safe=":&%=+$!*'(),")) - - -def get_path_info(environ, charset="utf-8", errors="replace"): - """Returns the `PATH_INFO` from the WSGI environment and properly - decodes it. This also takes care about the WSGI decoding dance - on Python 3 environments. if the `charset` is set to `None` a - bytestring is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the path from. - :param charset: the charset for the path info, or `None` if no - decoding should be performed. - :param errors: the decoding error handling. - """ - path = wsgi_get_bytes(environ.get("PATH_INFO", "")) - return to_unicode(path, charset, errors, allow_none_charset=True) - - -def get_script_name(environ, charset="utf-8", errors="replace"): - """Returns the `SCRIPT_NAME` from the WSGI environment and properly - decodes it. This also takes care about the WSGI decoding dance - on Python 3 environments. if the `charset` is set to `None` a - bytestring is returned. - - .. versionadded:: 0.9 - - :param environ: the WSGI environment object to get the path from. - :param charset: the charset for the path, or `None` if no - decoding should be performed. - :param errors: the decoding error handling. - """ - path = wsgi_get_bytes(environ.get("SCRIPT_NAME", "")) - return to_unicode(path, charset, errors, allow_none_charset=True) - - -def pop_path_info(environ, charset="utf-8", errors="replace"): - """Removes and returns the next segment of `PATH_INFO`, pushing it onto - `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. - - If the `charset` is set to `None` a bytestring is returned. - - If there are empty segments (``'/foo//bar``) these are ignored but - properly pushed to the `SCRIPT_NAME`: - - >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} - >>> pop_path_info(env) - 'a' - >>> env['SCRIPT_NAME'] - '/foo/a' - >>> pop_path_info(env) - 'b' - >>> env['SCRIPT_NAME'] - '/foo/a/b' - - .. versionadded:: 0.5 - - .. versionchanged:: 0.9 - The path is now decoded and a charset and encoding - parameter can be provided. - - :param environ: the WSGI environment that is modified. 
- """ - path = environ.get("PATH_INFO") - if not path: - return None - - script_name = environ.get("SCRIPT_NAME", "") - - # shift multiple leading slashes over - old_path = path - path = path.lstrip("/") - if path != old_path: - script_name += "/" * (len(old_path) - len(path)) - - if "/" not in path: - environ["PATH_INFO"] = "" - environ["SCRIPT_NAME"] = script_name + path - rv = wsgi_get_bytes(path) - else: - segment, path = path.split("/", 1) - environ["PATH_INFO"] = "/" + path - environ["SCRIPT_NAME"] = script_name + segment - rv = wsgi_get_bytes(segment) - - return to_unicode(rv, charset, errors, allow_none_charset=True) - - -def peek_path_info(environ, charset="utf-8", errors="replace"): - """Returns the next segment on the `PATH_INFO` or `None` if there - is none. Works like :func:`pop_path_info` without modifying the - environment: - - >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} - >>> peek_path_info(env) - 'a' - >>> peek_path_info(env) - 'a' - - If the `charset` is set to `None` a bytestring is returned. - - .. versionadded:: 0.5 - - .. versionchanged:: 0.9 - The path is now decoded and a charset and encoding - parameter can be provided. - - :param environ: the WSGI environment that is checked. - """ - segments = environ.get("PATH_INFO", "").lstrip("/").split("/", 1) - if segments: - return to_unicode( - wsgi_get_bytes(segments[0]), charset, errors, allow_none_charset=True - ) - - -def extract_path_info( - environ_or_baseurl, - path_or_url, - charset="utf-8", - errors="werkzeug.url_quote", - collapse_http_schemes=True, -): - """Extracts the path info from the given URL (or WSGI environment) and - path. The path info returned is a unicode string, not a bytestring - suitable for a WSGI environment. The URLs might also be IRIs. - - If the path info could not be determined, `None` is returned. - - Some examples: - - >>> extract_path_info('http://example.com/app', '/app/hello') - u'/hello' - >>> extract_path_info('http://example.com/app', - ... 'https://example.com/app/hello') - u'/hello' - >>> extract_path_info('http://example.com/app', - ... 'https://example.com/app/hello', - ... collapse_http_schemes=False) is None - True - - Instead of providing a base URL you can also pass a WSGI environment. - - :param environ_or_baseurl: a WSGI environment dict, a base URL or - base IRI. This is the root of the - application. - :param path_or_url: an absolute path from the server root, a - relative path (in which case it's the path info) - or a full URL. Also accepts IRIs and unicode - parameters. - :param charset: the charset for byte data in URLs - :param errors: the error handling on decode - :param collapse_http_schemes: if set to `False` the algorithm does - not assume that http and https on the - same server point to the same - resource. - - .. versionchanged:: 0.15 - The ``errors`` parameter defaults to leaving invalid bytes - quoted instead of replacing them. - - .. 
versionadded:: 0.6 - """ - - def _normalize_netloc(scheme, netloc): - parts = netloc.split(u"@", 1)[-1].split(u":", 1) - if len(parts) == 2: - netloc, port = parts - if (scheme == u"http" and port == u"80") or ( - scheme == u"https" and port == u"443" - ): - port = None - else: - netloc = parts[0] - port = None - if port is not None: - netloc += u":" + port - return netloc - - # make sure whatever we are working on is a IRI and parse it - path = uri_to_iri(path_or_url, charset, errors) - if isinstance(environ_or_baseurl, dict): - environ_or_baseurl = get_current_url(environ_or_baseurl, root_only=True) - base_iri = uri_to_iri(environ_or_baseurl, charset, errors) - base_scheme, base_netloc, base_path = url_parse(base_iri)[:3] - cur_scheme, cur_netloc, cur_path, = url_parse(url_join(base_iri, path))[:3] - - # normalize the network location - base_netloc = _normalize_netloc(base_scheme, base_netloc) - cur_netloc = _normalize_netloc(cur_scheme, cur_netloc) - - # is that IRI even on a known HTTP scheme? - if collapse_http_schemes: - for scheme in base_scheme, cur_scheme: - if scheme not in (u"http", u"https"): - return None - else: - if not (base_scheme in (u"http", u"https") and base_scheme == cur_scheme): - return None - - # are the netlocs compatible? - if base_netloc != cur_netloc: - return None - - # are we below the application path? - base_path = base_path.rstrip(u"/") - if not cur_path.startswith(base_path): - return None - - return u"/" + cur_path[len(base_path) :].lstrip(u"/") - - -@implements_iterator -class ClosingIterator(object): - """The WSGI specification requires that all middlewares and gateways - respect the `close` callback of the iterable returned by the application. - Because it is useful to add another close action to a returned iterable - and adding a custom iterable is a boring task this class can be used for - that:: - - return ClosingIterator(app(environ, start_response), [cleanup_session, - cleanup_locals]) - - If there is just one close function it can be passed instead of the list. - - A closing iterator is not needed if the application uses response objects - and finishes the processing if the response is started:: - - try: - return response(environ, start_response) - finally: - cleanup_session() - cleanup_locals() - """ - - def __init__(self, iterable, callbacks=None): - iterator = iter(iterable) - self._next = partial(next, iterator) - if callbacks is None: - callbacks = [] - elif callable(callbacks): - callbacks = [callbacks] - else: - callbacks = list(callbacks) - iterable_close = getattr(iterable, "close", None) - if iterable_close: - callbacks.insert(0, iterable_close) - self._callbacks = callbacks - - def __iter__(self): - return self - - def __next__(self): - return self._next() - - def close(self): - for callback in self._callbacks: - callback() - - -def wrap_file(environ, file, buffer_size=8192): - """Wraps a file. This uses the WSGI server's file wrapper if available - or otherwise the generic :class:`FileWrapper`. - - .. versionadded:: 0.5 - - If the file wrapper from the WSGI server is used it's important to not - iterate over it from inside the application but to pass it through - unchanged. If you want to pass out a file wrapper inside a response - object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`. - - More information about file wrappers are available in :pep:`333`. - - :param file: a :class:`file`-like object with a :meth:`~file.read` method. - :param buffer_size: number of bytes for one iteration. 
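The `ClosingIterator` docstring above describes the standard middleware cleanup pattern; spelled out, it looks roughly like this (the callback name is illustrative):

```python
# Hedged sketch: middleware that runs cleanup after the body is consumed.
from werkzeug.wsgi import ClosingIterator

def session_middleware(app):
    def wrapped(environ, start_response):
        def cleanup_session():
            pass  # release per-request resources here
        # a single callback may be passed instead of a list
        return ClosingIterator(app(environ, start_response), cleanup_session)
    return wrapped
```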
- """ - return environ.get("wsgi.file_wrapper", FileWrapper)(file, buffer_size) - - -@implements_iterator -class FileWrapper(object): - """This class can be used to convert a :class:`file`-like object into - an iterable. It yields `buffer_size` blocks until the file is fully - read. - - You should not use this class directly but rather use the - :func:`wrap_file` function that uses the WSGI server's file wrapper - support if it's available. - - .. versionadded:: 0.5 - - If you're using this object together with a :class:`BaseResponse` you have - to use the `direct_passthrough` mode. - - :param file: a :class:`file`-like object with a :meth:`~file.read` method. - :param buffer_size: number of bytes for one iteration. - """ - - def __init__(self, file, buffer_size=8192): - self.file = file - self.buffer_size = buffer_size - - def close(self): - if hasattr(self.file, "close"): - self.file.close() - - def seekable(self): - if hasattr(self.file, "seekable"): - return self.file.seekable() - if hasattr(self.file, "seek"): - return True - return False - - def seek(self, *args): - if hasattr(self.file, "seek"): - self.file.seek(*args) - - def tell(self): - if hasattr(self.file, "tell"): - return self.file.tell() - return None - - def __iter__(self): - return self - - def __next__(self): - data = self.file.read(self.buffer_size) - if data: - return data - raise StopIteration() - - -@implements_iterator -class _RangeWrapper(object): - # private for now, but should we make it public in the future ? - - """This class can be used to convert an iterable object into - an iterable that will only yield a piece of the underlying content. - It yields blocks until the underlying stream range is fully read. - The yielded blocks will have a size that can't exceed the original - iterator defined block size, but that can be smaller. - - If you're using this object together with a :class:`BaseResponse` you have - to use the `direct_passthrough` mode. - - :param iterable: an iterable object with a :meth:`__next__` method. - :param start_byte: byte from which read will start. - :param byte_range: how many bytes to read. 
- """ - - def __init__(self, iterable, start_byte=0, byte_range=None): - self.iterable = iter(iterable) - self.byte_range = byte_range - self.start_byte = start_byte - self.end_byte = None - if byte_range is not None: - self.end_byte = self.start_byte + self.byte_range - self.read_length = 0 - self.seekable = hasattr(iterable, "seekable") and iterable.seekable() - self.end_reached = False - - def __iter__(self): - return self - - def _next_chunk(self): - try: - chunk = next(self.iterable) - self.read_length += len(chunk) - return chunk - except StopIteration: - self.end_reached = True - raise - - def _first_iteration(self): - chunk = None - if self.seekable: - self.iterable.seek(self.start_byte) - self.read_length = self.iterable.tell() - contextual_read_length = self.read_length - else: - while self.read_length <= self.start_byte: - chunk = self._next_chunk() - if chunk is not None: - chunk = chunk[self.start_byte - self.read_length :] - contextual_read_length = self.start_byte - return chunk, contextual_read_length - - def _next(self): - if self.end_reached: - raise StopIteration() - chunk = None - contextual_read_length = self.read_length - if self.read_length == 0: - chunk, contextual_read_length = self._first_iteration() - if chunk is None: - chunk = self._next_chunk() - if self.end_byte is not None and self.read_length >= self.end_byte: - self.end_reached = True - return chunk[: self.end_byte - contextual_read_length] - return chunk - - def __next__(self): - chunk = self._next() - if chunk: - return chunk - self.end_reached = True - raise StopIteration() - - def close(self): - if hasattr(self.iterable, "close"): - self.iterable.close() - - -def _make_chunk_iter(stream, limit, buffer_size): - """Helper for the line and chunk iter functions.""" - if isinstance(stream, (bytes, bytearray, text_type)): - raise TypeError( - "Passed a string or byte object instead of true iterator or stream." - ) - if not hasattr(stream, "read"): - for item in stream: - if item: - yield item - return - if not isinstance(stream, LimitedStream) and limit is not None: - stream = LimitedStream(stream, limit) - _read = stream.read - while 1: - item = _read(buffer_size) - if not item: - break - yield item - - -def make_line_iter(stream, limit=None, buffer_size=10 * 1024, cap_at_buffer=False): - """Safely iterates line-based over an input stream. If the input stream - is not a :class:`LimitedStream` the `limit` parameter is mandatory. - - This uses the stream's :meth:`~file.read` method internally as opposite - to the :meth:`~file.readline` method that is unsafe and can only be used - in violation of the WSGI specification. The same problem applies to the - `__iter__` function of the input stream which calls :meth:`~file.readline` - without arguments. - - If you need line-by-line processing it's strongly recommended to iterate - over the input stream using this helper function. - - .. versionchanged:: 0.8 - This function now ensures that the limit was reached. - - .. versionadded:: 0.9 - added support for iterators as input stream. - - .. versionadded:: 0.11.10 - added support for the `cap_at_buffer` parameter. - - :param stream: the stream or iterate to iterate over. - :param limit: the limit in bytes for the stream. (Usually - content length. Not necessary if the `stream` - is a :class:`LimitedStream`. - :param buffer_size: The optional buffer size. - :param cap_at_buffer: if this is set chunks are split if they are longer - than the buffer size. 
Internally this is implemented - that the buffer size might be exhausted by a factor - of two however. - """ - _iter = _make_chunk_iter(stream, limit, buffer_size) - - first_item = next(_iter, "") - if not first_item: - return - - s = make_literal_wrapper(first_item) - empty = s("") - cr = s("\r") - lf = s("\n") - crlf = s("\r\n") - - _iter = chain((first_item,), _iter) - - def _iter_basic_lines(): - _join = empty.join - buffer = [] - while 1: - new_data = next(_iter, "") - if not new_data: - break - new_buf = [] - buf_size = 0 - for item in chain(buffer, new_data.splitlines(True)): - new_buf.append(item) - buf_size += len(item) - if item and item[-1:] in crlf: - yield _join(new_buf) - new_buf = [] - elif cap_at_buffer and buf_size >= buffer_size: - rv = _join(new_buf) - while len(rv) >= buffer_size: - yield rv[:buffer_size] - rv = rv[buffer_size:] - new_buf = [rv] - buffer = new_buf - if buffer: - yield _join(buffer) - - # This hackery is necessary to merge 'foo\r' and '\n' into one item - # of 'foo\r\n' if we were unlucky and we hit a chunk boundary. - previous = empty - for item in _iter_basic_lines(): - if item == lf and previous[-1:] == cr: - previous += item - item = empty - if previous: - yield previous - previous = item - if previous: - yield previous - - -def make_chunk_iter( - stream, separator, limit=None, buffer_size=10 * 1024, cap_at_buffer=False -): - """Works like :func:`make_line_iter` but accepts a separator - which divides chunks. If you want newline based processing - you should use :func:`make_line_iter` instead as it - supports arbitrary newline markers. - - .. versionadded:: 0.8 - - .. versionadded:: 0.9 - added support for iterators as input stream. - - .. versionadded:: 0.11.10 - added support for the `cap_at_buffer` parameter. - - :param stream: the stream or iterate to iterate over. - :param separator: the separator that divides chunks. - :param limit: the limit in bytes for the stream. (Usually - content length. Not necessary if the `stream` - is otherwise already limited). - :param buffer_size: The optional buffer size. - :param cap_at_buffer: if this is set chunks are split if they are longer - than the buffer size. Internally this is implemented - that the buffer size might be exhausted by a factor - of two however. - """ - _iter = _make_chunk_iter(stream, limit, buffer_size) - - first_item = next(_iter, "") - if not first_item: - return - - _iter = chain((first_item,), _iter) - if isinstance(first_item, text_type): - separator = to_unicode(separator) - _split = re.compile(r"(%s)" % re.escape(separator)).split - _join = u"".join - else: - separator = to_bytes(separator) - _split = re.compile(b"(" + re.escape(separator) + b")").split - _join = b"".join - - buffer = [] - while 1: - new_data = next(_iter, "") - if not new_data: - break - chunks = _split(new_data) - new_buf = [] - buf_size = 0 - for item in chain(buffer, chunks): - if item == separator: - yield _join(new_buf) - new_buf = [] - buf_size = 0 - else: - buf_size += len(item) - new_buf.append(item) - - if cap_at_buffer and buf_size >= buffer_size: - rv = _join(new_buf) - while len(rv) >= buffer_size: - yield rv[:buffer_size] - rv = rv[buffer_size:] - new_buf = [rv] - buf_size = len(rv) - - buffer = new_buf - if buffer: - yield _join(buffer) - - -@implements_iterator -class LimitedStream(io.IOBase): - """Wraps a stream so that it doesn't read more than n bytes. 
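`make_line_iter` above is the WSGI-safe replacement for `readline()`-based loops, and the `limit` argument is what keeps it safe on raw input streams. A sketch:

```python
# Hedged sketch: WSGI-safe line iteration over the request body.
from werkzeug.wsgi import get_content_length, make_line_iter

def iter_request_lines(environ):
    limit = get_content_length(environ) or 0  # mandatory for raw streams
    for line in make_line_iter(environ["wsgi.input"], limit=limit):
        yield line
```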
If the - stream is exhausted and the caller tries to get more bytes from it - :func:`on_exhausted` is called which by default returns an empty - string. The return value of that function is forwarded - to the reader function. So if it returns an empty string - :meth:`read` will return an empty string as well. - - The limit however must never be higher than what the stream can - output. Otherwise :meth:`readlines` will try to read past the - limit. - - .. admonition:: Note on WSGI compliance - - calls to :meth:`readline` and :meth:`readlines` are not - WSGI compliant because they pass a size argument to the - readline methods. Unfortunately the WSGI PEP is not safely - implementable without a size argument to :meth:`readline` - because there is no EOF marker in the stream. As a result - of that the use of :meth:`readline` is discouraged. - - For the same reason iterating over the :class:`LimitedStream` - is not portable. It internally calls :meth:`readline`. - - We strongly suggest using :meth:`read` only or using - :func:`make_line_iter`, which safely iterates line-based - over a WSGI input stream. - - :param stream: the stream to wrap. - :param limit: the limit for the stream, must not be longer than - what the stream can provide if the stream does not - end with `EOF` (like `wsgi.input`) - """ - - def __init__(self, stream, limit): - self._read = stream.read - self._readline = stream.readline - self._pos = 0 - self.limit = limit - - def __iter__(self): - return self - - @property - def is_exhausted(self): - """If the stream is exhausted this attribute is `True`.""" - return self._pos >= self.limit - - def on_exhausted(self): - """This is called when the stream tries to read past the limit. - The return value of this function is returned from the reading - function. - """ - # Read null bytes from the stream so that we get the - # correct end of stream marker. - return self._read(0) - - def on_disconnect(self): - """What should happen if a disconnect is detected? The return - value of this function is returned from read functions in case - the client went away. By default a - :exc:`~werkzeug.exceptions.ClientDisconnected` exception is raised. - """ - from .exceptions import ClientDisconnected - - raise ClientDisconnected() - - def exhaust(self, chunk_size=1024 * 64): - """Exhaust the stream. This consumes all the data left until the - limit is reached. - - :param chunk_size: the size for a chunk. It will read the chunk - until the stream is exhausted and throw away - the results. - """ - to_read = self.limit - self._pos - chunk = chunk_size - while to_read > 0: - chunk = min(to_read, chunk) - self.read(chunk) - to_read -= chunk - - def read(self, size=None): - """Read `size` bytes or, if size is not provided, read everything. - - :param size: the number of bytes read.
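The read-past-the-limit behaviour documented above is easy to see in isolation:

```python
# Hedged sketch: LimitedStream semantics in miniature.
from io import BytesIO
from werkzeug.wsgi import LimitedStream

stream = LimitedStream(BytesIO(b"abcdefghij"), limit=4)
assert stream.read() == b"abcd"   # reads are capped at the limit
assert stream.is_exhausted
assert stream.read() == b""       # on_exhausted() result
```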
- """ - if self._pos >= self.limit: - return self.on_exhausted() - if size is None or size == -1: # -1 is for consistence with file - size = self.limit - to_read = min(self.limit - self._pos, size) - try: - read = self._read(to_read) - except (IOError, ValueError): - return self.on_disconnect() - if to_read and len(read) != to_read: - return self.on_disconnect() - self._pos += len(read) - return read - - def readline(self, size=None): - """Reads one line from the stream.""" - if self._pos >= self.limit: - return self.on_exhausted() - if size is None: - size = self.limit - self._pos - else: - size = min(size, self.limit - self._pos) - try: - line = self._readline(size) - except (ValueError, IOError): - return self.on_disconnect() - if size and not line: - return self.on_disconnect() - self._pos += len(line) - return line - - def readlines(self, size=None): - """Reads a file into a list of strings. It calls :meth:`readline` - until the file is read to the end. It does support the optional - `size` argument if the underlying stream supports it for - `readline`. - """ - last_pos = self._pos - result = [] - if size is not None: - end = min(self.limit, last_pos + size) - else: - end = self.limit - while 1: - if size is not None: - size -= last_pos - self._pos - if self._pos >= end: - break - result.append(self.readline(size)) - if size is not None: - last_pos = self._pos - return result - - def tell(self): - """Returns the position of the stream. - - .. versionadded:: 0.9 - """ - return self._pos - - def __next__(self): - line = self.readline() - if not line: - raise StopIteration() - return line - - def readable(self): - return True diff --git a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/DESCRIPTION.rst b/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/DESCRIPTION.rst deleted file mode 100644 index bb6bd75..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,18 +0,0 @@ -Decorator for flup's gzip compression WSGI middleware -===================================================== - -Usage example:: - - from wsgigzip import gzip - - @gzip() - def index(environ, start_response): - start_response('200 OK', [('Content-type', 'text/plain')]) - return ['Home Page'] - - if __name__ == '__main__': - from wsgiref.simple_server import make_server - http = make_server('', 8080, index) - http.serve_forever() - - diff --git a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/INSTALLER b/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/METADATA b/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/METADATA deleted file mode 100644 index 8bd171f..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/METADATA +++ /dev/null @@ -1,38 +0,0 @@ -Metadata-Version: 2.0 -Name: wsgigzip -Version: 0.1.4 -Summary: Decorator for flup's gzip compression WSGI middleware. -Home-page: https://bitbucket.org/lcrees/wsgigzip/ -Author: Lynn C. 
Rees -Author-email: lcrees@gmail.com -License: BSD -Keywords: WSGI middleware compression gzip flup decorator -Platform: UNKNOWN -Classifier: Development Status :: 4 - Beta -Classifier: Environment :: Web Environment -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 3 -Classifier: Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware -Requires: setuptools - -Decorator for flup's gzip compression WSGI middleware -===================================================== - -Usage example:: - - from wsgigzip import gzip - - @gzip() - def index(environ, start_response): - start_response('200 OK', [('Content-type', 'text/plain')]) - return ['Home Page'] - - if __name__ == '__main__': - from wsgiref.simple_server import make_server - http = make_server('', 8080, index) - http.serve_forever() - - diff --git a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/RECORD b/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/RECORD deleted file mode 100644 index 3f41d0e..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/RECORD +++ /dev/null @@ -1,11 +0,0 @@ -wsgigzip-0.1.4.dist-info/DESCRIPTION.rst,sha256=R-PcgZhdIoPzlvQcxBeHzNaUkT3lwVwSmFiqWGj5W4E,466 -wsgigzip-0.1.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -wsgigzip-0.1.4.dist-info/METADATA,sha256=Ah_GD4nu94UUN5W9teYsDMjkgo7a_Pjgubx2a8IErAo,1182 -wsgigzip-0.1.4.dist-info/RECORD,, -wsgigzip-0.1.4.dist-info/WHEEL,sha256=rNo05PbNqwnXiIHFsYm0m22u4Zm6YJtugFG2THx4w3g,92 -wsgigzip-0.1.4.dist-info/metadata.json,sha256=tzoxHfA7HJ19G4jENTm20GlB_t18vD6Efzq5jWiEbnc,874 -wsgigzip-0.1.4.dist-info/top_level.txt,sha256=9GgubjIgz-A6aAE-TTyj596IY6nXE6-A-7Nhw1Yioyo,9 -wsgigzip/__init__.py,sha256=vcBMo1vEvNx0UhY9lZTqH_EF6ORMoUo1aXH11Zsgamc,2293 -wsgigzip/__pycache__/__init__.cpython-37.pyc,, -wsgigzip/__pycache__/gzip.cpython-37.pyc,, -wsgigzip/gzip.py,sha256=CzAvSL2q2r7bPX6myUmRKybG91u_onbdvg9LrNLwS-E,10006 diff --git a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/WHEEL b/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/WHEEL deleted file mode 100644 index bb7f7db..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: py3-none-any - diff --git a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/metadata.json b/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/metadata.json deleted file mode 100644 index 48140b3..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Web Environment", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware"], "extensions": {"python.details": {"contacts": [{"email": "lcrees@gmail.com", "name": "Lynn C. 
Rees", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://bitbucket.org/lcrees/wsgigzip/"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["WSGI", "middleware", "compression", "gzip", "flup", "decorator"], "license": "BSD", "metadata_version": "2.0", "name": "wsgigzip", "requires": "setuptools", "summary": "Decorator for flup's gzip compression WSGI middleware.", "version": "0.1.4"} \ No newline at end of file diff --git a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/top_level.txt b/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/top_level.txt deleted file mode 100644 index ba4da26..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip-0.1.4.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -wsgigzip diff --git a/venv/lib/python3.7/site-packages/wsgigzip/__init__.py b/venv/lib/python3.7/site-packages/wsgigzip/__init__.py deleted file mode 100644 index 26080d4..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2006-2015 L. C. Rees. All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# 3. Neither the name of the Portable Site Information Project nor the names -# of its contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
- -'''Decorator for flup's gzip WSGI middleware.''' - -from wsgigzip.gzip import GzipMiddleware as GzipMiddleware - -text_types = ['text/xml', 'application/xml', 'application/xhtml+xml', - 'text/html', 'text/plain'] - -def gzip(mime_types=None, compress_level=9): - ''' - @param mime_types Mimetypes that middleware should compress (default: None) - @param compress_level Zlib compression level 0-9, 9 is max (default: 9) - ''' - if mime_types is None: - mime_types = ['text/xml', 'application/xml', 'application/xhtml+xml', - 'text/html', 'text/plain'] - def decorator(application): - return GzipMiddleware(application, mime_types, compress_level) - return decorator diff --git a/venv/lib/python3.7/site-packages/wsgigzip/__pycache__/__init__.cpython-37.pyc b/venv/lib/python3.7/site-packages/wsgigzip/__pycache__/__init__.cpython-37.pyc deleted file mode 100644 index d2a1bd5..0000000 Binary files a/venv/lib/python3.7/site-packages/wsgigzip/__pycache__/__init__.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/wsgigzip/__pycache__/gzip.cpython-37.pyc b/venv/lib/python3.7/site-packages/wsgigzip/__pycache__/gzip.cpython-37.pyc deleted file mode 100644 index 10819a5..0000000 Binary files a/venv/lib/python3.7/site-packages/wsgigzip/__pycache__/gzip.cpython-37.pyc and /dev/null differ diff --git a/venv/lib/python3.7/site-packages/wsgigzip/gzip.py b/venv/lib/python3.7/site-packages/wsgigzip/gzip.py deleted file mode 100644 index 3a4e12b..0000000 --- a/venv/lib/python3.7/site-packages/wsgigzip/gzip.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright (c) 2005 Allan Saddi -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -# SUCH DAMAGE. -# -# $Id: gzip.py 2188 2006-12-05 22:11:45Z asaddi $ - -"""WSGI response gzipper middleware. - -This gzip middleware component differentiates itself from others in that -it (hopefully) follows the spec more closely. Namely with regard to the -application iterator and buffering. (It doesn't buffer.) See -`Middleware Handling of Block Boundaries`_. - -Of course this all comes with a price... just LOOK at this mess! :) - -The inner workings of gzip and the gzip file format were gleaned from gzip.py. - -.. 
_Middleware Handling of Block Boundaries: http://www.python.org/dev/peps/pep-0333/#middleware-handling-of-block-boundaries -""" - -__author__ = 'Allan Saddi ' -__version__ = '$Revision: 2188 $' - -import struct -import time -import zlib -import re - -__all__ = ['GzipMiddleware'] - -try: - long(0) - P2 = True - def next(iter): - return iter.next() -except: - P2 = False - def long(value): - return int(value) - - -def _gzip_header(): - """Returns a gzip header (with no filename).""" - # See GzipFile._write_gzip_header in gzip.py - return b'\037\213' \ - b'\010' \ - b'\0' + \ - struct.pack(' 0: - self._size += length - self._crc = zlib.crc32(data, self._crc) - out += self._compress.compress(data) - return out - - def gzip_trailer(self): - """Returns the gzip trailer.""" - # See GzipFile.close in gzip.py - return self._compress.flush() + \ - struct.pack('